1 /*
2  *  linux/drivers/message/fusion/mptlan.c
3  *      IP Over Fibre Channel device driver.
4  *      For use with LSI Logic Fibre Channel PCI chip/adapters
5  *      running LSI Logic Fusion MPT (Message Passing Technology) firmware.
6  *
7  *  Copyright (c) 2000-2005 LSI Logic Corporation
8  *
9  */
10 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
11 /*
12     This program is free software; you can redistribute it and/or modify
13     it under the terms of the GNU General Public License as published by
14     the Free Software Foundation; version 2 of the License.
15
16     This program is distributed in the hope that it will be useful,
17     but WITHOUT ANY WARRANTY; without even the implied warranty of
18     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19     GNU General Public License for more details.
20
21     NO WARRANTY
22     THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
23     CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
24     LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
25     MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
26     solely responsible for determining the appropriateness of using and
27     distributing the Program and assumes all risks associated with its
28     exercise of rights under this Agreement, including but not limited to
29     the risks and costs of program errors, damage to or loss of data,
30     programs or equipment, and unavailability or interruption of operations.
31
32     DISCLAIMER OF LIABILITY
33     NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
34     DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35     DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
36     ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
37     TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
38     USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
39     HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
40
41     You should have received a copy of the GNU General Public License
42     along with this program; if not, write to the Free Software
43     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
44 */
45
46 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
47 /*
48  * Define statements used for debugging
49  */
50 //#define MPT_LAN_IO_DEBUG
51
52 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
53
54 #include "mptlan.h"
55 #include <linux/init.h>
56 #include <linux/module.h>
57 #include <linux/fs.h>
58
59 #define MYNAM           "mptlan"
60
61 MODULE_LICENSE("GPL");
62
63 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
64 /*
65  * MPT LAN message sizes without variable part.
66  */
67 #define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
68         (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
69
70 #define MPT_LAN_TRANSACTION32_SIZE \
71         (sizeof(SGETransaction32_t) - sizeof(u32))
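/*
 * These fixed sizes are used by mpt_lan_post_receive_buckets() to work out
 * how many buckets (one 32-bit-context transaction element plus one 64-bit
 * simple SGE each) fit into a single request frame.
 */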
72
73 /*
74  *  Fusion MPT LAN private structures
75  */
76
77 struct NAA_Hosed {
78         u16 NAA;
79         u8 ieee[FC_ALEN];
80         struct NAA_Hosed *next;
81 };
82
83 struct BufferControl {
84         struct sk_buff  *skb;
85         dma_addr_t      dma;
86         unsigned int    len;
87 };
88
89 struct mpt_lan_priv {
90         MPT_ADAPTER *mpt_dev;
91         u8 pnum; /* Port number in the IOC. This is not a Unix network port! */
92
93         atomic_t buckets_out;           /* number of unused buckets on IOC */
94         int bucketthresh;               /* Send more when this many left */
95
96         int *mpt_txfidx; /* Free Tx Context list */
97         int mpt_txfidx_tail;
98         spinlock_t txfidx_lock;
99
100         int *mpt_rxfidx; /* Free Rx Context list */
101         int mpt_rxfidx_tail;
102         spinlock_t rxfidx_lock;
103
104         struct BufferControl *RcvCtl;   /* Receive BufferControl structs */
105         struct BufferControl *SendCtl;  /* Send BufferControl structs */
106
107         int max_buckets_out;            /* Max buckets to send to IOC */
108         int tx_max_out;                 /* IOC's Tx queue len */
109
110         u32 total_posted;
111         u32 total_received;
112         struct net_device_stats stats;  /* Per device statistics */
113
114         struct work_struct post_buckets_task;
115         unsigned long post_buckets_active;
116 };
117
118 struct mpt_lan_ohdr {
119         u16     dtype;
120         u8      daddr[FC_ALEN];
121         u16     stype;
122         u8      saddr[FC_ALEN];
123 };
124
125 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
126
127 /*
128  *  Forward protos...
129  */
130 static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
131                        MPT_FRAME_HDR *reply);
132 static int  mpt_lan_open(struct net_device *dev);
133 static int  mpt_lan_reset(struct net_device *dev);
134 static int  mpt_lan_close(struct net_device *dev);
135 static void mpt_lan_post_receive_buckets(void *dev_id);
136 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
137                                            int priority);
138 static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
139 static int  mpt_lan_receive_post_reply(struct net_device *dev,
140                                        LANReceivePostReply_t *pRecvRep);
141 static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
142 static int  mpt_lan_send_reply(struct net_device *dev,
143                                LANSendReply_t *pSendRep);
144 static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
145 static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
146 static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
147                                          struct net_device *dev);
148
149 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
150 /*
151  *  Fusion MPT LAN private data
152  */
153 static int LanCtx = -1;
154
155 static u32 max_buckets_out = 127;
156 static u32 tx_max_out_p = 127 - 16;
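/* tx_max_out_p leaves 16 contexts of headroom below the 127-deep IOC Tx
 * queue on the 909 (see the comment in mpt_register_lan_device()).
 */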
157
158 #ifdef QLOGIC_NAA_WORKAROUND
159 static struct NAA_Hosed *mpt_bad_naa = NULL;
160 DEFINE_RWLOCK(bad_naa_lock);
161 #endif
162
163 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
164 /*
165  * Fusion MPT LAN external data
166  */
167 extern int mpt_lan_index;
168
169 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
170 /**
171  *      lan_reply - Handle all data sent from the hardware.
172  *      @ioc: Pointer to MPT_ADAPTER structure
173  *      @mf: Pointer to original MPT request frame (NULL if TurboReply)
174  *      @reply: Pointer to MPT reply frame
175  *
176  *      Returns 1 indicating original alloc'd request frame ptr
177  *      should be freed, or 0 if it shouldn't.
178  */
179 static int
180 lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
181 {
182         struct net_device *dev = ioc->netdev;
183         int FreeReqFrame = 0;
184
185         dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
186                   IOC_AND_NETDEV_NAMES_s_s(dev)));
187
188 //      dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
189 //                      mf, reply));
190
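        /* A NULL mf means this is a turbo reply: the 32-bit reply value
         * itself encodes the reply form and the buffer/bucket context,
         * decoded below via the GET_LAN_* macros.
         */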
191         if (mf == NULL) {
192                 u32 tmsg = CAST_PTR_TO_U32(reply);
193
194                 dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
195                                 IOC_AND_NETDEV_NAMES_s_s(dev),
196                                 tmsg));
197
198                 switch (GET_LAN_FORM(tmsg)) {
199
200                 // NOTE!  (Optimization) First case here is now caught in
201                 //  mptbase.c::mpt_interrupt() routine and callback here
202                 //  is now skipped for this case!
203 #if 0
204                 case LAN_REPLY_FORM_MESSAGE_CONTEXT:
205 //                      dioprintk((KERN_INFO MYNAM "/lan_reply: "
206 //                                "MessageContext turbo reply received\n"));
207                         FreeReqFrame = 1;
208                         break;
209 #endif
210
211                 case LAN_REPLY_FORM_SEND_SINGLE:
212 //                      dioprintk((MYNAM "/lan_reply: "
213 //                                "calling mpt_lan_send_reply (turbo)\n"));
214
215                         // Potential BUG here?
216                         //      FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
217                         //  If/when mpt_lan_send_turbo would return 1 here,
218                         //  calling routine (mptbase.c|mpt_interrupt)
219                         //  would Oops because mf has already been set
220                         //  to NULL.  So after return from this func,
221                         //  mpt_interrupt() will attempt to put (NULL) mf ptr
222                         //  item back onto its adapter FreeQ - Oops!:-(
223                         //  It's Ok, since mpt_lan_send_turbo() *currently*
224                         //  always returns 0, but..., just in case:
225
226                         (void) mpt_lan_send_turbo(dev, tmsg);
227                         FreeReqFrame = 0;
228
229                         break;
230
231                 case LAN_REPLY_FORM_RECEIVE_SINGLE:
232 //                      dioprintk((KERN_INFO MYNAM "@lan_reply: "
233 //                                "rcv-Turbo = %08x\n", tmsg));
234                         mpt_lan_receive_post_turbo(dev, tmsg);
235                         break;
236
237                 default:
238                         printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
239                                 "that I don't know what to do with\n");
240
241                         /* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
242
243                         break;
244                 }
245
246                 return FreeReqFrame;
247         }
248
249 //      msg = (u32 *) reply;
250 //      dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
251 //                le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
252 //                le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
253 //      dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
254 //                reply->u.hdr.Function));
255
256         switch (reply->u.hdr.Function) {
257
258         case MPI_FUNCTION_LAN_SEND:
259         {
260                 LANSendReply_t *pSendRep;
261
262                 pSendRep = (LANSendReply_t *) reply;
263                 FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
264                 break;
265         }
266
267         case MPI_FUNCTION_LAN_RECEIVE:
268         {
269                 LANReceivePostReply_t *pRecvRep;
270
271                 pRecvRep = (LANReceivePostReply_t *) reply;
272                 if (pRecvRep->NumberOfContexts) {
273                         mpt_lan_receive_post_reply(dev, pRecvRep);
274                         if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
275                                 FreeReqFrame = 1;
276                 } else
277                         dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
278                                   "ReceivePostReply received.\n"));
279                 break;
280         }
281
282         case MPI_FUNCTION_LAN_RESET:
283                 /* Just a default reply. Might want to check it to
284                  * make sure that everything went ok.
285                  */
286                 FreeReqFrame = 1;
287                 break;
288
289         case MPI_FUNCTION_EVENT_NOTIFICATION:
290         case MPI_FUNCTION_EVENT_ACK:
291                 /*  _EVENT_NOTIFICATION should NOT come down this path any more.
292                  *  Should be routed to mpt_lan_event_process(), but just in case...
293                  */
294                 FreeReqFrame = 1;
295                 break;
296
297         default:
298                 printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
299                         "reply that I don't know what to do with\n");
300
301                 /* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
302                 FreeReqFrame = 1;
303
304                 break;
305         }
306
307         return FreeReqFrame;
308 }
309
310 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
311 static int
312 mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
313 {
314         struct net_device *dev = ioc->netdev;
315         struct mpt_lan_priv *priv;
316
317         if (dev == NULL)
318                 return(1);
319         else
320                 priv = netdev_priv(dev);
321
322         dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
323                         reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
324                         reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
325
326         if (priv->mpt_rxfidx == NULL)
327                 return (1);
328
329         if (reset_phase == MPT_IOC_SETUP_RESET) {
330                 ;
331         } else if (reset_phase == MPT_IOC_PRE_RESET) {
332                 int i;
333                 unsigned long flags;
334
335                 netif_stop_queue(dev);
336
337                 dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
338
339                 atomic_set(&priv->buckets_out, 0);
340
341                 /* Reset Rx Free Tail index and re-populate the queue. */
342                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
343                 priv->mpt_rxfidx_tail = -1;
344                 for (i = 0; i < priv->max_buckets_out; i++)
345                         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
346                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
347         } else {
348                 mpt_lan_post_receive_buckets(dev);
349                 netif_wake_queue(dev);
350         }
351
352         return 1;
353 }
354
355 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
356 static int
357 mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
358 {
359         dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
360
361         switch (le32_to_cpu(pEvReply->Event)) {
362         case MPI_EVENT_NONE:                            /* 00 */
363         case MPI_EVENT_LOG_DATA:                        /* 01 */
364         case MPI_EVENT_STATE_CHANGE:                    /* 02 */
365         case MPI_EVENT_UNIT_ATTENTION:                  /* 03 */
366         case MPI_EVENT_IOC_BUS_RESET:                   /* 04 */
367         case MPI_EVENT_EXT_BUS_RESET:                   /* 05 */
368         case MPI_EVENT_RESCAN:                          /* 06 */
369                 /* Ok, do we need to do anything here? As far as
370                    I can tell, this is when a new device gets added
371                    to the loop. */
372         case MPI_EVENT_LINK_STATUS_CHANGE:              /* 07 */
373         case MPI_EVENT_LOOP_STATE_CHANGE:               /* 08 */
374         case MPI_EVENT_LOGOUT:                          /* 09 */
375         case MPI_EVENT_EVENT_CHANGE:                    /* 0A */
376         default:
377                 break;
378         }
379
380         /*
381          *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
382          *  Do NOT do it here now!
383          */
384
385         return 1;
386 }
387
388 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
389 static int
390 mpt_lan_open(struct net_device *dev)
391 {
392         struct mpt_lan_priv *priv = netdev_priv(dev);
393         int i;
394
395         if (mpt_lan_reset(dev) != 0) {
396                 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
397
398                 printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed. ");
399
400                 if (mpt_dev->active)
401                         printk ("The ioc is active. Perhaps it needs to be"
402                                 " reset?\n");
403                 else
404                         printk ("The ioc is inactive, most likely in the "
405                                 "process of being reset. Please try again in "
406                                 "a moment.\n");
407         }
408
409         priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
410         if (priv->mpt_txfidx == NULL)
411                 goto out;
412         priv->mpt_txfidx_tail = -1;
413
414         priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
415                                 GFP_KERNEL);
416         if (priv->SendCtl == NULL)
417                 goto out_mpt_txfidx;
418         for (i = 0; i < priv->tx_max_out; i++)
419                 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
420
421         dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
422
423         priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
424                                    GFP_KERNEL);
425         if (priv->mpt_rxfidx == NULL)
426                 goto out_SendCtl;
427         priv->mpt_rxfidx_tail = -1;
428
429         priv->RcvCtl = kcalloc(priv->max_buckets_out,
430                                sizeof(struct BufferControl),
431                                GFP_KERNEL);
432         if (priv->RcvCtl == NULL)
433                 goto out_mpt_rxfidx;
434         for (i = 0; i < priv->max_buckets_out; i++)
435                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
436
437 /**/    dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
438 /**/    for (i = 0; i < priv->tx_max_out; i++)
439 /**/            dlprintk((" %xh", priv->mpt_txfidx[i]));
440 /**/    dlprintk(("\n"));
441
442         dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
443
444         mpt_lan_post_receive_buckets(dev);
445         printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
446                         IOC_AND_NETDEV_NAMES_s_s(dev));
447
448         if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
449                 printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
450                         " Notifications. This is a bad thing! We're going to "
451                         "go ahead anyway, but I'd be leery of system stability "
452                         "at this point.\n");
453         }
454
455         netif_start_queue(dev);
456         dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
457
458         return 0;
459 out_mpt_rxfidx:
460         kfree(priv->mpt_rxfidx);
461         priv->mpt_rxfidx = NULL;
462 out_SendCtl:
463         kfree(priv->SendCtl);
464         priv->SendCtl = NULL;
465 out_mpt_txfidx:
466         kfree(priv->mpt_txfidx);
467         priv->mpt_txfidx = NULL;
468 out:    return -ENOMEM;
469 }
470
471 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
472 /* Send a LanReset message to the FW. This should result in the FW returning
473    any buckets it still has. */
474 static int
475 mpt_lan_reset(struct net_device *dev)
476 {
477         MPT_FRAME_HDR *mf;
478         LANResetRequest_t *pResetReq;
479         struct mpt_lan_priv *priv = netdev_priv(dev);
480
481         mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);
482
483         if (mf == NULL) {
484 /*              dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
485                 "Unable to allocate a request frame.\n"));
486 */
487                 return -1;
488         }
489
490         pResetReq = (LANResetRequest_t *) mf;
491
492         pResetReq->Function     = MPI_FUNCTION_LAN_RESET;
493         pResetReq->ChainOffset  = 0;
494         pResetReq->Reserved     = 0;
495         pResetReq->PortNumber   = priv->pnum;
496         pResetReq->MsgFlags     = 0;
497         pResetReq->Reserved2    = 0;
498
499         mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);
500
501         return 0;
502 }
503
504 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
505 static int
506 mpt_lan_close(struct net_device *dev)
507 {
508         struct mpt_lan_priv *priv = netdev_priv(dev);
509         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
510         unsigned long timeout;
511         int i;
512
513         dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));
514
515         mpt_event_deregister(LanCtx);
516
517         dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
518                   "since driver was loaded, %d still out\n",
519                   priv->total_posted,atomic_read(&priv->buckets_out)));
520
521         netif_stop_queue(dev);
522
523         mpt_lan_reset(dev);
524
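        /* Give the IOC up to two seconds to hand back any outstanding
         * buckets after the LanReset before tearing down the receive pool.
         */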
525         timeout = jiffies + 2 * HZ;
526         while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
527                 schedule_timeout_interruptible(1);
528
529         for (i = 0; i < priv->max_buckets_out; i++) {
530                 if (priv->RcvCtl[i].skb != NULL) {
531 /**/                    dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
532 /**/                              "is still out\n", i));
533                         pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
534                                          priv->RcvCtl[i].len,
535                                          PCI_DMA_FROMDEVICE);
536                         dev_kfree_skb(priv->RcvCtl[i].skb);
537                 }
538         }
539
540         kfree(priv->RcvCtl);
541         kfree(priv->mpt_rxfidx);
542
543         for (i = 0; i < priv->tx_max_out; i++) {
544                 if (priv->SendCtl[i].skb != NULL) {
545                         pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
546                                          priv->SendCtl[i].len,
547                                          PCI_DMA_TODEVICE);
548                         dev_kfree_skb(priv->SendCtl[i].skb);
549                 }
550         }
551
552         kfree(priv->SendCtl);
553         kfree(priv->mpt_txfidx);
554
555         atomic_set(&priv->buckets_out, 0);
556
557         printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
558                         IOC_AND_NETDEV_NAMES_s_s(dev));
559
560         return 0;
561 }
562
563 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
564 static struct net_device_stats *
565 mpt_lan_get_stats(struct net_device *dev)
566 {
567         struct mpt_lan_priv *priv = netdev_priv(dev);
568
569         return (struct net_device_stats *) &priv->stats;
570 }
571
572 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
573 static int
574 mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
575 {
576         if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
577                 return -EINVAL;
578         dev->mtu = new_mtu;
579         return 0;
580 }
581
582 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
583 /* Tx timeout handler. */
584 static void
585 mpt_lan_tx_timeout(struct net_device *dev)
586 {
587         struct mpt_lan_priv *priv = netdev_priv(dev);
588         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
589
590         if (mpt_dev->active) {
591                 dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
592                 netif_wake_queue(dev);
593         }
594 }
595
596 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
597 //static inline int
598 static int
599 mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
600 {
601         struct mpt_lan_priv *priv = netdev_priv(dev);
602         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
603         struct sk_buff *sent;
604         unsigned long flags;
605         u32 ctx;
606
607         ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
608         sent = priv->SendCtl[ctx].skb;
609
610         priv->stats.tx_packets++;
611         priv->stats.tx_bytes += sent->len;
612
613         dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
614                         IOC_AND_NETDEV_NAMES_s_s(dev),
615                         __FUNCTION__, sent));
616
617         priv->SendCtl[ctx].skb = NULL;
618         pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
619                          priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
620         dev_kfree_skb_irq(sent);
621
622         spin_lock_irqsave(&priv->txfidx_lock, flags);
623         priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
624         spin_unlock_irqrestore(&priv->txfidx_lock, flags);
625
626         netif_wake_queue(dev);
627         return 0;
628 }
629
630 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
631 static int
632 mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
633 {
634         struct mpt_lan_priv *priv = netdev_priv(dev);
635         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
636         struct sk_buff *sent;
637         unsigned long flags;
638         int FreeReqFrame = 0;
639         u32 *pContext;
640         u32 ctx;
641         u8 count;
642
643         count = pSendRep->NumberOfContexts;
644
645         dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
646                  le16_to_cpu(pSendRep->IOCStatus)));
647
648         /* Add check for Loginfo Flag in IOCStatus */
649
650         switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
651         case MPI_IOCSTATUS_SUCCESS:
652                 priv->stats.tx_packets += count;
653                 break;
654
655         case MPI_IOCSTATUS_LAN_CANCELED:
656         case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
657                 break;
658
659         case MPI_IOCSTATUS_INVALID_SGL:
660                 priv->stats.tx_errors += count;
661                 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
662                                 IOC_AND_NETDEV_NAMES_s_s(dev));
663                 goto out;
664
665         default:
666                 priv->stats.tx_errors += count;
667                 break;
668         }
669
670         pContext = &pSendRep->BufferContext;
671
672         spin_lock_irqsave(&priv->txfidx_lock, flags);
673         while (count > 0) {
674                 ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
675
676                 sent = priv->SendCtl[ctx].skb;
677                 priv->stats.tx_bytes += sent->len;
678
679                 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
680                                 IOC_AND_NETDEV_NAMES_s_s(dev),
681                                 __FUNCTION__, sent));
682
683                 priv->SendCtl[ctx].skb = NULL;
684                 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
685                                  priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
686                 dev_kfree_skb_irq(sent);
687
688                 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
689
690                 pContext++;
691                 count--;
692         }
693         spin_unlock_irqrestore(&priv->txfidx_lock, flags);
694
695 out:
696         if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
697                 FreeReqFrame = 1;
698
699         netif_wake_queue(dev);
700         return FreeReqFrame;
701 }
702
703 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
704 static int
705 mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
706 {
707         struct mpt_lan_priv *priv = netdev_priv(dev);
708         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
709         MPT_FRAME_HDR *mf;
710         LANSendRequest_t *pSendReq;
711         SGETransaction32_t *pTrans;
712         SGESimple64_t *pSimple;
713         dma_addr_t dma;
714         unsigned long flags;
715         int ctx;
716         u16 cur_naa = 0x1000;
717
718         dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
719                         __FUNCTION__, skb));
720
721         spin_lock_irqsave(&priv->txfidx_lock, flags);
722         if (priv->mpt_txfidx_tail < 0) {
723                 netif_stop_queue(dev);
724                 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
725
726                 printk (KERN_ERR "%s: no tx context available: %u\n",
727                         __FUNCTION__, priv->mpt_txfidx_tail);
728                 return 1;
729         }
730
731         mf = mpt_get_msg_frame(LanCtx, mpt_dev);
732         if (mf == NULL) {
733                 netif_stop_queue(dev);
734                 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
735
736                 printk (KERN_ERR "%s: Unable to alloc request frame\n",
737                         __FUNCTION__);
738                 return 1;
739         }
740
741         ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
742         spin_unlock_irqrestore(&priv->txfidx_lock, flags);
743
744 //      dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
745 //                      IOC_AND_NETDEV_NAMES_s_s(dev)));
746
747         pSendReq = (LANSendRequest_t *) mf;
748
749         /* Set the mac.raw pointer, since this apparently isn't getting
750          * done before we get the skb. Pull the data pointer past the mac data.
751          */
752         skb->mac.raw = skb->data;
753         skb_pull(skb, 12);
754
755         dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
756                              PCI_DMA_TODEVICE);
757
758         priv->SendCtl[ctx].skb = skb;
759         priv->SendCtl[ctx].dma = dma;
760         priv->SendCtl[ctx].len = skb->len;
761
762         /* Message Header */
763         pSendReq->Reserved    = 0;
764         pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
765         pSendReq->ChainOffset = 0;
766         pSendReq->Reserved2   = 0;
767         pSendReq->MsgFlags    = 0;
768         pSendReq->PortNumber  = priv->pnum;
769
770         /* Transaction Context Element */
771         pTrans = (SGETransaction32_t *) pSendReq->SG_List;
772
773         /* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
774         pTrans->ContextSize   = sizeof(u32);
775         pTrans->DetailsLength = 2 * sizeof(u32);
776         pTrans->Flags         = 0;
777         pTrans->TransactionContext[0] = cpu_to_le32(ctx);
778
779 //      dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
780 //                      IOC_AND_NETDEV_NAMES_s_s(dev),
781 //                      ctx, skb, skb->data));
782
783 #ifdef QLOGIC_NAA_WORKAROUND
784 {
785         struct NAA_Hosed *nh;
786
787         /* Munge the NAA for Tx packets to QLogic boards, which don't follow
788            RFC 2625. The longer I look at this, the more my opinion of Qlogic
789            drops. */
790         read_lock_irq(&bad_naa_lock);
791         for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
792                 if ((nh->ieee[0] == skb->mac.raw[0]) &&
793                     (nh->ieee[1] == skb->mac.raw[1]) &&
794                     (nh->ieee[2] == skb->mac.raw[2]) &&
795                     (nh->ieee[3] == skb->mac.raw[3]) &&
796                     (nh->ieee[4] == skb->mac.raw[4]) &&
797                     (nh->ieee[5] == skb->mac.raw[5])) {
798                         cur_naa = nh->NAA;
799                         dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
800                                   "= %04x.\n", cur_naa));
801                         break;
802                 }
803         }
804         read_unlock_irq(&bad_naa_lock);
805 }
806 #endif
807
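        /* Pack the 16-bit NAA and the first two destination MAC bytes into
         * TransactionDetails[0], and the remaining four destination bytes
         * into TransactionDetails[1].
         */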
808         pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
809                                                     (skb->mac.raw[0] <<  8) |
810                                                     (skb->mac.raw[1] <<  0));
811         pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) |
812                                                     (skb->mac.raw[3] << 16) |
813                                                     (skb->mac.raw[4] <<  8) |
814                                                     (skb->mac.raw[5] <<  0));
815
816         pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
817
818         /* If we ever decide to send more than one Simple SGE per LANSend, then
819            we will need to make sure that LAST_ELEMENT only gets set on the
820            last one. Otherwise, bad voodoo and evil funkiness will commence. */
821         pSimple->FlagsLength = cpu_to_le32(
822                         ((MPI_SGE_FLAGS_LAST_ELEMENT |
823                           MPI_SGE_FLAGS_END_OF_BUFFER |
824                           MPI_SGE_FLAGS_SIMPLE_ELEMENT |
825                           MPI_SGE_FLAGS_SYSTEM_ADDRESS |
826                           MPI_SGE_FLAGS_HOST_TO_IOC |
827                           MPI_SGE_FLAGS_64_BIT_ADDRESSING |
828                           MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
829                         skb->len);
830         pSimple->Address.Low = cpu_to_le32((u32) dma);
831         if (sizeof(dma_addr_t) > sizeof(u32))
832                 pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
833         else
834                 pSimple->Address.High = 0;
835
836         mpt_put_msg_frame (LanCtx, mpt_dev, mf);
837         dev->trans_start = jiffies;
838
839         dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
840                         IOC_AND_NETDEV_NAMES_s_s(dev),
841                         le32_to_cpu(pSimple->FlagsLength)));
842
843         return 0;
844 }
845
846 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
847 /*
848  * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
849  */
850 static void
851 mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
852 {
853         struct mpt_lan_priv *priv = dev->priv;
854         
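        /* Bit 0 of post_buckets_active ensures only one post_buckets task
         * is queued at a time; it is cleared at the end of
         * mpt_lan_post_receive_buckets().
         */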
855         if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
856                 if (priority) {
857                         schedule_work(&priv->post_buckets_task);
858                 } else {
859                         schedule_delayed_work(&priv->post_buckets_task, 1);
860                         dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
861                                    "timer.\n"));
862                 }
863                 dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
864                            IOC_AND_NETDEV_NAMES_s_s(dev) ));
865         }
866 }
867
868 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
869 static int
870 mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
871 {
872         struct mpt_lan_priv *priv = dev->priv;
873
874         skb->protocol = mpt_lan_type_trans(skb, dev);
875
876         dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
877                  "delivered to upper level.\n",
878                         IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
879
880         priv->stats.rx_bytes += skb->len;
881         priv->stats.rx_packets++;
882
883         skb->dev = dev;
884         netif_rx(skb);
885
886         dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
887                  atomic_read(&priv->buckets_out)));
888
889         if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
890                 mpt_lan_wake_post_buckets_task(dev, 1);
891
892         dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
893                   "remaining, %d received back since sod\n",
894                   atomic_read(&priv->buckets_out), priv->total_received));
895
896         return 0;
897 }
898
899 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
900 //static inline int
901 static int
902 mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
903 {
904         struct mpt_lan_priv *priv = dev->priv;
905         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
906         struct sk_buff *skb, *old_skb;
907         unsigned long flags;
908         u32 ctx, len;
909
910         ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
911         skb = priv->RcvCtl[ctx].skb;
912
913         len = GET_LAN_PACKET_LENGTH(tmsg);
914
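        /* Copybreak: for small packets, copy the data into a freshly
         * allocated skb so the large posted bucket skb (and its DMA mapping)
         * can be reused rather than unmapped and replaced.
         */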
915         if (len < MPT_LAN_RX_COPYBREAK) {
916                 old_skb = skb;
917
918                 skb = (struct sk_buff *)dev_alloc_skb(len);
919                 if (!skb) {
920                         printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
921                                         IOC_AND_NETDEV_NAMES_s_s(dev),
922                                         __FILE__, __LINE__);
923                         return -ENOMEM;
924                 }
925
926                 pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
927                                             priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
928
929                 memcpy(skb_put(skb, len), old_skb->data, len);
930
931                 pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
932                                                priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
933                 goto out;
934         }
935
936         skb_put(skb, len);
937
938         priv->RcvCtl[ctx].skb = NULL;
939
940         pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
941                          priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
942
943 out:
944         spin_lock_irqsave(&priv->rxfidx_lock, flags);
945         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
946         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
947
948         atomic_dec(&priv->buckets_out);
949         priv->total_received++;
950
951         return mpt_lan_receive_skb(dev, skb);
952 }
953
954 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
955 static int
956 mpt_lan_receive_post_free(struct net_device *dev,
957                           LANReceivePostReply_t *pRecvRep)
958 {
959         struct mpt_lan_priv *priv = dev->priv;
960         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
961         unsigned long flags;
962         struct sk_buff *skb;
963         u32 ctx;
964         int count;
965         int i;
966
967         count = pRecvRep->NumberOfContexts;
968
969 /**/    dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
970                   "IOC returned %d buckets, freeing them...\n", count));
971
972         spin_lock_irqsave(&priv->rxfidx_lock, flags);
973         for (i = 0; i < count; i++) {
974                 ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
975
976                 skb = priv->RcvCtl[ctx].skb;
977
978 //              dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
979 //                              IOC_AND_NETDEV_NAMES_s_s(dev)));
980 //              dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
981 //                              priv, &(priv->buckets_out)));
982 //              dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
983
984                 priv->RcvCtl[ctx].skb = NULL;
985                 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
986                                  priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
987                 dev_kfree_skb_any(skb);
988
989                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
990         }
991         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
992
993         atomic_sub(count, &priv->buckets_out);
994
995 //      for (i = 0; i < priv->max_buckets_out; i++)
996 //              if (priv->RcvCtl[i].skb != NULL)
997 //                      dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
998 //                                "is still out\n", i));
999
1000 /*      dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
1001                   count));
1002 */
1003 /**/    dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
1004 /**/              "remaining, %d received back since sod.\n",
1005 /**/              atomic_read(&priv->buckets_out), priv->total_received));
1006         return 0;
1007 }
1008
1009 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1010 static int
1011 mpt_lan_receive_post_reply(struct net_device *dev,
1012                            LANReceivePostReply_t *pRecvRep)
1013 {
1014         struct mpt_lan_priv *priv = dev->priv;
1015         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1016         struct sk_buff *skb, *old_skb;
1017         unsigned long flags;
1018         u32 len, ctx, offset;
1019         u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
1020         int count;
1021         int i, l;
1022
1023         dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
1024         dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
1025                  le16_to_cpu(pRecvRep->IOCStatus)));
1026
1027         if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
1028                                                 MPI_IOCSTATUS_LAN_CANCELED)
1029                 return mpt_lan_receive_post_free(dev, pRecvRep);
1030
1031         len = le32_to_cpu(pRecvRep->PacketLength);
1032         if (len == 0) {
1033                 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
1034                         "ReceivePostReply w/ PacketLength zero!\n",
1035                                 IOC_AND_NETDEV_NAMES_s_s(dev));
1036                 printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
1037                                 pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
1038                 return -1;
1039         }
1040
1041         ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
1042         count  = pRecvRep->NumberOfContexts;
1043         skb    = priv->RcvCtl[ctx].skb;
1044
1045         offset = le32_to_cpu(pRecvRep->PacketOffset);
1046 //      if (offset != 0) {
1047 //              printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
1048 //                      "w/ PacketOffset %u\n",
1049 //                              IOC_AND_NETDEV_NAMES_s_s(dev),
1050 //                              offset);
1051 //      }
1052
1053         dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
1054                         IOC_AND_NETDEV_NAMES_s_s(dev),
1055                         offset, len));
1056
1057         if (count > 1) {
1058                 int szrem = len;
1059
1060 //              dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
1061 //                      "for single packet, concatenating...\n",
1062 //                              IOC_AND_NETDEV_NAMES_s_s(dev)));
1063
1064                 skb = (struct sk_buff *)dev_alloc_skb(len);
1065                 if (!skb) {
1066                         printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1067                                         IOC_AND_NETDEV_NAMES_s_s(dev),
1068                                         __FILE__, __LINE__);
1069                         return -ENOMEM;
1070                 }
1071
1072                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1073                 for (i = 0; i < count; i++) {
1074
1075                         ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
1076                         old_skb = priv->RcvCtl[ctx].skb;
1077
1078                         l = priv->RcvCtl[ctx].len;
1079                         if (szrem < l)
1080                                 l = szrem;
1081
1082 //                      dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
1083 //                                      IOC_AND_NETDEV_NAMES_s_s(dev),
1084 //                                      i, l));
1085
1086                         pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1087                                                     priv->RcvCtl[ctx].dma,
1088                                                     priv->RcvCtl[ctx].len,
1089                                                     PCI_DMA_FROMDEVICE);
1090                         memcpy(skb_put(skb, l), old_skb->data, l);
1091
1092                         pci_dma_sync_single_for_device(mpt_dev->pcidev,
1093                                                        priv->RcvCtl[ctx].dma,
1094                                                        priv->RcvCtl[ctx].len,
1095                                                        PCI_DMA_FROMDEVICE);
1096
1097                         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1098                         szrem -= l;
1099                 }
1100                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1101
1102         } else if (len < MPT_LAN_RX_COPYBREAK) {
1103
1104                 old_skb = skb;
1105
1106                 skb = (struct sk_buff *)dev_alloc_skb(len);
1107                 if (!skb) {
1108                         printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1109                                         IOC_AND_NETDEV_NAMES_s_s(dev),
1110                                         __FILE__, __LINE__);
1111                         return -ENOMEM;
1112                 }
1113
1114                 pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1115                                             priv->RcvCtl[ctx].dma,
1116                                             priv->RcvCtl[ctx].len,
1117                                             PCI_DMA_FROMDEVICE);
1118
1119                 memcpy(skb_put(skb, len), old_skb->data, len);
1120
1121                 pci_dma_sync_single_for_device(mpt_dev->pcidev,
1122                                                priv->RcvCtl[ctx].dma,
1123                                                priv->RcvCtl[ctx].len,
1124                                                PCI_DMA_FROMDEVICE);
1125
1126                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1127                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1128                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1129
1130         } else {
1131                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1132
1133                 priv->RcvCtl[ctx].skb = NULL;
1134
1135                 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1136                                  priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1137                 priv->RcvCtl[ctx].dma = 0;
1138
1139                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1140                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1141
1142                 skb_put(skb,len);
1143         }
1144
1145         atomic_sub(count, &priv->buckets_out);
1146         priv->total_received += count;
1147
1148         if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
1149                 printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
1150                         "MPT_LAN_MAX_BUCKETS_OUT = %d\n",
1151                                 IOC_AND_NETDEV_NAMES_s_s(dev),
1152                                 priv->mpt_rxfidx_tail,
1153                                 MPT_LAN_MAX_BUCKETS_OUT);
1154
1155                 return -1;
1156         }
1157
1158         if (remaining == 0)
1159                 printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
1160                         "(priv->buckets_out = %d)\n",
1161                         IOC_AND_NETDEV_NAMES_s_s(dev),
1162                         atomic_read(&priv->buckets_out));
1163         else if (remaining < 10)
1164                 printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
1165                         "(priv->buckets_out = %d)\n",
1166                         IOC_AND_NETDEV_NAMES_s_s(dev),
1167                         remaining, atomic_read(&priv->buckets_out));
1168         
1169         if ((remaining < priv->bucketthresh) &&
1170             ((atomic_read(&priv->buckets_out) - remaining) >
1171              MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
1172                 
1173                 printk (KERN_WARNING MYNAM " Mismatch between driver's "
1174                         "buckets_out count and fw's BucketsRemaining "
1175                         "count has crossed the threshold, issuing a "
1176                         "LanReset to clear the fw's hashtable. You may "
1177                         "want to check your /var/log/messages for \"CRC "
1178                         "error\" event notifications.\n");
1179                 
1180                 mpt_lan_reset(dev);
1181                 mpt_lan_wake_post_buckets_task(dev, 0);
1182         }
1183         
1184         return mpt_lan_receive_skb(dev, skb);
1185 }
1186
1187 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1188 /* Simple SGE's only at the moment */
1189
1190 static void
1191 mpt_lan_post_receive_buckets(void *dev_id)
1192 {
1193         struct net_device *dev = dev_id;
1194         struct mpt_lan_priv *priv = dev->priv;
1195         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1196         MPT_FRAME_HDR *mf;
1197         LANReceivePostRequest_t *pRecvReq;
1198         SGETransaction32_t *pTrans;
1199         SGESimple64_t *pSimple;
1200         struct sk_buff *skb;
1201         dma_addr_t dma;
1202         u32 curr, buckets, count, max;
1203         u32 len = (dev->mtu + dev->hard_header_len + 4);
1204         unsigned long flags;
1205         int i;
1206
1207         curr = atomic_read(&priv->buckets_out);
1208         buckets = (priv->max_buckets_out - curr);
1209
1210         dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
1211                         IOC_AND_NETDEV_NAMES_s_s(dev),
1212                         __FUNCTION__, buckets, curr));
1213
1214         max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
1215                         (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
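        /* max = number of buckets (one 32-bit-context transaction element
         * plus one 64-bit simple SGE each) that fit in one request frame
         * after the fixed LANReceivePostRequest header.
         */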
1216
1217         while (buckets) {
1218                 mf = mpt_get_msg_frame(LanCtx, mpt_dev);
1219                 if (mf == NULL) {
1220                         printk (KERN_ERR "%s: Unable to alloc request frame\n",
1221                                 __FUNCTION__);
1222                         dioprintk((KERN_ERR "%s: %u buckets remaining\n",
1223                                  __FUNCTION__, buckets));
1224                         goto out;
1225                 }
1226                 pRecvReq = (LANReceivePostRequest_t *) mf;
1227
1228                 count = buckets;
1229                 if (count > max)
1230                         count = max;
1231
1232                 pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
1233                 pRecvReq->ChainOffset = 0;
1234                 pRecvReq->MsgFlags    = 0;
1235                 pRecvReq->PortNumber  = priv->pnum;
1236
1237                 pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
1238                 pSimple = NULL;
1239
1240                 for (i = 0; i < count; i++) {
1241                         int ctx;
1242
1243                         spin_lock_irqsave(&priv->rxfidx_lock, flags);
1244                         if (priv->mpt_rxfidx_tail < 0) {
1245                                 printk (KERN_ERR "%s: Can't alloc context\n",
1246                                         __FUNCTION__);
1247                                 spin_unlock_irqrestore(&priv->rxfidx_lock,
1248                                                        flags);
1249                                 break;
1250                         }
1251
1252                         ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];
1253
1254                         skb = priv->RcvCtl[ctx].skb;
1255                         if (skb && (priv->RcvCtl[ctx].len != len)) {
1256                                 pci_unmap_single(mpt_dev->pcidev,
1257                                                  priv->RcvCtl[ctx].dma,
1258                                                  priv->RcvCtl[ctx].len,
1259                                                  PCI_DMA_FROMDEVICE);
1260                                 dev_kfree_skb(priv->RcvCtl[ctx].skb);
1261                                 skb = priv->RcvCtl[ctx].skb = NULL;
1262                         }
1263
1264                         if (skb == NULL) {
1265                                 skb = dev_alloc_skb(len);
1266                                 if (skb == NULL) {
1267                                         printk (KERN_WARNING
1268                                                 MYNAM "/%s: Can't alloc skb\n",
1269                                                 __FUNCTION__);
1270                                         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1271                                         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1272                                         break;
1273                                 }
1274
1275                                 dma = pci_map_single(mpt_dev->pcidev, skb->data,
1276                                                      len, PCI_DMA_FROMDEVICE);
1277
1278                                 priv->RcvCtl[ctx].skb = skb;
1279                                 priv->RcvCtl[ctx].dma = dma;
1280                                 priv->RcvCtl[ctx].len = len;
1281                         }
1282
1283                         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1284
1285                         pTrans->ContextSize   = sizeof(u32);
1286                         pTrans->DetailsLength = 0;
1287                         pTrans->Flags         = 0;
1288                         pTrans->TransactionContext[0] = cpu_to_le32(ctx);
1289
1290                         pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
1291
1292                         pSimple->FlagsLength = cpu_to_le32(
1293                                 ((MPI_SGE_FLAGS_END_OF_BUFFER |
1294                                   MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1295                                   MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
1296                         pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
1297                         if (sizeof(dma_addr_t) > sizeof(u32))
1298                                 pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
1299                         else
1300                                 pSimple->Address.High = 0;
1301
1302                         pTrans = (SGETransaction32_t *) (pSimple + 1);
1303                 }
1304
1305                 if (pSimple == NULL) {
1306 /**/                    printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
1307 /**/                            __FUNCTION__);
1308                         mpt_free_msg_frame(mpt_dev, mf);
1309                         goto out;
1310                 }
1311
1312                 pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);
1313
1314                 pRecvReq->BucketCount = cpu_to_le32(i);
1315
1316 /*      printk(KERN_INFO MYNAM ": posting buckets\n   ");
1317  *      for (i = 0; i < j + 2; i ++)
1318  *          printk (" %08x", le32_to_cpu(msg[i]));
1319  *      printk ("\n");
1320  */
1321
1322                 mpt_put_msg_frame(LanCtx, mpt_dev, mf);
1323
1324                 priv->total_posted += i;
1325                 buckets -= i;
1326                 atomic_add(i, &priv->buckets_out);
1327         }
1328
1329 out:
1330         dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
1331                   __FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
1332         dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
1333                   __FUNCTION__, priv->total_posted, priv->total_received));
1334
1335         clear_bit(0, &priv->post_buckets_active);
1336 }
1337
1338 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1339 static struct net_device *
1340 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
1341 {
1342         struct net_device *dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
1343         struct mpt_lan_priv *priv = NULL;
1344         u8 HWaddr[FC_ALEN], *a;
1345
1346         if (!dev)
1347                 return NULL;
1348
1349         dev->mtu = MPT_LAN_MTU;
1350
1351         priv = netdev_priv(dev);
1352
1353         priv->mpt_dev = mpt_dev;
1354         priv->pnum = pnum;
1355
1356         memset(&priv->post_buckets_task, 0, sizeof(struct work_struct));
1357         INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev);
1358         priv->post_buckets_active = 0;
1359
1360         dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
1361                         __LINE__, dev->mtu + dev->hard_header_len + 4));
1362
1363         atomic_set(&priv->buckets_out, 0);
1364         priv->total_posted = 0;
1365         priv->total_received = 0;
1366         priv->max_buckets_out = max_buckets_out;
1367         if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
1368                 priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
1369
1370         dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
1371                         __LINE__,
1372                         mpt_dev->pfacts[0].MaxLanBuckets,
1373                         max_buckets_out,
1374                         priv->max_buckets_out));
1375
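             /* Threshold (two thirds of the maximum outstanding buckets)
              * below which the receive path posts more buckets.
              */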
1376         priv->bucketthresh = priv->max_buckets_out * 2 / 3;
1377         spin_lock_init(&priv->txfidx_lock);
1378         spin_lock_init(&priv->rxfidx_lock);
1379
1380         memset(&priv->stats, 0, sizeof(priv->stats));
1381
1382         /*  Grab pre-fetched LANPage1 stuff. :-) */
1383         a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
1384
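             /* Byte-reverse the six address bytes starting at
              * HardwareAddressLow to get the FC hardware address
              * in canonical (MSB-first) order.
              */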
1385         HWaddr[0] = a[5];
1386         HWaddr[1] = a[4];
1387         HWaddr[2] = a[3];
1388         HWaddr[3] = a[2];
1389         HWaddr[4] = a[1];
1390         HWaddr[5] = a[0];
1391
1392         dev->addr_len = FC_ALEN;
1393         memcpy(dev->dev_addr, HWaddr, FC_ALEN);
1394         memset(dev->broadcast, 0xff, FC_ALEN);
1395
1396         /* The Tx queue is 127 deep on the 909.
1397          * Give ourselves some breathing room.
1398          */
1399         priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
1400                             tx_max_out_p : MPT_TX_MAX_OUT_LIM;
1401
1402         dev->open = mpt_lan_open;
1403         dev->stop = mpt_lan_close;
1404         dev->get_stats = mpt_lan_get_stats;
1405         dev->set_multicast_list = NULL;
1406         dev->change_mtu = mpt_lan_change_mtu;
1407         dev->hard_start_xmit = mpt_lan_sdu_send;
1408
1409 /* Netdev watchdog-based Tx timeout handling (not available before 2.3.45). */
1410         dev->tx_timeout = mpt_lan_tx_timeout;
1411         dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
1412
1413         dlprintk((KERN_INFO MYNAM ": Finished registering dev "
1414                 "and setting initial values\n"));
1415
1416         SET_MODULE_OWNER(dev);
1417
1418         if (register_netdev(dev) != 0) {
1419                 free_netdev(dev);
1420                 dev = NULL;
1421         }
1422         return dev;
1423 }
1424
1425 static int
1426 mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1427 {
1428         MPT_ADAPTER             *ioc = pci_get_drvdata(pdev);
1429         struct net_device       *dev;
1430         int                     i;
1431
1432         for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
1433                 printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
1434                        "ProtocolFlags=%02Xh (%c%c%c%c)\n",
1435                        ioc->name, ioc->pfacts[i].PortNumber,
1436                        ioc->pfacts[i].ProtocolFlags,
1437                        MPT_PROTOCOL_FLAGS_c_c_c_c(
1438                                ioc->pfacts[i].ProtocolFlags));
1439
1440                 if (!(ioc->pfacts[i].ProtocolFlags &
1441                                         MPI_PORTFACTS_PROTOCOL_LAN)) {
1442                         printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
1443                                "seems to be disabled on this adapter port!\n",
1444                                ioc->name);
1445                         continue;
1446                 }
1447
1448                 dev = mpt_register_lan_device(ioc, i);
1449                 if (!dev) {
1450                         printk(KERN_ERR MYNAM ": %s: Unable to register "
1451                                "port%d as a LAN device\n", ioc->name,
1452                                ioc->pfacts[i].PortNumber);
1453                         continue;
1454                 }
1455
1456                 printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
1457                        "registered as '%s'\n", ioc->name, dev->name);
1458                 printk(KERN_INFO MYNAM ": %s/%s: "
1459                        "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
1460                        IOC_AND_NETDEV_NAMES_s_s(dev),
1461                        dev->dev_addr[0], dev->dev_addr[1],
1462                        dev->dev_addr[2], dev->dev_addr[3],
1463                        dev->dev_addr[4], dev->dev_addr[5]);
1464
1465                 ioc->netdev = dev;
1466
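                     /* ioc->netdev holds a single net_device, so only the
                      * first LAN-capable port on this adapter is registered.
                      */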
1467                 return 0;
1468         }
1469
1470         return -ENODEV;
1471 }
1472
1473 static void
1474 mptlan_remove(struct pci_dev *pdev)
1475 {
1476         MPT_ADAPTER             *ioc = pci_get_drvdata(pdev);
1477         struct net_device       *dev = ioc->netdev;
1478
1479         if (dev != NULL) {
1480                 unregister_netdev(dev);
1481                 free_netdev(dev);
1482         }
1483 }
1484
1485 static struct mpt_pci_driver mptlan_driver = {
1486         .probe          = mptlan_probe,
1487         .remove         = mptlan_remove,
1488 };
1489
1490 static int __init mpt_lan_init (void)
1491 {
1492         show_mptmod_ver(LANAME, LANVER);
1493
1494         if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
1495                 printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1496                 return -EBUSY;
1497         }
1498
1499         /* Set the callback index to be used by driver core for turbo replies */
1500         mpt_lan_index = LanCtx;
1501
1502         dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1503
1504         if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
1505                 printk(KERN_ERR MYNAM ": Failed to register a reset handler "
1506                        "with mptbase\n");
1507                 mpt_deregister(LanCtx);
1508                 return -EBUSY;
1509         }
1510
1511         dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1512
1513         if (mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER))
1514                 dprintk((KERN_INFO MYNAM ": failed to register dd callbacks\n"));
1515         return 0;
1516 }
1517
1518 static void __exit mpt_lan_exit(void)
1519 {
1520         mpt_device_driver_deregister(MPTLAN_DRIVER);
1521         mpt_reset_deregister(LanCtx);
1522
1523         if (LanCtx >= 0) {
1524                 mpt_deregister(LanCtx);
1525                 LanCtx = -1;
1526                 mpt_lan_index = 0;
1527         }
1528 }
1529
1530 module_init(mpt_lan_init);
1531 module_exit(mpt_lan_exit);
1532
1533 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1534 static unsigned short
1535 mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1536 {
1537         struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
1538         struct fcllc *fcllc;
1539
1540         skb->mac.raw = skb->data;
1541         skb_pull(skb, sizeof(struct mpt_lan_ohdr));
1542
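             /* Work around a sender firmware bug that byte-swaps broadcast
              * headers: a dtype of 0xffff flags an affected frame, so swap
              * the first four 32-bit words back and warn about the sender.
              */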
1543         if (fch->dtype == htons(0xffff)) {
1544                 u32 *p = (u32 *) fch;
1545
1546                 swab32s(p + 0);
1547                 swab32s(p + 1);
1548                 swab32s(p + 2);
1549                 swab32s(p + 3);
1550
1551                 printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
1552                                 NETDEV_PTR_TO_IOC_NAME_s(dev));
1553                 printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
1554                                 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1555                                 fch->saddr[3], fch->saddr[4], fch->saddr[5]);
1556         }
1557
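             /* Classify by destination address: the group bit selects
              * broadcast/multicast, otherwise compare against our own
              * address to tell host from other-host traffic.
              */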
1558         if (*fch->daddr & 1) {
1559                 if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
1560                         skb->pkt_type = PACKET_BROADCAST;
1561                 } else {
1562                         skb->pkt_type = PACKET_MULTICAST;
1563                 }
1564         } else {
1565                 if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
1566                         skb->pkt_type = PACKET_OTHERHOST;
1567                 } else {
1568                         skb->pkt_type = PACKET_HOST;
1569                 }
1570         }
1571
1572         fcllc = (struct fcllc *)skb->data;
1573
1574 #ifdef QLOGIC_NAA_WORKAROUND
1575 {
1576         u16 source_naa = fch->stype, found = 0;
1577
1578         /* Workaround for QLogic not following RFC 2625 in regards to the NAA
1579            value. */
1580
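             /* The NAA nibble belongs in the top four bits; if it is not
              * there, the field arrived byte-swapped, so fix it up before
              * the RFC 2625 check.
              */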
1581         if ((source_naa & 0xF000) == 0)
1582                 source_naa = swab16(source_naa);
1583
1584         if (fcllc->ethertype == htons(ETH_P_ARP))
1585             dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
1586                       "%04x.\n", source_naa));
1587
1588         if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
1589            ((source_naa >> 12) !=  MPT_LAN_NAA_RFC2625)){
1590                 struct NAA_Hosed *nh, *prevnh;
1591                 int i;
1592
1593                 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
1594                           "system with non-RFC 2625 NAA value (%04x).\n",
1595                           source_naa));
1596
1597                 write_lock_irq(&bad_naa_lock);
1598                 for (prevnh = nh = mpt_bad_naa; nh != NULL;
1599                      prevnh=nh, nh=nh->next) {
1600                         if ((nh->ieee[0] == fch->saddr[0]) &&
1601                             (nh->ieee[1] == fch->saddr[1]) &&
1602                             (nh->ieee[2] == fch->saddr[2]) &&
1603                             (nh->ieee[3] == fch->saddr[3]) &&
1604                             (nh->ieee[4] == fch->saddr[4]) &&
1605                             (nh->ieee[5] == fch->saddr[5])) {
1606                                 found = 1;
1607                                 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
1608                                          "q/Rep w/ bad NAA from system already"
1609                                          " in DB.\n"));
1610                                 break;
1611                         }
1612                 }
1613
1614                 if ((!found) && (nh == NULL)) {
1615
1616                         nh = kmalloc(sizeof(struct NAA_Hosed), GFP_ATOMIC);
1617                         dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
1618                                  " bad NAA from system not yet in DB.\n"));
1619
1620                         if (nh != NULL) {
1621                                 nh->next = NULL;
1622                                 if (!mpt_bad_naa)
1623                                         mpt_bad_naa = nh;
1624                                 if (prevnh)
1625                                         prevnh->next = nh;
1626
1627                                 nh->NAA = source_naa; /* Set the S_NAA value. */
1628                                 for (i = 0; i < FC_ALEN; i++)
1629                                         nh->ieee[i] = fch->saddr[i];
1630                                 dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
1631                                           "%02x:%02x with non-compliant S_NAA value.\n",
1632                                           fch->saddr[0], fch->saddr[1], fch->saddr[2],
1633                                           fch->saddr[3], fch->saddr[4],fch->saddr[5]));
1634                         } else {
1635                                 printk (KERN_ERR "mptlan/type_trans: Unable to"
1636                                         " kmalloc a NAA_Hosed struct.\n");
1637                         }
1638                 } else if (!found) {
1639                         printk (KERN_ERR "mptlan/type_trans: found not"
1640                                 " set, but nh isn't null. Evil "
1641                                 "funkiness abounds.\n");
1642                 }
1643                 write_unlock_irq(&bad_naa_lock);
1644         }
1645 }
1646 #endif
1647
1648         /* Strip the SNAP header from ARP packets since we don't
1649          * pass them through to the 802.2/SNAP layers.
1650          */
1651         if (fcllc->dsap == EXTENDED_SAP &&
1652                 (fcllc->ethertype == htons(ETH_P_IP) ||
1653                  fcllc->ethertype == htons(ETH_P_ARP))) {
1654                 skb_pull(skb, sizeof(struct fcllc));
1655                 return fcllc->ethertype;
1656         }
1657
1658         return htons(ETH_P_802_2);
1659 }
1660
1661 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/