1 /*
2  *  linux/drivers/message/fusion/mptlan.c
3  *      IP Over Fibre Channel device driver.
4  *      For use with LSI Fibre Channel PCI chip/adapters
5  *      running LSI Fusion MPT (Message Passing Technology) firmware.
6  *
7  *  Copyright (c) 2000-2008 LSI Corporation
8  *  (mailto:DL-MPTFusionLinux@lsi.com)
9  *
10  */
11 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
12 /*
13     This program is free software; you can redistribute it and/or modify
14     it under the terms of the GNU General Public License as published by
15     the Free Software Foundation; version 2 of the License.
16
17     This program is distributed in the hope that it will be useful,
18     but WITHOUT ANY WARRANTY; without even the implied warranty of
19     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20     GNU General Public License for more details.
21
22     NO WARRANTY
23     THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
24     CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
25     LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
26     MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
27     solely responsible for determining the appropriateness of using and
28     distributing the Program and assumes all risks associated with its
29     exercise of rights under this Agreement, including but not limited to
30     the risks and costs of program errors, damage to or loss of data,
31     programs or equipment, and unavailability or interruption of operations.
32
33     DISCLAIMER OF LIABILITY
34     NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
35     DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36     DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
37     ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
38     TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
39     USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
40     HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
41
42     You should have received a copy of the GNU General Public License
43     along with this program; if not, write to the Free Software
44     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
45 */
46
47 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
48 /*
49  * Define statements used for debugging
50  */
51 //#define MPT_LAN_IO_DEBUG
52
53 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
54
55 #include "mptlan.h"
56 #include <linux/init.h>
57 #include <linux/module.h>
58 #include <linux/fs.h>
59
60 #define my_VERSION      MPT_LINUX_VERSION_COMMON
61 #define MYNAM           "mptlan"
62
63 MODULE_LICENSE("GPL");
64 MODULE_VERSION(my_VERSION);
65
66 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
67 /*
68  * MPT LAN message sizes without variable part.
69  */
70 #define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
71         (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
72
73 #define MPT_LAN_TRANSACTION32_SIZE \
74         (sizeof(SGETransaction32_t) - sizeof(u32))
75
76 /*
77  *  Fusion MPT LAN private structures
78  */
79
80 struct NAA_Hosed {
81         u16 NAA;
82         u8 ieee[FC_ALEN];
83         struct NAA_Hosed *next;
84 };
85
86 struct BufferControl {
87         struct sk_buff  *skb;
88         dma_addr_t      dma;
89         unsigned int    len;
90 };
91
92 struct mpt_lan_priv {
93         MPT_ADAPTER *mpt_dev;
94         u8 pnum; /* Port number in the IOC. This is not a Unix network port! */
95
96         atomic_t buckets_out;           /* number of unused buckets on IOC */
97         int bucketthresh;               /* Send more when this many left */
98
99         int *mpt_txfidx; /* Free Tx Context list */
100         int mpt_txfidx_tail;
101         spinlock_t txfidx_lock;
102
103         int *mpt_rxfidx; /* Free Rx Context list */
104         int mpt_rxfidx_tail;
105         spinlock_t rxfidx_lock;
106
107         struct BufferControl *RcvCtl;   /* Receive BufferControl structs */
108         struct BufferControl *SendCtl;  /* Send BufferControl structs */
109
110         int max_buckets_out;            /* Max buckets to send to IOC */
111         int tx_max_out;                 /* IOC's Tx queue len */
112
113         u32 total_posted;
114         u32 total_received;
115         struct net_device_stats stats;  /* Per device statistics */
116
117         struct delayed_work post_buckets_task;
118         struct net_device *dev;
119         unsigned long post_buckets_active;
120 };
121
122 struct mpt_lan_ohdr {
123         u16     dtype;
124         u8      daddr[FC_ALEN];
125         u16     stype;
126         u8      saddr[FC_ALEN];
127 };
128
129 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
130
131 /*
132  *  Forward protos...
133  */
134 static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
135                        MPT_FRAME_HDR *reply);
136 static int  mpt_lan_open(struct net_device *dev);
137 static int  mpt_lan_reset(struct net_device *dev);
138 static int  mpt_lan_close(struct net_device *dev);
139 static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
140 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
141                                            int priority);
142 static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
143 static int  mpt_lan_receive_post_reply(struct net_device *dev,
144                                        LANReceivePostReply_t *pRecvRep);
145 static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
146 static int  mpt_lan_send_reply(struct net_device *dev,
147                                LANSendReply_t *pSendRep);
148 static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
149 static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
150 static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
151                                          struct net_device *dev);
152
153 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
154 /*
155  *  Fusion MPT LAN private data
156  */
157 static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
158
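/* Module-wide defaults for the Rx bucket depth and Tx queue depth;
 * both are clamped per-IOC in mpt_register_lan_device(). */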
159 static u32 max_buckets_out = 127;
160 static u32 tx_max_out_p = 127 - 16;
161
162 #ifdef QLOGIC_NAA_WORKAROUND
163 static struct NAA_Hosed *mpt_bad_naa = NULL;
164 DEFINE_RWLOCK(bad_naa_lock);
165 #endif
166
167 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
168 /**
169  *      lan_reply - Handle all data sent from the hardware.
170  *      @ioc: Pointer to MPT_ADAPTER structure
171  *      @mf: Pointer to original MPT request frame (NULL if TurboReply)
172  *      @reply: Pointer to MPT reply frame
173  *
174  *      Returns 1 indicating original alloc'd request frame ptr
175  *      should be freed, or 0 if it shouldn't.
176  */
177 static int
178 lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
179 {
180         struct net_device *dev = ioc->netdev;
181         int FreeReqFrame = 0;
182
183         dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
184                   IOC_AND_NETDEV_NAMES_s_s(dev)));
185
186 //      dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
187 //                      mf, reply));
188
189         if (mf == NULL) {
190                 u32 tmsg = CAST_PTR_TO_U32(reply);
191
192                 dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
193                                 IOC_AND_NETDEV_NAMES_s_s(dev),
194                                 tmsg));
195
196                 switch (GET_LAN_FORM(tmsg)) {
197
198                 // NOTE!  (Optimization) First case here is now caught in
199                 //  mptbase.c::mpt_interrupt() routine and callback here
200                 //  is now skipped for this case!
201 #if 0
202                 case LAN_REPLY_FORM_MESSAGE_CONTEXT:
203 //                      dioprintk((KERN_INFO MYNAM "/lan_reply: "
204 //                                "MessageContext turbo reply received\n"));
205                         FreeReqFrame = 1;
206                         break;
207 #endif
208
209                 case LAN_REPLY_FORM_SEND_SINGLE:
210 //                      dioprintk((MYNAM "/lan_reply: "
211 //                                "calling mpt_lan_send_reply (turbo)\n"));
212
213                         // Potential BUG here?
214                         //      FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
215                         //  If/when mpt_lan_send_turbo would return 1 here,
216                         //  calling routine (mptbase.c|mpt_interrupt)
217                         //  would Oops because mf has already been set
218                         //  to NULL.  So after return from this func,
219                         //  mpt_interrupt() will attempt to put (NULL) mf ptr
220                         //  item back onto its adapter FreeQ - Oops!:-(
221                         //  It's Ok, since mpt_lan_send_turbo() *currently*
222                         //  always returns 0, but..., just in case:
223
224                         (void) mpt_lan_send_turbo(dev, tmsg);
225                         FreeReqFrame = 0;
226
227                         break;
228
229                 case LAN_REPLY_FORM_RECEIVE_SINGLE:
230 //                      dioprintk((KERN_INFO MYNAM "@lan_reply: "
231 //                                "rcv-Turbo = %08x\n", tmsg));
232                         mpt_lan_receive_post_turbo(dev, tmsg);
233                         break;
234
235                 default:
236                         printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
237                                 "that I don't know what to do with\n");
238
239                         /* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
240
241                         break;
242                 }
243
244                 return FreeReqFrame;
245         }
246
247 //      msg = (u32 *) reply;
248 //      dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
249 //                le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
250 //                le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
251 //      dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
252 //                reply->u.hdr.Function));
253
254         switch (reply->u.hdr.Function) {
255
256         case MPI_FUNCTION_LAN_SEND:
257         {
258                 LANSendReply_t *pSendRep;
259
260                 pSendRep = (LANSendReply_t *) reply;
261                 FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
262                 break;
263         }
264
265         case MPI_FUNCTION_LAN_RECEIVE:
266         {
267                 LANReceivePostReply_t *pRecvRep;
268
269                 pRecvRep = (LANReceivePostReply_t *) reply;
270                 if (pRecvRep->NumberOfContexts) {
271                         mpt_lan_receive_post_reply(dev, pRecvRep);
272                         if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
273                                 FreeReqFrame = 1;
274                 } else
275                         dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
276                                   "ReceivePostReply received.\n"));
277                 break;
278         }
279
280         case MPI_FUNCTION_LAN_RESET:
281                 /* Just a default reply. Might want to check it to
282                  * make sure that everything went ok.
283                  */
284                 FreeReqFrame = 1;
285                 break;
286
287         case MPI_FUNCTION_EVENT_NOTIFICATION:
288         case MPI_FUNCTION_EVENT_ACK:
289                 /*  _EVENT_NOTIFICATION should NOT come down this path any more.
290                  *  Should be routed to mpt_lan_event_process(), but just in case...
291                  */
292                 FreeReqFrame = 1;
293                 break;
294
295         default:
296                 printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
297                         "reply that I don't know what to do with\n");
298
299                 /* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
300                 FreeReqFrame = 1;
301
302                 break;
303         }
304
305         return FreeReqFrame;
306 }
307
308 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
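/**
 *      mpt_lan_ioc_reset - Handle IOC reset notifications routed to the LAN driver.
 *      @ioc: Pointer to MPT_ADAPTER structure
 *      @reset_phase: MPT_IOC_SETUP_RESET, MPT_IOC_PRE_RESET or MPT_IOC_POST_RESET
 *
 *      Pre-reset: stop the Tx queue, zero the outstanding-bucket count and
 *      rebuild the Rx free-context list.  Post-reset: re-post the receive
 *      buckets and wake the queue.  Always returns 1.
 */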
309 static int
310 mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
311 {
312         struct net_device *dev = ioc->netdev;
313         struct mpt_lan_priv *priv;
314
315         if (dev == NULL)
316                 return 1;
317         else
318                 priv = netdev_priv(dev);
319
320         dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
321                         reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
322                         reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
323
324         if (priv->mpt_rxfidx == NULL)
325                 return (1);
326
327         if (reset_phase == MPT_IOC_SETUP_RESET) {
328                 ;
329         } else if (reset_phase == MPT_IOC_PRE_RESET) {
330                 int i;
331                 unsigned long flags;
332
333                 netif_stop_queue(dev);
334
335                 dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
336
337                 atomic_set(&priv->buckets_out, 0);
338
339                 /* Reset Rx Free Tail index and re-populate the queue. */
340                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
341                 priv->mpt_rxfidx_tail = -1;
342                 for (i = 0; i < priv->max_buckets_out; i++)
343                         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
344                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
345         } else {
346                 mpt_lan_post_receive_buckets(priv);
347                 netif_wake_queue(dev);
348         }
349
350         return 1;
351 }
352
353 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
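/**
 *      mpt_lan_event_process - Handle MPT events routed to the LAN driver.
 *      @ioc: Pointer to MPT_ADAPTER structure
 *      @pEvReply: Pointer to the event notification reply frame
 *
 *      Currently ignores every event (AckRequired handling is done in
 *      mptbase.c) and always returns 1.
 */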
354 static int
355 mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
356 {
357         dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
358
359         switch (le32_to_cpu(pEvReply->Event)) {
360         case MPI_EVENT_NONE:                            /* 00 */
361         case MPI_EVENT_LOG_DATA:                        /* 01 */
362         case MPI_EVENT_STATE_CHANGE:                    /* 02 */
363         case MPI_EVENT_UNIT_ATTENTION:                  /* 03 */
364         case MPI_EVENT_IOC_BUS_RESET:                   /* 04 */
365         case MPI_EVENT_EXT_BUS_RESET:                   /* 05 */
366         case MPI_EVENT_RESCAN:                          /* 06 */
367                 /* Ok, do we need to do anything here? As far as
368                    I can tell, this is when a new device gets added
369                    to the loop. */
370         case MPI_EVENT_LINK_STATUS_CHANGE:              /* 07 */
371         case MPI_EVENT_LOOP_STATE_CHANGE:               /* 08 */
372         case MPI_EVENT_LOGOUT:                          /* 09 */
373         case MPI_EVENT_EVENT_CHANGE:                    /* 0A */
374         default:
375                 break;
376         }
377
378         /*
379          *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
380          *  Do NOT do it here now!
381          */
382
383         return 1;
384 }
385
386 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
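/**
 *      mpt_lan_open - net_device open handler.
 *      @dev: Pointer to the net_device
 *
 *      Issues a LAN reset, allocates the Tx/Rx free-context lists and
 *      BufferControl arrays, posts the initial receive buckets, registers
 *      for MPT event notifications and starts the Tx queue.
 *      Returns 0 on success or -ENOMEM if an allocation fails.
 */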
387 static int
388 mpt_lan_open(struct net_device *dev)
389 {
390         struct mpt_lan_priv *priv = netdev_priv(dev);
391         int i;
392
393         if (mpt_lan_reset(dev) != 0) {
394                 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
395
396                 printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
397
398                 if (mpt_dev->active)
399                         printk ("The ioc is active. Perhaps it needs to be"
400                                 " reset?\n");
401                 else
402                         printk ("The ioc is inactive, most likely in the "
403                                 "process of being reset. Please try again in "
404                                 "a moment.\n");
405         }
406
407         priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
408         if (priv->mpt_txfidx == NULL)
409                 goto out;
410         priv->mpt_txfidx_tail = -1;
411
412         priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
413                                 GFP_KERNEL);
414         if (priv->SendCtl == NULL)
415                 goto out_mpt_txfidx;
416         for (i = 0; i < priv->tx_max_out; i++)
417                 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
418
419         dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
420
421         priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
422                                    GFP_KERNEL);
423         if (priv->mpt_rxfidx == NULL)
424                 goto out_SendCtl;
425         priv->mpt_rxfidx_tail = -1;
426
427         priv->RcvCtl = kcalloc(priv->max_buckets_out,
428                                sizeof(struct BufferControl),
429                                GFP_KERNEL);
430         if (priv->RcvCtl == NULL)
431                 goto out_mpt_rxfidx;
432         for (i = 0; i < priv->max_buckets_out; i++)
433                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
434
435 /**/    dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
436 /**/    for (i = 0; i < priv->tx_max_out; i++)
437 /**/            dlprintk((" %xh", priv->mpt_txfidx[i]));
438 /**/    dlprintk(("\n"));
439
440         dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
441
442         mpt_lan_post_receive_buckets(priv);
443         printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
444                         IOC_AND_NETDEV_NAMES_s_s(dev));
445
446         if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
447                 printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
448                         " Notifications. This is a bad thing! We're going to "
449                         "go ahead anyway, but I'd be leery of system stability "
450                         "at this point.\n");
451         }
452
453         netif_start_queue(dev);
454         dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
455
456         return 0;
457 out_mpt_rxfidx:
458         kfree(priv->mpt_rxfidx);
459         priv->mpt_rxfidx = NULL;
460 out_SendCtl:
461         kfree(priv->SendCtl);
462         priv->SendCtl = NULL;
463 out_mpt_txfidx:
464         kfree(priv->mpt_txfidx);
465         priv->mpt_txfidx = NULL;
466 out:    return -ENOMEM;
467 }
468
469 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
470 /* Send a LanReset message to the FW. This should result in the FW returning
471    any buckets it still has. */
472 static int
473 mpt_lan_reset(struct net_device *dev)
474 {
475         MPT_FRAME_HDR *mf;
476         LANResetRequest_t *pResetReq;
477         struct mpt_lan_priv *priv = netdev_priv(dev);
478
479         mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);
480
481         if (mf == NULL) {
482 /*              dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
483                 "Unable to allocate a request frame.\n"));
484 */
485                 return -1;
486         }
487
488         pResetReq = (LANResetRequest_t *) mf;
489
490         pResetReq->Function     = MPI_FUNCTION_LAN_RESET;
491         pResetReq->ChainOffset  = 0;
492         pResetReq->Reserved     = 0;
493         pResetReq->PortNumber   = priv->pnum;
494         pResetReq->MsgFlags     = 0;
495         pResetReq->Reserved2    = 0;
496
497         mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);
498
499         return 0;
500 }
501
502 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
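/**
 *      mpt_lan_close - net_device stop handler.
 *      @dev: Pointer to the net_device
 *
 *      Deregisters from MPT events, resets the LAN port, waits up to two
 *      seconds for outstanding buckets to drain, then unmaps and frees all
 *      Tx/Rx buffers and context lists.  Always returns 0.
 */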
503 static int
504 mpt_lan_close(struct net_device *dev)
505 {
506         struct mpt_lan_priv *priv = netdev_priv(dev);
507         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
508         unsigned long timeout;
509         int i;
510
511         dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));
512
513         mpt_event_deregister(LanCtx);
514
515         dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
516                   "since driver was loaded, %d still out\n",
517                   priv->total_posted,atomic_read(&priv->buckets_out)));
518
519         netif_stop_queue(dev);
520
521         mpt_lan_reset(dev);
522
523         timeout = jiffies + 2 * HZ;
524         while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
525                 schedule_timeout_interruptible(1);
526
527         for (i = 0; i < priv->max_buckets_out; i++) {
528                 if (priv->RcvCtl[i].skb != NULL) {
529 /**/                    dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
530 /**/                              "is still out\n", i));
531                         pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
532                                          priv->RcvCtl[i].len,
533                                          PCI_DMA_FROMDEVICE);
534                         dev_kfree_skb(priv->RcvCtl[i].skb);
535                 }
536         }
537
538         kfree(priv->RcvCtl);
539         kfree(priv->mpt_rxfidx);
540
541         for (i = 0; i < priv->tx_max_out; i++) {
542                 if (priv->SendCtl[i].skb != NULL) {
543                         pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
544                                          priv->SendCtl[i].len,
545                                          PCI_DMA_TODEVICE);
546                         dev_kfree_skb(priv->SendCtl[i].skb);
547                 }
548         }
549
550         kfree(priv->SendCtl);
551         kfree(priv->mpt_txfidx);
552
553         atomic_set(&priv->buckets_out, 0);
554
555         printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
556                         IOC_AND_NETDEV_NAMES_s_s(dev));
557
558         return 0;
559 }
560
561 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
562 static struct net_device_stats *
563 mpt_lan_get_stats(struct net_device *dev)
564 {
565         struct mpt_lan_priv *priv = netdev_priv(dev);
566
567         return (struct net_device_stats *) &priv->stats;
568 }
569
570 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
571 static int
572 mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
573 {
574         if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
575                 return -EINVAL;
576         dev->mtu = new_mtu;
577         return 0;
578 }
579
580 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
581 /* Tx timeout handler. */
582 static void
583 mpt_lan_tx_timeout(struct net_device *dev)
584 {
585         struct mpt_lan_priv *priv = netdev_priv(dev);
586         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
587
588         if (mpt_dev->active) {
589                 dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
590                 netif_wake_queue(dev);
591         }
592 }
593
594 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
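/**
 *      mpt_lan_send_turbo - Complete a single Tx from a turbo reply.
 *      @dev: Pointer to the net_device
 *      @tmsg: Turbo reply message containing the Tx buffer context
 *
 *      Updates the Tx statistics, unmaps and frees the sent skb, returns
 *      the context to the free list and wakes the Tx queue.  Returns 0.
 */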
595 //static inline int
596 static int
597 mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
598 {
599         struct mpt_lan_priv *priv = netdev_priv(dev);
600         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
601         struct sk_buff *sent;
602         unsigned long flags;
603         u32 ctx;
604
605         ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
606         sent = priv->SendCtl[ctx].skb;
607
608         priv->stats.tx_packets++;
609         priv->stats.tx_bytes += sent->len;
610
611         dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
612                         IOC_AND_NETDEV_NAMES_s_s(dev),
613                         __func__, sent));
614
615         priv->SendCtl[ctx].skb = NULL;
616         pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
617                          priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
618         dev_kfree_skb_irq(sent);
619
620         spin_lock_irqsave(&priv->txfidx_lock, flags);
621         priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
622         spin_unlock_irqrestore(&priv->txfidx_lock, flags);
623
624         netif_wake_queue(dev);
625         return 0;
626 }
627
628 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
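/**
 *      mpt_lan_send_reply - Handle a non-turbo LANSend reply.
 *      @dev: Pointer to the net_device
 *      @pSendRep: Pointer to the LANSendReply frame
 *
 *      Checks IOCStatus, updates the Tx statistics, then unmaps and frees
 *      the skb for each returned buffer context.  Returns 1 if the original
 *      request frame should be freed (i.e. this is not a continuation reply).
 */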
629 static int
630 mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
631 {
632         struct mpt_lan_priv *priv = netdev_priv(dev);
633         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
634         struct sk_buff *sent;
635         unsigned long flags;
636         int FreeReqFrame = 0;
637         u32 *pContext;
638         u32 ctx;
639         u8 count;
640
641         count = pSendRep->NumberOfContexts;
642
643         dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
644                  le16_to_cpu(pSendRep->IOCStatus)));
645
646         /* Add check for Loginfo Flag in IOCStatus */
647
648         switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
649         case MPI_IOCSTATUS_SUCCESS:
650                 priv->stats.tx_packets += count;
651                 break;
652
653         case MPI_IOCSTATUS_LAN_CANCELED:
654         case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
655                 break;
656
657         case MPI_IOCSTATUS_INVALID_SGL:
658                 priv->stats.tx_errors += count;
659                 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
660                                 IOC_AND_NETDEV_NAMES_s_s(dev));
661                 goto out;
662
663         default:
664                 priv->stats.tx_errors += count;
665                 break;
666         }
667
668         pContext = &pSendRep->BufferContext;
669
670         spin_lock_irqsave(&priv->txfidx_lock, flags);
671         while (count > 0) {
672                 ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
673
674                 sent = priv->SendCtl[ctx].skb;
675                 priv->stats.tx_bytes += sent->len;
676
677                 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
678                                 IOC_AND_NETDEV_NAMES_s_s(dev),
679                                 __func__, sent));
680
681                 priv->SendCtl[ctx].skb = NULL;
682                 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
683                                  priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
684                 dev_kfree_skb_irq(sent);
685
686                 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
687
688                 pContext++;
689                 count--;
690         }
691         spin_unlock_irqrestore(&priv->txfidx_lock, flags);
692
693 out:
694         if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
695                 FreeReqFrame = 1;
696
697         netif_wake_queue(dev);
698         return FreeReqFrame;
699 }
700
701 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
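/**
 *      mpt_lan_sdu_send - hard_start_xmit handler.
 *      @skb: Outgoing packet
 *      @dev: Pointer to the net_device
 *
 *      Grabs a free Tx context and a request frame, DMA-maps the packet,
 *      builds a LANSend request (transaction element plus a single 64-bit
 *      simple SGE) and posts it to the IOC.  Returns 0 on success, or 1
 *      (busy) after stopping the queue when no context or frame is available.
 */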
702 static int
703 mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
704 {
705         struct mpt_lan_priv *priv = netdev_priv(dev);
706         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
707         MPT_FRAME_HDR *mf;
708         LANSendRequest_t *pSendReq;
709         SGETransaction32_t *pTrans;
710         SGESimple64_t *pSimple;
711         const unsigned char *mac;
712         dma_addr_t dma;
713         unsigned long flags;
714         int ctx;
715         u16 cur_naa = 0x1000;
716
717         dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
718                         __func__, skb));
719
720         spin_lock_irqsave(&priv->txfidx_lock, flags);
721         if (priv->mpt_txfidx_tail < 0) {
722                 netif_stop_queue(dev);
723                 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
724
725                 printk (KERN_ERR "%s: no tx context available: %d\n",
726                         __func__, priv->mpt_txfidx_tail);
727                 return 1;
728         }
729
730         mf = mpt_get_msg_frame(LanCtx, mpt_dev);
731         if (mf == NULL) {
732                 netif_stop_queue(dev);
733                 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
734
735                 printk (KERN_ERR "%s: Unable to alloc request frame\n",
736                         __func__);
737                 return 1;
738         }
739
740         ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
741         spin_unlock_irqrestore(&priv->txfidx_lock, flags);
742
743 //      dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
744 //                      IOC_AND_NETDEV_NAMES_s_s(dev)));
745
746         pSendReq = (LANSendRequest_t *) mf;
747
748         /* Set the mac.raw pointer, since this apparently isn't getting
749          * done before we get the skb. Pull the data pointer past the mac data.
750          */
751         skb_reset_mac_header(skb);
752         skb_pull(skb, 12);
753
754         dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
755                              PCI_DMA_TODEVICE);
756
757         priv->SendCtl[ctx].skb = skb;
758         priv->SendCtl[ctx].dma = dma;
759         priv->SendCtl[ctx].len = skb->len;
760
761         /* Message Header */
762         pSendReq->Reserved    = 0;
763         pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
764         pSendReq->ChainOffset = 0;
765         pSendReq->Reserved2   = 0;
766         pSendReq->MsgFlags    = 0;
767         pSendReq->PortNumber  = priv->pnum;
768
769         /* Transaction Context Element */
770         pTrans = (SGETransaction32_t *) pSendReq->SG_List;
771
772         /* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
773         pTrans->ContextSize   = sizeof(u32);
774         pTrans->DetailsLength = 2 * sizeof(u32);
775         pTrans->Flags         = 0;
776         pTrans->TransactionContext[0] = cpu_to_le32(ctx);
777
778 //      dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
779 //                      IOC_AND_NETDEV_NAMES_s_s(dev),
780 //                      ctx, skb, skb->data));
781
782         mac = skb_mac_header(skb);
783 #ifdef QLOGIC_NAA_WORKAROUND
784 {
785         struct NAA_Hosed *nh;
786
787         /* Munge the NAA for Tx packets to QLogic boards, which don't follow
788            RFC 2625. The longer I look at this, the more my opinion of Qlogic
789            drops. */
790         read_lock_irq(&bad_naa_lock);
791         for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
792                 if ((nh->ieee[0] == mac[0]) &&
793                     (nh->ieee[1] == mac[1]) &&
794                     (nh->ieee[2] == mac[2]) &&
795                     (nh->ieee[3] == mac[3]) &&
796                     (nh->ieee[4] == mac[4]) &&
797                     (nh->ieee[5] == mac[5])) {
798                         cur_naa = nh->NAA;
799                         dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
800                                   "= %04x.\n", cur_naa));
801                         break;
802                 }
803         }
804         read_unlock_irq(&bad_naa_lock);
805 }
806 #endif
807
808         pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
809                                                     (mac[0] <<  8) |
810                                                     (mac[1] <<  0));
811         pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
812                                                     (mac[3] << 16) |
813                                                     (mac[4] <<  8) |
814                                                     (mac[5] <<  0));
815
816         pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
817
818         /* If we ever decide to send more than one Simple SGE per LANSend, then
819            we will need to make sure that LAST_ELEMENT only gets set on the
820            last one. Otherwise, bad voodoo and evil funkiness will commence. */
821         pSimple->FlagsLength = cpu_to_le32(
822                         ((MPI_SGE_FLAGS_LAST_ELEMENT |
823                           MPI_SGE_FLAGS_END_OF_BUFFER |
824                           MPI_SGE_FLAGS_SIMPLE_ELEMENT |
825                           MPI_SGE_FLAGS_SYSTEM_ADDRESS |
826                           MPI_SGE_FLAGS_HOST_TO_IOC |
827                           MPI_SGE_FLAGS_64_BIT_ADDRESSING |
828                           MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
829                         skb->len);
830         pSimple->Address.Low = cpu_to_le32((u32) dma);
831         if (sizeof(dma_addr_t) > sizeof(u32))
832                 pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
833         else
834                 pSimple->Address.High = 0;
835
836         mpt_put_msg_frame (LanCtx, mpt_dev, mf);
837         dev->trans_start = jiffies;
838
839         dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
840                         IOC_AND_NETDEV_NAMES_s_s(dev),
841                         le32_to_cpu(pSimple->FlagsLength)));
842
843         return 0;
844 }
845
846 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
847 static void
848 mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
849 /*
850  * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
851  */
852 {
853         struct mpt_lan_priv *priv = netdev_priv(dev);
854         
855         if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
856                 if (priority) {
857                         schedule_delayed_work(&priv->post_buckets_task, 0);
858                 } else {
859                         schedule_delayed_work(&priv->post_buckets_task, 1);
860                         dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
861                                    "timer.\n"));
862                 }
863                 dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
864                            IOC_AND_NETDEV_NAMES_s_s(dev) ));
865         }
866 }
867
868 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
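/**
 *      mpt_lan_receive_skb - Pass a received packet up the stack.
 *      @dev: Pointer to the net_device
 *      @skb: Received packet
 *
 *      Sets the protocol, updates the Rx statistics, hands the skb to
 *      netif_rx() and, if the number of outstanding buckets has dropped
 *      below the threshold, schedules the bucket-posting task.  Returns 0.
 */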
869 static int
870 mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
871 {
872         struct mpt_lan_priv *priv = netdev_priv(dev);
873
874         skb->protocol = mpt_lan_type_trans(skb, dev);
875
876         dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
877                  "delivered to upper level.\n",
878                         IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
879
880         priv->stats.rx_bytes += skb->len;
881         priv->stats.rx_packets++;
882
883         skb->dev = dev;
884         netif_rx(skb);
885
886         dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
887                  atomic_read(&priv->buckets_out)));
888
889         if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
890                 mpt_lan_wake_post_buckets_task(dev, 1);
891
892         dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
893                   "remaining, %d received back since sod\n",
894                   atomic_read(&priv->buckets_out), priv->total_received));
895
896         return 0;
897 }
898
899 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
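/**
 *      mpt_lan_receive_post_turbo - Handle a single-bucket turbo receive.
 *      @dev: Pointer to the net_device
 *      @tmsg: Turbo reply message containing the bucket context and length
 *
 *      For short packets (below MPT_LAN_RX_COPYBREAK) the data is copied
 *      into a fresh skb and the bucket stays mapped for reuse; otherwise the
 *      bucket skb itself is unmapped and passed up.  The context is returned
 *      to the free list and the packet is delivered via mpt_lan_receive_skb().
 */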
900 //static inline int
901 static int
902 mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
903 {
904         struct mpt_lan_priv *priv = netdev_priv(dev);
905         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
906         struct sk_buff *skb, *old_skb;
907         unsigned long flags;
908         u32 ctx, len;
909
910         ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
911         skb = priv->RcvCtl[ctx].skb;
912
913         len = GET_LAN_PACKET_LENGTH(tmsg);
914
915         if (len < MPT_LAN_RX_COPYBREAK) {
916                 old_skb = skb;
917
918                 skb = (struct sk_buff *)dev_alloc_skb(len);
919                 if (!skb) {
920                         printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
921                                         IOC_AND_NETDEV_NAMES_s_s(dev),
922                                         __FILE__, __LINE__);
923                         return -ENOMEM;
924                 }
925
926                 pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
927                                             priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
928
929                 skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
930
931                 pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
932                                                priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
933                 goto out;
934         }
935
936         skb_put(skb, len);
937
938         priv->RcvCtl[ctx].skb = NULL;
939
940         pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
941                          priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
942
943 out:
944         spin_lock_irqsave(&priv->rxfidx_lock, flags);
945         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
946         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
947
948         atomic_dec(&priv->buckets_out);
949         priv->total_received++;
950
951         return mpt_lan_receive_skb(dev, skb);
952 }
953
954 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
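/**
 *      mpt_lan_receive_post_free - Free buckets returned by a canceled post.
 *      @dev: Pointer to the net_device
 *      @pRecvRep: Pointer to the LANReceivePostReply frame
 *
 *      Called when the IOC reports MPI_IOCSTATUS_LAN_CANCELED: unmaps and
 *      frees every returned bucket skb and puts the contexts back on the
 *      free list.  Returns 0.
 */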
955 static int
956 mpt_lan_receive_post_free(struct net_device *dev,
957                           LANReceivePostReply_t *pRecvRep)
958 {
959         struct mpt_lan_priv *priv = netdev_priv(dev);
960         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
961         unsigned long flags;
962         struct sk_buff *skb;
963         u32 ctx;
964         int count;
965         int i;
966
967         count = pRecvRep->NumberOfContexts;
968
969 /**/    dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
970                   "IOC returned %d buckets, freeing them...\n", count));
971
972         spin_lock_irqsave(&priv->rxfidx_lock, flags);
973         for (i = 0; i < count; i++) {
974                 ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
975
976                 skb = priv->RcvCtl[ctx].skb;
977
978 //              dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
979 //                              IOC_AND_NETDEV_NAMES_s_s(dev)));
980 //              dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
981 //                              priv, &(priv->buckets_out)));
982 //              dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
983
984                 priv->RcvCtl[ctx].skb = NULL;
985                 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
986                                  priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
987                 dev_kfree_skb_any(skb);
988
989                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
990         }
991         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
992
993         atomic_sub(count, &priv->buckets_out);
994
995 //      for (i = 0; i < priv->max_buckets_out; i++)
996 //              if (priv->RcvCtl[i].skb != NULL)
997 //                      dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
998 //                                "is still out\n", i));
999
1000 /*      dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
1001                   count));
1002 */
1003 /**/    dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
1004 /**/              "remaining, %d received back since sod.\n",
1005 /**/              atomic_read(&priv->buckets_out), priv->total_received));
1006         return 0;
1007 }
1008
1009 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
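/**
 *      mpt_lan_receive_post_reply - Handle a non-turbo receive reply.
 *      @dev: Pointer to the net_device
 *      @pRecvRep: Pointer to the LANReceivePostReply frame
 *
 *      Reassembles a packet that may span several buckets (copying short or
 *      multi-bucket packets into a new skb), recycles the bucket contexts,
 *      and issues a LanReset if the driver's and firmware's bucket counts
 *      have drifted too far apart.  Delivers the packet via
 *      mpt_lan_receive_skb().
 */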
1010 static int
1011 mpt_lan_receive_post_reply(struct net_device *dev,
1012                            LANReceivePostReply_t *pRecvRep)
1013 {
1014         struct mpt_lan_priv *priv = netdev_priv(dev);
1015         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1016         struct sk_buff *skb, *old_skb;
1017         unsigned long flags;
1018         u32 len, ctx, offset;
1019         u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
1020         int count;
1021         int i, l;
1022
1023         dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
1024         dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
1025                  le16_to_cpu(pRecvRep->IOCStatus)));
1026
1027         if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
1028                                                 MPI_IOCSTATUS_LAN_CANCELED)
1029                 return mpt_lan_receive_post_free(dev, pRecvRep);
1030
1031         len = le32_to_cpu(pRecvRep->PacketLength);
1032         if (len == 0) {
1033                 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
1034                         "ReceivePostReply w/ PacketLength zero!\n",
1035                                 IOC_AND_NETDEV_NAMES_s_s(dev));
1036                 printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
1037                                 pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
1038                 return -1;
1039         }
1040
1041         ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
1042         count  = pRecvRep->NumberOfContexts;
1043         skb    = priv->RcvCtl[ctx].skb;
1044
1045         offset = le32_to_cpu(pRecvRep->PacketOffset);
1046 //      if (offset != 0) {
1047 //              printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
1048 //                      "w/ PacketOffset %u\n",
1049 //                              IOC_AND_NETDEV_NAMES_s_s(dev),
1050 //                              offset);
1051 //      }
1052
1053         dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
1054                         IOC_AND_NETDEV_NAMES_s_s(dev),
1055                         offset, len));
1056
1057         if (count > 1) {
1058                 int szrem = len;
1059
1060 //              dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
1061 //                      "for single packet, concatenating...\n",
1062 //                              IOC_AND_NETDEV_NAMES_s_s(dev)));
1063
1064                 skb = (struct sk_buff *)dev_alloc_skb(len);
1065                 if (!skb) {
1066                         printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1067                                         IOC_AND_NETDEV_NAMES_s_s(dev),
1068                                         __FILE__, __LINE__);
1069                         return -ENOMEM;
1070                 }
1071
1072                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1073                 for (i = 0; i < count; i++) {
1074
1075                         ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
1076                         old_skb = priv->RcvCtl[ctx].skb;
1077
1078                         l = priv->RcvCtl[ctx].len;
1079                         if (szrem < l)
1080                                 l = szrem;
1081
1082 //                      dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
1083 //                                      IOC_AND_NETDEV_NAMES_s_s(dev),
1084 //                                      i, l));
1085
1086                         pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1087                                                     priv->RcvCtl[ctx].dma,
1088                                                     priv->RcvCtl[ctx].len,
1089                                                     PCI_DMA_FROMDEVICE);
1090                         skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);
1091
1092                         pci_dma_sync_single_for_device(mpt_dev->pcidev,
1093                                                        priv->RcvCtl[ctx].dma,
1094                                                        priv->RcvCtl[ctx].len,
1095                                                        PCI_DMA_FROMDEVICE);
1096
1097                         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1098                         szrem -= l;
1099                 }
1100                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1101
1102         } else if (len < MPT_LAN_RX_COPYBREAK) {
1103
1104                 old_skb = skb;
1105
1106                 skb = (struct sk_buff *)dev_alloc_skb(len);
1107                 if (!skb) {
1108                         printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1109                                         IOC_AND_NETDEV_NAMES_s_s(dev),
1110                                         __FILE__, __LINE__);
1111                         return -ENOMEM;
1112                 }
1113
1114                 pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1115                                             priv->RcvCtl[ctx].dma,
1116                                             priv->RcvCtl[ctx].len,
1117                                             PCI_DMA_FROMDEVICE);
1118
1119                 skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
1120
1121                 pci_dma_sync_single_for_device(mpt_dev->pcidev,
1122                                                priv->RcvCtl[ctx].dma,
1123                                                priv->RcvCtl[ctx].len,
1124                                                PCI_DMA_FROMDEVICE);
1125
1126                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1127                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1128                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1129
1130         } else {
1131                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1132
1133                 priv->RcvCtl[ctx].skb = NULL;
1134
1135                 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1136                                  priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1137                 priv->RcvCtl[ctx].dma = 0;
1138
1139                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1140                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1141
1142                 skb_put(skb,len);
1143         }
1144
1145         atomic_sub(count, &priv->buckets_out);
1146         priv->total_received += count;
1147
1148         if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
1149                 printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
1150                         "MPT_LAN_MAX_BUCKETS_OUT = %d\n",
1151                                 IOC_AND_NETDEV_NAMES_s_s(dev),
1152                                 priv->mpt_rxfidx_tail,
1153                                 MPT_LAN_MAX_BUCKETS_OUT);
1154
1155                 return -1;
1156         }
1157
1158         if (remaining == 0)
1159                 printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
1160                         "(priv->buckets_out = %d)\n",
1161                         IOC_AND_NETDEV_NAMES_s_s(dev),
1162                         atomic_read(&priv->buckets_out));
1163         else if (remaining < 10)
1164                 printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
1165                         "(priv->buckets_out = %d)\n",
1166                         IOC_AND_NETDEV_NAMES_s_s(dev),
1167                         remaining, atomic_read(&priv->buckets_out));
1168         
1169         if ((remaining < priv->bucketthresh) &&
1170             ((atomic_read(&priv->buckets_out) - remaining) >
1171              MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
1172                 
1173                 printk (KERN_WARNING MYNAM " Mismatch between driver's "
1174                         "buckets_out count and fw's BucketsRemaining "
1175                         "count has crossed the threshold, issuing a "
1176                         "LanReset to clear the fw's hashtable. You may "
1177                         "want to check your /var/log/messages for \"CRC "
1178                         "error\" event notifications.\n");
1179                 
1180                 mpt_lan_reset(dev);
1181                 mpt_lan_wake_post_buckets_task(dev, 0);
1182         }
1183         
1184         return mpt_lan_receive_skb(dev, skb);
1185 }
1186
1187 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1188 /* Simple SGE's only at the moment */
1189
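/**
 *      mpt_lan_post_receive_buckets - Post receive buckets to the IOC.
 *      @priv: Pointer to the mpt_lan_priv structure
 *
 *      Tops the IOC back up to max_buckets_out receive buckets.  For each
 *      batch it allocates (or reuses) bucket skbs, DMA-maps them, chains
 *      them into a LANReceivePost request as 64-bit simple SGEs and posts
 *      the frame.  Clears post_buckets_active when done.
 */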
1190 static void
1191 mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
1192 {
1193         struct net_device *dev = priv->dev;
1194         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1195         MPT_FRAME_HDR *mf;
1196         LANReceivePostRequest_t *pRecvReq;
1197         SGETransaction32_t *pTrans;
1198         SGESimple64_t *pSimple;
1199         struct sk_buff *skb;
1200         dma_addr_t dma;
1201         u32 curr, buckets, count, max;
1202         u32 len = (dev->mtu + dev->hard_header_len + 4);
1203         unsigned long flags;
1204         int i;
1205
1206         curr = atomic_read(&priv->buckets_out);
1207         buckets = (priv->max_buckets_out - curr);
1208
1209         dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
1210                         IOC_AND_NETDEV_NAMES_s_s(dev),
1211                         __func__, buckets, curr));
1212
1213         max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
1214                         (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
1215
1216         while (buckets) {
1217                 mf = mpt_get_msg_frame(LanCtx, mpt_dev);
1218                 if (mf == NULL) {
1219                         printk (KERN_ERR "%s: Unable to alloc request frame\n",
1220                                 __func__);
1221                         dioprintk((KERN_ERR "%s: %u buckets remaining\n",
1222                                  __func__, buckets));
1223                         goto out;
1224                 }
1225                 pRecvReq = (LANReceivePostRequest_t *) mf;
1226
1227                 i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
1228                 mpt_dev->RequestNB[i] = 0;
1229                 count = buckets;
1230                 if (count > max)
1231                         count = max;
1232
1233                 pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
1234                 pRecvReq->ChainOffset = 0;
1235                 pRecvReq->MsgFlags    = 0;
1236                 pRecvReq->PortNumber  = priv->pnum;
1237
1238                 pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
1239                 pSimple = NULL;
1240
1241                 for (i = 0; i < count; i++) {
1242                         int ctx;
1243
1244                         spin_lock_irqsave(&priv->rxfidx_lock, flags);
1245                         if (priv->mpt_rxfidx_tail < 0) {
1246                                 printk (KERN_ERR "%s: Can't alloc context\n",
1247                                         __func__);
1248                                 spin_unlock_irqrestore(&priv->rxfidx_lock,
1249                                                        flags);
1250                                 break;
1251                         }
1252
1253                         ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];
1254
1255                         skb = priv->RcvCtl[ctx].skb;
1256                         if (skb && (priv->RcvCtl[ctx].len != len)) {
1257                                 pci_unmap_single(mpt_dev->pcidev,
1258                                                  priv->RcvCtl[ctx].dma,
1259                                                  priv->RcvCtl[ctx].len,
1260                                                  PCI_DMA_FROMDEVICE);
1261                                 dev_kfree_skb(priv->RcvCtl[ctx].skb);
1262                                 skb = priv->RcvCtl[ctx].skb = NULL;
1263                         }
1264
1265                         if (skb == NULL) {
1266                                 skb = dev_alloc_skb(len);
1267                                 if (skb == NULL) {
1268                                         printk (KERN_WARNING
1269                                                 MYNAM "/%s: Can't alloc skb\n",
1270                                                 __func__);
1271                                         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1272                                         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1273                                         break;
1274                                 }
1275
1276                                 dma = pci_map_single(mpt_dev->pcidev, skb->data,
1277                                                      len, PCI_DMA_FROMDEVICE);
1278
1279                                 priv->RcvCtl[ctx].skb = skb;
1280                                 priv->RcvCtl[ctx].dma = dma;
1281                                 priv->RcvCtl[ctx].len = len;
1282                         }
1283
1284                         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1285
1286                         pTrans->ContextSize   = sizeof(u32);
1287                         pTrans->DetailsLength = 0;
1288                         pTrans->Flags         = 0;
1289                         pTrans->TransactionContext[0] = cpu_to_le32(ctx);
1290
1291                         pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
1292
1293                         pSimple->FlagsLength = cpu_to_le32(
1294                                 ((MPI_SGE_FLAGS_END_OF_BUFFER |
1295                                   MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1296                                   MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
1297                         pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
1298                         if (sizeof(dma_addr_t) > sizeof(u32))
1299                                 pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
1300                         else
1301                                 pSimple->Address.High = 0;
1302
1303                         pTrans = (SGETransaction32_t *) (pSimple + 1);
1304                 }
1305
1306                 if (pSimple == NULL) {
1307 /**/                    printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
1308 /**/                            __func__);
1309                         mpt_free_msg_frame(mpt_dev, mf);
1310                         goto out;
1311                 }
1312
1313                 pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);
1314
1315                 pRecvReq->BucketCount = cpu_to_le32(i);
1316
1317 /*      printk(KERN_INFO MYNAM ": posting buckets\n   ");
1318  *      for (i = 0; i < j + 2; i ++)
1319  *          printk (" %08x", le32_to_cpu(msg[i]));
1320  *      printk ("\n");
1321  */
1322
1323                 mpt_put_msg_frame(LanCtx, mpt_dev, mf);
1324
1325                 priv->total_posted += i;
1326                 buckets -= i;
1327                 atomic_add(i, &priv->buckets_out);
1328         }
1329
1330 out:
1331         dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
1332                   __func__, buckets, atomic_read(&priv->buckets_out)));
1333         dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
1334         __func__, priv->total_posted, priv->total_received));
1335
1336         clear_bit(0, &priv->post_buckets_active);
1337 }
1338
1339 static void
1340 mpt_lan_post_receive_buckets_work(struct work_struct *work)
1341 {
1342         mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
1343                                                   post_buckets_task.work));
1344 }
1345
1346 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
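/**
 *      mpt_register_lan_device - Allocate and register one LAN net_device.
 *      @mpt_dev: Pointer to MPT_ADAPTER structure
 *      @pnum: IOC port number to bind the interface to
 *
 *      Allocates an FC net_device, initializes the private data (bucket
 *      limits, locks, delayed work), derives the MAC address from the
 *      pre-fetched LANPage1 data and registers the interface.  Returns the
 *      net_device, or NULL on failure.
 */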
1347 static struct net_device *
1348 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
1349 {
1350         struct net_device *dev;
1351         struct mpt_lan_priv *priv;
1352         u8 HWaddr[FC_ALEN], *a;
1353
1354         dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
1355         if (!dev)
1356                 return NULL;
1357
1358         dev->mtu = MPT_LAN_MTU;
1359
1360         priv = netdev_priv(dev);
1361
1362         priv->dev = dev;
1363         priv->mpt_dev = mpt_dev;
1364         priv->pnum = pnum;
1365
1366         INIT_DELAYED_WORK(&priv->post_buckets_task,
1367                           mpt_lan_post_receive_buckets_work);
1368         priv->post_buckets_active = 0;
1369
1370         dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
1371                         __LINE__, dev->mtu + dev->hard_header_len + 4));
1372
1373         atomic_set(&priv->buckets_out, 0);
1374         priv->total_posted = 0;
1375         priv->total_received = 0;
1376         priv->max_buckets_out = max_buckets_out;
1377         if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
1378                 priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
1379
1380         dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
1381                         __LINE__,
1382                         mpt_dev->pfacts[0].MaxLanBuckets,
1383                         max_buckets_out,
1384                         priv->max_buckets_out));
1385
1386         priv->bucketthresh = priv->max_buckets_out * 2 / 3;
1387         spin_lock_init(&priv->txfidx_lock);
1388         spin_lock_init(&priv->rxfidx_lock);
1389
1390         /*  Grab pre-fetched LANPage1 stuff and reverse the byte order
1391          *  to build the hardware address. :-) */
1391         a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
1392
1393         HWaddr[0] = a[5];
1394         HWaddr[1] = a[4];
1395         HWaddr[2] = a[3];
1396         HWaddr[3] = a[2];
1397         HWaddr[4] = a[1];
1398         HWaddr[5] = a[0];
1399
1400         dev->addr_len = FC_ALEN;
1401         memcpy(dev->dev_addr, HWaddr, FC_ALEN);
1402         memset(dev->broadcast, 0xff, FC_ALEN);
1403
1404         /* The Tx queue is 127 deep on the 909.
1405          * Give ourselves some breathing room.
1406          */
1407         priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
1408                             tx_max_out_p : MPT_TX_MAX_OUT_LIM;
1409
1410         dev->open = mpt_lan_open;
1411         dev->stop = mpt_lan_close;
1412         dev->get_stats = mpt_lan_get_stats;
1413         dev->set_multicast_list = NULL;
1414         dev->change_mtu = mpt_lan_change_mtu;
1415         dev->hard_start_xmit = mpt_lan_sdu_send;
1416
1417 /* Not in 2.3.42. Need 2.3.45+ */
1418         dev->tx_timeout = mpt_lan_tx_timeout;
1419         dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
1420
1421         dlprintk((KERN_INFO MYNAM ": Finished registering dev "
1422                 "and setting initial values\n"));
1423
1424         if (register_netdev(dev) != 0) {
1425                 free_netdev(dev);
1426                 dev = NULL;
1427         }
1428         return dev;
1429 }
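/*
 * Note: this version of the driver predates struct net_device_ops
 * (2.6.29+).  On later kernels the method assignments above would be
 * expressed roughly as the illustrative sketch below (struct name is
 * hypothetical, not part of this file):
 *
 *      static const struct net_device_ops mpt_lan_netdev_ops = {
 *              .ndo_open       = mpt_lan_open,
 *              .ndo_stop       = mpt_lan_close,
 *              .ndo_get_stats  = mpt_lan_get_stats,
 *              .ndo_change_mtu = mpt_lan_change_mtu,
 *              .ndo_start_xmit = mpt_lan_sdu_send,
 *              .ndo_tx_timeout = mpt_lan_tx_timeout,
 *      };
 *      ...
 *      dev->netdev_ops = &mpt_lan_netdev_ops;
 */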
1430
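/*
 * PCI-level probe: walk the IOC's ports, skip any that do not advertise
 * the LAN protocol, and register the first suitable port as a LAN device
 * (stored in ioc->netdev).  Returns -ENODEV if no port could be registered.
 */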
1431 static int
1432 mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1433 {
1434         MPT_ADAPTER             *ioc = pci_get_drvdata(pdev);
1435         struct net_device       *dev;
1436         int                     i;
1437
1438         for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
1439                 printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
1440                        "ProtocolFlags=%02Xh (%c%c%c%c)\n",
1441                        ioc->name, ioc->pfacts[i].PortNumber,
1442                        ioc->pfacts[i].ProtocolFlags,
1443                        MPT_PROTOCOL_FLAGS_c_c_c_c(
1444                                ioc->pfacts[i].ProtocolFlags));
1445
1446                 if (!(ioc->pfacts[i].ProtocolFlags &
1447                                         MPI_PORTFACTS_PROTOCOL_LAN)) {
1448                         printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
1449                                "seems to be disabled on this adapter port!\n",
1450                                ioc->name);
1451                         continue;
1452                 }
1453
1454                 dev = mpt_register_lan_device(ioc, i);
1455                 if (!dev) {
1456                         printk(KERN_ERR MYNAM ": %s: Unable to register "
1457                                "port%d as a LAN device\n", ioc->name,
1458                                ioc->pfacts[i].PortNumber);
1459                         continue;
1460                 }
1461
1462                 printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
1463                        "registered as '%s'\n", ioc->name, dev->name);
1464                 printk(KERN_INFO MYNAM ": %s/%s: "
1465                        "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
1466                        IOC_AND_NETDEV_NAMES_s_s(dev),
1467                        dev->dev_addr[0], dev->dev_addr[1],
1468                        dev->dev_addr[2], dev->dev_addr[3],
1469                        dev->dev_addr[4], dev->dev_addr[5]);
1470
1471                 ioc->netdev = dev;
1472
1473                 return 0;
1474         }
1475
1476         return -ENODEV;
1477 }
1478
1479 static void
1480 mptlan_remove(struct pci_dev *pdev)
1481 {
1482         MPT_ADAPTER             *ioc = pci_get_drvdata(pdev);
1483         struct net_device       *dev = ioc->netdev;
1484
1485         if (dev != NULL) {
1486                 unregister_netdev(dev);
1487                 free_netdev(dev);
1488         }
1489 }
1490
1491 static struct mpt_pci_driver mptlan_driver = {
1492         .probe          = mptlan_probe,
1493         .remove         = mptlan_remove,
1494 };
1495
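/*
 * Module init: obtain a protocol-driver context from mptbase for the LAN
 * reply handler, register for IOC reset notifications, then hook the PCI
 * probe/remove callbacks via mpt_device_driver_register().
 */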
1496 static int __init mpt_lan_init (void)
1497 {
1498         show_mptmod_ver(LANAME, LANVER);
1499
1500         if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
1501                 printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1502                 return -EBUSY;
1503         }
1504
1505         dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1506
1507         if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
1508                 printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
1509                        "handler with mptbase! The world is at an end! "
1510                        "Everything is fading to black! Goodbye.\n");
1511                 return -EBUSY;
1512         }
1513
1514         dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1515
1516         mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER);
1517         return 0;
1518 }
1519
1520 static void __exit mpt_lan_exit(void)
1521 {
1522         mpt_device_driver_deregister(MPTLAN_DRIVER);
1523         mpt_reset_deregister(LanCtx);
1524
1525         if (LanCtx) {
1526                 mpt_deregister(LanCtx);
1527                 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
1528         }
1529 }
1530
1531 module_init(mpt_lan_init);
1532 module_exit(mpt_lan_exit);
1533
1534 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
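/*
 * Analogue of eth_type_trans() for MPT LAN frames: strip the mpt_lan_ohdr,
 * classify the packet (broadcast/multicast/otherhost/host) from the
 * destination address, and for IP and ARP frames strip the LLC/SNAP header
 * and return the encapsulated ethertype; everything else is handed up as
 * ETH_P_802_2.
 */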
1535 static unsigned short
1536 mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1537 {
1538         struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
1539         struct fcllc *fcllc;
1540
1541         skb_reset_mac_header(skb);
1542         skb_pull(skb, sizeof(struct mpt_lan_ohdr));
1543
1544         if (fch->dtype == htons(0xffff)) {
1545                 u32 *p = (u32 *) fch;
1546
1547                 swab32s(p + 0);
1548                 swab32s(p + 1);
1549                 swab32s(p + 2);
1550                 swab32s(p + 3);
1551
1552                 printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
1553                                 NETDEV_PTR_TO_IOC_NAME_s(dev));
1554                 printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
1555                                 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1556                                 fch->saddr[3], fch->saddr[4], fch->saddr[5]);
1557         }
1558
1559         if (*fch->daddr & 1) {
1560                 if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
1561                         skb->pkt_type = PACKET_BROADCAST;
1562                 } else {
1563                         skb->pkt_type = PACKET_MULTICAST;
1564                 }
1565         } else {
1566                 if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
1567                         skb->pkt_type = PACKET_OTHERHOST;
1568                 } else {
1569                         skb->pkt_type = PACKET_HOST;
1570                 }
1571         }
1572
1573         fcllc = (struct fcllc *)skb->data;
1574
1575 #ifdef QLOGIC_NAA_WORKAROUND
1576 {
1577         u16 source_naa = fch->stype, found = 0;
1578
1579         /* Workaround for QLogic not following RFC 2625 in regards to the NAA
1580            value. */
1581
1582         if ((source_naa & 0xF000) == 0)
1583                 source_naa = swab16(source_naa);
1584
1585         if (fcllc->ethertype == htons(ETH_P_ARP))
1586             dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
1587                       "%04x.\n", source_naa));
1588
1589         if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
1590            ((source_naa >> 12) !=  MPT_LAN_NAA_RFC2625)){
1591                 struct NAA_Hosed *nh, *prevnh;
1592                 int i;
1593
1594                 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
1595                           "system with non-RFC 2625 NAA value (%04x).\n",
1596                           source_naa));
1597
1598                 write_lock_irq(&bad_naa_lock);
1599                 for (prevnh = nh = mpt_bad_naa; nh != NULL;
1600                      prevnh=nh, nh=nh->next) {
1601                         if ((nh->ieee[0] == fch->saddr[0]) &&
1602                             (nh->ieee[1] == fch->saddr[1]) &&
1603                             (nh->ieee[2] == fch->saddr[2]) &&
1604                             (nh->ieee[3] == fch->saddr[3]) &&
1605                             (nh->ieee[4] == fch->saddr[4]) &&
1606                             (nh->ieee[5] == fch->saddr[5])) {
1607                                 found = 1;
1608                                 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
1609                                          "q/Rep w/ bad NAA from system already"
1610                                          " in DB.\n"));
1611                                 break;
1612                         }
1613                 }
1614
1615                 if ((!found) && (nh == NULL)) {
1616                         /* bad_naa_lock is held with interrupts off, so this allocation must not sleep */
1617                         nh = kmalloc(sizeof(struct NAA_Hosed), GFP_ATOMIC);
1618                         dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
1619                                  " bad NAA from system not yet in DB.\n"));
1620
1621                         if (nh != NULL) {
1622                                 nh->next = NULL;
1623                                 if (!mpt_bad_naa)
1624                                         mpt_bad_naa = nh;
1625                                 if (prevnh)
1626                                         prevnh->next = nh;
1627
1628                                 nh->NAA = source_naa; /* Set the S_NAA value. */
1629                                 for (i = 0; i < FC_ALEN; i++)
1630                                         nh->ieee[i] = fch->saddr[i];
1631                                 dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
1632                                           "%02x:%02x with non-compliant S_NAA value.\n",
1633                                           fch->saddr[0], fch->saddr[1], fch->saddr[2],
1634                                           fch->saddr[3], fch->saddr[4],fch->saddr[5]));
1635                         } else {
1636                                 printk (KERN_ERR "mptlan/type_trans: Unable to"
1637                                         " kmalloc a NAA_Hosed struct.\n");
1638                         }
1639                 } else if (!found) {
1640                         printk (KERN_ERR "mptlan/type_trans: found not"
1641                                 " set, but nh isn't null. Evil "
1642                                 "funkiness abounds.\n");
1643                 }
1644                 write_unlock_irq(&bad_naa_lock);
1645         }
1646 }
1647 #endif
1648
1649         /* Strip the SNAP header from ARP packets since we don't
1650          * pass them through to the 802.2/SNAP layers.
1651          */
1652         if (fcllc->dsap == EXTENDED_SAP &&
1653                 (fcllc->ethertype == htons(ETH_P_IP) ||
1654                  fcllc->ethertype == htons(ETH_P_ARP))) {
1655                 skb_pull(skb, sizeof(struct fcllc));
1656                 return fcllc->ethertype;
1657         }
1658
1659         return htons(ETH_P_802_2);
1660 }
1661
1662 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/