1 /*
2  *  linux/drivers/message/fusion/mptlan.c
3  *      IP Over Fibre Channel device driver.
4  *      For use with LSI Logic Fibre Channel PCI chip/adapters
5  *      running LSI Logic Fusion MPT (Message Passing Technology) firmware.
6  *
7  *  Copyright (c) 2000-2005 LSI Logic Corporation
8  *
9  */
10 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
11 /*
12     This program is free software; you can redistribute it and/or modify
13     it under the terms of the GNU General Public License as published by
14     the Free Software Foundation; version 2 of the License.
15
16     This program is distributed in the hope that it will be useful,
17     but WITHOUT ANY WARRANTY; without even the implied warranty of
18     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19     GNU General Public License for more details.
20
21     NO WARRANTY
22     THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
23     CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
24     LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
25     MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
26     solely responsible for determining the appropriateness of using and
27     distributing the Program and assumes all risks associated with its
28     exercise of rights under this Agreement, including but not limited to
29     the risks and costs of program errors, damage to or loss of data,
30     programs or equipment, and unavailability or interruption of operations.
31
32     DISCLAIMER OF LIABILITY
33     NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
34     DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35     DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
36     ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
37     TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
38     USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
39     HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
40
41     You should have received a copy of the GNU General Public License
42     along with this program; if not, write to the Free Software
43     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
44 */
45
46 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
47 /*
48  * Define statements used for debugging
49  */
50 //#define MPT_LAN_IO_DEBUG
51
52 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
53
54 #include "mptlan.h"
55 #include <linux/init.h>
56 #include <linux/module.h>
57 #include <linux/fs.h>
58
59 #define MYNAM           "mptlan"
60
61 MODULE_LICENSE("GPL");
62
63 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
64 /*
65  * MPT LAN message sizes without variable part.
66  */
67 #define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
68         (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
69
70 #define MPT_LAN_TRANSACTION32_SIZE \
71         (sizeof(SGETransaction32_t) - sizeof(u32))
72
73 /*
74  *  Fusion MPT LAN private structures
75  */
76
77 struct NAA_Hosed {
78         u16 NAA;
79         u8 ieee[FC_ALEN];
80         struct NAA_Hosed *next;
81 };
82
83 struct BufferControl {
84         struct sk_buff  *skb;
85         dma_addr_t      dma;
86         unsigned int    len;
87 };
88
89 struct mpt_lan_priv {
90         MPT_ADAPTER *mpt_dev;
91         u8 pnum; /* Port number in the IOC. This is not a Unix network port! */
92
93         atomic_t buckets_out;           /* number of unused buckets on IOC */
94         int bucketthresh;               /* Send more when this many left */
95
96         int *mpt_txfidx; /* Free Tx Context list */
97         int mpt_txfidx_tail;
98         spinlock_t txfidx_lock;
99
100         int *mpt_rxfidx; /* Free Rx Context list */
101         int mpt_rxfidx_tail;
102         spinlock_t rxfidx_lock;
103
104         struct BufferControl *RcvCtl;   /* Receive BufferControl structs */
105         struct BufferControl *SendCtl;  /* Send BufferControl structs */
106
107         int max_buckets_out;            /* Max buckets to send to IOC */
108         int tx_max_out;                 /* IOC's Tx queue len */
109
110         u32 total_posted;
111         u32 total_received;
112         struct net_device_stats stats;  /* Per device statistics */
113
114         struct work_struct post_buckets_task;
115         unsigned long post_buckets_active;
116 };
117
118 struct mpt_lan_ohdr {
119         u16     dtype;
120         u8      daddr[FC_ALEN];
121         u16     stype;
122         u8      saddr[FC_ALEN];
123 };
124
125 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
126
127 /*
128  *  Forward protos...
129  */
130 static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
131                        MPT_FRAME_HDR *reply);
132 static int  mpt_lan_open(struct net_device *dev);
133 static int  mpt_lan_reset(struct net_device *dev);
134 static int  mpt_lan_close(struct net_device *dev);
135 static void mpt_lan_post_receive_buckets(void *dev_id);
136 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
137                                            int priority);
138 static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
139 static int  mpt_lan_receive_post_reply(struct net_device *dev,
140                                        LANReceivePostReply_t *pRecvRep);
141 static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
142 static int  mpt_lan_send_reply(struct net_device *dev,
143                                LANSendReply_t *pSendRep);
144 static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
145 static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
146 static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
147                                          struct net_device *dev);
148
149 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
150 /*
151  *  Fusion MPT LAN private data
152  */
153 static int LanCtx = -1;
154
155 static u32 max_buckets_out = 127;
156 static u32 tx_max_out_p = 127 - 16;
157
158 #ifdef QLOGIC_NAA_WORKAROUND
159 static struct NAA_Hosed *mpt_bad_naa = NULL;
160 DEFINE_RWLOCK(bad_naa_lock);
161 #endif
162
163 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
164 /*
165  * Fusion MPT LAN external data
166  */
167 extern int mpt_lan_index;
168
169 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
170 /**
171  *      lan_reply - Handle all data sent from the hardware.
172  *      @ioc: Pointer to MPT_ADAPTER structure
173  *      @mf: Pointer to original MPT request frame (NULL if TurboReply)
174  *      @reply: Pointer to MPT reply frame
175  *
176  *      Returns 1 indicating original alloc'd request frame ptr
177  *      should be freed, or 0 if it shouldn't.
178  */
179 static int
180 lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
181 {
182         struct net_device *dev = ioc->netdev;
183         int FreeReqFrame = 0;
184
185         dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
186                   IOC_AND_NETDEV_NAMES_s_s(dev)));
187
188 //      dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
189 //                      mf, reply));
190
191         if (mf == NULL) {
192                 u32 tmsg = CAST_PTR_TO_U32(reply);
193
194                 dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
195                                 IOC_AND_NETDEV_NAMES_s_s(dev),
196                                 tmsg));
197
198                 switch (GET_LAN_FORM(tmsg)) {
199
200                 // NOTE!  (Optimization) First case here is now caught in
201                 //  mptbase.c::mpt_interrupt() routine and callback here
202                 //  is now skipped for this case!
203 #if 0
204                 case LAN_REPLY_FORM_MESSAGE_CONTEXT:
205 //                      dioprintk((KERN_INFO MYNAM "/lan_reply: "
206 //                                "MessageContext turbo reply received\n"));
207                         FreeReqFrame = 1;
208                         break;
209 #endif
210
211                 case LAN_REPLY_FORM_SEND_SINGLE:
212 //                      dioprintk((MYNAM "/lan_reply: "
213 //                                "calling mpt_lan_send_reply (turbo)\n"));
214
215                         // Potential BUG here?
216                         //      FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
217                         //  If/when mpt_lan_send_turbo would return 1 here,
218                         //  calling routine (mptbase.c|mpt_interrupt)
219                         //  would Oops because mf has already been set
220                         //  to NULL.  So after return from this func,
221                         //  mpt_interrupt() will attempt to put (NULL) mf ptr
222                         //  item back onto its adapter FreeQ - Oops!:-(
223                         //  It's Ok, since mpt_lan_send_turbo() *currently*
224                         //  always returns 0, but..., just in case:
225
226                         (void) mpt_lan_send_turbo(dev, tmsg);
227                         FreeReqFrame = 0;
228
229                         break;
230
231                 case LAN_REPLY_FORM_RECEIVE_SINGLE:
232 //                      dioprintk((KERN_INFO MYNAM "@lan_reply: "
233 //                                "rcv-Turbo = %08x\n", tmsg));
234                         mpt_lan_receive_post_turbo(dev, tmsg);
235                         break;
236
237                 default:
238                         printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
239                                 "that I don't know what to do with\n");
240
241                         /* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
242
243                         break;
244                 }
245
246                 return FreeReqFrame;
247         }
248
249 //      msg = (u32 *) reply;
250 //      dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
251 //                le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
252 //                le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
253 //      dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
254 //                reply->u.hdr.Function));
255
256         switch (reply->u.hdr.Function) {
257
258         case MPI_FUNCTION_LAN_SEND:
259         {
260                 LANSendReply_t *pSendRep;
261
262                 pSendRep = (LANSendReply_t *) reply;
263                 FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
264                 break;
265         }
266
267         case MPI_FUNCTION_LAN_RECEIVE:
268         {
269                 LANReceivePostReply_t *pRecvRep;
270
271                 pRecvRep = (LANReceivePostReply_t *) reply;
272                 if (pRecvRep->NumberOfContexts) {
273                         mpt_lan_receive_post_reply(dev, pRecvRep);
274                         if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
275                                 FreeReqFrame = 1;
276                 } else
277                         dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
278                                   "ReceivePostReply received.\n"));
279                 break;
280         }
281
282         case MPI_FUNCTION_LAN_RESET:
283                 /* Just a default reply. Might want to check it to
284                  * make sure that everything went ok.
285                  */
286                 FreeReqFrame = 1;
287                 break;
288
289         case MPI_FUNCTION_EVENT_NOTIFICATION:
290         case MPI_FUNCTION_EVENT_ACK:
291                 /*  _EVENT_NOTIFICATION should NOT come down this path any more.
292                  *  Should be routed to mpt_lan_event_process(), but just in case...
293                  */
294                 FreeReqFrame = 1;
295                 break;
296
297         default:
298                 printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
299                         "reply that I don't know what to do with\n");
300
301                 /* CHECKME!  Hmmm...  We default to freeing the request frame here; is that right? */
302                 FreeReqFrame = 1;
303
304                 break;
305         }
306
307         return FreeReqFrame;
308 }
309
310 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
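/**
 *      mpt_lan_ioc_reset - Handle IOC reset notifications routed to the LAN driver
 *      @ioc: Pointer to MPT_ADAPTER structure
 *      @reset_phase: MPT_IOC_SETUP_RESET, MPT_IOC_PRE_RESET or MPT_IOC_POST_RESET
 *
 *      On pre-reset the Tx queue is stopped, buckets_out is cleared and the
 *      Rx free-context list is rebuilt; on post-reset the receive buckets
 *      are reposted and the queue is woken again.  Always returns 1.
 */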
311 static int
312 mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
313 {
314         struct net_device *dev = ioc->netdev;
315         struct mpt_lan_priv *priv;
316
317         if (dev == NULL)
318                 return(1);
319         else
320                 priv = netdev_priv(dev);
321
322         dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
323                         reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
324                         reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
325
326         if (priv->mpt_rxfidx == NULL)
327                 return (1);
328
329         if (reset_phase == MPT_IOC_SETUP_RESET) {
330                 ;
331         } else if (reset_phase == MPT_IOC_PRE_RESET) {
332                 int i;
333                 unsigned long flags;
334
335                 netif_stop_queue(dev);
336
337                 dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
338
339                 atomic_set(&priv->buckets_out, 0);
340
341                 /* Reset Rx Free Tail index and re-populate the queue. */
342                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
343                 priv->mpt_rxfidx_tail = -1;
344                 for (i = 0; i < priv->max_buckets_out; i++)
345                         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
346                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
347         } else {
348                 mpt_lan_post_receive_buckets(dev);
349                 netif_wake_queue(dev);
350         }
351
352         return 1;
353 }
354
355 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
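/**
 *      mpt_lan_event_process - Handle MPT events routed to the LAN driver
 *      @ioc: Pointer to MPT_ADAPTER structure
 *      @pEvReply: Pointer to the event notification reply frame
 *
 *      Currently a no-op: every event falls through the switch untouched
 *      and the function simply returns 1.  AckRequired handling is done
 *      in mptbase.c.
 */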
356 static int
357 mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
358 {
359         dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
360
361         switch (le32_to_cpu(pEvReply->Event)) {
362         case MPI_EVENT_NONE:                            /* 00 */
363         case MPI_EVENT_LOG_DATA:                        /* 01 */
364         case MPI_EVENT_STATE_CHANGE:                    /* 02 */
365         case MPI_EVENT_UNIT_ATTENTION:                  /* 03 */
366         case MPI_EVENT_IOC_BUS_RESET:                   /* 04 */
367         case MPI_EVENT_EXT_BUS_RESET:                   /* 05 */
368         case MPI_EVENT_RESCAN:                          /* 06 */
369                 /* Ok, do we need to do anything here? As far as
370                    I can tell, this is when a new device gets added
371                    to the loop. */
372         case MPI_EVENT_LINK_STATUS_CHANGE:              /* 07 */
373         case MPI_EVENT_LOOP_STATE_CHANGE:               /* 08 */
374         case MPI_EVENT_LOGOUT:                          /* 09 */
375         case MPI_EVENT_EVENT_CHANGE:                    /* 0A */
376         default:
377                 break;
378         }
379
380         /*
381          *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
382          *  Do NOT do it here now!
383          */
384
385         return 1;
386 }
387
388 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
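/**
 *      mpt_lan_open - net_device open() handler
 *      @dev: Pointer to the network device
 *
 *      Attempts a LanReset, allocates the Tx/Rx free-context lists and
 *      BufferControl arrays, posts the initial receive buckets, registers
 *      for MPT event notifications and starts the Tx queue.
 *      Returns 0 on success or -ENOMEM if an allocation fails.
 */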
389 static int
390 mpt_lan_open(struct net_device *dev)
391 {
392         struct mpt_lan_priv *priv = netdev_priv(dev);
393         int i;
394
395         if (mpt_lan_reset(dev) != 0) {
396                 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
397
398                 printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
399
400                 if (mpt_dev->active)
401                         printk ("The ioc is active. Perhaps it needs to be"
402                                 " reset?\n");
403                 else
404                         printk ("The ioc is inactive, most likely in the "
405                                 "process of being reset. Please try again in "
406                                 "a moment.\n");
407         }
408
409         priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
410         if (priv->mpt_txfidx == NULL)
411                 goto out;
412         priv->mpt_txfidx_tail = -1;
413
414         priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
415                                 GFP_KERNEL);
416         if (priv->SendCtl == NULL)
417                 goto out_mpt_txfidx;
418         for (i = 0; i < priv->tx_max_out; i++)
419                 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
420
421         dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
422
423         priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
424                                    GFP_KERNEL);
425         if (priv->mpt_rxfidx == NULL)
426                 goto out_SendCtl;
427         priv->mpt_rxfidx_tail = -1;
428
429         priv->RcvCtl = kcalloc(priv->max_buckets_out,
430                                sizeof(struct BufferControl),
431                                GFP_KERNEL);
432         if (priv->RcvCtl == NULL)
433                 goto out_mpt_rxfidx;
434         for (i = 0; i < priv->max_buckets_out; i++)
435                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
436
437 /**/    dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
438 /**/    for (i = 0; i < priv->tx_max_out; i++)
439 /**/            dlprintk((" %xh", priv->mpt_txfidx[i]));
440 /**/    dlprintk(("\n"));
441
442         dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
443
444         mpt_lan_post_receive_buckets(dev);
445         printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
446                         IOC_AND_NETDEV_NAMES_s_s(dev));
447
448         if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
449                 printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
450                         " Notifications. This is a bad thing! We're not going "
451                         "to go ahead, but I'd be leery of system stability at "
452                         "this point.\n");
453         }
454
455         netif_start_queue(dev);
456         dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
457
458         return 0;
459 out_mpt_rxfidx:
460         kfree(priv->mpt_rxfidx);
461         priv->mpt_rxfidx = NULL;
462 out_SendCtl:
463         kfree(priv->SendCtl);
464         priv->SendCtl = NULL;
465 out_mpt_txfidx:
466         kfree(priv->mpt_txfidx);
467         priv->mpt_txfidx = NULL;
468 out:    return -ENOMEM;
469 }
470
471 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
472 /* Send a LanReset message to the FW. This should result in the FW returning
473    any buckets it still has. */
474 static int
475 mpt_lan_reset(struct net_device *dev)
476 {
477         MPT_FRAME_HDR *mf;
478         LANResetRequest_t *pResetReq;
479         struct mpt_lan_priv *priv = netdev_priv(dev);
480
481         mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);
482
483         if (mf == NULL) {
484 /*              dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
485                 "Unable to allocate a request frame.\n"));
486 */
487                 return -1;
488         }
489
490         pResetReq = (LANResetRequest_t *) mf;
491
492         pResetReq->Function     = MPI_FUNCTION_LAN_RESET;
493         pResetReq->ChainOffset  = 0;
494         pResetReq->Reserved     = 0;
495         pResetReq->PortNumber   = priv->pnum;
496         pResetReq->MsgFlags     = 0;
497         pResetReq->Reserved2    = 0;
498
499         mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);
500
501         return 0;
502 }
503
504 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
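/**
 *      mpt_lan_close - net_device stop() handler
 *      @dev: Pointer to the network device
 *
 *      Deregisters from event notifications, stops the Tx queue, sends a
 *      LanReset and waits up to two seconds for outstanding buckets to be
 *      returned, then unmaps and frees any remaining Rx/Tx skbs along with
 *      the BufferControl arrays and free-context lists.
 */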
505 static int
506 mpt_lan_close(struct net_device *dev)
507 {
508         struct mpt_lan_priv *priv = netdev_priv(dev);
509         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
510         unsigned long timeout;
511         int i;
512
513         dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));
514
515         mpt_event_deregister(LanCtx);
516
517         dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
518                   "since driver was loaded, %d still out\n",
519                   priv->total_posted,atomic_read(&priv->buckets_out)));
520
521         netif_stop_queue(dev);
522
523         mpt_lan_reset(dev);
524
525         timeout = jiffies + 2 * HZ;
526         while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
527                 schedule_timeout_interruptible(1);
528
529         for (i = 0; i < priv->max_buckets_out; i++) {
530                 if (priv->RcvCtl[i].skb != NULL) {
531 /**/                    dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
532 /**/                              "is still out\n", i));
533                         pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
534                                          priv->RcvCtl[i].len,
535                                          PCI_DMA_FROMDEVICE);
536                         dev_kfree_skb(priv->RcvCtl[i].skb);
537                 }
538         }
539
540         kfree(priv->RcvCtl);
541         kfree(priv->mpt_rxfidx);
542
543         for (i = 0; i < priv->tx_max_out; i++) {
544                 if (priv->SendCtl[i].skb != NULL) {
545                         pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
546                                          priv->SendCtl[i].len,
547                                          PCI_DMA_TODEVICE);
548                         dev_kfree_skb(priv->SendCtl[i].skb);
549                 }
550         }
551
552         kfree(priv->SendCtl);
553         kfree(priv->mpt_txfidx);
554
555         atomic_set(&priv->buckets_out, 0);
556
557         printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
558                         IOC_AND_NETDEV_NAMES_s_s(dev));
559
560         return 0;
561 }
562
563 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
564 static struct net_device_stats *
565 mpt_lan_get_stats(struct net_device *dev)
566 {
567         struct mpt_lan_priv *priv = netdev_priv(dev);
568
569         return (struct net_device_stats *) &priv->stats;
570 }
571
572 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
573 static int
574 mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
575 {
576         if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
577                 return -EINVAL;
578         dev->mtu = new_mtu;
579         return 0;
580 }
581
582 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
583 /* Tx timeout handler. */
584 static void
585 mpt_lan_tx_timeout(struct net_device *dev)
586 {
587         struct mpt_lan_priv *priv = netdev_priv(dev);
588         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
589
590         if (mpt_dev->active) {
591                 dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
592                 netif_wake_queue(dev);
593         }
594 }
595
596 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
597 //static inline int
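/**
 *      mpt_lan_send_turbo - Complete a single Tx from a turbo reply
 *      @dev: Pointer to the network device
 *      @tmsg: Turbo reply message holding the Tx buffer context
 *
 *      Updates Tx statistics, unmaps and frees the sent skb, returns the
 *      context to the Tx free-context list and wakes the queue.
 *      Always returns 0.
 */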
598 static int
599 mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
600 {
601         struct mpt_lan_priv *priv = netdev_priv(dev);
602         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
603         struct sk_buff *sent;
604         unsigned long flags;
605         u32 ctx;
606
607         ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
608         sent = priv->SendCtl[ctx].skb;
609
610         priv->stats.tx_packets++;
611         priv->stats.tx_bytes += sent->len;
612
613         dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
614                         IOC_AND_NETDEV_NAMES_s_s(dev),
615                         __FUNCTION__, sent));
616
617         priv->SendCtl[ctx].skb = NULL;
618         pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
619                          priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
620         dev_kfree_skb_irq(sent);
621
622         spin_lock_irqsave(&priv->txfidx_lock, flags);
623         priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
624         spin_unlock_irqrestore(&priv->txfidx_lock, flags);
625
626         netif_wake_queue(dev);
627         return 0;
628 }
629
630 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
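/**
 *      mpt_lan_send_reply - Complete one or more Tx requests from a full reply
 *      @dev: Pointer to the network device
 *      @pSendRep: Pointer to the LANSendReply frame
 *
 *      Checks IOCStatus and updates the Tx statistics, then unmaps and
 *      frees each completed skb and returns its context to the Tx
 *      free-context list.  Returns 1 if the request frame should be freed
 *      (i.e. this is not a continuation reply), otherwise 0.
 */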
631 static int
632 mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
633 {
634         struct mpt_lan_priv *priv = netdev_priv(dev);
635         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
636         struct sk_buff *sent;
637         unsigned long flags;
638         int FreeReqFrame = 0;
639         u32 *pContext;
640         u32 ctx;
641         u8 count;
642
643         count = pSendRep->NumberOfContexts;
644
645         dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
646                  le16_to_cpu(pSendRep->IOCStatus)));
647
648         /* Add check for Loginfo Flag in IOCStatus */
649
650         switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
651         case MPI_IOCSTATUS_SUCCESS:
652                 priv->stats.tx_packets += count;
653                 break;
654
655         case MPI_IOCSTATUS_LAN_CANCELED:
656         case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
657                 break;
658
659         case MPI_IOCSTATUS_INVALID_SGL:
660                 priv->stats.tx_errors += count;
661                 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
662                                 IOC_AND_NETDEV_NAMES_s_s(dev));
663                 goto out;
664
665         default:
666                 priv->stats.tx_errors += count;
667                 break;
668         }
669
670         pContext = &pSendRep->BufferContext;
671
672         spin_lock_irqsave(&priv->txfidx_lock, flags);
673         while (count > 0) {
674                 ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
675
676                 sent = priv->SendCtl[ctx].skb;
677                 priv->stats.tx_bytes += sent->len;
678
679                 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
680                                 IOC_AND_NETDEV_NAMES_s_s(dev),
681                                 __FUNCTION__, sent));
682
683                 priv->SendCtl[ctx].skb = NULL;
684                 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
685                                  priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
686                 dev_kfree_skb_irq(sent);
687
688                 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
689
690                 pContext++;
691                 count--;
692         }
693         spin_unlock_irqrestore(&priv->txfidx_lock, flags);
694
695 out:
696         if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
697                 FreeReqFrame = 1;
698
699         netif_wake_queue(dev);
700         return FreeReqFrame;
701 }
702
703 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
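/**
 *      mpt_lan_sdu_send - hard_start_xmit() handler
 *      @skb: Packet to transmit
 *      @dev: Pointer to the network device
 *
 *      Grabs a Tx context and an MPT request frame, maps the packet for
 *      DMA, builds the LANSendRequest (one transaction element plus a
 *      single 64-bit simple SGE) and hands it to the IOC.  Returns 0 on
 *      success, or 1 after stopping the queue if no context or request
 *      frame is available.
 */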
704 static int
705 mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
706 {
707         struct mpt_lan_priv *priv = netdev_priv(dev);
708         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
709         MPT_FRAME_HDR *mf;
710         LANSendRequest_t *pSendReq;
711         SGETransaction32_t *pTrans;
712         SGESimple64_t *pSimple;
713         dma_addr_t dma;
714         unsigned long flags;
715         int ctx;
716         u16 cur_naa = 0x1000;
717
718         dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
719                         __FUNCTION__, skb));
720
721         spin_lock_irqsave(&priv->txfidx_lock, flags);
722         if (priv->mpt_txfidx_tail < 0) {
723                 netif_stop_queue(dev);
724                 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
725
726                 printk (KERN_ERR "%s: no tx context available: %u\n",
727                         __FUNCTION__, priv->mpt_txfidx_tail);
728                 return 1;
729         }
730
731         mf = mpt_get_msg_frame(LanCtx, mpt_dev);
732         if (mf == NULL) {
733                 netif_stop_queue(dev);
734                 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
735
736                 printk (KERN_ERR "%s: Unable to alloc request frame\n",
737                         __FUNCTION__);
738                 return 1;
739         }
740
741         ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
742         spin_unlock_irqrestore(&priv->txfidx_lock, flags);
743
744 //      dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
745 //                      IOC_AND_NETDEV_NAMES_s_s(dev)));
746
747         pSendReq = (LANSendRequest_t *) mf;
748
749         /* Set the mac.raw pointer, since this apparently isn't getting
750          * done before we get the skb. Pull the data pointer past the mac data.
751          */
752         skb->mac.raw = skb->data;
753         skb_pull(skb, 12);
754
755         dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
756                              PCI_DMA_TODEVICE);
757
758         priv->SendCtl[ctx].skb = skb;
759         priv->SendCtl[ctx].dma = dma;
760         priv->SendCtl[ctx].len = skb->len;
761
762         /* Message Header */
763         pSendReq->Reserved    = 0;
764         pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
765         pSendReq->ChainOffset = 0;
766         pSendReq->Reserved2   = 0;
767         pSendReq->MsgFlags    = 0;
768         pSendReq->PortNumber  = priv->pnum;
769
770         /* Transaction Context Element */
771         pTrans = (SGETransaction32_t *) pSendReq->SG_List;
772
773         /* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
774         pTrans->ContextSize   = sizeof(u32);
775         pTrans->DetailsLength = 2 * sizeof(u32);
776         pTrans->Flags         = 0;
777         pTrans->TransactionContext[0] = cpu_to_le32(ctx);
778
779 //      dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
780 //                      IOC_AND_NETDEV_NAMES_s_s(dev),
781 //                      ctx, skb, skb->data));
782
783 #ifdef QLOGIC_NAA_WORKAROUND
784 {
785         struct NAA_Hosed *nh;
786
787         /* Munge the NAA for Tx packets to QLogic boards, which don't follow
788            RFC 2625. The longer I look at this, the more my opinion of Qlogic
789            drops. */
790         read_lock_irq(&bad_naa_lock);
791         for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
792                 if ((nh->ieee[0] == skb->mac.raw[0]) &&
793                     (nh->ieee[1] == skb->mac.raw[1]) &&
794                     (nh->ieee[2] == skb->mac.raw[2]) &&
795                     (nh->ieee[3] == skb->mac.raw[3]) &&
796                     (nh->ieee[4] == skb->mac.raw[4]) &&
797                     (nh->ieee[5] == skb->mac.raw[5])) {
798                         cur_naa = nh->NAA;
799                         dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
800                                   "= %04x.\n", cur_naa));
801                         break;
802                 }
803         }
804         read_unlock_irq(&bad_naa_lock);
805 }
806 #endif
807
808         pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
809                                                     (skb->mac.raw[0] <<  8) |
810                                                     (skb->mac.raw[1] <<  0));
811         pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) |
812                                                     (skb->mac.raw[3] << 16) |
813                                                     (skb->mac.raw[4] <<  8) |
814                                                     (skb->mac.raw[5] <<  0));
815
816         pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
817
818         /* If we ever decide to send more than one Simple SGE per LANSend, then
819            we will need to make sure that LAST_ELEMENT only gets set on the
820            last one. Otherwise, bad voodoo and evil funkiness will commence. */
821         pSimple->FlagsLength = cpu_to_le32(
822                         ((MPI_SGE_FLAGS_LAST_ELEMENT |
823                           MPI_SGE_FLAGS_END_OF_BUFFER |
824                           MPI_SGE_FLAGS_SIMPLE_ELEMENT |
825                           MPI_SGE_FLAGS_SYSTEM_ADDRESS |
826                           MPI_SGE_FLAGS_HOST_TO_IOC |
827                           MPI_SGE_FLAGS_64_BIT_ADDRESSING |
828                           MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
829                         skb->len);
830         pSimple->Address.Low = cpu_to_le32((u32) dma);
831         if (sizeof(dma_addr_t) > sizeof(u32))
832                 pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
833         else
834                 pSimple->Address.High = 0;
835
836         mpt_put_msg_frame (LanCtx, mpt_dev, mf);
837         dev->trans_start = jiffies;
838
839         dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
840                         IOC_AND_NETDEV_NAMES_s_s(dev),
841                         le32_to_cpu(pSimple->FlagsLength)));
842
843         return 0;
844 }
845
846 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
847 static void
848 mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
849 /*
850  * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
851  */
852 {
853         struct mpt_lan_priv *priv = dev->priv;
854         
855         if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
856                 if (priority) {
857                         schedule_work(&priv->post_buckets_task);
858                 } else {
859                         schedule_delayed_work(&priv->post_buckets_task, 1);
860                         dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
861                                    "timer.\n"));
862                 }
863                 dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
864                            IOC_AND_NETDEV_NAMES_s_s(dev) ));
865         }
866 }
867
868 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
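/**
 *      mpt_lan_receive_skb - Push a received packet up the stack
 *      @dev: Pointer to the network device
 *      @skb: Received packet
 *
 *      Sets the protocol via mpt_lan_type_trans(), updates Rx statistics
 *      and hands the skb to netif_rx().  If the number of outstanding
 *      buckets has dropped below bucketthresh, the post_buckets task is
 *      scheduled to replenish them.  Always returns 0.
 */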
869 static int
870 mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
871 {
872         struct mpt_lan_priv *priv = dev->priv;
873
874         skb->protocol = mpt_lan_type_trans(skb, dev);
875
876         dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
877                  "delivered to upper level.\n",
878                         IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
879
880         priv->stats.rx_bytes += skb->len;
881         priv->stats.rx_packets++;
882
883         skb->dev = dev;
884         netif_rx(skb);
885
886         dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
887                  atomic_read(&priv->buckets_out)));
888
889         if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
890                 mpt_lan_wake_post_buckets_task(dev, 1);
891
892         dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
893                   "remaining, %d received back since sod\n",
894                   atomic_read(&priv->buckets_out), priv->total_received));
895
896         return 0;
897 }
898
899 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
900 //static inline int
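/**
 *      mpt_lan_receive_post_turbo - Handle a single-bucket turbo receive
 *      @dev: Pointer to the network device
 *      @tmsg: Turbo reply holding the bucket context and packet length
 *
 *      Packets shorter than MPT_LAN_RX_COPYBREAK are copied into a fresh
 *      skb so the bucket stays mapped for reuse; larger packets consume
 *      the bucket's skb directly.  The context is returned to the Rx
 *      free-context list and the packet is passed to mpt_lan_receive_skb().
 */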
901 static int
902 mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
903 {
904         struct mpt_lan_priv *priv = dev->priv;
905         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
906         struct sk_buff *skb, *old_skb;
907         unsigned long flags;
908         u32 ctx, len;
909
910         ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
911         skb = priv->RcvCtl[ctx].skb;
912
913         len = GET_LAN_PACKET_LENGTH(tmsg);
914
915         if (len < MPT_LAN_RX_COPYBREAK) {
916                 old_skb = skb;
917
918                 skb = (struct sk_buff *)dev_alloc_skb(len);
919                 if (!skb) {
920                         printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
921                                         IOC_AND_NETDEV_NAMES_s_s(dev),
922                                         __FILE__, __LINE__);
923                         return -ENOMEM;
924                 }
925
926                 pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
927                                             priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
928
929                 memcpy(skb_put(skb, len), old_skb->data, len);
930
931                 pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
932                                                priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
933                 goto out;
934         }
935
936         skb_put(skb, len);
937
938         priv->RcvCtl[ctx].skb = NULL;
939
940         pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
941                          priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
942
943 out:
944         spin_lock_irqsave(&priv->rxfidx_lock, flags);
945         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
946         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
947
948         atomic_dec(&priv->buckets_out);
949         priv->total_received++;
950
951         return mpt_lan_receive_skb(dev, skb);
952 }
953
954 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
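/**
 *      mpt_lan_receive_post_free - Free buckets returned by a canceled post
 *      @dev: Pointer to the network device
 *      @pRecvRep: Pointer to the LANReceivePostReply frame
 *
 *      Called when the IOC reports MPI_IOCSTATUS_LAN_CANCELED; unmaps and
 *      frees each returned bucket skb and puts its context back on the
 *      Rx free-context list.
 */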
955 static int
956 mpt_lan_receive_post_free(struct net_device *dev,
957                           LANReceivePostReply_t *pRecvRep)
958 {
959         struct mpt_lan_priv *priv = dev->priv;
960         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
961         unsigned long flags;
962         struct sk_buff *skb;
963         u32 ctx;
964         int count;
965         int i;
966
967         count = pRecvRep->NumberOfContexts;
968
969 /**/    dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
970                   "IOC returned %d buckets, freeing them...\n", count));
971
972         spin_lock_irqsave(&priv->rxfidx_lock, flags);
973         for (i = 0; i < count; i++) {
974                 ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
975
976                 skb = priv->RcvCtl[ctx].skb;
977
978 //              dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
979 //                              IOC_AND_NETDEV_NAMES_s_s(dev)));
980 //              dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
981 //                              priv, &(priv->buckets_out)));
982 //              dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
983
984                 priv->RcvCtl[ctx].skb = NULL;
985                 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
986                                  priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
987                 dev_kfree_skb_any(skb);
988
989                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
990         }
991         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
992
993         atomic_sub(count, &priv->buckets_out);
994
995 //      for (i = 0; i < priv->max_buckets_out; i++)
996 //              if (priv->RcvCtl[i].skb != NULL)
997 //                      dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
998 //                                "is still out\n", i));
999
1000 /*      dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
1001                   count));
1002 */
1003 /**/    dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
1004 /**/              "remaining, %d received back since sod.\n",
1005 /**/              atomic_read(&priv->buckets_out), priv->total_received));
1006         return 0;
1007 }
1008
1009 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
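/**
 *      mpt_lan_receive_post_reply - Handle a full (non-turbo) receive reply
 *      @dev: Pointer to the network device
 *      @pRecvRep: Pointer to the LANReceivePostReply frame
 *
 *      Reassembles packets that span multiple buckets, copies packets
 *      shorter than MPT_LAN_RX_COPYBREAK so their buckets can be reused,
 *      and returns the bucket contexts to the Rx free-context list.  If
 *      the driver's buckets_out count and the firmware's BucketsRemaining
 *      diverge past a threshold, a LanReset is issued and bucket posting
 *      is rescheduled.
 */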
1010 static int
1011 mpt_lan_receive_post_reply(struct net_device *dev,
1012                            LANReceivePostReply_t *pRecvRep)
1013 {
1014         struct mpt_lan_priv *priv = dev->priv;
1015         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1016         struct sk_buff *skb, *old_skb;
1017         unsigned long flags;
1018         u32 len, ctx, offset;
1019         u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
1020         int count;
1021         int i, l;
1022
1023         dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
1024         dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
1025                  le16_to_cpu(pRecvRep->IOCStatus)));
1026
1027         if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
1028                                                 MPI_IOCSTATUS_LAN_CANCELED)
1029                 return mpt_lan_receive_post_free(dev, pRecvRep);
1030
1031         len = le32_to_cpu(pRecvRep->PacketLength);
1032         if (len == 0) {
1033                 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
1034                         "ReceivePostReply w/ PacketLength zero!\n",
1035                                 IOC_AND_NETDEV_NAMES_s_s(dev));
1036                 printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
1037                                 pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
1038                 return -1;
1039         }
1040
1041         ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
1042         count  = pRecvRep->NumberOfContexts;
1043         skb    = priv->RcvCtl[ctx].skb;
1044
1045         offset = le32_to_cpu(pRecvRep->PacketOffset);
1046 //      if (offset != 0) {
1047 //              printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
1048 //                      "w/ PacketOffset %u\n",
1049 //                              IOC_AND_NETDEV_NAMES_s_s(dev),
1050 //                              offset);
1051 //      }
1052
1053         dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
1054                         IOC_AND_NETDEV_NAMES_s_s(dev),
1055                         offset, len));
1056
1057         if (count > 1) {
1058                 int szrem = len;
1059
1060 //              dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
1061 //                      "for single packet, concatenating...\n",
1062 //                              IOC_AND_NETDEV_NAMES_s_s(dev)));
1063
1064                 skb = (struct sk_buff *)dev_alloc_skb(len);
1065                 if (!skb) {
1066                         printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1067                                         IOC_AND_NETDEV_NAMES_s_s(dev),
1068                                         __FILE__, __LINE__);
1069                         return -ENOMEM;
1070                 }
1071
1072                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1073                 for (i = 0; i < count; i++) {
1074
1075                         ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
1076                         old_skb = priv->RcvCtl[ctx].skb;
1077
1078                         l = priv->RcvCtl[ctx].len;
1079                         if (szrem < l)
1080                                 l = szrem;
1081
1082 //                      dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
1083 //                                      IOC_AND_NETDEV_NAMES_s_s(dev),
1084 //                                      i, l));
1085
1086                         pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1087                                                     priv->RcvCtl[ctx].dma,
1088                                                     priv->RcvCtl[ctx].len,
1089                                                     PCI_DMA_FROMDEVICE);
1090                         memcpy(skb_put(skb, l), old_skb->data, l);
1091
1092                         pci_dma_sync_single_for_device(mpt_dev->pcidev,
1093                                                        priv->RcvCtl[ctx].dma,
1094                                                        priv->RcvCtl[ctx].len,
1095                                                        PCI_DMA_FROMDEVICE);
1096
1097                         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1098                         szrem -= l;
1099                 }
1100                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1101
1102         } else if (len < MPT_LAN_RX_COPYBREAK) {
1103
1104                 old_skb = skb;
1105
1106                 skb = (struct sk_buff *)dev_alloc_skb(len);
1107                 if (!skb) {
1108                         printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1109                                         IOC_AND_NETDEV_NAMES_s_s(dev),
1110                                         __FILE__, __LINE__);
1111                         return -ENOMEM;
1112                 }
1113
1114                 pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1115                                             priv->RcvCtl[ctx].dma,
1116                                             priv->RcvCtl[ctx].len,
1117                                             PCI_DMA_FROMDEVICE);
1118
1119                 memcpy(skb_put(skb, len), old_skb->data, len);
1120
1121                 pci_dma_sync_single_for_device(mpt_dev->pcidev,
1122                                                priv->RcvCtl[ctx].dma,
1123                                                priv->RcvCtl[ctx].len,
1124                                                PCI_DMA_FROMDEVICE);
1125
1126                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1127                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1128                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1129
1130         } else {
1131                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1132
1133                 priv->RcvCtl[ctx].skb = NULL;
1134
1135                 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1136                                  priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1137                 priv->RcvCtl[ctx].dma = 0;
1138
1139                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1140                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1141
1142                 skb_put(skb,len);
1143         }
1144
1145         atomic_sub(count, &priv->buckets_out);
1146         priv->total_received += count;
1147
1148         if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
1149                 printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
1150                         "MPT_LAN_MAX_BUCKETS_OUT = %d\n",
1151                                 IOC_AND_NETDEV_NAMES_s_s(dev),
1152                                 priv->mpt_rxfidx_tail,
1153                                 MPT_LAN_MAX_BUCKETS_OUT);
1154
1155                 panic("Damn it Jim! I'm a doctor, not a programmer! "
1156                                 "Oh, wait a sec, I am a programmer. "
1157                                 "And, who's Jim?!?!\n"
1158                                 "Arrgghh! We've done it again!\n");
1159         }
1160
1161         if (remaining == 0)
1162                 printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
1163                         "(priv->buckets_out = %d)\n",
1164                         IOC_AND_NETDEV_NAMES_s_s(dev),
1165                         atomic_read(&priv->buckets_out));
1166         else if (remaining < 10)
1167                 printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
1168                         "(priv->buckets_out = %d)\n",
1169                         IOC_AND_NETDEV_NAMES_s_s(dev),
1170                         remaining, atomic_read(&priv->buckets_out));
1171         
1172         if ((remaining < priv->bucketthresh) &&
1173             ((atomic_read(&priv->buckets_out) - remaining) >
1174              MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
1175                 
1176                 printk (KERN_WARNING MYNAM " Mismatch between driver's "
1177                         "buckets_out count and fw's BucketsRemaining "
1178                         "count has crossed the threshold, issuing a "
1179                         "LanReset to clear the fw's hashtable. You may "
1180                         "want to check your /var/log/messages for \"CRC "
1181                         "error\" event notifications.\n");
1182                 
1183                 mpt_lan_reset(dev);
1184                 mpt_lan_wake_post_buckets_task(dev, 0);
1185         }
1186         
1187         return mpt_lan_receive_skb(dev, skb);
1188 }
1189
1190 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1191 /* Simple SGE's only at the moment */
1192
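/**
 *      mpt_lan_post_receive_buckets - Replenish the IOC's receive buckets
 *      @dev_id: Pointer to the network device (work queue callback argument)
 *
 *      Builds and posts LANReceivePostRequest frames until max_buckets_out
 *      buckets are outstanding, allocating and DMA-mapping a new skb for
 *      any context that does not already hold one of the right length.
 */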
1193 static void
1194 mpt_lan_post_receive_buckets(void *dev_id)
1195 {
1196         struct net_device *dev = dev_id;
1197         struct mpt_lan_priv *priv = dev->priv;
1198         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1199         MPT_FRAME_HDR *mf;
1200         LANReceivePostRequest_t *pRecvReq;
1201         SGETransaction32_t *pTrans;
1202         SGESimple64_t *pSimple;
1203         struct sk_buff *skb;
1204         dma_addr_t dma;
1205         u32 curr, buckets, count, max;
1206         u32 len = (dev->mtu + dev->hard_header_len + 4);
1207         unsigned long flags;
1208         int i;
1209
1210         curr = atomic_read(&priv->buckets_out);
1211         buckets = (priv->max_buckets_out - curr);
1212
1213         dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
1214                         IOC_AND_NETDEV_NAMES_s_s(dev),
1215                         __FUNCTION__, buckets, curr));
1216
1217         max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
1218                         (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
1219
1220         while (buckets) {
1221                 mf = mpt_get_msg_frame(LanCtx, mpt_dev);
1222                 if (mf == NULL) {
1223                         printk (KERN_ERR "%s: Unable to alloc request frame\n",
1224                                 __FUNCTION__);
1225                         dioprintk((KERN_ERR "%s: %u buckets remaining\n",
1226                                  __FUNCTION__, buckets));
1227                         goto out;
1228                 }
1229                 pRecvReq = (LANReceivePostRequest_t *) mf;
1230
1231                 count = buckets;
1232                 if (count > max)
1233                         count = max;
1234
1235                 pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
1236                 pRecvReq->ChainOffset = 0;
1237                 pRecvReq->MsgFlags    = 0;
1238                 pRecvReq->PortNumber  = priv->pnum;
1239
1240                 pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
1241                 pSimple = NULL;
1242
1243                 for (i = 0; i < count; i++) {
1244                         int ctx;
1245
1246                         spin_lock_irqsave(&priv->rxfidx_lock, flags);
1247                         if (priv->mpt_rxfidx_tail < 0) {
1248                                 printk (KERN_ERR "%s: Can't alloc context\n",
1249                                         __FUNCTION__);
1250                                 spin_unlock_irqrestore(&priv->rxfidx_lock,
1251                                                        flags);
1252                                 break;
1253                         }
1254
1255                         ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];
1256
1257                         skb = priv->RcvCtl[ctx].skb;
1258                         if (skb && (priv->RcvCtl[ctx].len != len)) {
1259                                 pci_unmap_single(mpt_dev->pcidev,
1260                                                  priv->RcvCtl[ctx].dma,
1261                                                  priv->RcvCtl[ctx].len,
1262                                                  PCI_DMA_FROMDEVICE);
1263                                 dev_kfree_skb(priv->RcvCtl[ctx].skb);
1264                                 skb = priv->RcvCtl[ctx].skb = NULL;
1265                         }
1266
1267                         if (skb == NULL) {
1268                                 skb = dev_alloc_skb(len);
1269                                 if (skb == NULL) {
1270                                         printk (KERN_WARNING
1271                                                 MYNAM "/%s: Can't alloc skb\n",
1272                                                 __FUNCTION__);
1273                                         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1274                                         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1275                                         break;
1276                                 }
1277
1278                                 dma = pci_map_single(mpt_dev->pcidev, skb->data,
1279                                                      len, PCI_DMA_FROMDEVICE);
1280
1281                                 priv->RcvCtl[ctx].skb = skb;
1282                                 priv->RcvCtl[ctx].dma = dma;
1283                                 priv->RcvCtl[ctx].len = len;
1284                         }
1285
1286                         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1287
1288                         pTrans->ContextSize   = sizeof(u32);
1289                         pTrans->DetailsLength = 0;
1290                         pTrans->Flags         = 0;
1291                         pTrans->TransactionContext[0] = cpu_to_le32(ctx);
1292
1293                         pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
1294
1295                         pSimple->FlagsLength = cpu_to_le32(
1296                                 ((MPI_SGE_FLAGS_END_OF_BUFFER |
1297                                   MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1298                                   MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
1299                         pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
1300                         if (sizeof(dma_addr_t) > sizeof(u32))
1301                                 pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
1302                         else
1303                                 pSimple->Address.High = 0;
1304
1305                         pTrans = (SGETransaction32_t *) (pSimple + 1);
1306                 }
1307
1308                 if (pSimple == NULL) {
1309 /**/                    printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
1310 /**/                            __FUNCTION__);
1311                         mpt_free_msg_frame(mpt_dev, mf);
1312                         goto out;
1313                 }
1314
1315                 pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);
1316
1317                 pRecvReq->BucketCount = cpu_to_le32(i);
1318
1319 /*      printk(KERN_INFO MYNAM ": posting buckets\n   ");
1320  *      for (i = 0; i < j + 2; i ++)
1321  *          printk (" %08x", le32_to_cpu(msg[i]));
1322  *      printk ("\n");
1323  */
1324
1325                 mpt_put_msg_frame(LanCtx, mpt_dev, mf);
1326
1327                 priv->total_posted += i;
1328                 buckets -= i;
1329                 atomic_add(i, &priv->buckets_out);
1330         }
1331
1332 out:
1333         dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
1334                   __FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
1335         dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
1336         __FUNCTION__, priv->total_posted, priv->total_received));
1337
1338         clear_bit(0, &priv->post_buckets_active);
1339 }
1340
1341 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
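/**
 *      mpt_register_lan_device - Allocate and register one LAN net_device
 *      @mpt_dev: Pointer to MPT_ADAPTER structure
 *      @pnum: IOC port number the interface is bound to
 *
 *      Allocates an FC net_device, initializes the private data (bucket
 *      limits, locks, the post_buckets work item), derives the MAC address
 *      from the prefetched LANPage1 data and fills in the net_device
 *      methods before calling register_netdev().  Returns the device, or
 *      NULL on failure.
 */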
1342 static struct net_device *
1343 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
1344 {
1345         struct net_device *dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
1346         struct mpt_lan_priv *priv = NULL;
1347         u8 HWaddr[FC_ALEN], *a;
1348
1349         if (!dev)
1350                 return NULL;
1351
1352         dev->mtu = MPT_LAN_MTU;
1353
1354         priv = netdev_priv(dev);
1355
1356         priv->mpt_dev = mpt_dev;
1357         priv->pnum = pnum;
1358
1359         memset(&priv->post_buckets_task, 0, sizeof(struct work_struct));
1360         INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev);
1361         priv->post_buckets_active = 0;
1362
1363         dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
1364                         __LINE__, dev->mtu + dev->hard_header_len + 4));
1365
1366         atomic_set(&priv->buckets_out, 0);
1367         priv->total_posted = 0;
1368         priv->total_received = 0;
1369         priv->max_buckets_out = max_buckets_out;
1370         if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
1371                 priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
1372
1373         dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
1374                         __LINE__,
1375                         mpt_dev->pfacts[0].MaxLanBuckets,
1376                         max_buckets_out,
1377                         priv->max_buckets_out));
1378
1379         priv->bucketthresh = priv->max_buckets_out * 2 / 3;
1380         spin_lock_init(&priv->txfidx_lock);
1381         spin_lock_init(&priv->rxfidx_lock);
1382
1383         memset(&priv->stats, 0, sizeof(priv->stats));
1384
1385         /* Grab the pre-fetched hardware address from LANPage1. */
1386         a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
1387
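         /* The six address bytes in LANPage1 are stored in the reverse of the
          * canonical ordering; flip them into dev_addr byte order.
          */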
1388         HWaddr[0] = a[5];
1389         HWaddr[1] = a[4];
1390         HWaddr[2] = a[3];
1391         HWaddr[3] = a[2];
1392         HWaddr[4] = a[1];
1393         HWaddr[5] = a[0];
1394
1395         dev->addr_len = FC_ALEN;
1396         memcpy(dev->dev_addr, HWaddr, FC_ALEN);
1397         memset(dev->broadcast, 0xff, FC_ALEN);
1398
1399         /* The Tx queue is 127 deep on the 909.
1400          * Give ourselves some breathing room.
1401          */
1402         priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
1403                             tx_max_out_p : MPT_TX_MAX_OUT_LIM;
1404
1405         dev->open = mpt_lan_open;
1406         dev->stop = mpt_lan_close;
1407         dev->get_stats = mpt_lan_get_stats;
1408         dev->set_multicast_list = NULL;
1409         dev->change_mtu = mpt_lan_change_mtu;
1410         dev->hard_start_xmit = mpt_lan_sdu_send;
1411
1412 /* Not in 2.3.42. Need 2.3.45+ */
1413         dev->tx_timeout = mpt_lan_tx_timeout;
1414         dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
1415
1416         dlprintk((KERN_INFO MYNAM ": Finished setting initial values, "
1417                 "registering dev\n"));
1418
1419         SET_MODULE_OWNER(dev);
1420
1421         if (register_netdev(dev) != 0) {
1422                 free_netdev(dev);
1423                 dev = NULL;
1424         }
1425         return dev;
1426 }
1427
1428 static int
1429 mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1430 {
1431         MPT_ADAPTER             *ioc = pci_get_drvdata(pdev);
1432         struct net_device       *dev;
1433         int                     i;
1434
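         /* Walk the adapter's ports and register a net_device on the first
          * port that advertises the LAN protocol; one LAN device per adapter.
          */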
1435         for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
1436                 printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
1437                        "ProtocolFlags=%02Xh (%c%c%c%c)\n",
1438                        ioc->name, ioc->pfacts[i].PortNumber,
1439                        ioc->pfacts[i].ProtocolFlags,
1440                        MPT_PROTOCOL_FLAGS_c_c_c_c(
1441                                ioc->pfacts[i].ProtocolFlags));
1442
1443                 if (!(ioc->pfacts[i].ProtocolFlags &
1444                                         MPI_PORTFACTS_PROTOCOL_LAN)) {
1445                         printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
1446                                "seems to be disabled on this adapter port!\n",
1447                                ioc->name);
1448                         continue;
1449                 }
1450
1451                 dev = mpt_register_lan_device(ioc, i);
1452                 if (!dev) {
1453                         printk(KERN_ERR MYNAM ": %s: Unable to register "
1454                                "port%d as a LAN device\n", ioc->name,
1455                                ioc->pfacts[i].PortNumber);
1456                         continue;
1457                 }
1458
1459                 printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
1460                        "registered as '%s'\n", ioc->name, dev->name);
1461                 printk(KERN_INFO MYNAM ": %s/%s: "
1462                        "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
1463                        IOC_AND_NETDEV_NAMES_s_s(dev),
1464                        dev->dev_addr[0], dev->dev_addr[1],
1465                        dev->dev_addr[2], dev->dev_addr[3],
1466                        dev->dev_addr[4], dev->dev_addr[5]);
1467
1468                 ioc->netdev = dev;
1469
1470                 return 0;
1471         }
1472
1473         return -ENODEV;
1474 }
1475
1476 static void
1477 mptlan_remove(struct pci_dev *pdev)
1478 {
1479         MPT_ADAPTER             *ioc = pci_get_drvdata(pdev);
1480         struct net_device       *dev = ioc->netdev;
1481
1482         if(dev != NULL) {
1483                 unregister_netdev(dev);
1484                 free_netdev(dev);
1485         }
1486 }
1487
1488 static struct mpt_pci_driver mptlan_driver = {
1489         .probe          = mptlan_probe,
1490         .remove         = mptlan_remove,
1491 };
1492
1493 static int __init mpt_lan_init (void)
1494 {
1495         show_mptmod_ver(LANAME, LANVER);
1496
1497         if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
1498                 printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1499                 return -EBUSY;
1500         }
1501
1502         /* Set the callback index to be used by driver core for turbo replies */
1503         mpt_lan_index = LanCtx;
1504
1505         dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1506
1507         if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
1508                 printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
1509                        "handler with mptbase! The world is at an end! "
1510                        "Everything is fading to black! Goodbye.\n");
1511                 return -EBUSY;
1512         }
1513
1514         dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1515
1516         if (mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER))
1517                 dprintk((KERN_INFO MYNAM ": failed to register dd callbacks\n"));
1518         return 0;
1519 }
1520
1521 static void __exit mpt_lan_exit(void)
1522 {
1523         mpt_device_driver_deregister(MPTLAN_DRIVER);
1524         mpt_reset_deregister(LanCtx);
1525
1526         if (LanCtx >= 0) {
1527                 mpt_deregister(LanCtx);
1528                 LanCtx = -1;
1529                 mpt_lan_index = 0;
1530         }
1531 }
1532
1533 module_init(mpt_lan_init);
1534 module_exit(mpt_lan_exit);
1535
1536 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1537 static unsigned short
1538 mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1539 {
1540         struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
1541         struct fcllc *fcllc;
1542
1543         skb->mac.raw = skb->data;
1544         skb_pull(skb, sizeof(struct mpt_lan_ohdr));
1545
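         /* Work around a firmware bug that delivers broadcast frames with a
          * byte-swapped header: a dtype of all ones flags a swapped header,
          * so swap the four header words back before parsing.
          */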
1546         if (fch->dtype == htons(0xffff)) {
1547                 u32 *p = (u32 *) fch;
1548
1549                 swab32s(p + 0);
1550                 swab32s(p + 1);
1551                 swab32s(p + 2);
1552                 swab32s(p + 3);
1553
1554                 printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
1555                                 NETDEV_PTR_TO_IOC_NAME_s(dev));
1556                 printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
1557                                 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1558                                 fch->saddr[3], fch->saddr[4], fch->saddr[5]);
1559         }
1560
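         /* Classify the frame by destination address: the group bit selects
          * broadcast or multicast, otherwise match against our own address.
          */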
1561         if (*fch->daddr & 1) {
1562                 if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
1563                         skb->pkt_type = PACKET_BROADCAST;
1564                 } else {
1565                         skb->pkt_type = PACKET_MULTICAST;
1566                 }
1567         } else {
1568                 if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
1569                         skb->pkt_type = PACKET_OTHERHOST;
1570                 } else {
1571                         skb->pkt_type = PACKET_HOST;
1572                 }
1573         }
1574
1575         fcllc = (struct fcllc *)skb->data;
1576
1577 #ifdef QLOGIC_NAA_WORKAROUND
1578 {
1579         u16 source_naa = fch->stype, found = 0;
1580
1581         /* Workaround for QLogic adapters that do not follow RFC 2625 with
1582            regard to the NAA value. */
1583
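         /* An NAA with a zero high nibble is assumed byte-swapped; normalize
          * it before comparing against MPT_LAN_NAA_RFC2625 below.
          */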
1584         if ((source_naa & 0xF000) == 0)
1585                 source_naa = swab16(source_naa);
1586
1587         if (fcllc->ethertype == htons(ETH_P_ARP))
1588             dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
1589                       "%04x.\n", source_naa));
1590
1591         if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
1592            ((source_naa >> 12) !=  MPT_LAN_NAA_RFC2625)){
1593                 struct NAA_Hosed *nh, *prevnh;
1594                 int i;
1595
1596                 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
1597                           "system with non-RFC 2625 NAA value (%04x).\n",
1598                           source_naa));
1599
1600                 write_lock_irq(&bad_naa_lock);
1601                 for (prevnh = nh = mpt_bad_naa; nh != NULL;
1602                      prevnh=nh, nh=nh->next) {
1603                         if ((nh->ieee[0] == fch->saddr[0]) &&
1604                             (nh->ieee[1] == fch->saddr[1]) &&
1605                             (nh->ieee[2] == fch->saddr[2]) &&
1606                             (nh->ieee[3] == fch->saddr[3]) &&
1607                             (nh->ieee[4] == fch->saddr[4]) &&
1608                             (nh->ieee[5] == fch->saddr[5])) {
1609                                 found = 1;
1610                                 dlprintk ((KERN_INFO "mptlan/type_trans: ARP "
1611                                          "Req/Rep w/ bad NAA from system "
1612                                          "already in DB.\n"));
1613                                 break;
1614                         }
1615                 }
1616
1617                 if ((!found) && (nh == NULL)) {
1618
1619                         nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL);
1620                         dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
1621                                  " bad NAA from system not yet in DB.\n"));
1622
1623                         if (nh != NULL) {
1624                                 nh->next = NULL;
1625                                 if (!mpt_bad_naa)
1626                                         mpt_bad_naa = nh;
1627                                 if (prevnh)
1628                                         prevnh->next = nh;
1629
1630                                 nh->NAA = source_naa; /* Set the S_NAA value. */
1631                                 for (i = 0; i < FC_ALEN; i++)
1632                                         nh->ieee[i] = fch->saddr[i];
1633                                 dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
1634                                           "%02x:%02x with non-compliant S_NAA value.\n",
1635                                           fch->saddr[0], fch->saddr[1], fch->saddr[2],
1636                                           fch->saddr[3], fch->saddr[4],fch->saddr[5]));
1637                         } else {
1638                                 printk (KERN_ERR "mptlan/type_trans: Unable to"
1639                                         " kmalloc a NAA_Hosed struct.\n");
1640                         }
1641                 } else if (!found) {
1642                         printk (KERN_ERR "mptlan/type_trans: found not"
1643                                 " set, but nh isn't null. Evil "
1644                                 "funkiness abounds.\n");
1645                 }
1646                 write_unlock_irq(&bad_naa_lock);
1647         }
1648 }
1649 #endif
1650
1651         /* Strip the SNAP header from ARP packets since we don't
1652          * pass them through to the 802.2/SNAP layers.
1653          */
1654         if (fcllc->dsap == EXTENDED_SAP &&
1655                 (fcllc->ethertype == htons(ETH_P_IP) ||
1656                  fcllc->ethertype == htons(ETH_P_ARP))) {
1657                 skb_pull(skb, sizeof(struct fcllc));
1658                 return fcllc->ethertype;
1659         }
1660
1661         return htons(ETH_P_802_2);
1662 }
1663
1664 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/