1 /*
2  *  linux/drivers/message/fusion/mptlan.c
3  *      IP Over Fibre Channel device driver.
4  *      For use with LSI Logic Fibre Channel PCI chip/adapters
5  *      running LSI Logic Fusion MPT (Message Passing Technology) firmware.
6  *
7  *  Copyright (c) 2000-2005 LSI Logic Corporation
8  *
9  */
10 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
11 /*
12     This program is free software; you can redistribute it and/or modify
13     it under the terms of the GNU General Public License as published by
14     the Free Software Foundation; version 2 of the License.
15
16     This program is distributed in the hope that it will be useful,
17     but WITHOUT ANY WARRANTY; without even the implied warranty of
18     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19     GNU General Public License for more details.
20
21     NO WARRANTY
22     THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
23     CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
24     LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
25     MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
26     solely responsible for determining the appropriateness of using and
27     distributing the Program and assumes all risks associated with its
28     exercise of rights under this Agreement, including but not limited to
29     the risks and costs of program errors, damage to or loss of data,
30     programs or equipment, and unavailability or interruption of operations.
31
32     DISCLAIMER OF LIABILITY
33     NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
34     DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35     DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
36     ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
37     TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
38     USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
39     HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
40
41     You should have received a copy of the GNU General Public License
42     along with this program; if not, write to the Free Software
43     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
44 */
45
46 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
47 /*
48  * Define statements used for debugging
49  */
50 //#define MPT_LAN_IO_DEBUG
51
52 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
53
54 #include "mptlan.h"
55 #include <linux/init.h>
56 #include <linux/module.h>
57 #include <linux/fs.h>
58
59 #define MYNAM           "mptlan"
60
61 MODULE_LICENSE("GPL");
62
63 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
64 /*
65  * MPT LAN message sizes without variable part.
66  */
67 #define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
68         (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
69
70 #define MPT_LAN_TRANSACTION32_SIZE \
71         (sizeof(SGETransaction32_t) - sizeof(u32))
72
73 /*
74  *  Fusion MPT LAN private structures
75  */
76
77 struct NAA_Hosed {
78         u16 NAA;
79         u8 ieee[FC_ALEN];
80         struct NAA_Hosed *next;
81 };
82
83 struct BufferControl {
84         struct sk_buff  *skb;
85         dma_addr_t      dma;
86         unsigned int    len;
87 };
88
89 struct mpt_lan_priv {
90         MPT_ADAPTER *mpt_dev;
91         u8 pnum; /* Port number in the IOC. This is not a Unix network port! */
92
93         atomic_t buckets_out;           /* number of unused buckets on IOC */
94         int bucketthresh;               /* Send more when this many left */
95
96         int *mpt_txfidx; /* Free Tx Context list */
97         int mpt_txfidx_tail;
98         spinlock_t txfidx_lock;
99
100         int *mpt_rxfidx; /* Free Rx Context list */
101         int mpt_rxfidx_tail;
102         spinlock_t rxfidx_lock;
103
104         struct BufferControl *RcvCtl;   /* Receive BufferControl structs */
105         struct BufferControl *SendCtl;  /* Send BufferControl structs */
106
107         int max_buckets_out;            /* Max buckets to send to IOC */
108         int tx_max_out;                 /* IOC's Tx queue len */
109
110         u32 total_posted;
111         u32 total_received;
112         struct net_device_stats stats;  /* Per device statistics */
113
114         struct work_struct post_buckets_task;
115         unsigned long post_buckets_active;
116 };
117
118 struct mpt_lan_ohdr {
119         u16     dtype;
120         u8      daddr[FC_ALEN];
121         u16     stype;
122         u8      saddr[FC_ALEN];
123 };
124
125 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
126
127 /*
128  *  Forward protos...
129  */
130 static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
131                        MPT_FRAME_HDR *reply);
132 static int  mpt_lan_open(struct net_device *dev);
133 static int  mpt_lan_reset(struct net_device *dev);
134 static int  mpt_lan_close(struct net_device *dev);
135 static void mpt_lan_post_receive_buckets(void *dev_id);
136 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
137                                            int priority);
138 static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
139 static int  mpt_lan_receive_post_reply(struct net_device *dev,
140                                        LANReceivePostReply_t *pRecvRep);
141 static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
142 static int  mpt_lan_send_reply(struct net_device *dev,
143                                LANSendReply_t *pSendRep);
144 static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
145 static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
146 static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
147                                          struct net_device *dev);
148
149 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
150 /*
151  *  Fusion MPT LAN private data
152  */
153 static int LanCtx = -1;
154
155 static u32 max_buckets_out = 127;
156 static u32 tx_max_out_p = 127 - 16;
157
158 #ifdef QLOGIC_NAA_WORKAROUND
159 static struct NAA_Hosed *mpt_bad_naa = NULL;
160 DEFINE_RWLOCK(bad_naa_lock);
161 #endif
162
163 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
164 /*
165  * Fusion MPT LAN external data
166  */
167 extern int mpt_lan_index;
168
169 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
170 /**
171  *      lan_reply - Handle all data sent from the hardware.
172  *      @ioc: Pointer to MPT_ADAPTER structure
173  *      @mf: Pointer to original MPT request frame (NULL if TurboReply)
174  *      @reply: Pointer to MPT reply frame
175  *
176  *      Returns 1 indicating original alloc'd request frame ptr
177  *      should be freed, or 0 if it shouldn't.
178  */
179 static int
180 lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
181 {
182         struct net_device *dev = ioc->netdev;
183         int FreeReqFrame = 0;
184
185         dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
186                   IOC_AND_NETDEV_NAMES_s_s(dev)));
187
188 //      dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
189 //                      mf, reply));
190
191         if (mf == NULL) {
192                 u32 tmsg = CAST_PTR_TO_U32(reply);
193
194                 dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
195                                 IOC_AND_NETDEV_NAMES_s_s(dev),
196                                 tmsg));
197
198                 switch (GET_LAN_FORM(tmsg)) {
199
200                 // NOTE!  (Optimization) First case here is now caught in
201                 //  mptbase.c::mpt_interrupt() routine and callback here
202                 //  is now skipped for this case!
203 #if 0
204                 case LAN_REPLY_FORM_MESSAGE_CONTEXT:
205 //                      dioprintk((KERN_INFO MYNAM "/lan_reply: "
206 //                                "MessageContext turbo reply received\n"));
207                         FreeReqFrame = 1;
208                         break;
209 #endif
210
211                 case LAN_REPLY_FORM_SEND_SINGLE:
212 //                      dioprintk((MYNAM "/lan_reply: "
213 //                                "calling mpt_lan_send_reply (turbo)\n"));
214
215                         // Potential BUG here?
216                         //      FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
217                         //  If/when mpt_lan_send_turbo would return 1 here,
218                         //  calling routine (mptbase.c|mpt_interrupt)
219                         //  would Oops because mf has already been set
220                         //  to NULL.  So after return from this func,
221                         //  mpt_interrupt() will attempt to put (NULL) mf ptr
222                         //  item back onto its adapter FreeQ - Oops!:-(
223                         //  It's Ok, since mpt_lan_send_turbo() *currently*
224                         //  always returns 0, but..., just in case:
225
226                         (void) mpt_lan_send_turbo(dev, tmsg);
227                         FreeReqFrame = 0;
228
229                         break;
230
231                 case LAN_REPLY_FORM_RECEIVE_SINGLE:
232 //                      dioprintk((KERN_INFO MYNAM "@lan_reply: "
233 //                                "rcv-Turbo = %08x\n", tmsg));
234                         mpt_lan_receive_post_turbo(dev, tmsg);
235                         break;
236
237                 default:
238                         printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
239                                 "that I don't know what to do with\n");
240
241                         /* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
242
243                         break;
244                 }
245
246                 return FreeReqFrame;
247         }
248
249 //      msg = (u32 *) reply;
250 //      dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
251 //                le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
252 //                le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
253 //      dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
254 //                reply->u.hdr.Function));
255
256         switch (reply->u.hdr.Function) {
257
258         case MPI_FUNCTION_LAN_SEND:
259         {
260                 LANSendReply_t *pSendRep;
261
262                 pSendRep = (LANSendReply_t *) reply;
263                 FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
264                 break;
265         }
266
267         case MPI_FUNCTION_LAN_RECEIVE:
268         {
269                 LANReceivePostReply_t *pRecvRep;
270
271                 pRecvRep = (LANReceivePostReply_t *) reply;
272                 if (pRecvRep->NumberOfContexts) {
273                         mpt_lan_receive_post_reply(dev, pRecvRep);
274                         if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
275                                 FreeReqFrame = 1;
276                 } else
277                         dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
278                                   "ReceivePostReply received.\n"));
279                 break;
280         }
281
282         case MPI_FUNCTION_LAN_RESET:
283                 /* Just a default reply. Might want to check it to
284                  * make sure that everything went ok.
285                  */
286                 FreeReqFrame = 1;
287                 break;
288
289         case MPI_FUNCTION_EVENT_NOTIFICATION:
290         case MPI_FUNCTION_EVENT_ACK:
291                 /*  _EVENT_NOTIFICATION should NOT come down this path any more.
292                  *  Should be routed to mpt_lan_event_process(), but just in case...
293                  */
294                 FreeReqFrame = 1;
295                 break;
296
297         default:
298                 printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
299                         "reply that I don't know what to do with\n");
300
301                 /* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
302                 FreeReqFrame = 1;
303
304                 break;
305         }
306
307         return FreeReqFrame;
308 }
309
310 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
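/*
 *      mpt_lan_ioc_reset - IOC reset callback routed to the LAN driver.
 *      Setup phase: nothing to do.  Pre-reset: stop the Tx queue, zero the
 *      outstanding-bucket count and rebuild the free Rx context list.
 *      Post-reset: repost receive buckets and wake the Tx queue.
 */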
311 static int
312 mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
313 {
314         struct net_device *dev = ioc->netdev;
315         struct mpt_lan_priv *priv;
316
317         if (dev == NULL)
318                 return(1);
319         else
320                 priv = netdev_priv(dev);
321
322         dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
323                         reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
324                         reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
325
326         if (priv->mpt_rxfidx == NULL)
327                 return (1);
328
329         if (reset_phase == MPT_IOC_SETUP_RESET) {
330                 ;
331         } else if (reset_phase == MPT_IOC_PRE_RESET) {
332                 int i;
333                 unsigned long flags;
334
335                 netif_stop_queue(dev);
336
337                 dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
338
339                 atomic_set(&priv->buckets_out, 0);
340
341                 /* Reset Rx Free Tail index and re-populate the queue. */
342                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
343                 priv->mpt_rxfidx_tail = -1;
344                 for (i = 0; i < priv->max_buckets_out; i++)
345                         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
346                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
347         } else {
348                 mpt_lan_post_receive_buckets(dev);
349                 netif_wake_queue(dev);
350         }
351
352         return 1;
353 }
354
355 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
356 static int
357 mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
358 {
359         dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
360
361         switch (le32_to_cpu(pEvReply->Event)) {
362         case MPI_EVENT_NONE:                            /* 00 */
363         case MPI_EVENT_LOG_DATA:                        /* 01 */
364         case MPI_EVENT_STATE_CHANGE:                    /* 02 */
365         case MPI_EVENT_UNIT_ATTENTION:                  /* 03 */
366         case MPI_EVENT_IOC_BUS_RESET:                   /* 04 */
367         case MPI_EVENT_EXT_BUS_RESET:                   /* 05 */
368         case MPI_EVENT_RESCAN:                          /* 06 */
369                 /* Ok, do we need to do anything here? As far as
370                    I can tell, this is when a new device gets added
371                    to the loop. */
372         case MPI_EVENT_LINK_STATUS_CHANGE:              /* 07 */
373         case MPI_EVENT_LOOP_STATE_CHANGE:               /* 08 */
374         case MPI_EVENT_LOGOUT:                          /* 09 */
375         case MPI_EVENT_EVENT_CHANGE:                    /* 0A */
376         default:
377                 break;
378         }
379
380         /*
381          *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
382          *  Do NOT do it here now!
383          */
384
385         return 1;
386 }
387
388 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
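/*
 *      mpt_lan_open - net_device open() handler.
 *      Resets the IOC LAN port, allocates the Tx/Rx free-context lists and
 *      BufferControl arrays, posts the initial receive buckets, registers
 *      for MPT event notifications and starts the Tx queue.  Returns -ENOMEM
 *      if any allocation fails.
 */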
389 static int
390 mpt_lan_open(struct net_device *dev)
391 {
392         struct mpt_lan_priv *priv = netdev_priv(dev);
393         int i;
394
395         if (mpt_lan_reset(dev) != 0) {
396                 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
397
398                 printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
399
400                 if (mpt_dev->active)
401                         printk ("The ioc is active. Perhaps it needs to be"
402                                 " reset?\n");
403                 else
404                         printk ("The ioc is inactive, most likely in the "
405                                 "process of being reset. Please try again in "
406                                 "a moment.\n");
407         }
408
409         priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
410         if (priv->mpt_txfidx == NULL)
411                 goto out;
412         priv->mpt_txfidx_tail = -1;
413
414         priv->SendCtl = kmalloc(priv->tx_max_out * sizeof(struct BufferControl),
415                                 GFP_KERNEL);
416         if (priv->SendCtl == NULL)
417                 goto out_mpt_txfidx;
418         for (i = 0; i < priv->tx_max_out; i++) {
419                 memset(&priv->SendCtl[i], 0, sizeof(struct BufferControl));
420                 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
421         }
422
423         dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
424
425         priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
426                                    GFP_KERNEL);
427         if (priv->mpt_rxfidx == NULL)
428                 goto out_SendCtl;
429         priv->mpt_rxfidx_tail = -1;
430
431         priv->RcvCtl = kmalloc(priv->max_buckets_out *
432                                                 sizeof(struct BufferControl),
433                                GFP_KERNEL);
434         if (priv->RcvCtl == NULL)
435                 goto out_mpt_rxfidx;
436         for (i = 0; i < priv->max_buckets_out; i++) {
437                 memset(&priv->RcvCtl[i], 0, sizeof(struct BufferControl));
438                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
439         }
440
441 /**/    dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
442 /**/    for (i = 0; i < priv->tx_max_out; i++)
443 /**/            dlprintk((" %xh", priv->mpt_txfidx[i]));
444 /**/    dlprintk(("\n"));
445
446         dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
447
448         mpt_lan_post_receive_buckets(dev);
449         printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
450                         IOC_AND_NETDEV_NAMES_s_s(dev));
451
452         if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
453                 printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
454                         " Notifications. This is a bad thing! We're going to "
455                         "go ahead anyway, but I'd be leery of system stability "
456                         "at this point.\n");
457         }
458
459         netif_start_queue(dev);
460         dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
461
462         return 0;
463 out_mpt_rxfidx:
464         kfree(priv->mpt_rxfidx);
465         priv->mpt_rxfidx = NULL;
466 out_SendCtl:
467         kfree(priv->SendCtl);
468         priv->SendCtl = NULL;
469 out_mpt_txfidx:
470         kfree(priv->mpt_txfidx);
471         priv->mpt_txfidx = NULL;
472 out:    return -ENOMEM;
473 }
474
475 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
476 /* Send a LanReset message to the FW. This should result in the FW returning
477    any buckets it still has. */
478 static int
479 mpt_lan_reset(struct net_device *dev)
480 {
481         MPT_FRAME_HDR *mf;
482         LANResetRequest_t *pResetReq;
483         struct mpt_lan_priv *priv = netdev_priv(dev);
484
485         mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);
486
487         if (mf == NULL) {
488 /*              dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
489                 "Unable to allocate a request frame.\n"));
490 */
491                 return -1;
492         }
493
494         pResetReq = (LANResetRequest_t *) mf;
495
496         pResetReq->Function     = MPI_FUNCTION_LAN_RESET;
497         pResetReq->ChainOffset  = 0;
498         pResetReq->Reserved     = 0;
499         pResetReq->PortNumber   = priv->pnum;
500         pResetReq->MsgFlags     = 0;
501         pResetReq->Reserved2    = 0;
502
503         mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);
504
505         return 0;
506 }
507
508 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
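/*
 *      mpt_lan_close - net_device stop() handler.
 *      Deregisters event notifications, resets the LAN port so the firmware
 *      returns its outstanding buckets, waits up to ~2 seconds for them, then
 *      unmaps and frees all remaining Rx/Tx buffers and the context lists.
 */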
509 static int
510 mpt_lan_close(struct net_device *dev)
511 {
512         struct mpt_lan_priv *priv = netdev_priv(dev);
513         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
514         unsigned int timeout;
515         int i;
516
517         dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));
518
519         mpt_event_deregister(LanCtx);
520
521         dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
522                   "since driver was loaded, %d still out\n",
523                   priv->total_posted,atomic_read(&priv->buckets_out)));
524
525         netif_stop_queue(dev);
526
527         mpt_lan_reset(dev);
528
529         timeout = 2 * HZ;
530         while (atomic_read(&priv->buckets_out) && --timeout) {
531                 set_current_state(TASK_INTERRUPTIBLE);
532                 schedule_timeout(1);
533         }
534
535         for (i = 0; i < priv->max_buckets_out; i++) {
536                 if (priv->RcvCtl[i].skb != NULL) {
537 /**/                    dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
538 /**/                              "is still out\n", i));
539                         pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
540                                          priv->RcvCtl[i].len,
541                                          PCI_DMA_FROMDEVICE);
542                         dev_kfree_skb(priv->RcvCtl[i].skb);
543                 }
544         }
545
546         kfree(priv->RcvCtl);
547         kfree(priv->mpt_rxfidx);
548
549         for (i = 0; i < priv->tx_max_out; i++) {
550                 if (priv->SendCtl[i].skb != NULL) {
551                         pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
552                                          priv->SendCtl[i].len,
553                                          PCI_DMA_TODEVICE);
554                         dev_kfree_skb(priv->SendCtl[i].skb);
555                 }
556         }
557
558         kfree(priv->SendCtl);
559         kfree(priv->mpt_txfidx);
560
561         atomic_set(&priv->buckets_out, 0);
562
563         printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
564                         IOC_AND_NETDEV_NAMES_s_s(dev));
565
566         return 0;
567 }
568
569 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
570 static struct net_device_stats *
571 mpt_lan_get_stats(struct net_device *dev)
572 {
573         struct mpt_lan_priv *priv = netdev_priv(dev);
574
575         return (struct net_device_stats *) &priv->stats;
576 }
577
578 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
579 static int
580 mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
581 {
582         if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
583                 return -EINVAL;
584         dev->mtu = new_mtu;
585         return 0;
586 }
587
588 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
589 /* Tx timeout handler. */
590 static void
591 mpt_lan_tx_timeout(struct net_device *dev)
592 {
593         struct mpt_lan_priv *priv = netdev_priv(dev);
594         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
595
596         if (mpt_dev->active) {
597                 dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
598                 netif_wake_queue(dev);
599         }
600 }
601
602 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
603 //static inline int
604 static int
605 mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
606 {
607         struct mpt_lan_priv *priv = netdev_priv(dev);
608         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
609         struct sk_buff *sent;
610         unsigned long flags;
611         u32 ctx;
612
613         ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
614         sent = priv->SendCtl[ctx].skb;
615
616         priv->stats.tx_packets++;
617         priv->stats.tx_bytes += sent->len;
618
619         dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
620                         IOC_AND_NETDEV_NAMES_s_s(dev),
621                         __FUNCTION__, sent));
622
623         priv->SendCtl[ctx].skb = NULL;
624         pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
625                          priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
626         dev_kfree_skb_irq(sent);
627
628         spin_lock_irqsave(&priv->txfidx_lock, flags);
629         priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
630         spin_unlock_irqrestore(&priv->txfidx_lock, flags);
631
632         netif_wake_queue(dev);
633         return 0;
634 }
635
636 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
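/*
 *      mpt_lan_send_reply - handle a non-turbo LANSend reply.
 *      Updates the Tx statistics from IOCStatus, then unmaps and frees the
 *      skb for each completed buffer context and returns the contexts to the
 *      free Tx list.
 */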
637 static int
638 mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
639 {
640         struct mpt_lan_priv *priv = netdev_priv(dev);
641         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
642         struct sk_buff *sent;
643         unsigned long flags;
644         int FreeReqFrame = 0;
645         u32 *pContext;
646         u32 ctx;
647         u8 count;
648
649         count = pSendRep->NumberOfContexts;
650
651         dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
652                  le16_to_cpu(pSendRep->IOCStatus)));
653
654         /* Add check for Loginfo Flag in IOCStatus */
655
656         switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
657         case MPI_IOCSTATUS_SUCCESS:
658                 priv->stats.tx_packets += count;
659                 break;
660
661         case MPI_IOCSTATUS_LAN_CANCELED:
662         case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
663                 break;
664
665         case MPI_IOCSTATUS_INVALID_SGL:
666                 priv->stats.tx_errors += count;
667                 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
668                                 IOC_AND_NETDEV_NAMES_s_s(dev));
669                 goto out;
670
671         default:
672                 priv->stats.tx_errors += count;
673                 break;
674         }
675
676         pContext = &pSendRep->BufferContext;
677
678         spin_lock_irqsave(&priv->txfidx_lock, flags);
679         while (count > 0) {
680                 ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
681
682                 sent = priv->SendCtl[ctx].skb;
683                 priv->stats.tx_bytes += sent->len;
684
685                 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
686                                 IOC_AND_NETDEV_NAMES_s_s(dev),
687                                 __FUNCTION__, sent));
688
689                 priv->SendCtl[ctx].skb = NULL;
690                 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
691                                  priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
692                 dev_kfree_skb_irq(sent);
693
694                 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
695
696                 pContext++;
697                 count--;
698         }
699         spin_unlock_irqrestore(&priv->txfidx_lock, flags);
700
701 out:
702         if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
703                 FreeReqFrame = 1;
704
705         netif_wake_queue(dev);
706         return FreeReqFrame;
707 }
708
709 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
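/*
 *      mpt_lan_sdu_send - hard_start_xmit handler.
 *      Grabs a free Tx context, DMA-maps the skb, and builds a LANSendRequest
 *      carrying the NAA/destination address in the transaction details plus a
 *      single 64-bit simple SGE for the payload, then posts it to the IOC.
 */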
710 static int
711 mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
712 {
713         struct mpt_lan_priv *priv = netdev_priv(dev);
714         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
715         MPT_FRAME_HDR *mf;
716         LANSendRequest_t *pSendReq;
717         SGETransaction32_t *pTrans;
718         SGESimple64_t *pSimple;
719         dma_addr_t dma;
720         unsigned long flags;
721         int ctx;
722         u16 cur_naa = 0x1000;
723
724         dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
725                         __FUNCTION__, skb));
726
727         spin_lock_irqsave(&priv->txfidx_lock, flags);
728         if (priv->mpt_txfidx_tail < 0) {
729                 netif_stop_queue(dev);
730                 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
731
732                 printk (KERN_ERR "%s: no tx context available: %u\n",
733                         __FUNCTION__, priv->mpt_txfidx_tail);
734                 return 1;
735         }
736
737         mf = mpt_get_msg_frame(LanCtx, mpt_dev);
738         if (mf == NULL) {
739                 netif_stop_queue(dev);
740                 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
741
742                 printk (KERN_ERR "%s: Unable to alloc request frame\n",
743                         __FUNCTION__);
744                 return 1;
745         }
746
747         ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
748         spin_unlock_irqrestore(&priv->txfidx_lock, flags);
749
750 //      dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
751 //                      IOC_AND_NETDEV_NAMES_s_s(dev)));
752
753         pSendReq = (LANSendRequest_t *) mf;
754
755         /* Set the mac.raw pointer, since this apparently isn't getting
756          * done before we get the skb. Pull the data pointer past the mac data.
757          */
758         skb->mac.raw = skb->data;
759         skb_pull(skb, 12);
760
761         dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
762                              PCI_DMA_TODEVICE);
763
764         priv->SendCtl[ctx].skb = skb;
765         priv->SendCtl[ctx].dma = dma;
766         priv->SendCtl[ctx].len = skb->len;
767
768         /* Message Header */
769         pSendReq->Reserved    = 0;
770         pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
771         pSendReq->ChainOffset = 0;
772         pSendReq->Reserved2   = 0;
773         pSendReq->MsgFlags    = 0;
774         pSendReq->PortNumber  = priv->pnum;
775
776         /* Transaction Context Element */
777         pTrans = (SGETransaction32_t *) pSendReq->SG_List;
778
779         /* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
780         pTrans->ContextSize   = sizeof(u32);
781         pTrans->DetailsLength = 2 * sizeof(u32);
782         pTrans->Flags         = 0;
783         pTrans->TransactionContext[0] = cpu_to_le32(ctx);
784
785 //      dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
786 //                      IOC_AND_NETDEV_NAMES_s_s(dev),
787 //                      ctx, skb, skb->data));
788
789 #ifdef QLOGIC_NAA_WORKAROUND
790 {
791         struct NAA_Hosed *nh;
792
793         /* Munge the NAA for Tx packets to QLogic boards, which don't follow
794            RFC 2625. The longer I look at this, the more my opinion of Qlogic
795            drops. */
796         read_lock_irq(&bad_naa_lock);
797         for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
798                 if ((nh->ieee[0] == skb->mac.raw[0]) &&
799                     (nh->ieee[1] == skb->mac.raw[1]) &&
800                     (nh->ieee[2] == skb->mac.raw[2]) &&
801                     (nh->ieee[3] == skb->mac.raw[3]) &&
802                     (nh->ieee[4] == skb->mac.raw[4]) &&
803                     (nh->ieee[5] == skb->mac.raw[5])) {
804                         cur_naa = nh->NAA;
805                         dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
806                                   "= %04x.\n", cur_naa));
807                         break;
808                 }
809         }
810         read_unlock_irq(&bad_naa_lock);
811 }
812 #endif
813
814         pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
815                                                     (skb->mac.raw[0] <<  8) |
816                                                     (skb->mac.raw[1] <<  0));
817         pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) |
818                                                     (skb->mac.raw[3] << 16) |
819                                                     (skb->mac.raw[4] <<  8) |
820                                                     (skb->mac.raw[5] <<  0));
821
822         pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
823
824         /* If we ever decide to send more than one Simple SGE per LANSend, then
825            we will need to make sure that LAST_ELEMENT only gets set on the
826            last one. Otherwise, bad voodoo and evil funkiness will commence. */
827         pSimple->FlagsLength = cpu_to_le32(
828                         ((MPI_SGE_FLAGS_LAST_ELEMENT |
829                           MPI_SGE_FLAGS_END_OF_BUFFER |
830                           MPI_SGE_FLAGS_SIMPLE_ELEMENT |
831                           MPI_SGE_FLAGS_SYSTEM_ADDRESS |
832                           MPI_SGE_FLAGS_HOST_TO_IOC |
833                           MPI_SGE_FLAGS_64_BIT_ADDRESSING |
834                           MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
835                         skb->len);
836         pSimple->Address.Low = cpu_to_le32((u32) dma);
837         if (sizeof(dma_addr_t) > sizeof(u32))
838                 pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
839         else
840                 pSimple->Address.High = 0;
841
842         mpt_put_msg_frame (LanCtx, mpt_dev, mf);
843         dev->trans_start = jiffies;
844
845         dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
846                         IOC_AND_NETDEV_NAMES_s_s(dev),
847                         le32_to_cpu(pSimple->FlagsLength)));
848
849         return 0;
850 }
851
852 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
853 static inline void
854 mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
855 /*
856  * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
857  */
858 {
859         struct mpt_lan_priv *priv = dev->priv;
860         
861         if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
862                 if (priority) {
863                         schedule_work(&priv->post_buckets_task);
864                 } else {
865                         schedule_delayed_work(&priv->post_buckets_task, 1);
866                         dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
867                                    "timer.\n"));
868                 }
869                 dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
870                            IOC_AND_NETDEV_NAMES_s_s(dev) ));
871         }
872 }
873
874 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
875 static inline int
876 mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
877 {
878         struct mpt_lan_priv *priv = dev->priv;
879
880         skb->protocol = mpt_lan_type_trans(skb, dev);
881
882         dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
883                  "delivered to upper level.\n",
884                         IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
885
886         priv->stats.rx_bytes += skb->len;
887         priv->stats.rx_packets++;
888
889         skb->dev = dev;
890         netif_rx(skb);
891
892         dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
893                  atomic_read(&priv->buckets_out)));
894
895         if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
896                 mpt_lan_wake_post_buckets_task(dev, 1);
897
898         dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
899                   "remaining, %d received back since sod\n",
900                   atomic_read(&priv->buckets_out), priv->total_received));
901
902         return 0;
903 }
904
905 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
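/*
 *      mpt_lan_receive_post_turbo - handle a turbo-mode receive completion.
 *      Packets shorter than MPT_LAN_RX_COPYBREAK are copied into a fresh skb
 *      so the bucket skb stays mapped for reuse; larger packets are unmapped
 *      and handed up directly.
 */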
906 //static inline int
907 static int
908 mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
909 {
910         struct mpt_lan_priv *priv = dev->priv;
911         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
912         struct sk_buff *skb, *old_skb;
913         unsigned long flags;
914         u32 ctx, len;
915
916         ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
917         skb = priv->RcvCtl[ctx].skb;
918
919         len = GET_LAN_PACKET_LENGTH(tmsg);
920
921         if (len < MPT_LAN_RX_COPYBREAK) {
922                 old_skb = skb;
923
924                 skb = (struct sk_buff *)dev_alloc_skb(len);
925                 if (!skb) {
926                         printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
927                                         IOC_AND_NETDEV_NAMES_s_s(dev),
928                                         __FILE__, __LINE__);
929                         return -ENOMEM;
930                 }
931
932                 pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
933                                             priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
934
935                 memcpy(skb_put(skb, len), old_skb->data, len);
936
937                 pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
938                                                priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
939                 goto out;
940         }
941
942         skb_put(skb, len);
943
944         priv->RcvCtl[ctx].skb = NULL;
945
946         pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
947                          priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
948
949 out:
950         spin_lock_irqsave(&priv->rxfidx_lock, flags);
951         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
952         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
953
954         atomic_dec(&priv->buckets_out);
955         priv->total_received++;
956
957         return mpt_lan_receive_skb(dev, skb);
958 }
959
960 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
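/*
 *      mpt_lan_receive_post_free - handle a canceled receive post.
 *      The IOC is handing back unused buckets (e.g. after a LanReset); unmap
 *      and free their skbs and return the contexts to the free Rx list.
 */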
961 static int
962 mpt_lan_receive_post_free(struct net_device *dev,
963                           LANReceivePostReply_t *pRecvRep)
964 {
965         struct mpt_lan_priv *priv = dev->priv;
966         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
967         unsigned long flags;
968         struct sk_buff *skb;
969         u32 ctx;
970         int count;
971         int i;
972
973         count = pRecvRep->NumberOfContexts;
974
975 /**/    dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
976                   "IOC returned %d buckets, freeing them...\n", count));
977
978         spin_lock_irqsave(&priv->rxfidx_lock, flags);
979         for (i = 0; i < count; i++) {
980                 ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
981
982                 skb = priv->RcvCtl[ctx].skb;
983
984 //              dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
985 //                              IOC_AND_NETDEV_NAMES_s_s(dev)));
986 //              dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
987 //                              priv, &(priv->buckets_out)));
988 //              dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
989
990                 priv->RcvCtl[ctx].skb = NULL;
991                 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
992                                  priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
993                 dev_kfree_skb_any(skb);
994
995                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
996         }
997         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
998
999         atomic_sub(count, &priv->buckets_out);
1000
1001 //      for (i = 0; i < priv->max_buckets_out; i++)
1002 //              if (priv->RcvCtl[i].skb != NULL)
1003 //                      dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
1004 //                                "is still out\n", i));
1005
1006 /*      dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
1007                   count));
1008 */
1009 /**/    dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
1010 /**/              "remaining, %d received back since sod.\n",
1011 /**/              atomic_read(&priv->buckets_out), priv->total_received));
1012         return 0;
1013 }
1014
1015 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
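/*
 *      mpt_lan_receive_post_reply - handle a non-turbo ReceivePostReply.
 *      Reassembles a packet that may span several buckets, copying or
 *      recycling the bucket skbs as appropriate, returns the contexts to the
 *      free Rx list, and passes the result up via mpt_lan_receive_skb().
 */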
1016 static int
1017 mpt_lan_receive_post_reply(struct net_device *dev,
1018                            LANReceivePostReply_t *pRecvRep)
1019 {
1020         struct mpt_lan_priv *priv = dev->priv;
1021         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1022         struct sk_buff *skb, *old_skb;
1023         unsigned long flags;
1024         u32 len, ctx, offset;
1025         u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
1026         int count;
1027         int i, l;
1028
1029         dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
1030         dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
1031                  le16_to_cpu(pRecvRep->IOCStatus)));
1032
1033         if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
1034                                                 MPI_IOCSTATUS_LAN_CANCELED)
1035                 return mpt_lan_receive_post_free(dev, pRecvRep);
1036
1037         len = le32_to_cpu(pRecvRep->PacketLength);
1038         if (len == 0) {
1039                 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
1040                         "ReceivePostReply w/ PacketLength zero!\n",
1041                                 IOC_AND_NETDEV_NAMES_s_s(dev));
1042                 printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
1043                                 pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
1044                 return -1;
1045         }
1046
1047         ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
1048         count  = pRecvRep->NumberOfContexts;
1049         skb    = priv->RcvCtl[ctx].skb;
1050
1051         offset = le32_to_cpu(pRecvRep->PacketOffset);
1052 //      if (offset != 0) {
1053 //              printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
1054 //                      "w/ PacketOffset %u\n",
1055 //                              IOC_AND_NETDEV_NAMES_s_s(dev),
1056 //                              offset);
1057 //      }
1058
1059         dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
1060                         IOC_AND_NETDEV_NAMES_s_s(dev),
1061                         offset, len));
1062
1063         if (count > 1) {
1064                 int szrem = len;
1065
1066 //              dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
1067 //                      "for single packet, concatenating...\n",
1068 //                              IOC_AND_NETDEV_NAMES_s_s(dev)));
1069
1070                 skb = (struct sk_buff *)dev_alloc_skb(len);
1071                 if (!skb) {
1072                         printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1073                                         IOC_AND_NETDEV_NAMES_s_s(dev),
1074                                         __FILE__, __LINE__);
1075                         return -ENOMEM;
1076                 }
1077
1078                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1079                 for (i = 0; i < count; i++) {
1080
1081                         ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
1082                         old_skb = priv->RcvCtl[ctx].skb;
1083
1084                         l = priv->RcvCtl[ctx].len;
1085                         if (szrem < l)
1086                                 l = szrem;
1087
1088 //                      dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
1089 //                                      IOC_AND_NETDEV_NAMES_s_s(dev),
1090 //                                      i, l));
1091
1092                         pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1093                                                     priv->RcvCtl[ctx].dma,
1094                                                     priv->RcvCtl[ctx].len,
1095                                                     PCI_DMA_FROMDEVICE);
1096                         memcpy(skb_put(skb, l), old_skb->data, l);
1097
1098                         pci_dma_sync_single_for_device(mpt_dev->pcidev,
1099                                                        priv->RcvCtl[ctx].dma,
1100                                                        priv->RcvCtl[ctx].len,
1101                                                        PCI_DMA_FROMDEVICE);
1102
1103                         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1104                         szrem -= l;
1105                 }
1106                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1107
1108         } else if (len < MPT_LAN_RX_COPYBREAK) {
1109
1110                 old_skb = skb;
1111
1112                 skb = (struct sk_buff *)dev_alloc_skb(len);
1113                 if (!skb) {
1114                         printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1115                                         IOC_AND_NETDEV_NAMES_s_s(dev),
1116                                         __FILE__, __LINE__);
1117                         return -ENOMEM;
1118                 }
1119
1120                 pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1121                                             priv->RcvCtl[ctx].dma,
1122                                             priv->RcvCtl[ctx].len,
1123                                             PCI_DMA_FROMDEVICE);
1124
1125                 memcpy(skb_put(skb, len), old_skb->data, len);
1126
1127                 pci_dma_sync_single_for_device(mpt_dev->pcidev,
1128                                                priv->RcvCtl[ctx].dma,
1129                                                priv->RcvCtl[ctx].len,
1130                                                PCI_DMA_FROMDEVICE);
1131
1132                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1133                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1134                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1135
1136         } else {
1137                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1138
1139                 priv->RcvCtl[ctx].skb = NULL;
1140
1141                 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1142                                  priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1143                 priv->RcvCtl[ctx].dma = 0;
1144
1145                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1146                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1147
1148                 skb_put(skb,len);
1149         }
1150
1151         atomic_sub(count, &priv->buckets_out);
1152         priv->total_received += count;
1153
1154         if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
1155                 printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
1156                         "MPT_LAN_MAX_BUCKETS_OUT = %d\n",
1157                                 IOC_AND_NETDEV_NAMES_s_s(dev),
1158                                 priv->mpt_rxfidx_tail,
1159                                 MPT_LAN_MAX_BUCKETS_OUT);
1160
1161                 panic("Damn it Jim! I'm a doctor, not a programmer! "
1162                                 "Oh, wait a sec, I am a programmer. "
1163                                 "And, who's Jim?!?!\n"
1164                                 "Arrgghh! We've done it again!\n");
1165         }
1166
1167         if (remaining == 0)
1168                 printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
1169                         "(priv->buckets_out = %d)\n",
1170                         IOC_AND_NETDEV_NAMES_s_s(dev),
1171                         atomic_read(&priv->buckets_out));
1172         else if (remaining < 10)
1173                 printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
1174                         "(priv->buckets_out = %d)\n",
1175                         IOC_AND_NETDEV_NAMES_s_s(dev),
1176                         remaining, atomic_read(&priv->buckets_out));
1177         
1178         if ((remaining < priv->bucketthresh) &&
1179             ((atomic_read(&priv->buckets_out) - remaining) >
1180              MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
1181                 
1182                 printk (KERN_WARNING MYNAM " Mismatch between driver's "
1183                         "buckets_out count and fw's BucketsRemaining "
1184                         "count has crossed the threshold, issuing a "
1185                         "LanReset to clear the fw's hashtable. You may "
1186                         "want to check your /var/log/messages for \"CRC "
1187                         "error\" event notifications.\n");
1188                 
1189                 mpt_lan_reset(dev);
1190                 mpt_lan_wake_post_buckets_task(dev, 0);
1191         }
1192         
1193         return mpt_lan_receive_skb(dev, skb);
1194 }
1195
1196 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1197 /* Simple SGE's only at the moment */
1198
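/*
 *      mpt_lan_post_receive_buckets - replenish the IOC's receive buckets.
 *      For each free Rx context, (re)allocate and DMA-map an skb of bucket
 *      size, then post the buckets to the firmware in LANReceivePostRequest
 *      frames until max_buckets_out are outstanding (or request frames or
 *      contexts run out).
 */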
1199 static void
1200 mpt_lan_post_receive_buckets(void *dev_id)
1201 {
1202         struct net_device *dev = dev_id;
1203         struct mpt_lan_priv *priv = dev->priv;
1204         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1205         MPT_FRAME_HDR *mf;
1206         LANReceivePostRequest_t *pRecvReq;
1207         SGETransaction32_t *pTrans;
1208         SGESimple64_t *pSimple;
1209         struct sk_buff *skb;
1210         dma_addr_t dma;
1211         u32 curr, buckets, count, max;
1212         u32 len = (dev->mtu + dev->hard_header_len + 4);
1213         unsigned long flags;
1214         int i;
1215
1216         curr = atomic_read(&priv->buckets_out);
1217         buckets = (priv->max_buckets_out - curr);
1218
1219         dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
1220                         IOC_AND_NETDEV_NAMES_s_s(dev),
1221                         __FUNCTION__, buckets, curr));
1222
1223         max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
1224                         (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
1225
1226         while (buckets) {
1227                 mf = mpt_get_msg_frame(LanCtx, mpt_dev);
1228                 if (mf == NULL) {
1229                         printk (KERN_ERR "%s: Unable to alloc request frame\n",
1230                                 __FUNCTION__);
1231                         dioprintk((KERN_ERR "%s: %u buckets remaining\n",
1232                                  __FUNCTION__, buckets));
1233                         goto out;
1234                 }
1235                 pRecvReq = (LANReceivePostRequest_t *) mf;
1236
1237                 count = buckets;
1238                 if (count > max)
1239                         count = max;
1240
1241                 pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
1242                 pRecvReq->ChainOffset = 0;
1243                 pRecvReq->MsgFlags    = 0;
1244                 pRecvReq->PortNumber  = priv->pnum;
1245
1246                 pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
1247                 pSimple = NULL;
1248
1249                 for (i = 0; i < count; i++) {
1250                         int ctx;
1251
1252                         spin_lock_irqsave(&priv->rxfidx_lock, flags);
1253                         if (priv->mpt_rxfidx_tail < 0) {
1254                                 printk (KERN_ERR "%s: Can't alloc context\n",
1255                                         __FUNCTION__);
1256                                 spin_unlock_irqrestore(&priv->rxfidx_lock,
1257                                                        flags);
1258                                 break;
1259                         }
1260
1261                         ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];
1262
1263                         skb = priv->RcvCtl[ctx].skb;
1264                         if (skb && (priv->RcvCtl[ctx].len != len)) {
1265                                 pci_unmap_single(mpt_dev->pcidev,
1266                                                  priv->RcvCtl[ctx].dma,
1267                                                  priv->RcvCtl[ctx].len,
1268                                                  PCI_DMA_FROMDEVICE);
1269                                 dev_kfree_skb(priv->RcvCtl[ctx].skb);
1270                                 skb = priv->RcvCtl[ctx].skb = NULL;
1271                         }
1272
1273                         if (skb == NULL) {
1274                                 skb = dev_alloc_skb(len);
1275                                 if (skb == NULL) {
1276                                         printk (KERN_WARNING
1277                                                 MYNAM "/%s: Can't alloc skb\n",
1278                                                 __FUNCTION__);
1279                                         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1280                                         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1281                                         break;
1282                                 }
1283
1284                                 dma = pci_map_single(mpt_dev->pcidev, skb->data,
1285                                                      len, PCI_DMA_FROMDEVICE);
1286
1287                                 priv->RcvCtl[ctx].skb = skb;
1288                                 priv->RcvCtl[ctx].dma = dma;
1289                                 priv->RcvCtl[ctx].len = len;
1290                         }
1291
1292                         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1293
1294                         pTrans->ContextSize   = sizeof(u32);
1295                         pTrans->DetailsLength = 0;
1296                         pTrans->Flags         = 0;
1297                         pTrans->TransactionContext[0] = cpu_to_le32(ctx);
1298
1299                         pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
1300
1301                         pSimple->FlagsLength = cpu_to_le32(
1302                                 ((MPI_SGE_FLAGS_END_OF_BUFFER |
1303                                   MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1304                                   MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
1305                         pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
1306                         if (sizeof(dma_addr_t) > sizeof(u32))
1307                                 pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
1308                         else
1309                                 pSimple->Address.High = 0;
1310
1311                         pTrans = (SGETransaction32_t *) (pSimple + 1);
1312                 }
1313
1314                 if (pSimple == NULL) {
1315 /**/                    printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
1316 /**/                            __FUNCTION__);
1317                         mpt_free_msg_frame(mpt_dev, mf);
1318                         goto out;
1319                 }
1320
1321                 pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);
1322
1323                 pRecvReq->BucketCount = cpu_to_le32(i);
1324
1325 /*      printk(KERN_INFO MYNAM ": posting buckets\n   ");
1326  *      for (i = 0; i < j + 2; i ++)
1327  *          printk (" %08x", le32_to_cpu(msg[i]));
1328  *      printk ("\n");
1329  */
1330
1331                 mpt_put_msg_frame(LanCtx, mpt_dev, mf);
1332
1333                 priv->total_posted += i;
1334                 buckets -= i;
1335                 atomic_add(i, &priv->buckets_out);
1336         }
1337
1338 out:
1339         dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
1340                   __FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
1341         dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
1342         __FUNCTION__, priv->total_posted, priv->total_received));
1343
1344         clear_bit(0, &priv->post_buckets_active);
1345 }
1346
1347 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
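/*
 *      mpt_register_lan_device - allocate and register one FC net_device for
 *      the given IOC port.  Pulls the hardware address (byte-reversed) from
 *      the cached LANPage1 data and wires up the net_device methods.
 */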
1348 static struct net_device *
1349 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
1350 {
1351         struct net_device *dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
1352         struct mpt_lan_priv *priv = NULL;
1353         u8 HWaddr[FC_ALEN], *a;
1354
1355         if (!dev)
1356                 return NULL;
1357
1358         dev->mtu = MPT_LAN_MTU;
1359
1360         priv = netdev_priv(dev);
1361
1362         priv->mpt_dev = mpt_dev;
1363         priv->pnum = pnum;
1364
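        /* Work item used to replenish receive buckets outside the
         * reply/interrupt path. */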
1365         memset(&priv->post_buckets_task, 0, sizeof(struct work_struct));
1366         INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev);
1367         priv->post_buckets_active = 0;
1368
1369         dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
1370                         __LINE__, dev->mtu + dev->hard_header_len + 4));
1371
1372         atomic_set(&priv->buckets_out, 0);
1373         priv->total_posted = 0;
1374         priv->total_received = 0;
1375         priv->max_buckets_out = max_buckets_out;
1376         if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
1377                 priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
1378
1379         dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
1380                         __LINE__,
1381                         mpt_dev->pfacts[0].MaxLanBuckets,
1382                         max_buckets_out,
1383                         priv->max_buckets_out));
1384
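        /* Threshold (about two thirds of the maximum outstanding) used
         * elsewhere in the driver to decide when to replenish buckets. */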
1385         priv->bucketthresh = priv->max_buckets_out * 2 / 3;
1386         spin_lock_init(&priv->txfidx_lock);
1387         spin_lock_init(&priv->rxfidx_lock);
1388
1389         memset(&priv->stats, 0, sizeof(priv->stats));
1390
1391         /*  Grab pre-fetched LANPage1 stuff. :-) */
1392         a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
1393
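        /* The address is stored low byte first; reverse it into
         * canonical MAC byte order. */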
1394         HWaddr[0] = a[5];
1395         HWaddr[1] = a[4];
1396         HWaddr[2] = a[3];
1397         HWaddr[3] = a[2];
1398         HWaddr[4] = a[1];
1399         HWaddr[5] = a[0];
1400
1401         dev->addr_len = FC_ALEN;
1402         memcpy(dev->dev_addr, HWaddr, FC_ALEN);
1403         memset(dev->broadcast, 0xff, FC_ALEN);
1404
1405         /* The Tx queue is 127 deep on the 909.
1406          * Give ourselves some breathing room.
1407          */
1408         priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
1409                             tx_max_out_p : MPT_TX_MAX_OUT_LIM;
1410
1411         dev->open = mpt_lan_open;
1412         dev->stop = mpt_lan_close;
1413         dev->get_stats = mpt_lan_get_stats;
1414         dev->set_multicast_list = NULL;
1415         dev->change_mtu = mpt_lan_change_mtu;
1416         dev->hard_start_xmit = mpt_lan_sdu_send;
1417
1418 /* Not in 2.3.42. Need 2.3.45+ */
1419         dev->tx_timeout = mpt_lan_tx_timeout;
1420         dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
1421
1422         dlprintk((KERN_INFO MYNAM ": Finished registering dev "
1423                 "and setting initial values\n"));
1424
1425         SET_MODULE_OWNER(dev);
1426
1427         if (register_netdev(dev) != 0) {
1428                 free_netdev(dev);
1429                 dev = NULL;
1430         }
1431         return dev;
1432 }
1433
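/*
 * Probe callback invoked via the MPT base driver: walk the IOC's ports,
 * skip any that do not advertise the LAN protocol, and register a
 * net_device for the first LAN-capable port found.
 */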
1434 static int
1435 mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1436 {
1437         MPT_ADAPTER             *ioc = pci_get_drvdata(pdev);
1438         struct net_device       *dev;
1439         int                     i;
1440
1441         for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
1442                 printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
1443                        "ProtocolFlags=%02Xh (%c%c%c%c)\n",
1444                        ioc->name, ioc->pfacts[i].PortNumber,
1445                        ioc->pfacts[i].ProtocolFlags,
1446                        MPT_PROTOCOL_FLAGS_c_c_c_c(
1447                                ioc->pfacts[i].ProtocolFlags));
1448
1449                 if (!(ioc->pfacts[i].ProtocolFlags &
1450                                         MPI_PORTFACTS_PROTOCOL_LAN)) {
1451                         printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
1452                                "seems to be disabled on this adapter port!\n",
1453                                ioc->name);
1454                         continue;
1455                 }
1456
1457                 dev = mpt_register_lan_device(ioc, i);
1458                 if (!dev) {
1459                         printk(KERN_ERR MYNAM ": %s: Unable to register "
1460                                "port%d as a LAN device\n", ioc->name,
1461                                ioc->pfacts[i].PortNumber);
1462                         continue;
1463                 }
1464
1465                 printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
1466                        "registered as '%s'\n", ioc->name, dev->name);
1467                 printk(KERN_INFO MYNAM ": %s/%s: "
1468                        "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
1469                        IOC_AND_NETDEV_NAMES_s_s(dev),
1470                        dev->dev_addr[0], dev->dev_addr[1],
1471                        dev->dev_addr[2], dev->dev_addr[3],
1472                        dev->dev_addr[4], dev->dev_addr[5]);
1473
1474                 ioc->netdev = dev;
1475
1476                 return 0;
1477         }
1478
1479         return -ENODEV;
1480 }
1481
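/*
 * Undo mptlan_probe(): unregister and free the net_device, if one was
 * created for this IOC.
 */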
1482 static void
1483 mptlan_remove(struct pci_dev *pdev)
1484 {
1485         MPT_ADAPTER             *ioc = pci_get_drvdata(pdev);
1486         struct net_device       *dev = ioc->netdev;
1487
1488         if(dev != NULL) {
1489                 unregister_netdev(dev);
1490                 free_netdev(dev);
1491         }
1492 }
1493
1494 static struct mpt_pci_driver mptlan_driver = {
1495         .probe          = mptlan_probe,
1496         .remove         = mptlan_remove,
1497 };
1498
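/*
 * Module init: register the LAN protocol handler, its IOC reset handler,
 * and its device-driver callbacks with the MPT base driver.
 */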
1499 static int __init mpt_lan_init (void)
1500 {
1501         show_mptmod_ver(LANAME, LANVER);
1502
1503         if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
1504                 printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1505                 return -EBUSY;
1506         }
1507
1508         /* Set the callback index to be used by driver core for turbo replies */
1509         mpt_lan_index = LanCtx;
1510
1511         dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1512
1513         if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
1514                 printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
1515                        "handler with mptbase! The world is at an end! "
1516                        "Everything is fading to black! Goodbye.\n");
                mpt_deregister(LanCtx);         /* don't leak the LAN context */
1517                 return -EBUSY;
1518         }
1519
1520         dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1521
1522         if (mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER))
1523                 dprintk((KERN_INFO MYNAM ": failed to register dd callbacks\n"));
1524         return 0;
1525 }
1526
1527 static void __exit mpt_lan_exit(void)
1528 {
1529         mpt_device_driver_deregister(MPTLAN_DRIVER);
1530         mpt_reset_deregister(LanCtx);
1531
1532         if (LanCtx >= 0) {
1533                 mpt_deregister(LanCtx);
1534                 LanCtx = -1;
1535                 mpt_lan_index = 0;
1536         }
1537 }
1538
1539 module_init(mpt_lan_init);
1540 module_exit(mpt_lan_exit);
1541
1542 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
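/*
 * Fibre Channel analogue of eth_type_trans(): set skb->pkt_type from the
 * destination address, strip the MPT LAN header (and the LLC/SNAP header
 * for IP and ARP frames), and return the value to use for skb->protocol.
 */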
1543 static unsigned short
1544 mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1545 {
1546         struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
1547         struct fcllc *fcllc;
1548
1549         skb->mac.raw = skb->data;
1550         skb_pull(skb, sizeof(struct mpt_lan_ohdr));
1551
1552         if (fch->dtype == htons(0xffff)) {
1553                 u32 *p = (u32 *) fch;
1554
1555                 swab32s(p + 0);
1556                 swab32s(p + 1);
1557                 swab32s(p + 2);
1558                 swab32s(p + 3);
1559
1560                 printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
1561                                 NETDEV_PTR_TO_IOC_NAME_s(dev));
1562                 printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
1563                                 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1564                                 fch->saddr[3], fch->saddr[4], fch->saddr[5]);
1565         }
1566
1567         if (*fch->daddr & 1) {
1568                 if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
1569                         skb->pkt_type = PACKET_BROADCAST;
1570                 } else {
1571                         skb->pkt_type = PACKET_MULTICAST;
1572                 }
1573         } else {
1574                 if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
1575                         skb->pkt_type = PACKET_OTHERHOST;
1576                 } else {
1577                         skb->pkt_type = PACKET_HOST;
1578                 }
1579         }
1580
1581         fcllc = (struct fcllc *)skb->data;
1582
1583 #ifdef QLOGIC_NAA_WORKAROUND
1584 {
1585         u16 source_naa = fch->stype, found = 0;
1586
1587         /* Workaround for QLogic not following RFC 2625 in regards to the NAA
1588            value. */
1589
1590         if ((source_naa & 0xF000) == 0)
1591                 source_naa = swab16(source_naa);
1592
1593         if (fcllc->ethertype == htons(ETH_P_ARP))
1594             dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
1595                       "%04x.\n", source_naa));
1596
1597         if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
1598            ((source_naa >> 12) != MPT_LAN_NAA_RFC2625)) {
1599                 struct NAA_Hosed *nh, *prevnh;
1600                 int i;
1601
1602                 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
1603                           "system with non-RFC 2625 NAA value (%04x).\n",
1604                           source_naa));
1605
1606                 write_lock_irq(&bad_naa_lock);
1607                 for (prevnh = nh = mpt_bad_naa; nh != NULL;
1608                      prevnh=nh, nh=nh->next) {
1609                         if ((nh->ieee[0] == fch->saddr[0]) &&
1610                             (nh->ieee[1] == fch->saddr[1]) &&
1611                             (nh->ieee[2] == fch->saddr[2]) &&
1612                             (nh->ieee[3] == fch->saddr[3]) &&
1613                             (nh->ieee[4] == fch->saddr[4]) &&
1614                             (nh->ieee[5] == fch->saddr[5])) {
1615                                 found = 1;
1616                                 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
1617                                          "q/Rep w/ bad NAA from system already"
1618                                          " in DB.\n"));
1619                                 break;
1620                         }
1621                 }
1622
1623                 if ((!found) && (nh == NULL)) {
1624
1625                         nh = kmalloc(sizeof(struct NAA_Hosed), GFP_ATOMIC); /* no sleeping under bad_naa_lock */
1626                         dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
1627                                  " bad NAA from system not yet in DB.\n"));
1628
1629                         if (nh != NULL) {
1630                                 nh->next = NULL;
1631                                 if (!mpt_bad_naa)
1632                                         mpt_bad_naa = nh;
1633                                 if (prevnh)
1634                                         prevnh->next = nh;
1635
1636                                 nh->NAA = source_naa; /* Set the S_NAA value. */
1637                                 for (i = 0; i < FC_ALEN; i++)
1638                                         nh->ieee[i] = fch->saddr[i];
1639                                 dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
1640                                           "%02x:%02x with non-compliant S_NAA value.\n",
1641                                           fch->saddr[0], fch->saddr[1], fch->saddr[2],
1642                                           fch->saddr[3], fch->saddr[4], fch->saddr[5]));
1643                         } else {
1644                                 printk (KERN_ERR "mptlan/type_trans: Unable to"
1645                                         " kmalloc a NAA_Hosed struct.\n");
1646                         }
1647                 } else if (!found) {
1648                         printk (KERN_ERR "mptlan/type_trans: found not"
1649                                 " set, but nh isn't null. Evil "
1650                                 "funkiness abounds.\n");
1651                 }
1652                 write_unlock_irq(&bad_naa_lock);
1653         }
1654 }
1655 #endif
1656
1657         /* Strip the SNAP header from ARP packets since we don't
1658          * pass them through to the 802.2/SNAP layers.
1659          */
1660         if (fcllc->dsap == EXTENDED_SAP &&
1661                 (fcllc->ethertype == htons(ETH_P_IP) ||
1662                  fcllc->ethertype == htons(ETH_P_ARP))) {
1663                 skb_pull(skb, sizeof(struct fcllc));
1664                 return fcllc->ethertype;
1665         }
1666
1667         return htons(ETH_P_802_2);
1668 }
1669
1670 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/