2 * linux/drivers/message/fusion/mptlan.c
3 * IP Over Fibre Channel device driver.
4 * For use with LSI Fibre Channel PCI chip/adapters
5 * running LSI Fusion MPT (Message Passing Technology) firmware.
7 * Copyright (c) 2000-2007 LSI Corporation
8 * (mailto:DL-MPTFusionLinux@lsi.com)
11 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
13 This program is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; version 2 of the License.
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
23 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
24 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
25 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
26 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
27 solely responsible for determining the appropriateness of using and
28 distributing the Program and assumes all risks associated with its
29 exercise of rights under this Agreement, including but not limited to
30 the risks and costs of program errors, damage to or loss of data,
31 programs or equipment, and unavailability or interruption of operations.
33 DISCLAIMER OF LIABILITY
34 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
35 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
37 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
38 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
39 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
40 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
42 You should have received a copy of the GNU General Public License
43 along with this program; if not, write to the Free Software
44 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
47 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
49 * Define statements used for debugging
51 //#define MPT_LAN_IO_DEBUG
53 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
56 #include <linux/init.h>
57 #include <linux/module.h>
60 #define my_VERSION MPT_LINUX_VERSION_COMMON
61 #define MYNAM "mptlan"
63 MODULE_LICENSE("GPL");
64 MODULE_VERSION(my_VERSION);
66 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
68 * MPT LAN message sizes without variable part.
70 #define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
71 (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
73 #define MPT_LAN_TRANSACTION32_SIZE \
74 (sizeof(SGETransaction32_t) - sizeof(u32))
77 * Fusion MPT LAN private structures
83 struct NAA_Hosed *next;
86 struct BufferControl {
94 u8 pnum; /* Port number in the IOC. This is not a Unix network port! */
96 atomic_t buckets_out; /* number of unused buckets on IOC */
97 int bucketthresh; /* Send more when this many left */
99 int *mpt_txfidx; /* Free Tx Context list */
101 spinlock_t txfidx_lock;
103 int *mpt_rxfidx; /* Free Rx Context list */
105 spinlock_t rxfidx_lock;
107 struct BufferControl *RcvCtl; /* Receive BufferControl structs */
108 struct BufferControl *SendCtl; /* Send BufferControl structs */
110 int max_buckets_out; /* Max buckets to send to IOC */
111 int tx_max_out; /* IOC's Tx queue len */
115 struct net_device_stats stats; /* Per device statistics */
117 struct delayed_work post_buckets_task;
118 struct net_device *dev;
119 unsigned long post_buckets_active;
122 struct mpt_lan_ohdr {
129 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
134 static int lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
135 MPT_FRAME_HDR *reply);
136 static int mpt_lan_open(struct net_device *dev);
137 static int mpt_lan_reset(struct net_device *dev);
138 static int mpt_lan_close(struct net_device *dev);
139 static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
140 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
142 static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
143 static int mpt_lan_receive_post_reply(struct net_device *dev,
144 LANReceivePostReply_t *pRecvRep);
145 static int mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
146 static int mpt_lan_send_reply(struct net_device *dev,
147 LANSendReply_t *pSendRep);
148 static int mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
149 static int mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
150 static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
151 struct net_device *dev);
153 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
155 * Fusion MPT LAN private data
157 static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
159 static u32 max_buckets_out = 127;
160 static u32 tx_max_out_p = 127 - 16;
162 #ifdef QLOGIC_NAA_WORKAROUND
163 static struct NAA_Hosed *mpt_bad_naa = NULL;
164 DEFINE_RWLOCK(bad_naa_lock);
167 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
169 * lan_reply - Handle all data sent from the hardware.
170 * @ioc: Pointer to MPT_ADAPTER structure
171 * @mf: Pointer to original MPT request frame (NULL if TurboReply)
172 * @reply: Pointer to MPT reply frame
174 * Returns 1 indicating original alloc'd request frame ptr
175 * should be freed, or 0 if it shouldn't.
/*
 * lan_reply - dispatch a completed MPT LAN reply from the IOC.
 *
 * For turbo replies, @reply is not a real frame pointer: it carries a
 * 32-bit token (see CAST_PTR_TO_U32 below) whose form selects the
 * send/receive turbo handler.  Non-turbo frames are dispatched on
 * reply->u.hdr.Function.  The FreeReqFrame result tells the caller
 * whether the original request frame may be returned to the free queue.
 *
 * NOTE(review): this listing is elided (original line numbers embedded,
 * some lines missing), so branch/return structure is only partly visible.
 */
178 lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
180 struct net_device *dev = ioc->netdev;
181 int FreeReqFrame = 0;
183 dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
184 IOC_AND_NETDEV_NAMES_s_s(dev)));
186 // dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
/* Turbo path: the "reply" pointer is really a 32-bit context token. */
190 u32 tmsg = CAST_PTR_TO_U32(reply);
192 dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
193 IOC_AND_NETDEV_NAMES_s_s(dev),
196 switch (GET_LAN_FORM(tmsg)) {
198 // NOTE! (Optimization) First case here is now caught in
199 // mptbase.c::mpt_interrupt() routine and callback here
200 // is now skipped for this case!
202 case LAN_REPLY_FORM_MESSAGE_CONTEXT:
203 // dioprintk((KERN_INFO MYNAM "/lan_reply: "
204 // "MessageContext turbo reply received\n"));
209 case LAN_REPLY_FORM_SEND_SINGLE:
210 // dioprintk((MYNAM "/lan_reply: "
211 // "calling mpt_lan_send_reply (turbo)\n"));
213 // Potential BUG here?
214 // FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
215 // If/when mpt_lan_send_turbo would return 1 here,
216 // calling routine (mptbase.c|mpt_interrupt)
217 // would Oops because mf has already been set
218 // to NULL. So after return from this func,
219 // mpt_interrupt() will attempt to put (NULL) mf ptr
220 // item back onto its adapter FreeQ - Oops!:-(
221 // It's Ok, since mpt_lan_send_turbo() *currently*
222 // always returns 0, but..., just in case:
/* Return value deliberately ignored; see the warning above. */
224 (void) mpt_lan_send_turbo(dev, tmsg);
229 case LAN_REPLY_FORM_RECEIVE_SINGLE:
230 // dioprintk((KERN_INFO MYNAM "@lan_reply: "
231 // "rcv-Turbo = %08x\n", tmsg));
232 mpt_lan_receive_post_turbo(dev, tmsg);
236 printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
237 "that I don't know what to do with\n");
239 /* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */
247 // msg = (u32 *) reply;
248 // dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
249 // le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
250 // le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
251 // dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
252 // reply->u.hdr.Function));
/* Non-turbo path: a full reply frame; dispatch on the MPI function. */
254 switch (reply->u.hdr.Function) {
256 case MPI_FUNCTION_LAN_SEND:
258 LANSendReply_t *pSendRep;
260 pSendRep = (LANSendReply_t *) reply;
261 FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
265 case MPI_FUNCTION_LAN_RECEIVE:
267 LANReceivePostReply_t *pRecvRep;
269 pRecvRep = (LANReceivePostReply_t *) reply;
270 if (pRecvRep->NumberOfContexts) {
271 mpt_lan_receive_post_reply(dev, pRecvRep);
/* A non-continuation reply means the request frame is done with. */
272 if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
275 dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
276 "ReceivePostReply received.\n"));
280 case MPI_FUNCTION_LAN_RESET:
281 /* Just a default reply. Might want to check it to
282 * make sure that everything went ok.
287 case MPI_FUNCTION_EVENT_NOTIFICATION:
288 case MPI_FUNCTION_EVENT_ACK:
289 /* _EVENT_NOTIFICATION should NOT come down this path any more.
290 * Should be routed to mpt_lan_event_process(), but just in case...
296 printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
297 "reply that I don't know what to do with\n");
299 /* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */
308 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_ioc_reset - IOC reset notification hook for the LAN driver.
 * @reset_phase: MPT_IOC_SETUP_RESET, MPT_IOC_PRE_RESET or post-reset.
 *
 * Bails out early when the Rx context list was never allocated (device
 * not open).  On the post-reset path (visible below) it zeroes the
 * outstanding-bucket count, rebuilds the Rx free-index stack under the
 * rxfidx lock, reposts receive buckets and restarts the Tx queue.
 */
310 mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
312 struct net_device *dev = ioc->netdev;
313 struct mpt_lan_priv *priv;
318 priv = netdev_priv(dev);
320 dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
321 reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
322 reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
/* No Rx contexts allocated -> interface not open; nothing to do. */
324 if (priv->mpt_rxfidx == NULL)
327 if (reset_phase == MPT_IOC_SETUP_RESET) {
329 } else if (reset_phase == MPT_IOC_PRE_RESET) {
333 netif_stop_queue(dev)
335 dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
/* Post-reset: the IOC forgot all buckets; none are outstanding now. */
337 atomic_set(&priv->buckets_out, 0);
339 /* Reset Rx Free Tail index and re-populate the queue. */
340 spin_lock_irqsave(&priv->rxfidx_lock, flags);
341 priv->mpt_rxfidx_tail = -1;
342 for (i = 0; i < priv->max_buckets_out; i++)
343 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
344 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
346 mpt_lan_post_receive_buckets(priv);
347 netif_wake_queue(dev);
353 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_event_process - MPT event notification hook for the LAN driver.
 *
 * All visible event codes fall through without action; the switch exists
 * mainly for documentation.  Event acknowledgement is handled centrally
 * in mptbase.c (see the NOTE at the bottom).
 */
355 mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
357 dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
359 switch (le32_to_cpu(pEvReply->Event)) {
360 case MPI_EVENT_NONE: /* 00 */
361 case MPI_EVENT_LOG_DATA: /* 01 */
362 case MPI_EVENT_STATE_CHANGE: /* 02 */
363 case MPI_EVENT_UNIT_ATTENTION: /* 03 */
364 case MPI_EVENT_IOC_BUS_RESET: /* 04 */
365 case MPI_EVENT_EXT_BUS_RESET: /* 05 */
366 case MPI_EVENT_RESCAN: /* 06 */
367 /* Ok, do we need to do anything here? As far as
368 I can tell, this is when a new device gets added
370 case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */
371 case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */
372 case MPI_EVENT_LOGOUT: /* 09 */
373 case MPI_EVENT_EVENT_CHANGE: /* 0A */
379 * NOTE: pEvent->AckRequired handling now done in mptbase.c;
380 * Do NOT do it here now!
386 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_open - net_device open hook.
 *
 * Resets the FW LAN state, then allocates the Tx/Rx free-context index
 * arrays and their BufferControl tables, registers for MPT event
 * notifications, posts the initial receive buckets and starts the Tx
 * queue.  The error unwinding labels at the bottom free whatever was
 * allocated, in reverse order.
 *
 * NOTE(review): listing is elided; goto targets and returns between the
 * allocation steps are not all visible here.
 */
388 mpt_lan_open(struct net_device *dev)
390 struct mpt_lan_priv *priv = netdev_priv(dev);
393 if (mpt_lan_reset(dev) != 0) {
394 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
396 printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
399 printk ("The ioc is active. Perhaps it needs to be"
402 printk ("The ioc in inactive, most likely in the "
403 "process of being reset. Please try again in "
/* Tx side: free-context stack plus per-context BufferControl table. */
407 priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
408 if (priv->mpt_txfidx == NULL)
410 priv->mpt_txfidx_tail = -1;
412 priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
414 if (priv->SendCtl == NULL)
416 for (i = 0; i < priv->tx_max_out; i++)
417 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
419 dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
/* Rx side: same structure, sized by max_buckets_out. */
421 priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
423 if (priv->mpt_rxfidx == NULL)
425 priv->mpt_rxfidx_tail = -1;
427 priv->RcvCtl = kcalloc(priv->max_buckets_out,
428 sizeof(struct BufferControl),
430 if (priv->RcvCtl == NULL)
432 for (i = 0; i < priv->max_buckets_out; i++)
433 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
435 /**/ dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
436 /**/ for (i = 0; i < priv->tx_max_out; i++)
437 /**/ dlprintk((" %xh", priv->mpt_txfidx[i]));
438 /**/ dlprintk(("\n"));
440 dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
442 mpt_lan_post_receive_buckets(priv);
443 printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
444 IOC_AND_NETDEV_NAMES_s_s(dev));
446 if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
447 printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
448 " Notifications. This is a bad thing! We're not going "
449 "to go ahead, but I'd be leery of system stability at "
453 netif_start_queue(dev);
454 dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
/* Error unwinding: free in reverse order of allocation. */
458 kfree(priv->mpt_rxfidx);
459 priv->mpt_rxfidx = NULL;
461 kfree(priv->SendCtl);
462 priv->SendCtl = NULL;
464 kfree(priv->mpt_txfidx);
465 priv->mpt_txfidx = NULL;
469 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
470 /* Send a LanReset message to the FW. This should result in the FW returning
471 any buckets it still has. */
/*
 * mpt_lan_reset - post a LANReset request to the firmware.
 *
 * Allocates a message frame, fills in a LANResetRequest for this port
 * and hands it to the IOC; the FW is expected to return any buckets it
 * still holds (see the comment above this function).
 */
473 mpt_lan_reset(struct net_device *dev)
476 LANResetRequest_t *pResetReq;
477 struct mpt_lan_priv *priv = netdev_priv(dev);
479 mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);
482 /* dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
483 "Unable to allocate a request frame.\n"));
488 pResetReq = (LANResetRequest_t *) mf;
490 pResetReq->Function = MPI_FUNCTION_LAN_RESET;
491 pResetReq->ChainOffset = 0;
492 pResetReq->Reserved = 0;
493 pResetReq->PortNumber = priv->pnum;
494 pResetReq->MsgFlags = 0;
495 pResetReq->Reserved2 = 0;
497 mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);
502 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_close - net_device stop hook.
 *
 * Deregisters event handling, stops the Tx queue, waits up to 2 seconds
 * for outstanding receive buckets to drain, then unmaps and frees every
 * still-outstanding Rx and Tx skb along with the context bookkeeping
 * arrays allocated in mpt_lan_open().
 */
504 mpt_lan_close(struct net_device *dev)
506 struct mpt_lan_priv *priv = netdev_priv(dev);
507 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
508 unsigned long timeout;
511 dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));
513 mpt_event_deregister(LanCtx);
515 dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
516 "since driver was loaded, %d still out\n",
517 priv->total_posted,atomic_read(&priv->buckets_out)));
519 netif_stop_queue(dev);
/* Give the IOC up to 2s to hand back outstanding buckets. */
523 timeout = jiffies + 2 * HZ;
524 while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
525 schedule_timeout_interruptible(1);
/* Reclaim any Rx buckets the IOC never returned. */
527 for (i = 0; i < priv->max_buckets_out; i++) {
528 if (priv->RcvCtl[i].skb != NULL) {
529 /**/ dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
530 /**/ "is still out\n", i));
531 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
534 dev_kfree_skb(priv->RcvCtl[i].skb);
539 kfree(priv->mpt_rxfidx);
/* Reclaim any Tx buffers still mapped for DMA. */
541 for (i = 0; i < priv->tx_max_out; i++) {
542 if (priv->SendCtl[i].skb != NULL) {
543 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
544 priv->SendCtl[i].len,
546 dev_kfree_skb(priv->SendCtl[i].skb);
550 kfree(priv->SendCtl);
551 kfree(priv->mpt_txfidx);
553 atomic_set(&priv->buckets_out, 0);
555 printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
556 IOC_AND_NETDEV_NAMES_s_s(dev));
561 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mpt_lan_get_stats - return the per-device statistics block. */
562 static struct net_device_stats *
563 mpt_lan_get_stats(struct net_device *dev)
565 struct mpt_lan_priv *priv = netdev_priv(dev);
567 return (struct net_device_stats *) &priv->stats;
570 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mpt_lan_change_mtu - accept a new MTU only within the driver's bounds. */
572 mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
574 if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
580 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
581 /* Tx timeout handler. */
/*
 * mpt_lan_tx_timeout - Tx watchdog hook.
 * Simply restarts the queue if the adapter is still active.
 */
583 mpt_lan_tx_timeout(struct net_device *dev)
585 struct mpt_lan_priv *priv = netdev_priv(dev);
586 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
588 if (mpt_dev->active) {
589 dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
590 netif_wake_queue(dev);
594 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_send_turbo - complete a single Tx from a turbo reply token.
 * @tmsg: turbo token carrying the buffer context.
 *
 * Updates Tx statistics, unmaps the DMA buffer, frees the sent skb,
 * returns the context to the Tx free stack under the txfidx lock and
 * wakes the queue.
 */
597 mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
599 struct mpt_lan_priv *priv = netdev_priv(dev);
600 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
601 struct sk_buff *sent;
605 ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
606 sent = priv->SendCtl[ctx].skb;
608 priv->stats.tx_packets++;
609 priv->stats.tx_bytes += sent->len;
611 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
612 IOC_AND_NETDEV_NAMES_s_s(dev),
613 __FUNCTION__, sent));
/* Release the DMA mapping before freeing the skb. */
615 priv->SendCtl[ctx].skb = NULL;
616 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
617 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
618 dev_kfree_skb_irq(sent);
/* Return the context to the Tx free-index stack. */
620 spin_lock_irqsave(&priv->txfidx_lock, flags);
621 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
622 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
624 netif_wake_queue(dev);
628 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_send_reply - complete one or more Tx contexts from a full
 * LANSendReply frame.
 *
 * Classifies IOCStatus into success/cancel/error buckets for the stats,
 * then walks the BufferContext array: for each context it unmaps the
 * buffer, frees the skb and pushes the context back on the Tx free
 * stack (all under the txfidx lock).  Returns FreeReqFrame, set when
 * the reply is not a continuation.
 */
630 mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
632 struct mpt_lan_priv *priv = netdev_priv(dev);
633 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
634 struct sk_buff *sent;
636 int FreeReqFrame = 0;
641 count = pSendRep->NumberOfContexts;
643 dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
644 le16_to_cpu(pSendRep->IOCStatus)));
646 /* Add check for Loginfo Flag in IOCStatus */
648 switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
649 case MPI_IOCSTATUS_SUCCESS:
650 priv->stats.tx_packets += count;
653 case MPI_IOCSTATUS_LAN_CANCELED:
654 case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
657 case MPI_IOCSTATUS_INVALID_SGL:
658 priv->stats.tx_errors += count;
659 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
660 IOC_AND_NETDEV_NAMES_s_s(dev));
664 priv->stats.tx_errors += count;
/* Walk the returned contexts and reclaim each Tx buffer. */
668 pContext = &pSendRep->BufferContext;
670 spin_lock_irqsave(&priv->txfidx_lock, flags);
672 ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
674 sent = priv->SendCtl[ctx].skb;
675 priv->stats.tx_bytes += sent->len;
677 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
678 IOC_AND_NETDEV_NAMES_s_s(dev),
679 __FUNCTION__, sent));
681 priv->SendCtl[ctx].skb = NULL;
682 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
683 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
684 dev_kfree_skb_irq(sent);
686 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
691 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
/* Non-continuation reply -> caller may free the request frame. */
694 if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
697 netif_wake_queue(dev);
701 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_sdu_send - hard_start_xmit hook: transmit one skb via the IOC.
 *
 * Pops a free Tx context and allocates a message frame (stopping the
 * queue if either runs out), DMA-maps the skb, builds a LANSend request
 * with one transaction context element and one 64-bit Simple SGE, and
 * posts it to the IOC.  Optional QLOGIC_NAA_WORKAROUND code rewrites
 * the NAA for peers known not to follow RFC 2625.
 */
703 mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
705 struct mpt_lan_priv *priv = netdev_priv(dev);
706 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
708 LANSendRequest_t *pSendReq;
709 SGETransaction32_t *pTrans;
710 SGESimple64_t *pSimple;
711 const unsigned char *mac;
/* Default NAA used when no workaround entry matches. */
715 u16 cur_naa = 0x1000;
717 dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
/* Reserve a Tx context; stop the queue when none are left. */
720 spin_lock_irqsave(&priv->txfidx_lock, flags);
721 if (priv->mpt_txfidx_tail < 0) {
722 netif_stop_queue(dev);
723 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
725 printk (KERN_ERR "%s: no tx context available: %u\n",
726 __FUNCTION__, priv->mpt_txfidx_tail);
730 mf = mpt_get_msg_frame(LanCtx, mpt_dev);
732 netif_stop_queue(dev);
733 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
735 printk (KERN_ERR "%s: Unable to alloc request frame\n",
740 ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
741 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
743 // dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
744 // IOC_AND_NETDEV_NAMES_s_s(dev)));
746 pSendReq = (LANSendRequest_t *) mf;
748 /* Set the mac.raw pointer, since this apparently isn't getting
749 * done before we get the skb. Pull the data pointer past the mac data.
751 skb_reset_mac_header(skb);
/* Map the whole skb for device-bound DMA and remember it by context. */
754 dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
757 priv->SendCtl[ctx].skb = skb;
758 priv->SendCtl[ctx].dma = dma;
759 priv->SendCtl[ctx].len = skb->len;
762 pSendReq->Reserved = 0;
763 pSendReq->Function = MPI_FUNCTION_LAN_SEND;
764 pSendReq->ChainOffset = 0;
765 pSendReq->Reserved2 = 0;
766 pSendReq->MsgFlags = 0;
767 pSendReq->PortNumber = priv->pnum;
769 /* Transaction Context Element */
770 pTrans = (SGETransaction32_t *) pSendReq->SG_List;
772 /* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
773 pTrans->ContextSize = sizeof(u32);
774 pTrans->DetailsLength = 2 * sizeof(u32);
776 pTrans->TransactionContext[0] = cpu_to_le32(ctx);
778 // dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
779 // IOC_AND_NETDEV_NAMES_s_s(dev),
780 // ctx, skb, skb->data));
782 mac = skb_mac_header(skb);
783 #ifdef QLOGIC_NAA_WORKAROUND
785 struct NAA_Hosed *nh;
787 /* Munge the NAA for Tx packets to QLogic boards, which don't follow
788 RFC 2625. The longer I look at this, the more my opinion of Qlogic
/* Look up the destination MAC in the bad-NAA list under the rwlock. */
790 read_lock_irq(&bad_naa_lock);
791 for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
792 if ((nh->ieee[0] == mac[0]) &&
793 (nh->ieee[1] == mac[1]) &&
794 (nh->ieee[2] == mac[2]) &&
795 (nh->ieee[3] == mac[3]) &&
796 (nh->ieee[4] == mac[4]) &&
797 (nh->ieee[5] == mac[5])) {
799 dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
800 "= %04x.\n", cur_naa));
804 read_unlock_irq(&bad_naa_lock);
/* Pack NAA + destination MAC into the two transaction detail words. */
808 pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) |
811 pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
816 pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
818 /* If we ever decide to send more than one Simple SGE per LANSend, then
819 we will need to make sure that LAST_ELEMENT only gets set on the
820 last one. Otherwise, bad voodoo and evil funkiness will commence. */
821 pSimple->FlagsLength = cpu_to_le32(
822 ((MPI_SGE_FLAGS_LAST_ELEMENT |
823 MPI_SGE_FLAGS_END_OF_BUFFER |
824 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
825 MPI_SGE_FLAGS_SYSTEM_ADDRESS |
826 MPI_SGE_FLAGS_HOST_TO_IOC |
827 MPI_SGE_FLAGS_64_BIT_ADDRESSING |
828 MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
/* Split the DMA address into low/high halves for the 64-bit SGE. */
830 pSimple->Address.Low = cpu_to_le32((u32) dma);
831 if (sizeof(dma_addr_t) > sizeof(u32))
832 pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
834 pSimple->Address.High = 0;
836 mpt_put_msg_frame (LanCtx, mpt_dev, mf);
837 dev->trans_start = jiffies;
839 dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
840 IOC_AND_NETDEV_NAMES_s_s(dev),
841 le32_to_cpu(pSimple->FlagsLength)));
846 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_wake_post_buckets_task - schedule the bucket-reposting work.
 * @priority: 0 = delay one tick, 1 = run as soon as possible.
 *
 * The post_buckets_active bit guarantees at most one pending instance
 * of the delayed work item.
 */
848 mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
850 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
853 struct mpt_lan_priv *priv = dev->priv;
855 if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
857 schedule_delayed_work(&priv->post_buckets_task, 0);
859 schedule_delayed_work(&priv->post_buckets_task, 1);
860 dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
863 dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
864 IOC_AND_NETDEV_NAMES_s_s(dev) ));
868 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_receive_skb - hand a completed Rx skb up the network stack.
 *
 * Sets the protocol via mpt_lan_type_trans(), updates Rx statistics,
 * and reschedules bucket posting when the outstanding-bucket count
 * drops below the threshold.
 */
870 mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
872 struct mpt_lan_priv *priv = dev->priv;
874 skb->protocol = mpt_lan_type_trans(skb, dev);
876 dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
877 "delivered to upper level.\n",
878 IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
880 priv->stats.rx_bytes += skb->len;
881 priv->stats.rx_packets++;
886 dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
887 atomic_read(&priv->buckets_out)));
/* Top up the IOC's bucket pool before it runs dry. */
889 if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
890 mpt_lan_wake_post_buckets_task(dev, 1);
892 dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
893 "remaining, %d received back since sod\n",
894 atomic_read(&priv->buckets_out), priv->total_received));
899 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_receive_post_turbo - complete a single Rx from a turbo token.
 * @tmsg: turbo token carrying bucket context and packet length.
 *
 * For short packets (< MPT_LAN_RX_COPYBREAK) the data is copied into a
 * freshly allocated skb so the original bucket can be reposted; the
 * visible tail path instead detaches the bucket's skb and unmaps it.
 * Either way the context is returned to the Rx free stack and the skb
 * is passed to mpt_lan_receive_skb().
 */
902 mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
904 struct mpt_lan_priv *priv = dev->priv;
905 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
906 struct sk_buff *skb, *old_skb;
910 ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
911 skb = priv->RcvCtl[ctx].skb;
913 len = GET_LAN_PACKET_LENGTH(tmsg);
915 if (len < MPT_LAN_RX_COPYBREAK) {
918 skb = (struct sk_buff *)dev_alloc_skb(len);
920 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
921 IOC_AND_NETDEV_NAMES_s_s(dev),
/* Sync for CPU access, copy out, then give the buffer back to DMA. */
926 pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
927 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
929 skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
931 pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
932 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
/* Large packet: hand the bucket's own skb up and unmap it. */
938 priv->RcvCtl[ctx].skb = NULL;
940 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
941 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
944 spin_lock_irqsave(&priv->rxfidx_lock, flags);
945 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
946 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
948 atomic_dec(&priv->buckets_out);
949 priv->total_received++;
951 return mpt_lan_receive_skb(dev, skb);
954 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_receive_post_free - free buckets returned by the IOC without
 * data (e.g. after a LAN_CANCELED status).
 *
 * Walks the reply's BucketContext list under the rxfidx lock: unmaps
 * and frees each bucket's skb and returns its context to the Rx free
 * stack, then subtracts the whole batch from buckets_out.
 */
956 mpt_lan_receive_post_free(struct net_device *dev,
957 LANReceivePostReply_t *pRecvRep)
959 struct mpt_lan_priv *priv = dev->priv;
960 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
967 count = pRecvRep->NumberOfContexts;
969 /**/ dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
970 "IOC returned %d buckets, freeing them...\n", count));
972 spin_lock_irqsave(&priv->rxfidx_lock, flags);
973 for (i = 0; i < count; i++) {
974 ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
976 skb = priv->RcvCtl[ctx].skb;
978 // dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
979 // IOC_AND_NETDEV_NAMES_s_s(dev)));
980 // dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
981 // priv, &(priv->buckets_out)));
982 // dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
984 priv->RcvCtl[ctx].skb = NULL;
985 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
986 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
987 dev_kfree_skb_any(skb);
989 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
991 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
993 atomic_sub(count, &priv->buckets_out);
995 // for (i = 0; i < priv->max_buckets_out; i++)
996 // if (priv->RcvCtl[i].skb != NULL)
997 // dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
998 // "is still out\n", i));
1000 /* dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
1003 /**/ dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
1004 /**/ "remaining, %d received back since sod.\n",
1005 /**/ atomic_read(&priv->buckets_out), priv->total_received));
1009 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_receive_post_reply - complete one packet from a full (non-
 * turbo) LANReceivePostReply frame.
 *
 * Three delivery paths are visible below:
 *  - multi-bucket packets: allocate a fresh skb and concatenate the
 *    fragments out of each returned bucket (sync-for-cpu / copy /
 *    sync-for-device, contexts recycled under the rxfidx lock);
 *  - short single-bucket packets (< MPT_LAN_RX_COPYBREAK): copy into a
 *    new skb so the bucket stays mapped and reusable;
 *  - otherwise: detach and unmap the bucket's own skb.
 * Afterwards it adjusts buckets_out, sanity-checks the free-index tail,
 * warns when the FW reports few buckets remaining, issues a LanReset
 * when driver/FW bucket accounting diverges past the mismatch
 * threshold, reschedules bucket posting, and delivers the skb.
 *
 * NOTE(review): listing is elided; some length/offset handling between
 * the visible lines is missing here.
 */
1011 mpt_lan_receive_post_reply(struct net_device *dev,
1012 LANReceivePostReply_t *pRecvRep)
1014 struct mpt_lan_priv *priv = dev->priv;
1015 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1016 struct sk_buff *skb, *old_skb;
1017 unsigned long flags;
1018 u32 len, ctx, offset;
1019 u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
1023 dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
1024 dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
1025 le16_to_cpu(pRecvRep->IOCStatus)));
/* Cancelled receives just return their buckets for freeing. */
1027 if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
1028 MPI_IOCSTATUS_LAN_CANCELED)
1029 return mpt_lan_receive_post_free(dev, pRecvRep);
1031 len = le32_to_cpu(pRecvRep->PacketLength);
1033 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
1034 "ReceivePostReply w/ PacketLength zero!\n",
1035 IOC_AND_NETDEV_NAMES_s_s(dev));
1036 printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
1037 pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
1041 ctx = le32_to_cpu(pRecvRep->BucketContext[0]);
1042 count = pRecvRep->NumberOfContexts;
1043 skb = priv->RcvCtl[ctx].skb;
1045 offset = le32_to_cpu(pRecvRep->PacketOffset);
1046 // if (offset != 0) {
1047 // printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
1048 // "w/ PacketOffset %u\n",
1049 // IOC_AND_NETDEV_NAMES_s_s(dev),
1053 dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
1054 IOC_AND_NETDEV_NAMES_s_s(dev),
1060 // dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
1061 // "for single packet, concatenating...\n",
1062 // IOC_AND_NETDEV_NAMES_s_s(dev)));
/* Multi-bucket case: gather the fragments into one new skb. */
1064 skb = (struct sk_buff *)dev_alloc_skb(len);
1066 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1067 IOC_AND_NETDEV_NAMES_s_s(dev),
1068 __FILE__, __LINE__);
1072 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1073 for (i = 0; i < count; i++) {
1075 ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
1076 old_skb = priv->RcvCtl[ctx].skb;
1078 l = priv->RcvCtl[ctx].len;
1082 // dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
1083 // IOC_AND_NETDEV_NAMES_s_s(dev),
1086 pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1087 priv->RcvCtl[ctx].dma,
1088 priv->RcvCtl[ctx].len,
1089 PCI_DMA_FROMDEVICE);
1090 skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);
1092 pci_dma_sync_single_for_device(mpt_dev->pcidev,
1093 priv->RcvCtl[ctx].dma,
1094 priv->RcvCtl[ctx].len,
1095 PCI_DMA_FROMDEVICE);
1097 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1100 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
/* Short single-bucket packet: copy so the bucket stays mapped. */
1102 } else if (len < MPT_LAN_RX_COPYBREAK) {
1106 skb = (struct sk_buff *)dev_alloc_skb(len);
1108 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1109 IOC_AND_NETDEV_NAMES_s_s(dev),
1110 __FILE__, __LINE__);
1114 pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1115 priv->RcvCtl[ctx].dma,
1116 priv->RcvCtl[ctx].len,
1117 PCI_DMA_FROMDEVICE);
1119 skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
1121 pci_dma_sync_single_for_device(mpt_dev->pcidev,
1122 priv->RcvCtl[ctx].dma,
1123 priv->RcvCtl[ctx].len,
1124 PCI_DMA_FROMDEVICE);
1126 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1127 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1128 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
/* Large single-bucket packet: hand the bucket's skb straight up. */
1131 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1133 priv->RcvCtl[ctx].skb = NULL;
1135 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1136 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1137 priv->RcvCtl[ctx].dma = 0;
1139 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1140 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1145 atomic_sub(count, &priv->buckets_out);
1146 priv->total_received += count;
/* Sanity check: the free-index stack should never overflow. */
1148 if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
1149 printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
1150 "MPT_LAN_MAX_BUCKETS_OUT = %d\n",
1151 IOC_AND_NETDEV_NAMES_s_s(dev),
1152 priv->mpt_rxfidx_tail,
1153 MPT_LAN_MAX_BUCKETS_OUT);
1159 printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
1160 "(priv->buckets_out = %d)\n",
1161 IOC_AND_NETDEV_NAMES_s_s(dev),
1162 atomic_read(&priv->buckets_out));
1163 else if (remaining < 10)
1164 printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
1165 "(priv->buckets_out = %d)\n",
1166 IOC_AND_NETDEV_NAMES_s_s(dev),
1167 remaining, atomic_read(&priv->buckets_out));
/* Driver/FW bucket accounting drifted too far apart: reset FW state. */
1169 if ((remaining < priv->bucketthresh) &&
1170 ((atomic_read(&priv->buckets_out) - remaining) >
1171 MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
1173 printk (KERN_WARNING MYNAM " Mismatch between driver's "
1174 "buckets_out count and fw's BucketsRemaining "
1175 "count has crossed the threshold, issuing a "
1176 "LanReset to clear the fw's hashtable. You may "
1177 "want to check your /var/log/messages for \"CRC "
1178 "error\" event notifications.\n");
1181 mpt_lan_wake_post_buckets_task(dev, 0);
1184 return mpt_lan_receive_skb(dev, skb);
1187 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1188 /* Simple SGE's only at the moment */
/*
 * mpt_lan_post_receive_buckets - replenish the IOC's pool of receive
 * buckets.  Builds LANReceivePostRequest message frames, each carrying as
 * many {transaction + simple 64-bit SGE} pairs as fit in one request
 * frame, until max_buckets_out buckets are outstanding or resources
 * (contexts, skbs, message frames) run out.
 * NOTE(review): the return type, braces, and several statements are elided
 * in this view — confirm against the complete source.
 */
1191 mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
1193 struct net_device *dev = priv->dev;
1194 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1196 LANReceivePostRequest_t *pRecvReq;
1197 SGETransaction32_t *pTrans;
1198 SGESimple64_t *pSimple;
1199 struct sk_buff *skb;
1201 u32 curr, buckets, count, max;
/* Per-bucket buffer length: MTU + link-layer header + 4 bytes slack. */
1202 u32 len = (dev->mtu + dev->hard_header_len + 4);
1203 unsigned long flags;
/* Number of buckets needed to get back up to max_buckets_out. */
1206 curr = atomic_read(&priv->buckets_out);
1207 buckets = (priv->max_buckets_out - curr);
1209 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
1210 IOC_AND_NETDEV_NAMES_s_s(dev),
1211 __FUNCTION__, buckets, curr));
/* Max buckets per request = frame space left after the fixed request
 * header, divided by the per-bucket transaction + simple-SGE footprint. */
1213 max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
1214 (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
/* Grab a message frame; failure path (elided 'if') logs and bails with
 * the remaining bucket count. */
1217 mf = mpt_get_msg_frame(LanCtx, mpt_dev);
1219 printk (KERN_ERR "%s: Unable to alloc request frame\n",
1221 dioprintk((KERN_ERR "%s: %u buckets remaining\n",
1222 __FUNCTION__, buckets));
/* Fill in the LAN receive-post request header. */
1225 pRecvReq = (LANReceivePostRequest_t *) mf;
1227 i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
1228 mpt_dev->RequestNB[i] = 0;
1233 pRecvReq->Function = MPI_FUNCTION_LAN_RECEIVE;
1234 pRecvReq->ChainOffset = 0;
1235 pRecvReq->MsgFlags = 0;
1236 pRecvReq->PortNumber = priv->pnum;
1238 pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
/* One transaction + SGE per bucket posted in this frame. */
1241 for (i = 0; i < count; i++) {
/* Pop a free context index; if none remain, undo and stop (unlock/exit
 * lines partially elided). */
1244 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1245 if (priv->mpt_rxfidx_tail < 0) {
1246 printk (KERN_ERR "%s: Can't alloc context\n",
1248 spin_unlock_irqrestore(&priv->rxfidx_lock,
1253 ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];
/* Reuse the slot's skb if it is still the right length; otherwise unmap
 * and free it so a fresh one is allocated below. */
1255 skb = priv->RcvCtl[ctx].skb;
1256 if (skb && (priv->RcvCtl[ctx].len != len)) {
1257 pci_unmap_single(mpt_dev->pcidev,
1258 priv->RcvCtl[ctx].dma,
1259 priv->RcvCtl[ctx].len,
1260 PCI_DMA_FROMDEVICE);
1261 dev_kfree_skb(priv->RcvCtl[ctx].skb);
1262 skb = priv->RcvCtl[ctx].skb = NULL;
/* Allocate and DMA-map a new receive buffer; on skb-alloc failure the
 * context index is pushed back and the loop exits (branch elided). */
1266 skb = dev_alloc_skb(len);
1268 printk (KERN_WARNING
1269 MYNAM "/%s: Can't alloc skb\n",
1271 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1272 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1276 dma = pci_map_single(mpt_dev->pcidev, skb->data,
1277 len, PCI_DMA_FROMDEVICE);
1279 priv->RcvCtl[ctx].skb = skb;
1280 priv->RcvCtl[ctx].dma = dma;
1281 priv->RcvCtl[ctx].len = len;
1284 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
/* Transaction element: carries the 32-bit bucket context so the reply
 * can be matched back to RcvCtl[ctx]. */
1286 pTrans->ContextSize = sizeof(u32);
1287 pTrans->DetailsLength = 0;
1289 pTrans->TransactionContext[0] = cpu_to_le32(ctx);
1291 pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
/* Simple 64-bit-addressing SGE describing the bucket buffer. */
1293 pSimple->FlagsLength = cpu_to_le32(
1294 ((MPI_SGE_FLAGS_END_OF_BUFFER |
1295 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1296 MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
1297 pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
1298 if (sizeof(dma_addr_t) > sizeof(u32))
1299 pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
1301 pSimple->Address.High = 0;
1303 pTrans = (SGETransaction32_t *) (pSimple + 1);
/* Nothing was posted in this frame: give the frame back and stop. */
1306 if (pSimple == NULL) {
1307 /**/ printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
1309 mpt_free_msg_frame(mpt_dev, mf);
/* Mark the last SGE as end-of-list and record how many buckets the
 * request carries. */
1313 pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);
1315 pRecvReq->BucketCount = cpu_to_le32(i);
1317 /* printk(KERN_INFO MYNAM ": posting buckets\n ");
1318 * for (i = 0; i < j + 2; i ++)
1319 * printk (" %08x", le32_to_cpu(msg[i]));
/* Hand the request to the IOC and account for the buckets now in
 * flight. */
1323 mpt_put_msg_frame(LanCtx, mpt_dev, mf);
1325 priv->total_posted += i;
1327 atomic_add(i, &priv->buckets_out);
1331 dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
1332 __FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
1333 dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
1334 __FUNCTION__, priv->total_posted, priv->total_received));
/* Allow the next refill to be scheduled. */
1336 clear_bit(0, &priv->post_buckets_active);
/*
 * Workqueue shim: recover the owning mpt_lan_priv from the embedded
 * delayed-work item and run the actual bucket-posting routine.
 */
1340 mpt_lan_post_receive_buckets_work(struct work_struct *work)
1342 mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
1343 post_buckets_task.work))
1346 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_register_lan_device - allocate and register one FC pseudo-ethernet
 * net_device for port 'pnum' of adapter 'mpt_dev'.  Initializes the
 * per-device mpt_lan_priv state (bucket accounting, locks, deferred
 * refill work), copies the hardware address from prefetched LANPage1
 * data, and wires up the pre-net_device_ops method pointers.
 * NOTE(review): several lines (braces, error paths, HWaddr byte copies,
 * the tail after register_netdev) are elided in this view.
 */
1347 static struct net_device *
1348 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
1350 struct net_device *dev;
1351 struct mpt_lan_priv *priv;
1352 u8 HWaddr[FC_ALEN], *a;
1354 dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
1358 dev->mtu = MPT_LAN_MTU;
1360 priv = netdev_priv(dev);
1363 priv->mpt_dev = mpt_dev;
/* Deferred work that re-posts receive buckets to the IOC. */
1366 INIT_DELAYED_WORK(&priv->post_buckets_task,
1367 mpt_lan_post_receive_buckets_work);
1368 priv->post_buckets_active = 0;
1370 dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
1371 __LINE__, dev->mtu + dev->hard_header_len + 4));
1373 atomic_set(&priv->buckets_out, 0);
1374 priv->total_posted = 0;
1375 priv->total_received = 0;
/* Clamp the module-parameter bucket depth to what this IOC supports. */
1376 priv->max_buckets_out = max_buckets_out;
1377 if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
1378 priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
1380 dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
1382 mpt_dev->pfacts[0].MaxLanBuckets,
1384 priv->max_buckets_out));
/* Threshold (2/3 of max) used by the receive path's bucket-mismatch
 * check. */
1386 priv->bucketthresh = priv->max_buckets_out * 2 / 3;
1387 spin_lock_init(&priv->txfidx_lock);
1388 spin_lock_init(&priv->rxfidx_lock);
1390 /* Grab pre-fetched LANPage1 stuff. :-) */
/* HWaddr[] is filled from these config-page bytes via 'a'; the copy
 * statements are elided in this view. */
1391 a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
1400 dev->addr_len = FC_ALEN;
1401 memcpy(dev->dev_addr, HWaddr, FC_ALEN);
1402 memset(dev->broadcast, 0xff, FC_ALEN);
1404 /* The Tx queue is 127 deep on the 909.
1405 * Give ourselves some breathing room.
1407 priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
1408 tx_max_out_p : MPT_TX_MAX_OUT_LIM;
/* Pre-net_device_ops kernels: methods are assigned directly. */
1410 dev->open = mpt_lan_open;
1411 dev->stop = mpt_lan_close;
1412 dev->get_stats = mpt_lan_get_stats;
1413 dev->set_multicast_list = NULL;
1414 dev->change_mtu = mpt_lan_change_mtu;
1415 dev->hard_start_xmit = mpt_lan_sdu_send;
1417 /* Not in 2.3.42. Need 2.3.45+ */
1418 dev->tx_timeout = mpt_lan_tx_timeout;
1419 dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
1421 dlprintk((KERN_INFO MYNAM ": Finished registering dev "
1422 "and setting initial values\n"));
1424 SET_MODULE_OWNER(dev);
/* Failure/success tail of register_netdev() is elided in this view. */
1426 if (register_netdev(dev) != 0) {
/*
 * mptlan_probe - per-adapter probe called via the mptbase peripheral
 * driver framework.  Walks every port on the IOC, skips ports without
 * the LAN protocol flag, and registers each LAN-capable port as a
 * net_device.  (Return type and some braces are elided in this view.)
 */
1434 mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1436 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
1437 struct net_device *dev;
1440 for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
1441 printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
1442 "ProtocolFlags=%02Xh (%c%c%c%c)\n",
1443 ioc->name, ioc->pfacts[i].PortNumber,
1444 ioc->pfacts[i].ProtocolFlags,
1445 MPT_PROTOCOL_FLAGS_c_c_c_c(
1446 ioc->pfacts[i].ProtocolFlags));
/* Skip ports that do not advertise the LAN protocol. */
1448 if (!(ioc->pfacts[i].ProtocolFlags &
1449 MPI_PORTFACTS_PROTOCOL_LAN)) {
1450 printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
1451 "seems to be disabled on this adapter port!\n",
/* Register this LAN-capable port; failure path (elided 'if') logs the
 * port number. */
1456 dev = mpt_register_lan_device(ioc, i);
1458 printk(KERN_ERR MYNAM ": %s: Unable to register "
1459 "port%d as a LAN device\n", ioc->name,
1460 ioc->pfacts[i].PortNumber);
1464 printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
1465 "registered as '%s'\n", ioc->name, dev->name);
1466 printk(KERN_INFO MYNAM ": %s/%s: "
1467 "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
1468 IOC_AND_NETDEV_NAMES_s_s(dev),
1469 dev->dev_addr[0], dev->dev_addr[1],
1470 dev->dev_addr[2], dev->dev_addr[3],
1471 dev->dev_addr[4], dev->dev_addr[5]);
/*
 * mptlan_remove - teardown counterpart of mptlan_probe: unregister the
 * net_device that was attached to this adapter.  (Return type, braces,
 * and any post-unregister cleanup are elided in this view.)
 */
1482 mptlan_remove(struct pci_dev *pdev)
1484 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
1485 struct net_device *dev = ioc->netdev;
1488 unregister_netdev(dev);
/* mptbase peripheral-driver hooks: mptbase invokes these for each
 * Fusion MPT adapter it manages. */
1493 static struct mpt_pci_driver mptlan_driver = {
1494 .probe = mptlan_probe,
1495 .remove = mptlan_remove,
/*
 * mpt_lan_init - module init: register the LAN reply handler with the
 * MPT base driver (obtaining the LanCtx context), register an IOC-reset
 * callback, then register the peripheral driver so probe runs per
 * adapter.  (Braces and return statements are elided in this view.)
 */
1498 static int __init mpt_lan_init (void)
1500 show_mptmod_ver(LANAME, LANVER);
/* A non-positive context means mptbase refused the registration. */
1502 if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
1503 printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1507 dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1509 if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
1510 printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
1511 "handler with mptbase! The world is at an end! "
1512 "Everything is fading to black! Goodbye.\n");
1516 dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1518 mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER);
/*
 * mpt_lan_exit - module exit: undo mpt_lan_init's registrations in
 * reverse order, then invalidate LanCtx.  (Braces and any per-device
 * teardown between these lines are elided in this view.)
 */
1522 static void __exit mpt_lan_exit(void)
1524 mpt_device_driver_deregister(MPTLAN_DRIVER);
1525 mpt_reset_deregister(LanCtx);
1528 mpt_deregister(LanCtx);
/* Sentinel value marking "no context registered". */
1529 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
/* Standard module entry/exit hooks. */
1533 module_init(mpt_lan_init);
1534 module_exit(mpt_lan_exit);
1536 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_type_trans - classify a received FC-LAN frame: set
 * skb->pkt_type from the destination address, strip the mpt_lan_ohdr
 * (and, for IP/ARP, the 802.2 SNAP header), and return the protocol id
 * for skb->protocol.  Also works around two firmware/peer quirks: a
 * byte-swapped-broadcast firmware bug and QLogic's non-RFC-2625 NAA.
 * NOTE(review): several lines (braces, else headers, the swap-fix body)
 * are elided in this view — confirm against the complete source.
 */
1537 static unsigned short
1538 mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1540 struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
1541 struct fcllc *fcllc;
1543 skb_reset_mac_header(skb);
1544 skb_pull(skb, sizeof(struct mpt_lan_ohdr));
/* dtype == 0xffff marks the byte-swapped-broadcast firmware bug; the
 * in-place fix-up of the header words (via 'p') is elided here. */
1546 if (fch->dtype == htons(0xffff)) {
1547 u32 *p = (u32 *) fch;
1554 printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
1555 NETDEV_PTR_TO_IOC_NAME_s(dev));
1556 printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
1557 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1558 fch->saddr[3], fch->saddr[4], fch->saddr[5]);
/* Group bit set => broadcast or multicast; otherwise compare against
 * our own address to pick HOST vs OTHERHOST. */
1561 if (*fch->daddr & 1) {
1562 if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
1563 skb->pkt_type = PACKET_BROADCAST;
1565 skb->pkt_type = PACKET_MULTICAST;
1568 if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
1569 skb->pkt_type = PACKET_OTHERHOST;
1571 skb->pkt_type = PACKET_HOST;
1575 fcllc = (struct fcllc *)skb->data;
1577 #ifdef QLOGIC_NAA_WORKAROUND
1579 u16 source_naa = fch->stype, found = 0;
1581 /* Workaround for QLogic not following RFC 2625 in regards to the NAA
/* NAA nibble of zero suggests the field arrived byte-swapped. */
1584 if ((source_naa & 0xF000) == 0)
1585 source_naa = swab16(source_naa);
1587 if (fcllc->ethertype == htons(ETH_P_ARP))
1588 dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
1589 "%04x.\n", source_naa));
/* Track senders of ARP frames whose NAA is not RFC 2625 compliant in
 * the global mpt_bad_naa list (guarded by bad_naa_lock). */
1591 if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
1592 ((source_naa >> 12) != MPT_LAN_NAA_RFC2625)){
1593 struct NAA_Hosed *nh, *prevnh;
1596 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
1597 "system with non-RFC 2625 NAA value (%04x).\n",
/* Linear search for an existing entry matching this source MAC. */
1600 write_lock_irq(&bad_naa_lock);
1601 for (prevnh = nh = mpt_bad_naa; nh != NULL;
1602 prevnh=nh, nh=nh->next) {
1603 if ((nh->ieee[0] == fch->saddr[0]) &&
1604 (nh->ieee[1] == fch->saddr[1]) &&
1605 (nh->ieee[2] == fch->saddr[2]) &&
1606 (nh->ieee[3] == fch->saddr[3]) &&
1607 (nh->ieee[4] == fch->saddr[4]) &&
1608 (nh->ieee[5] == fch->saddr[5])) {
1610 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
1611 "q/Rep w/ bad NAA from system already"
/* Not found: append a new NAA_Hosed entry for this sender (list-link
 * statements are elided; the NULL-check 'else' below logs the kmalloc
 * failure). */
1617 if ((!found) && (nh == NULL)) {
1619 nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL);
1620 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
1621 " bad NAA from system not yet in DB.\n"));
1630 nh->NAA = source_naa; /* Set the S_NAA value. */
1631 for (i = 0; i < FC_ALEN; i++)
1632 nh->ieee[i] = fch->saddr[i];
1633 dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
1634 "%02x:%02x with non-compliant S_NAA value.\n",
1635 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1636 fch->saddr[3], fch->saddr[4],fch->saddr[5]));
1638 printk (KERN_ERR "mptlan/type_trans: Unable to"
1639 " kmalloc a NAA_Hosed struct.\n");
1641 } else if (!found) {
1642 printk (KERN_ERR "mptlan/type_trans: found not"
1643 " set, but nh isn't null. Evil "
1644 "funkiness abounds.\n");
1646 write_unlock_irq(&bad_naa_lock);
1651 /* Strip the SNAP header from ARP packets since we don't
1652 * pass them through to the 802.2/SNAP layers.
1654 if (fcllc->dsap == EXTENDED_SAP &&
1655 (fcllc->ethertype == htons(ETH_P_IP) ||
1656 fcllc->ethertype == htons(ETH_P_ARP))) {
1657 skb_pull(skb, sizeof(struct fcllc));
1658 return fcllc->ethertype;
/* Anything else is delivered as raw 802.2. */
1661 return htons(ETH_P_802_2);