2 * linux/drivers/message/fusion/mptlan.c
3 * IP Over Fibre Channel device driver.
4 * For use with LSI Fibre Channel PCI chip/adapters
5 * running LSI Fusion MPT (Message Passing Technology) firmware.
7 * Copyright (c) 2000-2008 LSI Corporation
8 * (mailto:DL-MPTFusionLinux@lsi.com)
11 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
13 This program is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; version 2 of the License.
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
23 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
24 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
25 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
26 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
27 solely responsible for determining the appropriateness of using and
28 distributing the Program and assumes all risks associated with its
29 exercise of rights under this Agreement, including but not limited to
30 the risks and costs of program errors, damage to or loss of data,
31 programs or equipment, and unavailability or interruption of operations.
33 DISCLAIMER OF LIABILITY
34 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
35 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
37 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
38 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
39 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
40 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
42 You should have received a copy of the GNU General Public License
43 along with this program; if not, write to the Free Software
44 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
47 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
49 * Define statements used for debugging
51 //#define MPT_LAN_IO_DEBUG
53 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
56 #include <linux/init.h>
57 #include <linux/module.h>
60 #define my_VERSION MPT_LINUX_VERSION_COMMON
61 #define MYNAM "mptlan"
63 MODULE_LICENSE("GPL");
64 MODULE_VERSION(my_VERSION);
66 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
68 * MPT LAN message sizes without variable part.
70 #define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
71 (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
73 #define MPT_LAN_TRANSACTION32_SIZE \
74 (sizeof(SGETransaction32_t) - sizeof(u32))
77 * Fusion MPT LAN private structures
80 struct BufferControl {
/*
 * NOTE(review): this view of the source is missing lines here -- the
 * BufferControl members and the opening of struct mpt_lan_priv are not
 * visible.  The fields below are the per-netdev private LAN state.
 */
88 u8 pnum; /* Port number in the IOC. This is not a Unix network port! */
90 atomic_t buckets_out; /* number of unused buckets on IOC */
91 int bucketthresh; /* Send more when this many left */
93 int *mpt_txfidx; /* Free Tx Context list */
/* guards mpt_txfidx / mpt_txfidx_tail (taken in the Tx paths below) */
95 spinlock_t txfidx_lock;
97 int *mpt_rxfidx; /* Free Rx Context list */
/* guards mpt_rxfidx / mpt_rxfidx_tail (taken in the Rx paths below) */
99 spinlock_t rxfidx_lock;
101 struct BufferControl *RcvCtl; /* Receive BufferControl structs */
102 struct BufferControl *SendCtl; /* Send BufferControl structs */
104 int max_buckets_out; /* Max buckets to send to IOC */
105 int tx_max_out; /* IOC's Tx queue len */
109 struct net_device_stats stats; /* Per device statistics */
/* deferred work that reposts receive buckets to the IOC */
111 struct delayed_work post_buckets_task;
112 struct net_device *dev;
/* bit 0 set while post_buckets_task is pending (see test_and_set_bit use) */
113 unsigned long post_buckets_active;
116 struct mpt_lan_ohdr {
123 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
128 static int lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
129 MPT_FRAME_HDR *reply);
130 static int mpt_lan_open(struct net_device *dev);
131 static int mpt_lan_reset(struct net_device *dev);
132 static int mpt_lan_close(struct net_device *dev);
133 static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
134 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
136 static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
137 static int mpt_lan_receive_post_reply(struct net_device *dev,
138 LANReceivePostReply_t *pRecvRep);
139 static int mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
140 static int mpt_lan_send_reply(struct net_device *dev,
141 LANSendReply_t *pSendRep);
142 static int mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
143 static int mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
144 static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
145 struct net_device *dev);
147 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
149 * Fusion MPT LAN private data
151 static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
153 static u32 max_buckets_out = 127;
154 static u32 tx_max_out_p = 127 - 16;
156 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
158 * lan_reply - Handle all data sent from the hardware.
159 * @ioc: Pointer to MPT_ADAPTER structure
160 * @mf: Pointer to original MPT request frame (NULL if TurboReply)
161 * @reply: Pointer to MPT reply frame
163 * Returns 1 indicating original alloc'd request frame ptr
164 * should be freed, or 0 if it shouldn't.
167 lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
/*
 * NOTE(review): many lines of this function are missing from this view
 * (braces, break statements, returns); only visible code is annotated.
 */
169 struct net_device *dev = ioc->netdev;
170 int FreeReqFrame = 0;
172 dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
173 IOC_AND_NETDEV_NAMES_s_s(dev)));
175 // dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
/* Turbo path: the whole reply is presumably encoded in the 32-bit
 * value of the "pointer" itself -- TODO confirm against mptbase.c. */
179 u32 tmsg = CAST_PTR_TO_U32(reply);
181 dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
182 IOC_AND_NETDEV_NAMES_s_s(dev),
185 switch (GET_LAN_FORM(tmsg)) {
187 // NOTE! (Optimization) First case here is now caught in
188 // mptbase.c::mpt_interrupt() routine and callcack here
189 // is now skipped for this case!
191 case LAN_REPLY_FORM_MESSAGE_CONTEXT:
192 // dioprintk((KERN_INFO MYNAM "/lan_reply: "
193 // "MessageContext turbo reply received\n"));
198 case LAN_REPLY_FORM_SEND_SINGLE:
199 // dioprintk((MYNAM "/lan_reply: "
200 // "calling mpt_lan_send_reply (turbo)\n"));
202 // Potential BUG here?
203 // FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
204 // If/when mpt_lan_send_turbo would return 1 here,
205 // calling routine (mptbase.c|mpt_interrupt)
206 // would Oops because mf has already been set
207 // to NULL. So after return from this func,
208 // mpt_interrupt() will attempt to put (NULL) mf ptr
209 // item back onto its adapter FreeQ - Oops!:-(
210 // It's Ok, since mpt_lan_send_turbo() *currently*
211 // always returns 0, but..., just in case:
/* Return value deliberately discarded -- see the warning above. */
213 (void) mpt_lan_send_turbo(dev, tmsg);
218 case LAN_REPLY_FORM_RECEIVE_SINGLE:
219 // dioprintk((KERN_INFO MYNAM "@lan_reply: "
220 // "rcv-Turbo = %08x\n", tmsg));
221 mpt_lan_receive_post_turbo(dev, tmsg);
225 printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
226 "that I don't know what to do with\n");
228 /* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */
236 // msg = (u32 *) reply;
237 // dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
238 // le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
239 // le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
240 // dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
241 // reply->u.hdr.Function));
/* Full (non-turbo) reply frame: dispatch on the MPI function code. */
243 switch (reply->u.hdr.Function) {
245 case MPI_FUNCTION_LAN_SEND:
247 LANSendReply_t *pSendRep;
249 pSendRep = (LANSendReply_t *) reply;
/* mpt_lan_send_reply decides whether the request frame is freed. */
250 FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
254 case MPI_FUNCTION_LAN_RECEIVE:
256 LANReceivePostReply_t *pRecvRep;
258 pRecvRep = (LANReceivePostReply_t *) reply;
259 if (pRecvRep->NumberOfContexts) {
260 mpt_lan_receive_post_reply(dev, pRecvRep);
/* Only a non-continuation reply completes the original request. */
261 if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
264 dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
265 "ReceivePostReply received.\n"));
269 case MPI_FUNCTION_LAN_RESET:
270 /* Just a default reply. Might want to check it to
271 * make sure that everything went ok.
276 case MPI_FUNCTION_EVENT_NOTIFICATION:
277 case MPI_FUNCTION_EVENT_ACK:
278 /* _EVENT_NOTIFICATION should NOT come down this path any more.
279 * Should be routed to mpt_lan_event_process(), but just in case...
285 printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
286 "reply that I don't know what to do with\n");
288 /* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */
297 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * IOC reset callback.  On pre-reset: stop the Tx queue.  After the
 * reset: zero the outstanding-bucket count, rebuild the Rx free-context
 * list, repost receive buckets and restart the queue.
 * NOTE(review): lines are missing from this view (returns, braces).
 */
299 mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
301 struct net_device *dev = ioc->netdev;
302 struct mpt_lan_priv *priv;
307 priv = netdev_priv(dev);
309 dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
310 reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
311 reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
/* Nothing to do if the interface was never opened (no Rx contexts). */
313 if (priv->mpt_rxfidx == NULL)
316 if (reset_phase == MPT_IOC_SETUP_RESET) {
318 } else if (reset_phase == MPT_IOC_PRE_RESET) {
322 netif_stop_queue(dev);
324 dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
/* Post-reset: the IOC forgot everything, so no buckets are out. */
326 atomic_set(&priv->buckets_out, 0);
328 /* Reset Rx Free Tail index and re-populate the queue. */
329 spin_lock_irqsave(&priv->rxfidx_lock, flags);
330 priv->mpt_rxfidx_tail = -1;
331 for (i = 0; i < priv->max_buckets_out; i++)
332 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
333 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
335 mpt_lan_post_receive_buckets(priv);
336 netif_wake_queue(dev);
342 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * MPT event notification callback for the LAN driver.  All currently
 * listed events fall through with no action; acknowledgement is
 * handled centrally in mptbase.c (see NOTE below).
 */
344 mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
346 dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
348 switch (le32_to_cpu(pEvReply->Event)) {
349 case MPI_EVENT_NONE: /* 00 */
350 case MPI_EVENT_LOG_DATA: /* 01 */
351 case MPI_EVENT_STATE_CHANGE: /* 02 */
352 case MPI_EVENT_UNIT_ATTENTION: /* 03 */
353 case MPI_EVENT_IOC_BUS_RESET: /* 04 */
354 case MPI_EVENT_EXT_BUS_RESET: /* 05 */
355 case MPI_EVENT_RESCAN: /* 06 */
356 /* Ok, do we need to do anything here? As far as
357 I can tell, this is when a new device gets added
359 case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */
360 case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */
361 case MPI_EVENT_LOGOUT: /* 09 */
362 case MPI_EVENT_EVENT_CHANGE: /* 0A */
368 * NOTE: pEvent->AckRequired handling now done in mptbase.c;
369 * Do NOT do it here now!
375 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * ndo_open handler: reset the FW LAN port, allocate the Tx/Rx free
 * context lists and BufferControl arrays, post initial receive buckets,
 * register for events and start the Tx queue.  On failure, frees what
 * was allocated (unwind labels not visible in this view).
 * NOTE(review): lines are missing here (gotos, returns, braces).
 */
377 mpt_lan_open(struct net_device *dev)
379 struct mpt_lan_priv *priv = netdev_priv(dev);
382 if (mpt_lan_reset(dev) != 0) {
383 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
385 printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
388 printk ("The ioc is active. Perhaps it needs to be"
391 printk ("The ioc in inactive, most likely in the "
392 "process of being reset. Please try again in "
/* Tx free-context stack: holds tx_max_out indices. */
396 priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
397 if (priv->mpt_txfidx == NULL)
399 priv->mpt_txfidx_tail = -1;
401 priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
403 if (priv->SendCtl == NULL)
/* Initially every Tx context index is free. */
405 for (i = 0; i < priv->tx_max_out; i++)
406 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
408 dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
/* Rx free-context stack: holds max_buckets_out indices. */
410 priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
412 if (priv->mpt_rxfidx == NULL)
414 priv->mpt_rxfidx_tail = -1;
416 priv->RcvCtl = kcalloc(priv->max_buckets_out,
417 sizeof(struct BufferControl),
419 if (priv->RcvCtl == NULL)
421 for (i = 0; i < priv->max_buckets_out; i++)
422 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
424 /**/ dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
425 /**/ for (i = 0; i < priv->tx_max_out; i++)
426 /**/ dlprintk((" %xh", priv->mpt_txfidx[i]));
427 /**/ dlprintk(("\n"));
429 dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
431 mpt_lan_post_receive_buckets(priv);
432 printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
433 IOC_AND_NETDEV_NAMES_s_s(dev));
435 if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
436 printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
437 " Notifications. This is a bad thing! We're not going "
438 "to go ahead, but I'd be leery of system stability at "
442 netif_start_queue(dev);
443 dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
/* Error unwind: free in reverse order of allocation and NULL the
 * pointers so a later close/reset path sees consistent state. */
447 kfree(priv->mpt_rxfidx);
448 priv->mpt_rxfidx = NULL;
450 kfree(priv->SendCtl);
451 priv->SendCtl = NULL;
453 kfree(priv->mpt_txfidx);
454 priv->mpt_txfidx = NULL;
458 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
459 /* Send a LanReset message to the FW. This should result in the FW returning
460 any buckets it still has. */
/*
 * Build and post a LanReset request frame to the firmware for this
 * port (see the comment above: the FW should return any buckets it
 * still holds).  NOTE(review): the frame-allocation failure branch and
 * return statements are missing from this view.
 */
462 mpt_lan_reset(struct net_device *dev)
465 LANResetRequest_t *pResetReq;
466 struct mpt_lan_priv *priv = netdev_priv(dev);
468 mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);
471 /* dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
472 "Unable to allocate a request frame.\n"));
477 pResetReq = (LANResetRequest_t *) mf;
/* Fill in the MPI LanReset request; reserved fields must be zero. */
479 pResetReq->Function = MPI_FUNCTION_LAN_RESET;
480 pResetReq->ChainOffset = 0;
481 pResetReq->Reserved = 0;
482 pResetReq->PortNumber = priv->pnum;
483 pResetReq->MsgFlags = 0;
484 pResetReq->Reserved2 = 0;
486 mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);
491 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * ndo_stop handler: deregister events, stop the queue, wait up to two
 * seconds for outstanding buckets to drain, then unmap and free every
 * Rx/Tx skb still held plus the context lists and control arrays.
 * NOTE(review): braces/returns are missing from this view.
 */
493 mpt_lan_close(struct net_device *dev)
495 struct mpt_lan_priv *priv = netdev_priv(dev);
496 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
497 unsigned long timeout;
500 dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));
502 mpt_event_deregister(LanCtx);
504 dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
505 "since driver was loaded, %d still out\n",
506 priv->total_posted,atomic_read(&priv->buckets_out)));
508 netif_stop_queue(dev);
/* Give the IOC up to 2 s to hand outstanding buckets back. */
512 timeout = jiffies + 2 * HZ;
513 while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
514 schedule_timeout_interruptible(1);
/* Reclaim any Rx buckets the IOC never returned. */
516 for (i = 0; i < priv->max_buckets_out; i++) {
517 if (priv->RcvCtl[i].skb != NULL) {
518 /**/ dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
519 /**/ "is still out\n", i));
520 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
523 dev_kfree_skb(priv->RcvCtl[i].skb);
528 kfree(priv->mpt_rxfidx);
/* Reclaim any Tx skbs still awaiting a send-complete. */
530 for (i = 0; i < priv->tx_max_out; i++) {
531 if (priv->SendCtl[i].skb != NULL) {
532 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
533 priv->SendCtl[i].len,
535 dev_kfree_skb(priv->SendCtl[i].skb);
539 kfree(priv->SendCtl);
540 kfree(priv->mpt_txfidx);
542 atomic_set(&priv->buckets_out, 0);
544 printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
545 IOC_AND_NETDEV_NAMES_s_s(dev));
550 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* ndo_get_stats handler: return the per-device statistics block. */
551 static struct net_device_stats *
552 mpt_lan_get_stats(struct net_device *dev)
554 struct mpt_lan_priv *priv = netdev_priv(dev);
556 return (struct net_device_stats *) &priv->stats;
559 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * ndo_change_mtu handler: reject MTUs outside the driver's bounds.
 * NOTE(review): the success/failure return lines are missing from
 * this view.
 */
561 mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
563 if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
569 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
570 /* Tx timeout handler. */
/*
 * Tx watchdog timeout handler: if the adapter is still active, simply
 * wake the queue and let transmission retry.
 */
572 mpt_lan_tx_timeout(struct net_device *dev)
574 struct mpt_lan_priv *priv = netdev_priv(dev);
575 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
577 if (mpt_dev->active) {
578 dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
579 netif_wake_queue(dev);
583 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * Handle a turbo send-complete: the Tx context is unpacked from the
 * 32-bit turbo message.  Updates stats, unmaps and frees the sent skb,
 * returns the context to the Tx free list and wakes the queue.
 * NOTE(review): the return statement is missing from this view (the
 * caller in lan_reply relies on this returning 0).
 */
586 mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
588 struct mpt_lan_priv *priv = netdev_priv(dev);
589 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
590 struct sk_buff *sent;
594 ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
595 sent = priv->SendCtl[ctx].skb;
597 priv->stats.tx_packets++;
598 priv->stats.tx_bytes += sent->len;
600 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
601 IOC_AND_NETDEV_NAMES_s_s(dev),
/* Clear the slot before unmapping/freeing so the context is reusable. */
604 priv->SendCtl[ctx].skb = NULL;
605 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
606 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
/* Interrupt context: must use the _irq variant of kfree_skb. */
607 dev_kfree_skb_irq(sent);
609 spin_lock_irqsave(&priv->txfidx_lock, flags);
610 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
611 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
613 netif_wake_queue(dev);
617 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * Handle a full (non-turbo) LANSend reply, which may complete several
 * contexts at once.  Classifies the IOCStatus, updates Tx stats, then
 * unmaps/frees each completed skb and returns its context to the free
 * list.  Returns nonzero when the request frame may be freed (reply is
 * not a continuation).  NOTE(review): loop braces, count declarations
 * and returns are missing from this view.
 */
619 mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
621 struct mpt_lan_priv *priv = netdev_priv(dev);
622 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
623 struct sk_buff *sent;
625 int FreeReqFrame = 0;
630 count = pSendRep->NumberOfContexts;
632 dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
633 le16_to_cpu(pSendRep->IOCStatus)));
635 /* Add check for Loginfo Flag in IOCStatus */
637 switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
638 case MPI_IOCSTATUS_SUCCESS:
639 priv->stats.tx_packets += count;
642 case MPI_IOCSTATUS_LAN_CANCELED:
643 case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
646 case MPI_IOCSTATUS_INVALID_SGL:
647 priv->stats.tx_errors += count;
648 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
649 IOC_AND_NETDEV_NAMES_s_s(dev));
/* Any other status: count the contexts as Tx errors. */
653 priv->stats.tx_errors += count;
657 pContext = &pSendRep->BufferContext;
659 spin_lock_irqsave(&priv->txfidx_lock, flags);
661 ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
663 sent = priv->SendCtl[ctx].skb;
664 priv->stats.tx_bytes += sent->len;
666 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
667 IOC_AND_NETDEV_NAMES_s_s(dev),
670 priv->SendCtl[ctx].skb = NULL;
671 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
672 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
673 dev_kfree_skb_irq(sent);
675 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
680 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
/* A continuation reply means more contexts follow; keep the frame. */
683 if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
686 netif_wake_queue(dev);
690 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * Hard-start-xmit path: grab a Tx context and a request frame, DMA-map
 * the skb, then build a LANSend request containing one transaction
 * context element (with the FC NAA/MAC addressing details) and a
 * single 64-bit Simple SGE covering the whole packet.
 * NOTE(review): several lines (returns, else-branches, remaining
 * TransactionDetails bytes) are missing from this view.
 */
692 mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
694 struct mpt_lan_priv *priv = netdev_priv(dev);
695 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
697 LANSendRequest_t *pSendReq;
698 SGETransaction32_t *pTrans;
699 SGESimple64_t *pSimple;
700 const unsigned char *mac;
/* presumably the FC NAA prefix for the destination -- TODO confirm */
704 u16 cur_naa = 0x1000;
706 dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
/* Reserve a Tx context while holding txfidx_lock; stop the queue and
 * bail if none (or no request frame) is available. */
709 spin_lock_irqsave(&priv->txfidx_lock, flags);
710 if (priv->mpt_txfidx_tail < 0) {
711 netif_stop_queue(dev);
712 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
714 printk (KERN_ERR "%s: no tx context available: %u\n",
715 __func__, priv->mpt_txfidx_tail);
719 mf = mpt_get_msg_frame(LanCtx, mpt_dev);
721 netif_stop_queue(dev);
722 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
724 printk (KERN_ERR "%s: Unable to alloc request frame\n",
729 ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
730 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
732 // dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
733 // IOC_AND_NETDEV_NAMES_s_s(dev)));
735 pSendReq = (LANSendRequest_t *) mf;
737 /* Set the mac.raw pointer, since this apparently isn't getting
738 * done before we get the skb. Pull the data pointer past the mac data.
740 skb_reset_mac_header(skb);
743 dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
/* Record the mapping so the send-complete path can unmap and free. */
746 priv->SendCtl[ctx].skb = skb;
747 priv->SendCtl[ctx].dma = dma;
748 priv->SendCtl[ctx].len = skb->len;
/* Build the LANSend request header; reserved fields must be zero. */
751 pSendReq->Reserved = 0;
752 pSendReq->Function = MPI_FUNCTION_LAN_SEND;
753 pSendReq->ChainOffset = 0;
754 pSendReq->Reserved2 = 0;
755 pSendReq->MsgFlags = 0;
756 pSendReq->PortNumber = priv->pnum;
758 /* Transaction Context Element */
759 pTrans = (SGETransaction32_t *) pSendReq->SG_List;
761 /* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
762 pTrans->ContextSize = sizeof(u32);
763 pTrans->DetailsLength = 2 * sizeof(u32);
765 pTrans->TransactionContext[0] = cpu_to_le32(ctx);
767 // dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
768 // IOC_AND_NETDEV_NAMES_s_s(dev),
769 // ctx, skb, skb->data));
771 mac = skb_mac_header(skb);
/* Pack the NAA and destination MAC bytes into the two detail words. */
773 pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) |
776 pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
781 pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
783 /* If we ever decide to send more than one Simple SGE per LANSend, then
784 we will need to make sure that LAST_ELEMENT only gets set on the
785 last one. Otherwise, bad voodoo and evil funkiness will commence. */
786 pSimple->FlagsLength = cpu_to_le32(
787 ((MPI_SGE_FLAGS_LAST_ELEMENT |
788 MPI_SGE_FLAGS_END_OF_BUFFER |
789 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
790 MPI_SGE_FLAGS_SYSTEM_ADDRESS |
791 MPI_SGE_FLAGS_HOST_TO_IOC |
792 MPI_SGE_FLAGS_64_BIT_ADDRESSING |
793 MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
/* Split the DMA address into low/high 32-bit halves for the 64-bit SGE. */
795 pSimple->Address.Low = cpu_to_le32((u32) dma);
796 if (sizeof(dma_addr_t) > sizeof(u32))
797 pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
799 pSimple->Address.High = 0;
801 mpt_put_msg_frame (LanCtx, mpt_dev, mf);
802 dev->trans_start = jiffies;
804 dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
805 IOC_AND_NETDEV_NAMES_s_s(dev),
806 le32_to_cpu(pSimple->FlagsLength)));
811 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * Queue the bucket-reposting work item, at most once at a time: bit 0
 * of post_buckets_active acts as an "already queued" latch, set here
 * and (presumably) cleared by the work function -- TODO confirm.
 */
813 mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
815 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
818 struct mpt_lan_priv *priv = netdev_priv(dev);
820 if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
/* delay 0 = run as soon as possible; delay 1 = next timer tick */
822 schedule_delayed_work(&priv->post_buckets_task, 0);
824 schedule_delayed_work(&priv->post_buckets_task, 1);
825 dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
828 dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
829 IOC_AND_NETDEV_NAMES_s_s(dev) ));
833 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * Common tail of the receive paths: set the protocol, bump Rx stats,
 * hand the skb to the stack (netif_rx call not visible in this view),
 * and schedule more buckets if the outstanding count fell below the
 * threshold.
 */
835 mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
837 struct mpt_lan_priv *priv = netdev_priv(dev);
839 skb->protocol = mpt_lan_type_trans(skb, dev);
841 dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
842 "delivered to upper level.\n",
843 IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
845 priv->stats.rx_bytes += skb->len;
846 priv->stats.rx_packets++;
851 dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
852 atomic_read(&priv->buckets_out)));
/* Top up the IOC's receive buckets before it runs dry. */
854 if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
855 mpt_lan_wake_post_buckets_task(dev, 1);
857 dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
858 "remaining, %d received back since sod\n",
859 atomic_read(&priv->buckets_out), priv->total_received));
864 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * Handle a turbo receive completion: bucket context and packet length
 * are unpacked from the 32-bit turbo message.  Small packets (below
 * MPT_LAN_RX_COPYBREAK) are copied into a fresh skb so the original
 * bucket can be reposted; otherwise the bucket skb itself is unmapped
 * and handed up.  NOTE(review): braces/else lines are missing from
 * this view.
 */
867 mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
869 struct mpt_lan_priv *priv = netdev_priv(dev);
870 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
871 struct sk_buff *skb, *old_skb;
875 ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
876 skb = priv->RcvCtl[ctx].skb;
878 len = GET_LAN_PACKET_LENGTH(tmsg);
880 if (len < MPT_LAN_RX_COPYBREAK) {
883 skb = (struct sk_buff *)dev_alloc_skb(len);
885 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
886 IOC_AND_NETDEV_NAMES_s_s(dev),
/* Sync for CPU, copy the payload out, then hand the bucket's
 * mapping back to the device for reuse. */
891 pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
892 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
894 skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
896 pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
897 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
/* Large packet: surrender the bucket skb to the stack. */
903 priv->RcvCtl[ctx].skb = NULL;
905 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
906 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
909 spin_lock_irqsave(&priv->rxfidx_lock, flags);
910 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
911 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
913 atomic_dec(&priv->buckets_out);
914 priv->total_received++;
916 return mpt_lan_receive_skb(dev, skb);
919 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * Free buckets returned by the IOC without delivering them (used when
 * a ReceivePost was canceled, e.g. after a LanReset): for every context
 * in the reply, unmap and free the skb and push the context back onto
 * the Rx free list, then drop the outstanding-bucket count.
 * NOTE(review): loop braces and declarations are missing from this view.
 */
921 mpt_lan_receive_post_free(struct net_device *dev,
922 LANReceivePostReply_t *pRecvRep)
924 struct mpt_lan_priv *priv = netdev_priv(dev);
925 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
932 count = pRecvRep->NumberOfContexts;
934 /**/ dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
935 "IOC returned %d buckets, freeing them...\n", count));
937 spin_lock_irqsave(&priv->rxfidx_lock, flags);
938 for (i = 0; i < count; i++) {
939 ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
941 skb = priv->RcvCtl[ctx].skb;
943 // dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
944 // IOC_AND_NETDEV_NAMES_s_s(dev)));
945 // dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
946 // priv, &(priv->buckets_out)));
947 // dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
949 priv->RcvCtl[ctx].skb = NULL;
950 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
951 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
/* _any variant: safe whether called from IRQ or process context. */
952 dev_kfree_skb_any(skb);
954 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
956 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
958 atomic_sub(count, &priv->buckets_out);
960 // for (i = 0; i < priv->max_buckets_out; i++)
961 // if (priv->RcvCtl[i].skb != NULL)
962 // dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
963 // "is still out\n", i));
965 /* dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
968 /**/ dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
969 /**/ "remaining, %d received back since sod.\n",
970 /**/ atomic_read(&priv->buckets_out), priv->total_received));
974 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * Handle a full (non-turbo) ReceivePost reply.  Three delivery cases:
 * multiple buckets for one packet (concatenate into a fresh skb),
 * one small bucket (copybreak copy so the bucket is reused), or one
 * large bucket (surrender the bucket skb).  Canceled replies are
 * diverted to mpt_lan_receive_post_free().  Afterwards, bucket
 * accounting is updated and more buckets are scheduled/reposted.
 * NOTE(review): many lines (braces, else-branches, returns) are
 * missing from this view.
 */
976 mpt_lan_receive_post_reply(struct net_device *dev,
977 LANReceivePostReply_t *pRecvRep)
979 struct mpt_lan_priv *priv = netdev_priv(dev);
980 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
981 struct sk_buff *skb, *old_skb;
983 u32 len, ctx, offset;
984 u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
988 dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
989 dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
990 le16_to_cpu(pRecvRep->IOCStatus)));
/* Canceled post: just reclaim the buckets, nothing to deliver. */
992 if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
993 MPI_IOCSTATUS_LAN_CANCELED)
994 return mpt_lan_receive_post_free(dev, pRecvRep);
996 len = le32_to_cpu(pRecvRep->PacketLength);
998 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
999 "ReceivePostReply w/ PacketLength zero!\n",
1000 IOC_AND_NETDEV_NAMES_s_s(dev));
1001 printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
1002 pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
1006 ctx = le32_to_cpu(pRecvRep->BucketContext[0]);
1007 count = pRecvRep->NumberOfContexts;
1008 skb = priv->RcvCtl[ctx].skb;
1010 offset = le32_to_cpu(pRecvRep->PacketOffset);
1011 // if (offset != 0) {
1012 // printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
1013 // "w/ PacketOffset %u\n",
1014 // IOC_AND_NETDEV_NAMES_s_s(dev),
1018 dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
1019 IOC_AND_NETDEV_NAMES_s_s(dev),
/* Case 1: packet spans several buckets -- copy each bucket's share
 * into one freshly allocated skb and recycle every context. */
1025 // dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
1026 // "for single packet, concatenating...\n",
1027 // IOC_AND_NETDEV_NAMES_s_s(dev)));
1029 skb = (struct sk_buff *)dev_alloc_skb(len);
1031 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1032 IOC_AND_NETDEV_NAMES_s_s(dev),
1033 __FILE__, __LINE__);
1037 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1038 for (i = 0; i < count; i++) {
1040 ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
1041 old_skb = priv->RcvCtl[ctx].skb;
1043 l = priv->RcvCtl[ctx].len;
1047 // dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
1048 // IOC_AND_NETDEV_NAMES_s_s(dev),
1051 pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1052 priv->RcvCtl[ctx].dma,
1053 priv->RcvCtl[ctx].len,
1054 PCI_DMA_FROMDEVICE);
1055 skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);
1057 pci_dma_sync_single_for_device(mpt_dev->pcidev,
1058 priv->RcvCtl[ctx].dma,
1059 priv->RcvCtl[ctx].len,
1060 PCI_DMA_FROMDEVICE);
1062 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1065 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
/* Case 2: single small packet -- copybreak copy, bucket reused. */
1067 } else if (len < MPT_LAN_RX_COPYBREAK) {
1071 skb = (struct sk_buff *)dev_alloc_skb(len);
1073 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1074 IOC_AND_NETDEV_NAMES_s_s(dev),
1075 __FILE__, __LINE__);
1079 pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1080 priv->RcvCtl[ctx].dma,
1081 priv->RcvCtl[ctx].len,
1082 PCI_DMA_FROMDEVICE);
1084 skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
1086 pci_dma_sync_single_for_device(mpt_dev->pcidev,
1087 priv->RcvCtl[ctx].dma,
1088 priv->RcvCtl[ctx].len,
1089 PCI_DMA_FROMDEVICE);
1091 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1092 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1093 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
/* Case 3: single large packet -- hand the bucket skb to the stack. */
1096 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1098 priv->RcvCtl[ctx].skb = NULL;
1100 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1101 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1102 priv->RcvCtl[ctx].dma = 0;
1104 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1105 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1110 atomic_sub(count, &priv->buckets_out);
1111 priv->total_received += count;
/* Sanity check: the free list must never exceed its capacity. */
1113 if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
1114 printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
1115 "MPT_LAN_MAX_BUCKETS_OUT = %d\n",
1116 IOC_AND_NETDEV_NAMES_s_s(dev),
1117 priv->mpt_rxfidx_tail,
1118 MPT_LAN_MAX_BUCKETS_OUT);
1124 printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
1125 "(priv->buckets_out = %d)\n",
1126 IOC_AND_NETDEV_NAMES_s_s(dev),
1127 atomic_read(&priv->buckets_out));
1128 else if (remaining < 10)
1129 printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
1130 "(priv->buckets_out = %d)\n",
1131 IOC_AND_NETDEV_NAMES_s_s(dev),
1132 remaining, atomic_read(&priv->buckets_out));
/* Large disagreement between our count and the FW's suggests lost
 * buckets (e.g. dropped by CRC errors); a LanReset reclaims them. */
1134 if ((remaining < priv->bucketthresh) &&
1135 ((atomic_read(&priv->buckets_out) - remaining) >
1136 MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
1138 printk (KERN_WARNING MYNAM " Mismatch between driver's "
1139 "buckets_out count and fw's BucketsRemaining "
1140 "count has crossed the threshold, issuing a "
1141 "LanReset to clear the fw's hashtable. You may "
1142 "want to check your /var/log/messages for \"CRC "
1143 "error\" event notifications.\n");
1146 mpt_lan_wake_post_buckets_task(dev, 0);
1149 return mpt_lan_receive_skb(dev, skb);
1152 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1153 /* Simple SGE's only at the moment */
/*
 * mpt_lan_post_receive_buckets - post empty receive buffers ("buckets")
 * to the IOC so inbound LAN frames can be DMA'd into them.
 *
 * For each bucket: pop a free receive-context index off the rxfidx
 * stack, (re)allocate and DMA-map an skb of bucketlen bytes, then
 * describe it to the firmware as a 32-bit transaction context followed
 * by one 64-bit simple SGE inside a LANReceivePostRequest frame.
 * Finishes by clearing post_buckets_active so the refill task may be
 * scheduled again.
 */
1156 mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
1158 struct net_device *dev = priv->dev;
1159 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1161 LANReceivePostRequest_t *pRecvReq;
1162 SGETransaction32_t *pTrans;
1163 SGESimple64_t *pSimple;
1164 struct sk_buff *skb;
1166 u32 curr, buckets, count, max;
/* bucketlen = MTU + FC LAN header + 4 (presumably trailer/pad -- confirm). */
1167 u32 len = (dev->mtu + dev->hard_header_len + 4);
1168 unsigned long flags;
/* Number of buckets still needed to reach our outstanding ceiling. */
1171 curr = atomic_read(&priv->buckets_out);
1172 buckets = (priv->max_buckets_out - curr);
1174 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
1175 IOC_AND_NETDEV_NAMES_s_s(dev),
1176 __func__, buckets, curr));
/* Max buckets describable in one request frame: what fits after the
 * fixed header, at one 32-bit transaction + one 64-bit SGE each. */
1178 max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
1179 (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
/* Grab a free message frame from the IOC's request pool. */
1182 mf = mpt_get_msg_frame(LanCtx, mpt_dev);
1184 printk (KERN_ERR "%s: Unable to alloc request frame\n",
1186 dioprintk((KERN_ERR "%s: %u buckets remaining\n",
1187 __func__, buckets));
1190 pRecvReq = (LANReceivePostRequest_t *) mf;
/* This request needs no chain buffers. */
1192 i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
1193 mpt_dev->RequestNB[i] = 0;
1198 pRecvReq->Function = MPI_FUNCTION_LAN_RECEIVE;
1199 pRecvReq->ChainOffset = 0;
1200 pRecvReq->MsgFlags = 0;
1201 pRecvReq->PortNumber = priv->pnum;
1203 pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
1206 for (i = 0; i < count; i++) {
/* Pop a free receive-context index; rxfidx_lock guards the stack. */
1209 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1210 if (priv->mpt_rxfidx_tail < 0) {
1211 printk (KERN_ERR "%s: Can't alloc context\n",
1213 spin_unlock_irqrestore(&priv->rxfidx_lock,
1218 ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];
/* Reuse the skb cached for this context; if its size no longer matches
 * the current bucket length (e.g. after an MTU change), unmap and free
 * it so a fresh one is allocated below. */
1220 skb = priv->RcvCtl[ctx].skb;
1221 if (skb && (priv->RcvCtl[ctx].len != len)) {
1222 pci_unmap_single(mpt_dev->pcidev,
1223 priv->RcvCtl[ctx].dma,
1224 priv->RcvCtl[ctx].len,
1225 PCI_DMA_FROMDEVICE);
1226 dev_kfree_skb(priv->RcvCtl[ctx].skb);
1227 skb = priv->RcvCtl[ctx].skb = NULL;
1231 skb = dev_alloc_skb(len);
1233 printk (KERN_WARNING
1234 MYNAM "/%s: Can't alloc skb\n",
/* Allocation failed: return the context to the stack and stop. */
1236 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1237 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
/* Map the skb data for device-to-memory DMA and cache the mapping. */
1241 dma = pci_map_single(mpt_dev->pcidev, skb->data,
1242 len, PCI_DMA_FROMDEVICE);
1244 priv->RcvCtl[ctx].skb = skb;
1245 priv->RcvCtl[ctx].dma = dma;
1246 priv->RcvCtl[ctx].len = len;
1249 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
/* Transaction element: a 32-bit context identifying this bucket. */
1251 pTrans->ContextSize = sizeof(u32);
1252 pTrans->DetailsLength = 0;
1254 pTrans->TransactionContext[0] = cpu_to_le32(ctx);
/* Followed by a single 64-bit simple SGE pointing at the skb data. */
1256 pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
1258 pSimple->FlagsLength = cpu_to_le32(
1259 ((MPI_SGE_FLAGS_END_OF_BUFFER |
1260 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1261 MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
1262 pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
1263 if (sizeof(dma_addr_t) > sizeof(u32))
1264 pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
1266 pSimple->Address.High = 0;
/* Next transaction element starts right after this SGE. */
1268 pTrans = (SGETransaction32_t *) (pSimple + 1);
/* No SGE was ever built => nothing to post; return the frame. */
1271 if (pSimple == NULL) {
1272 /**/ printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
1274 mpt_free_msg_frame(mpt_dev, mf);
/* Mark the final SGE as end-of-list before handing off to the IOC. */
1278 pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);
1280 pRecvReq->BucketCount = cpu_to_le32(i);
1282 /* printk(KERN_INFO MYNAM ": posting buckets\n ");
1283 * for (i = 0; i < j + 2; i ++)
1284 * printk (" %08x", le32_to_cpu(msg[i]));
1288 mpt_put_msg_frame(LanCtx, mpt_dev, mf);
/* Account for the buckets now owned by the firmware. */
1290 priv->total_posted += i;
1292 atomic_add(i, &priv->buckets_out);
1296 dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
1297 __func__, buckets, atomic_read(&priv->buckets_out)));
1298 dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
1299 __func__, priv->total_posted, priv->total_received));
/* Allow the refill task to be queued again. */
1301 clear_bit(0, &priv->post_buckets_active);
/* Work-queue wrapper: recover the mpt_lan_priv embedding the delayed
 * work item and replenish the IOC's receive buckets. */
1305 mpt_lan_post_receive_buckets_work(struct work_struct *work)
1307 mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
1308 post_buckets_task.work));
1311 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_register_lan_device - allocate and register one IP-over-FC
 * net_device for port @pnum of adapter @mpt_dev.
 *
 * Sets up bucket accounting, the tx/rx index locks and the delayed
 * bucket-post work, copies the hardware address out of the pre-fetched
 * LANPage1 config data, fills in the (pre-net_device_ops era) device
 * method pointers, then calls register_netdev().
 */
1312 static struct net_device *
1313 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
1315 struct net_device *dev;
1316 struct mpt_lan_priv *priv;
1317 u8 HWaddr[FC_ALEN], *a;
/* alloc_fcdev() yields an FC-flavoured net_device with our priv area. */
1319 dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
1323 dev->mtu = MPT_LAN_MTU;
1325 priv = netdev_priv(dev);
1328 priv->mpt_dev = mpt_dev;
1331 INIT_DELAYED_WORK(&priv->post_buckets_task,
1332 mpt_lan_post_receive_buckets_work);
1333 priv->post_buckets_active = 0;
1335 dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
1336 __LINE__, dev->mtu + dev->hard_header_len + 4));
1338 atomic_set(&priv->buckets_out, 0);
1339 priv->total_posted = 0;
1340 priv->total_received = 0;
/* Cap the outstanding-bucket ceiling at what the firmware reports. */
1341 priv->max_buckets_out = max_buckets_out;
1342 if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
1343 priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
1345 dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
1347 mpt_dev->pfacts[0].MaxLanBuckets,
1349 priv->max_buckets_out));
/* Refill threshold: repost once 2/3 of the buckets are outstanding. */
1351 priv->bucketthresh = priv->max_buckets_out * 2 / 3;
1352 spin_lock_init(&priv->txfidx_lock);
1353 spin_lock_init(&priv->rxfidx_lock);
1355 /* Grab pre-fetched LANPage1 stuff. :-) */
1356 a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
1365 dev->addr_len = FC_ALEN;
1366 memcpy(dev->dev_addr, HWaddr, FC_ALEN);
1367 memset(dev->broadcast, 0xff, FC_ALEN);
1369 /* The Tx queue is 127 deep on the 909.
1370 * Give ourselves some breathing room.
1372 priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
1373 tx_max_out_p : MPT_TX_MAX_OUT_LIM;
/* Old-style net_device method pointers (kernels before net_device_ops). */
1375 dev->open = mpt_lan_open;
1376 dev->stop = mpt_lan_close;
1377 dev->get_stats = mpt_lan_get_stats;
1378 dev->set_multicast_list = NULL;
1379 dev->change_mtu = mpt_lan_change_mtu;
1380 dev->hard_start_xmit = mpt_lan_sdu_send;
1382 /* Not in 2.3.42. Need 2.3.45+ */
1383 dev->tx_timeout = mpt_lan_tx_timeout;
1384 dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
1386 dlprintk((KERN_INFO MYNAM ": Finished registering dev "
1387 "and setting initial values\n"));
/* Failure path (elided here) presumably frees dev -- confirm. */
1389 if (register_netdev(dev) != 0) {
/*
 * mptlan_probe - per-adapter probe: walk every port reported by the
 * IOC and register a LAN net_device for each port that advertises the
 * LAN protocol in its ProtocolFlags.
 */
1397 mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1399 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
1400 struct net_device *dev;
1403 for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
1404 printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
1405 "ProtocolFlags=%02Xh (%c%c%c%c)\n",
1406 ioc->name, ioc->pfacts[i].PortNumber,
1407 ioc->pfacts[i].ProtocolFlags,
1408 MPT_PROTOCOL_FLAGS_c_c_c_c(
1409 ioc->pfacts[i].ProtocolFlags));
/* Skip ports on which the LAN protocol is not enabled. */
1411 if (!(ioc->pfacts[i].ProtocolFlags &
1412 MPI_PORTFACTS_PROTOCOL_LAN)) {
1413 printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
1414 "seems to be disabled on this adapter port!\n",
1419 dev = mpt_register_lan_device(ioc, i);
1421 printk(KERN_ERR MYNAM ": %s: Unable to register "
1422 "port%d as a LAN device\n", ioc->name,
1423 ioc->pfacts[i].PortNumber);
1427 printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
1428 "registered as '%s'\n", ioc->name, dev->name);
1429 printk(KERN_INFO MYNAM ": %s/%s: "
1431 IOC_AND_NETDEV_NAMES_s_s(dev),
/* Per-adapter remove: unregister the net_device cached on the IOC. */
1443 mptlan_remove(struct pci_dev *pdev)
1445 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
1446 struct net_device *dev = ioc->netdev;
1449 unregister_netdev(dev);
/* Probe/remove glue handed to mptbase's device-driver multiplexer. */
1454 static struct mpt_pci_driver mptlan_driver = {
1455 .probe = mptlan_probe,
1456 .remove = mptlan_remove,
/*
 * mpt_lan_init - module init: obtain a protocol-driver context from the
 * MPT base driver, hook IOC-reset notifications, then register the
 * PCI probe/remove glue.
 */
1459 static int __init mpt_lan_init (void)
1461 show_mptmod_ver(LANAME, LANVER);
/* LanCtx identifies this driver's replies to mptbase; <= 0 is failure. */
1463 if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
1464 printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1468 dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1470 if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
1471 printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
1472 "handler with mptbase! The world is at an end! "
1473 "Everything is fading to black! Goodbye.\n");
1477 dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1479 mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER);
/* Module exit: undo mpt_lan_init() registrations in reverse order. */
1483 static void __exit mpt_lan_exit(void)
1485 mpt_device_driver_deregister(MPTLAN_DRIVER);
1486 mpt_reset_deregister(LanCtx);
1489 mpt_deregister(LanCtx);
/* Reset the context to its out-of-range sentinel value. */
1490 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
/* Standard module entry/exit hookup. */
1494 module_init(mpt_lan_init);
1495 module_exit(mpt_lan_exit);
1497 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_type_trans - FC-LAN analogue of eth_type_trans(): classify
 * the received frame (host/broadcast/multicast/otherhost), strip the
 * encapsulation headers, and return the protocol id for the stack.
 */
1498 static unsigned short
1499 mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1501 struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
1502 struct fcllc *fcllc;
1504 skb_reset_mac_header(skb);
1505 skb_pull(skb, sizeof(struct mpt_lan_ohdr));
/* dtype of 0xffff flags a known sender-firmware byte-swap bug; the
 * (elided) code below presumably un-swaps the header -- confirm. */
1507 if (fch->dtype == htons(0xffff)) {
1508 u32 *p = (u32 *) fch;
1515 printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
1516 NETDEV_PTR_TO_IOC_NAME_s(dev));
1517 printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %pM\n",
/* Low bit of the first destination byte set => group address. */
1521 if (*fch->daddr & 1) {
1522 if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
1523 skb->pkt_type = PACKET_BROADCAST;
1525 skb->pkt_type = PACKET_MULTICAST;
/* Unicast: ours, or destined for another host (promiscuous case). */
1528 if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
1529 skb->pkt_type = PACKET_OTHERHOST;
1531 skb->pkt_type = PACKET_HOST;
1535 fcllc = (struct fcllc *)skb->data;
1537 /* Strip the SNAP header from ARP packets since we don't
1538 * pass them through to the 802.2/SNAP layers.
1540 if (fcllc->dsap == EXTENDED_SAP &&
1541 (fcllc->ethertype == htons(ETH_P_IP) ||
1542 fcllc->ethertype == htons(ETH_P_ARP))) {
1543 skb_pull(skb, sizeof(struct fcllc));
1544 return fcllc->ethertype;
/* Anything else is handed up as raw 802.2. */
1547 return htons(ETH_P_802_2);
1550 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/