3 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
5 * Copyright © 2005 Agere Systems Inc.
9 *------------------------------------------------------------------------------
11 * et1310_tx.c - Routines used to perform data transmission.
13 *------------------------------------------------------------------------------
17 * This software is provided subject to the following terms and conditions,
18 * which you should read carefully before using the software. Using this
19 * software indicates your acceptance of these terms and conditions. If you do
20 * not agree with these terms and conditions, do not use the software.
22 * Copyright © 2005 Agere Systems Inc.
23 * All rights reserved.
25 * Redistribution and use in source or binary forms, with or without
26 * modifications, are permitted provided that the following conditions are met:
28 * . Redistributions of source code must retain the above copyright notice, this
29 * list of conditions and the following Disclaimer as comments in the code as
30 * well as in the documentation and/or other materials provided with the
33 * . Redistributions in binary form must reproduce the above copyright notice,
34 * this list of conditions and the following Disclaimer in the documentation
35 * and/or other materials provided with the distribution.
37 * . Neither the name of Agere Systems Inc. nor the names of the contributors
38 * may be used to endorse or promote products derived from this software
39 * without specific prior written permission.
43 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
44 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
45 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
46 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
47 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
48 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
49 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
50 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
51 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
52 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
58 #include "et131x_version.h"
59 #include "et131x_debug.h"
60 #include "et131x_defs.h"
62 #include <linux/pci.h>
63 #include <linux/init.h>
64 #include <linux/module.h>
65 #include <linux/types.h>
66 #include <linux/kernel.h>
68 #include <linux/sched.h>
69 #include <linux/ptrace.h>
70 #include <linux/slab.h>
71 #include <linux/ctype.h>
72 #include <linux/string.h>
73 #include <linux/timer.h>
74 #include <linux/interrupt.h>
76 #include <linux/delay.h>
78 #include <asm/system.h>
79 #include <asm/bitops.h>
81 #include <linux/netdevice.h>
82 #include <linux/etherdevice.h>
83 #include <linux/skbuff.h>
84 #include <linux/if_arp.h>
85 #include <linux/ioport.h>
87 #include "et1310_phy.h"
88 #include "et1310_pm.h"
89 #include "et1310_jagcore.h"
91 #include "et131x_adapter.h"
92 #include "et131x_initpci.h"
93 #include "et131x_isr.h"
95 #include "et1310_tx.h"
97 /* Data for debugging facilities */
98 #ifdef CONFIG_ET131X_DEBUG
99 extern dbg_info_t *et131x_dbginfo;
100 #endif /* CONFIG_ET131X_DEBUG */
102 static void et131x_update_tcb_list(struct et131x_adapter *pAdapter);
103 static void et131x_check_send_wait_list(struct et131x_adapter *pAdapter);
104 static inline void et131x_free_send_packet(struct et131x_adapter *pAdapter,
105 					    PMP_TCB pMpTcb);
106 static int et131x_send_packet(struct sk_buff *skb,
107 struct et131x_adapter *pAdapter);
108 static int nic_send_packet(struct et131x_adapter *pAdapter, PMP_TCB pMpTcb);
111 * et131x_tx_dma_memory_alloc
112 * @adapter: pointer to our private adapter structure
114 * Returns 0 on success and errno on failure (as defined in errno.h).
116 * Allocates memory that will be visible both to the device and to the CPU.
117 * The OS will pass us packets, pointers to which we will insert in the Tx
118 * Descriptor queue. The device will read this queue to find the packets in
119 * memory. The device will update the "status" in memory each time it xmits a packet.
122 int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
125 TX_RING_t *tx_ring = &adapter->TxRing;
127 DBG_ENTER(et131x_dbginfo);
129 /* Allocate memory for the TCB's (Transmit Control Block) */
130 adapter->TxRing.MpTcbMem = (MP_TCB *) kcalloc(NUM_TCB, sizeof(MP_TCB),
131 GFP_ATOMIC | GFP_DMA);
132 if (!adapter->TxRing.MpTcbMem) {
133 DBG_ERROR(et131x_dbginfo, "Cannot alloc memory for TCBs\n");
134 DBG_LEAVE(et131x_dbginfo);
138 /* Allocate enough memory for the Tx descriptor ring, and allocate
139 * some extra so that the ring can be aligned on a 4k boundary.
141 desc_size = (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;
142 tx_ring->pTxDescRingVa =
143 (PTX_DESC_ENTRY_t) pci_alloc_consistent(adapter->pdev, desc_size,
144 &tx_ring->pTxDescRingPa);
145 if (!adapter->TxRing.pTxDescRingVa) {
146 DBG_ERROR(et131x_dbginfo, "Cannot alloc memory for Tx Ring\n");
147 DBG_LEAVE(et131x_dbginfo);
151 /* Save physical address
153 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
154 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
155 * are ever returned, make sure the high part is retrieved here before
156 * storing the adjusted address.
158 tx_ring->pTxDescRingAdjustedPa = tx_ring->pTxDescRingPa;
160 /* Align Tx Descriptor Ring on a 4k (0x1000) byte boundary */
161 et131x_align_allocated_memory(adapter,
162 &tx_ring->pTxDescRingAdjustedPa,
163 &tx_ring->TxDescOffset, 0x0FFF);
165 tx_ring->pTxDescRingVa += tx_ring->TxDescOffset;
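/* Illustrative sketch (not part of the original driver): assuming
 * et131x_align_allocated_memory() rounds the physical address up to the next
 * boundary implied by the 0x0FFF mask and reports how far it moved, the
 * arithmetic is roughly:
 *
 *	dma_addr_t pa      = tx_ring->pTxDescRingPa;
 *	dma_addr_t aligned = (pa + 0x0FFF) & ~(dma_addr_t)0x0FFF;
 *	uint32_t   offset  = aligned - pa;	(somewhere in 0..4095 bytes)
 *
 * The same offset is applied to the virtual address above so the CPU and the
 * device agree on where descriptor 0 lives.
 */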
167 /* Allocate memory for the Tx status block */
168 tx_ring->pTxStatusVa = pci_alloc_consistent(adapter->pdev,
169 sizeof(TX_STATUS_BLOCK_t),
170 &tx_ring->pTxStatusPa);
171 if (!adapter->TxRing.pTxStatusVa) {
172 DBG_ERROR(et131x_dbginfo,
173 "Cannot alloc memory for Tx status block\n");
174 DBG_LEAVE(et131x_dbginfo);
178 /* Allocate memory for a dummy buffer */
179 tx_ring->pTxDummyBlkVa = pci_alloc_consistent(adapter->pdev,
181 &tx_ring->pTxDummyBlkPa);
182 if (!adapter->TxRing.pTxDummyBlkVa) {
183 DBG_ERROR(et131x_dbginfo,
184 "Cannot alloc memory for Tx dummy buffer\n");
185 DBG_LEAVE(et131x_dbginfo);
189 DBG_LEAVE(et131x_dbginfo);
194 * et131x_tx_dma_memory_free - Free all memory allocated within this module
195 * @adapter: pointer to our private adapter structure
197 * Frees the Tx descriptor ring, Tx status block, dummy buffer, and TCB memory.
199 void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
203 DBG_ENTER(et131x_dbginfo);
205 if (adapter->TxRing.pTxDescRingVa) {
206 /* Free memory relating to Tx rings here */
207 adapter->TxRing.pTxDescRingVa -= adapter->TxRing.TxDescOffset;
210 (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;
212 pci_free_consistent(adapter->pdev,
214 adapter->TxRing.pTxDescRingVa,
215 adapter->TxRing.pTxDescRingPa);
217 adapter->TxRing.pTxDescRingVa = NULL;
220 /* Free memory for the Tx status block */
221 if (adapter->TxRing.pTxStatusVa) {
222 pci_free_consistent(adapter->pdev,
223 sizeof(TX_STATUS_BLOCK_t),
224 adapter->TxRing.pTxStatusVa,
225 adapter->TxRing.pTxStatusPa);
227 adapter->TxRing.pTxStatusVa = NULL;
230 /* Free memory for the dummy buffer */
231 if (adapter->TxRing.pTxDummyBlkVa) {
232 pci_free_consistent(adapter->pdev,
234 adapter->TxRing.pTxDummyBlkVa,
235 adapter->TxRing.pTxDummyBlkPa);
237 adapter->TxRing.pTxDummyBlkVa = NULL;
240 /* Free the memory for MP_TCB structures */
241 if (adapter->TxRing.MpTcbMem) {
242 kfree(adapter->TxRing.MpTcbMem);
243 adapter->TxRing.MpTcbMem = NULL;
246 DBG_LEAVE(et131x_dbginfo);
250 * ConfigTxDmaRegs - Set up the tx dma section of the JAGCore.
251 * @adapter: pointer to our private adapter structure
253 void ConfigTxDmaRegs(struct et131x_adapter *pAdapter)
255 struct _TXDMA_t __iomem *pTxDma = &pAdapter->CSRAddress->txdma;
257 DBG_ENTER(et131x_dbginfo);
259 /* Load the hardware with the start of the transmit descriptor ring. */
260 writel((uint32_t) (pAdapter->TxRing.pTxDescRingAdjustedPa >> 32),
261 &pTxDma->pr_base_hi);
262 writel((uint32_t) pAdapter->TxRing.pTxDescRingAdjustedPa,
263 &pTxDma->pr_base_lo);
265 /* Initialise the transmit DMA engine */
266 writel(NUM_DESC_PER_RING_TX - 1, &pTxDma->pr_num_des.value);
268 /* Load the completion writeback physical address
270 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
271 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
272 * are ever returned, make sure the high part is retrieved here before
273 * storing the adjusted address.
275 writel(0, &pTxDma->dma_wb_base_hi);
276 writel(pAdapter->TxRing.pTxStatusPa, &pTxDma->dma_wb_base_lo);
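/* Illustrative note (an assumption, not taken from this file): if the DMA
 * layer ever handed back a 64-bit (DAC) address, the split into the hi/lo
 * register pair could use the standard kernel helpers instead of the
 * hard-coded zero above, e.g.:
 *
 *	writel(upper_32_bits(pAdapter->TxRing.pTxStatusPa),
 *	       &pTxDma->dma_wb_base_hi);
 *	writel(lower_32_bits(pAdapter->TxRing.pTxStatusPa),
 *	       &pTxDma->dma_wb_base_lo);
 */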
278 memset(pAdapter->TxRing.pTxStatusVa, 0, sizeof(TX_STATUS_BLOCK_t));
280 writel(0, &pTxDma->service_request.value);
281 pAdapter->TxRing.txDmaReadyToSend.value = 0;
283 DBG_LEAVE(et131x_dbginfo);
287 * et131x_tx_dma_disable - Stop Tx DMA on the ET1310
288 * @pAdapter: pointer to our adapter structure
290 void et131x_tx_dma_disable(struct et131x_adapter *pAdapter)
292 DBG_ENTER(et131x_dbginfo);
294 /* Setup the transmit dma configuration register */
295 writel(0x101, &pAdapter->CSRAddress->txdma.csr.value);
297 DBG_LEAVE(et131x_dbginfo);
301 * et131x_tx_dma_enable - Re-start Tx DMA on the ET1310.
302 * @pAdapter: pointer to our adapter structure
304 * Mainly used after a return to the D0 (full-power) state from a lower state.
306 void et131x_tx_dma_enable(struct et131x_adapter *pAdapter)
308 DBG_ENTER(et131x_dbginfo);
310 if (pAdapter->RegistryPhyLoopbk) {
311 /* TxDMA is disabled for loopback operation. */
312 writel(0x101, &pAdapter->CSRAddress->txdma.csr.value);
314 TXDMA_CSR_t csr = { 0 };
316 /* Setup the transmit dma configuration register for normal
319 csr.bits.sngl_epkt_mode = 1;
321 csr.bits.cache_thrshld = pAdapter->RegistryDMACache;
322 writel(csr.value, &pAdapter->CSRAddress->txdma.csr.value);
325 DBG_LEAVE(et131x_dbginfo);
329 * et131x_init_send - Initialize send data structures
330 * @adapter: pointer to our private adapter structure
332 void et131x_init_send(struct et131x_adapter *adapter)
338 DBG_ENTER(et131x_dbginfo);
340 /* Setup some convenience pointers */
341 tx_ring = &adapter->TxRing;
342 pMpTcb = adapter->TxRing.MpTcbMem;
344 tx_ring->TCBReadyQueueHead = pMpTcb;
346 /* Go through and set up each TCB */
347 for (TcbCount = 0; TcbCount < NUM_TCB; TcbCount++) {
348 memset(pMpTcb, 0, sizeof(MP_TCB));
350 /* Set the link pointer in HW TCB to the next TCB in the
351 * chain. If this is the last TCB in the chain, also set the
354 if (TcbCount < NUM_TCB - 1) {
355 pMpTcb->Next = pMpTcb + 1;
357 tx_ring->TCBReadyQueueTail = pMpTcb;
358 pMpTcb->Next = (PMP_TCB) NULL;
364 /* Curr send queue should now be empty */
365 tx_ring->CurrSendHead = (PMP_TCB) NULL;
366 tx_ring->CurrSendTail = (PMP_TCB) NULL;
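/* Resulting layout (sketch): the TCBs form a singly linked free list threaded
 * through MpTcbMem, with the ready queue holding both ends:
 *
 *	TCBReadyQueueHead -> TCB[0] -> TCB[1] -> ... -> TCB[NUM_TCB-1] -> NULL
 *	TCBReadyQueueTail ----------------------------------^
 *
 * et131x_send_packet() pops TCBs from the head and et131x_free_send_packet()
 * pushes completed TCBs back on the tail.
 */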
368 INIT_LIST_HEAD(&adapter->TxRing.SendWaitQueue);
370 DBG_LEAVE(et131x_dbginfo);
374 * et131x_send_packets - This function is called by the OS to send packets
375 * @skb: the packet(s) to send
376 * @netdev: device on which to TX the above packet(s)
378 * Return 0 in almost all cases; non-zero value in extreme hard failure only
380 int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
383 struct et131x_adapter *pAdapter = NULL;
385 DBG_TX_ENTER(et131x_dbginfo);
387 pAdapter = netdev_priv(netdev);
389 /* Send these packets
391 * NOTE: The Linux Tx entry point is only given one packet at a time
392 * to Tx, so the PacketCount and its associated array make no sense here
395 /* Queue is not empty or TCB is not available */
396 if (!list_empty(&pAdapter->TxRing.SendWaitQueue) ||
397 MP_TCB_RESOURCES_NOT_AVAILABLE(pAdapter)) {
398 /* NOTE: If there's an error on send, no need to queue the
399 * packet under Linux; if we just send an error up to the
400 * netif layer, it will resend the skb to us.
402 DBG_VERBOSE(et131x_dbginfo, "TCB Resources Not Available\n");
405 /* We need to see if the link is up; if it's not, make the
406 * netif layer think we're good and drop the packet
409 if (MP_SHOULD_FAIL_SEND(pAdapter) || pAdapter->DriverNoPhyAccess
410 || !netif_carrier_ok(netdev)) {
411 DBG_VERBOSE(et131x_dbginfo,
412 "Can't Tx, Link is DOWN; drop the packet\n");
414 dev_kfree_skb_any(skb);
417 pAdapter->net_stats.tx_dropped++;
419 status = et131x_send_packet(skb, pAdapter);
421 if (status == -ENOMEM) {
423 /* NOTE: If there's an error on send, no need
424 * to queue the packet under Linux; if we just
425 * send an error up to the netif layer, it
426 * will resend the skb to us.
428 DBG_WARNING(et131x_dbginfo,
429 "Resources problem, Queue tx packet\n");
430 } else if (status != 0) {
431 /* On any other error, make netif think we're
432 * OK and drop the packet
434 DBG_WARNING(et131x_dbginfo,
435 "General error, drop packet\n");
437 dev_kfree_skb_any(skb);
440 pAdapter->net_stats.tx_dropped++;
445 DBG_TX_LEAVE(et131x_dbginfo);
450 * et131x_send_packet - Do the work to send a packet
451 * @skb: the packet(s) to send
452 * @pAdapter: a pointer to the device's private adapter structure
454 * Return 0 in almost all cases; non-zero value in extreme hard failure only.
456 * Assumption: Send spinlock has been acquired
458 static int et131x_send_packet(struct sk_buff *skb,
459 struct et131x_adapter *pAdapter)
462 PMP_TCB pMpTcb = NULL;
464 unsigned long lockflags;
466 DBG_TX_ENTER(et131x_dbginfo);
468 /* Is our buffer scattered, or contiguous? */
469 if (skb_shinfo(skb)->nr_frags == 0) {
470 DBG_TX(et131x_dbginfo, "Scattered buffer: NO\n");
472 DBG_TX(et131x_dbginfo, "Scattered buffer: YES, Num Frags: %d\n",
473 skb_shinfo(skb)->nr_frags);
476 /* All packets must have at least a MAC address and a protocol type */
477 if (skb->len < ETH_HLEN) {
478 DBG_ERROR(et131x_dbginfo,
479 "Packet size < ETH_HLEN (14 bytes)\n");
480 DBG_LEAVE(et131x_dbginfo);
484 /* Get a TCB for this packet */
485 spin_lock_irqsave(&pAdapter->TCBReadyQLock, lockflags);
487 pMpTcb = pAdapter->TxRing.TCBReadyQueueHead;
489 if (pMpTcb == NULL) {
490 spin_unlock_irqrestore(&pAdapter->TCBReadyQLock, lockflags);
492 DBG_WARNING(et131x_dbginfo, "Can't obtain a TCB\n");
493 DBG_TX_LEAVE(et131x_dbginfo);
497 pAdapter->TxRing.TCBReadyQueueHead = pMpTcb->Next;
499 if (pAdapter->TxRing.TCBReadyQueueHead == NULL) {
500 pAdapter->TxRing.TCBReadyQueueTail = NULL;
503 spin_unlock_irqrestore(&pAdapter->TCBReadyQLock, lockflags);
505 pMpTcb->PacketLength = skb->len;
506 pMpTcb->Packet = skb;
508 if ((skb->data != NULL) && ((skb->len - skb->data_len) >= 6)) {
509 pShBufVa = (uint16_t *) skb->data;
511 if ((pShBufVa[0] == 0xffff) &&
512 (pShBufVa[1] == 0xffff) && (pShBufVa[2] == 0xffff)) {
513 MP_SET_FLAG(pMpTcb, fMP_DEST_BROAD);
514 } else if ((pShBufVa[0] & 0x3) == 0x0001) {
515 MP_SET_FLAG(pMpTcb, fMP_DEST_MULTI);
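/* Sketch of what the two tests above examine (the destination MAC is read as
 * three 16-bit words in host byte order):
 *
 *	ff:ff:ff:ff:ff:ff -> pShBufVa[0..2] all 0xffff    -> broadcast
 *	01:00:5e:xx:xx:xx -> group bit set in first octet -> multicast
 *
 * The multicast branch keys off the group (I/G) bit of the first octet; the
 * exact 0x3 mask is inherited from the original code and left untouched here.
 */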
521 /* Call the NIC specific send handler. */
523 status = nic_send_packet(pAdapter, pMpTcb);
527 spin_lock_irqsave(&pAdapter->TCBReadyQLock, lockflags);
529 if (pAdapter->TxRing.TCBReadyQueueTail) {
530 pAdapter->TxRing.TCBReadyQueueTail->Next = pMpTcb;
532 /* Apparently ready Q is empty. */
533 pAdapter->TxRing.TCBReadyQueueHead = pMpTcb;
536 pAdapter->TxRing.TCBReadyQueueTail = pMpTcb;
538 spin_unlock_irqrestore(&pAdapter->TCBReadyQLock, lockflags);
540 DBG_TX_LEAVE(et131x_dbginfo);
544 DBG_ASSERT(pAdapter->TxRing.nBusySend <= NUM_TCB);
546 DBG_TX_LEAVE(et131x_dbginfo);
551 * nic_send_packet - NIC specific send handler for version B silicon.
552 * @pAdapter: pointer to our adapter
553 * @pMpTcb: pointer to MP_TCB
555 * Returns 0 or errno.
557 static int nic_send_packet(struct et131x_adapter *pAdapter, PMP_TCB pMpTcb)
560 TX_DESC_ENTRY_t CurDesc[24];
561 uint32_t FragmentNumber = 0;
562 uint32_t iThisCopy, iRemainder;
563 struct sk_buff *pPacket = pMpTcb->Packet;
564 uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
565 struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
566 unsigned long lockflags1, lockflags2;
568 DBG_TX_ENTER(et131x_dbginfo);
570 /* The optimizations of this send routine restrict us to sending 24
571 * fragments per pass. In practice we should never see more than 5
572 * fragments.
574 * NOTE: The older version of this function (below) can handle any
575 * number of fragments. If needed, we can call this function,
576 * although it is less efficient.
578 if (FragListCount > 23) {
579 DBG_TX_LEAVE(et131x_dbginfo);
583 memset(CurDesc, 0, sizeof(TX_DESC_ENTRY_t) * (FragListCount + 1));
585 for (loopIndex = 0; loopIndex < FragListCount; loopIndex++) {
586 /* If there is something in this element, lets get a
587 * descriptor from the ring and get the necessary data
589 if (loopIndex == 0) {
590 /* If the fragments are smaller than a standard MTU,
591 * then map them to a single descriptor in the Tx
592 * Desc ring. However, if they're larger, as is
593 * possible with support for jumbo packets, then
594 * split them each across 2 descriptors.
596 * This will work until we determine why the hardware
597 * doesn't seem to like large fragments.
599 if ((pPacket->len - pPacket->data_len) <= 1514) {
600 DBG_TX(et131x_dbginfo,
601 "Got packet of length %d, "
602 "filling desc entry %d, "
604 (pPacket->len - pPacket->data_len),
605 pAdapter->TxRing.txDmaReadyToSend.bits.
608 CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
610 CurDesc[FragmentNumber].word2.bits.
612 pPacket->len - pPacket->data_len;
614 /* NOTE: Here, the dma_addr_t returned from
615 * pci_map_single() is implicitly cast as a
616 * uint32_t. Although dma_addr_t can be
617 * 64-bit, the address returned by
618 * pci_map_single() is always 32-bit
619 * addressable (as defined by the pci/dma
622 CurDesc[FragmentNumber++].DataBufferPtrLow =
623 pci_map_single(pAdapter->pdev,
629 DBG_TX(et131x_dbginfo,
630 "Got packet of length %d, "
631 "filling desc entry %d, "
633 (pPacket->len - pPacket->data_len),
634 pAdapter->TxRing.txDmaReadyToSend.bits.
637 CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
639 CurDesc[FragmentNumber].word2.bits.
641 ((pPacket->len - pPacket->data_len) / 2);
643 /* NOTE: Here, the dma_addr_t returned from
644 * pci_map_single() is implicitly cast as a
645 * uint32_t. Although dma_addr_t can be
646 * 64-bit, the address returned by
647 * pci_map_single() is always 32-bit
648 * addressable (as defined by the pci/dma
651 CurDesc[FragmentNumber++].DataBufferPtrLow =
652 pci_map_single(pAdapter->pdev,
655 pPacket->data_len) / 2),
657 CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
659 CurDesc[FragmentNumber].word2.bits.
661 ((pPacket->len - pPacket->data_len) / 2);
663 /* NOTE: Here, the dma_addr_t returned from
664 * pci_map_single() is implicitly cast as a
665 * uint32_t. Although dma_addr_t can be
666 * 64-bit, the address returned by
667 * pci_map_single() is always 32-bit
668 * addressable (as defined by the pci/dma
671 CurDesc[FragmentNumber++].DataBufferPtrLow =
672 pci_map_single(pAdapter->pdev,
675 pPacket->data_len) / 2),
677 pPacket->data_len) / 2),
681 DBG_TX(et131x_dbginfo,
682 "Got packet of length %d,"
683 "filling desc entry %d\n"
685 pFragList[loopIndex].size,
686 pAdapter->TxRing.txDmaReadyToSend.bits.val,
689 CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
691 CurDesc[FragmentNumber].word2.bits.length_in_bytes =
692 pFragList[loopIndex - 1].size;
694 /* NOTE: Here, the dma_addr_t returned from
695 * pci_map_page() is implicitly cast as a uint32_t.
696 * Although dma_addr_t can be 64-bit, the address
697 * returned by pci_map_page() is always 32-bit
698 * addressable (as defined by the pci/dma subsystem)
700 CurDesc[FragmentNumber++].DataBufferPtrLow =
701 pci_map_page(pAdapter->pdev,
702 pFragList[loopIndex - 1].page,
703 pFragList[loopIndex - 1].page_offset,
704 pFragList[loopIndex - 1].size,
709 if (FragmentNumber == 0) {
710 DBG_WARNING(et131x_dbginfo, "No. frags is 0\n");
714 if (pAdapter->uiLinkSpeed == TRUEPHY_SPEED_1000MBPS) {
715 if (++pAdapter->TxRing.TxPacketsSinceLastinterrupt ==
716 pAdapter->RegistryTxNumBuffers) {
717 CurDesc[FragmentNumber - 1].word3.value = 0x5;
718 pAdapter->TxRing.TxPacketsSinceLastinterrupt = 0;
720 CurDesc[FragmentNumber - 1].word3.value = 0x1;
723 CurDesc[FragmentNumber - 1].word3.value = 0x5;
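/* Interpretation (an assumption based on how these values are used, not on
 * documentation in this file): word3 carries the per-descriptor control
 * flags. 0x1 appears to mark "last fragment of the packet" while 0x5
 * additionally requests a Tx-complete interrupt, so at gigabit speed only
 * every RegistryTxNumBuffers-th packet interrupts and the rest are reaped
 * lazily (see the watchdog timer programmed near the end of this routine).
 */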
726 CurDesc[0].word3.bits.f = 1;
728 pMpTcb->WrIndexStart = pAdapter->TxRing.txDmaReadyToSend;
729 pMpTcb->PacketStaleCount = 0;
731 spin_lock_irqsave(&pAdapter->SendHWLock, lockflags1);
734 NUM_DESC_PER_RING_TX - pAdapter->TxRing.txDmaReadyToSend.bits.val;
736 if (iThisCopy >= FragmentNumber) {
738 iThisCopy = FragmentNumber;
740 iRemainder = FragmentNumber - iThisCopy;
743 memcpy(pAdapter->TxRing.pTxDescRingVa +
744 pAdapter->TxRing.txDmaReadyToSend.bits.val, CurDesc,
745 sizeof(TX_DESC_ENTRY_t) * iThisCopy);
747 pAdapter->TxRing.txDmaReadyToSend.bits.val += iThisCopy;
749 if ((pAdapter->TxRing.txDmaReadyToSend.bits.val == 0) ||
750 (pAdapter->TxRing.txDmaReadyToSend.bits.val ==
751 NUM_DESC_PER_RING_TX)) {
752 if (pAdapter->TxRing.txDmaReadyToSend.bits.wrap) {
753 pAdapter->TxRing.txDmaReadyToSend.value = 0;
755 pAdapter->TxRing.txDmaReadyToSend.value = 0x400;
760 memcpy(pAdapter->TxRing.pTxDescRingVa,
762 sizeof(TX_DESC_ENTRY_t) * iRemainder);
764 pAdapter->TxRing.txDmaReadyToSend.bits.val += iRemainder;
767 if (pAdapter->TxRing.txDmaReadyToSend.bits.val == 0) {
768 if (pAdapter->TxRing.txDmaReadyToSend.value) {
769 pMpTcb->WrIndex.value = NUM_DESC_PER_RING_TX - 1;
771 pMpTcb->WrIndex.value =
772 0x400 | (NUM_DESC_PER_RING_TX - 1);
775 pMpTcb->WrIndex.value =
776 pAdapter->TxRing.txDmaReadyToSend.value - 1;
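/* Index-format sketch (inferred from the 0x400 constants above, not from
 * hardware documentation): txDmaReadyToSend is a DMA10-style value, i.e. a
 * 10-bit descriptor index in bits [9:0] plus a wrap flag in bit 10 (0x400).
 * When the index runs off the end of the ring it snaps back to 0x000 or
 * 0x400, toggling the wrap flag, roughly:
 *
 *	if (ready.bits.val >= NUM_DESC_PER_RING_TX)
 *		ready.value = ready.bits.wrap ? 0x000 : 0x400;
 *
 * et131x_update_tcb_list() compares both the index and the wrap flag to
 * decide which TCBs the hardware has finished with.
 */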
779 spin_lock_irqsave(&pAdapter->TCBSendQLock, lockflags2);
781 if (pAdapter->TxRing.CurrSendTail) {
782 pAdapter->TxRing.CurrSendTail->Next = pMpTcb;
784 pAdapter->TxRing.CurrSendHead = pMpTcb;
787 pAdapter->TxRing.CurrSendTail = pMpTcb;
789 DBG_ASSERT(pMpTcb->Next == NULL);
791 pAdapter->TxRing.nBusySend++;
793 spin_unlock_irqrestore(&pAdapter->TCBSendQLock, lockflags2);
795 /* Write the new write pointer back to the device. */
796 writel(pAdapter->TxRing.txDmaReadyToSend.value,
797 &pAdapter->CSRAddress->txdma.service_request.value);
799 /* For Gig only, we use Tx Interrupt coalescing. Enable the software
800 * timer to wake us up if this packet isn't followed by N more.
802 if (pAdapter->uiLinkSpeed == TRUEPHY_SPEED_1000MBPS) {
803 writel(pAdapter->RegistryTxTimeInterval * NANO_IN_A_MICRO,
804 &pAdapter->CSRAddress->global.watchdog_timer);
807 spin_unlock_irqrestore(&pAdapter->SendHWLock, lockflags1);
809 DBG_TX_LEAVE(et131x_dbginfo);
814 * NOTE: For now, keep this older version of NICSendPacket around for
815 * reference, even though it's not used
820 * NICSendPacket - NIC specific send handler.
821 * @pAdapter: pointer to our adapter
822 * @pMpTcb: pointer to MP_TCB
824 * Returns 0 on success, errno on failure.
826 * This version of the send routine is designed for version A silicon.
827 * Assumption - Send spinlock has been acquired.
829 static int nic_send_packet(struct et131x_adapter *pAdapter, PMP_TCB pMpTcb)
831 uint32_t loopIndex, fragIndex, loopEnd;
832 uint32_t iSplitFirstElement = 0;
833 uint32_t SegmentSize = 0;
834 TX_DESC_ENTRY_t CurDesc;
835 TX_DESC_ENTRY_t *CurDescPostCopy = NULL;
836 uint32_t SlotsAvailable;
837 DMA10W_t ServiceComplete;
838 unsigned long lockflags1, lockflags2;
839 struct sk_buff *pPacket = pMpTcb->Packet;
840 uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
841 struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
843 DBG_TX_ENTER(et131x_dbginfo);
845 ServiceComplete.value =
846 readl(&pAdapter->CSRAddress->txdma.NewServiceComplete.value);
849 * Attempt to fix TWO hardware bugs:
850 * 1) NEVER write an odd number of descriptors.
851 * 2) If packet length is less than NIC_MIN_PACKET_SIZE, then pad the
852 * packet to NIC_MIN_PACKET_SIZE bytes by adding a new last
853 * descriptor IN HALF DUPLEX MODE ONLY
854 * NOTE that (2) interacts with (1). If the packet is less than
855 * NIC_MIN_PACKET_SIZE bytes then we will append a descriptor.
856 * Therefore if it is even now, it will eventually end up odd, and
857 * so will need adjusting.
859 * VLAN tags get involved since VLAN tags add another one or two
862 DBG_TX(et131x_dbginfo,
863 "pMpTcb->PacketLength: %d\n", pMpTcb->PacketLength);
865 if ((pAdapter->uiDuplexMode == 0)
866 && (pMpTcb->PacketLength < NIC_MIN_PACKET_SIZE)) {
867 DBG_TX(et131x_dbginfo,
868 "HALF DUPLEX mode AND len < MIN_PKT_SIZE\n");
869 if ((FragListCount & 0x1) == 0) {
870 DBG_TX(et131x_dbginfo,
871 "Even number of descs, split 1st elem\n");
872 iSplitFirstElement = 1;
873 //SegmentSize = pFragList[0].size / 2;
874 SegmentSize = (pPacket->len - pPacket->data_len) / 2;
876 } else if (FragListCount & 0x1) {
877 DBG_TX(et131x_dbginfo, "Odd number of descs, split 1st elem\n");
879 iSplitFirstElement = 1;
880 //SegmentSize = pFragList[0].size / 2;
881 SegmentSize = (pPacket->len - pPacket->data_len) / 2;
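/* Worked example of the even/odd juggling described above (a sketch): a
 * sub-minimum packet spread over two elements (FragListCount == 2) gains a
 * pad descriptor further down, leaving an odd total of three. Splitting the
 * first element into two SegmentSize halves brings the count back to four,
 * satisfying hardware bug (1) while the pad descriptor satisfies (2). (The
 * odd-count branch above also splits, which the original comment does not
 * explain; that behaviour is preserved as-is.)
 */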
884 spin_lock_irqsave(&pAdapter->SendHWLock, lockflags1);
886 if (pAdapter->TxRing.txDmaReadyToSend.bits.serv_req_wrap ==
887 ServiceComplete.bits.serv_cpl_wrap) {
888 /* The ring hasn't wrapped. Slots available should be
889 * (RING_SIZE) - the difference between the two pointers.
891 SlotsAvailable = NUM_DESC_PER_RING_TX -
892 (pAdapter->TxRing.txDmaReadyToSend.bits.serv_req -
893 ServiceComplete.bits.serv_cpl);
895 /* The ring has wrapped. Slots available should be the
896 * difference between the two pointers.
898 SlotsAvailable = ServiceComplete.bits.serv_cpl -
899 pAdapter->TxRing.txDmaReadyToSend.bits.serv_req;
902 if ((FragListCount + iSplitFirstElement) > SlotsAvailable) {
903 DBG_WARNING(et131x_dbginfo,
904 "Not Enough Space in Tx Desc Ring\n");
905 spin_unlock_irqrestore(&pAdapter->SendHWLock, lockflags1);
909 loopEnd = (FragListCount) + iSplitFirstElement;
912 DBG_TX(et131x_dbginfo,
914 "Packet (SKB) : 0x%p\t Packet->len: %d\t Packet->data_len: %d\n"
915 "FragListCount : %d\t iSplitFirstElement: %d\t loopEnd:%d\n",
917 pPacket, pPacket->len, pPacket->data_len,
918 FragListCount, iSplitFirstElement, loopEnd);
920 for (loopIndex = 0; loopIndex < loopEnd; loopIndex++) {
921 if (loopIndex > iSplitFirstElement) {
925 DBG_TX(et131x_dbginfo,
926 "In loop, loopIndex: %d\t fragIndex: %d\n", loopIndex,
929 /* If there is something in this element, let's get a
930 * descriptor from the ring and get the necessary data
932 DBG_TX(et131x_dbginfo,
934 "filling desc entry %d\n",
936 pAdapter->TxRing.txDmaReadyToSend.bits.serv_req);
938 // NOTE - Should we do a paranoia check here to make sure the fragment
939 // actually has a length? It's HIGHLY unlikely the fragment would
940 // contain no data...
942 // NOTE - Currently always getting 32-bit addrs, and dma_addr_t is
943 // only 32-bit, so leave "high" ptr value out for now
944 CurDesc.DataBufferPtrHigh = 0;
946 CurDesc.word2.value = 0;
947 CurDesc.word3.value = 0;
949 if (fragIndex == 0) {
950 if (iSplitFirstElement) {
951 DBG_TX(et131x_dbginfo,
952 "Split first element: YES\n");
954 if (loopIndex == 0) {
955 DBG_TX(et131x_dbginfo,
956 "Got fragment of length %d, fragIndex: %d\n",
960 DBG_TX(et131x_dbginfo,
967 CurDesc.DataBufferPtrLow =
968 pci_map_single(pAdapter->
974 DBG_TX(et131x_dbginfo,
975 "pci_map_single() returns: 0x%08x\n",
979 DBG_TX(et131x_dbginfo,
980 "Got fragment of length %d, fragIndex: %d\n",
984 DBG_TX(et131x_dbginfo,
985 "Leftover Size: %d\n",
995 CurDesc.DataBufferPtrLow =
996 pci_map_single(pAdapter->
1007 DBG_TX(et131x_dbginfo,
1008 "pci_map_single() returns: 0x%08x\n",
1013 DBG_TX(et131x_dbginfo,
1014 "Split first element: NO\n");
1016 CurDesc.word2.bits.length_in_bytes =
1017 pPacket->len - pPacket->data_len;
1019 CurDesc.DataBufferPtrLow =
1020 pci_map_single(pAdapter->pdev,
1025 DBG_TX(et131x_dbginfo,
1026 "pci_map_single() returns: 0x%08x\n",
1027 CurDesc.DataBufferPtrLow);
1031 CurDesc.word2.bits.length_in_bytes =
1032 pFragList[fragIndex - 1].size;
1033 CurDesc.DataBufferPtrLow =
1034 pci_map_page(pAdapter->pdev,
1035 pFragList[fragIndex - 1].page,
1036 pFragList[fragIndex -
1038 pFragList[fragIndex - 1].size,
1040 DBG_TX(et131x_dbginfo,
1041 "pci_map_page() returns: 0x%08x\n",
1042 CurDesc.DataBufferPtrLow);
1045 if (loopIndex == 0) {
1046 /* This is the first descriptor of the packet
1048 * Set the "f" bit to indicate this is the
1049 * first descriptor in the packet.
1051 DBG_TX(et131x_dbginfo,
1052 "This is our FIRST descriptor\n");
1053 CurDesc.word3.bits.f = 1;
1055 pMpTcb->WrIndexStart =
1056 pAdapter->TxRing.txDmaReadyToSend;
1059 if ((loopIndex == (loopEnd - 1)) &&
1060 (pAdapter->uiDuplexMode ||
1061 (pMpTcb->PacketLength >= NIC_MIN_PACKET_SIZE))) {
1062 /* This is the Last descriptor of the packet */
1063 DBG_TX(et131x_dbginfo,
1064 "THIS is our LAST descriptor\n");
1066 if (pAdapter->uiLinkSpeed ==
1067 TRUEPHY_SPEED_1000MBPS) {
1068 if (++pAdapter->TxRing.
1069 TxPacketsSinceLastinterrupt >=
1070 pAdapter->RegistryTxNumBuffers) {
1071 CurDesc.word3.value = 0x5;
1073 TxPacketsSinceLastinterrupt
1076 CurDesc.word3.value = 0x1;
1079 CurDesc.word3.value = 0x5;
1082 /* Following index will be used during freeing
1086 pAdapter->TxRing.txDmaReadyToSend;
1087 pMpTcb->PacketStaleCount = 0;
1090 /* Copy the descriptor (filled above) into the
1091 * descriptor ring at the next free entry. Advance
1092 * the "next free entry" variable
1094 memcpy(pAdapter->TxRing.pTxDescRingVa +
1095 pAdapter->TxRing.txDmaReadyToSend.bits.serv_req,
1096 &CurDesc, sizeof(TX_DESC_ENTRY_t));
1099 pAdapter->TxRing.pTxDescRingVa +
1100 pAdapter->TxRing.txDmaReadyToSend.bits.serv_req;
1102 DBG_TX(et131x_dbginfo,
1103 "CURRENT DESCRIPTOR\n"
1104 "\tAddress : 0x%p\n"
1105 "\tDataBufferPtrHigh : 0x%08x\n"
1106 "\tDataBufferPtrLow : 0x%08x\n"
1107 "\tword2 : 0x%08x\n"
1108 "\tword3 : 0x%08x\n",
1110 CurDescPostCopy->DataBufferPtrHigh,
1111 CurDescPostCopy->DataBufferPtrLow,
1112 CurDescPostCopy->word2.value,
1113 CurDescPostCopy->word3.value);
1115 if (++pAdapter->TxRing.txDmaReadyToSend.bits.serv_req >=
1116 NUM_DESC_PER_RING_TX) {
1117 if (pAdapter->TxRing.txDmaReadyToSend.bits.
1119 pAdapter->TxRing.txDmaReadyToSend.
1122 pAdapter->TxRing.txDmaReadyToSend.
1129 if (pAdapter->uiDuplexMode == 0 &&
1130 pMpTcb->PacketLength < NIC_MIN_PACKET_SIZE) {
1131 // NOTE - Same 32/64-bit issue as above...
1132 CurDesc.DataBufferPtrHigh = 0x0;
1133 CurDesc.DataBufferPtrLow = pAdapter->TxRing.pTxDummyBlkPa;
1134 CurDesc.word2.value = 0;
1136 if (pAdapter->uiLinkSpeed == TRUEPHY_SPEED_1000MBPS) {
1137 if (++pAdapter->TxRing.TxPacketsSinceLastinterrupt >=
1138 pAdapter->RegistryTxNumBuffers) {
1139 CurDesc.word3.value = 0x5;
1140 pAdapter->TxRing.TxPacketsSinceLastinterrupt =
1143 CurDesc.word3.value = 0x1;
1146 CurDesc.word3.value = 0x5;
1149 CurDesc.word2.bits.length_in_bytes =
1150 NIC_MIN_PACKET_SIZE - pMpTcb->PacketLength;
1152 pMpTcb->WrIndex = pAdapter->TxRing.txDmaReadyToSend;
1154 memcpy(pAdapter->TxRing.pTxDescRingVa +
1155 pAdapter->TxRing.txDmaReadyToSend.bits.serv_req,
1156 &CurDesc, sizeof(TX_DESC_ENTRY_t));
1159 pAdapter->TxRing.pTxDescRingVa +
1160 pAdapter->TxRing.txDmaReadyToSend.bits.serv_req;
1162 DBG_TX(et131x_dbginfo,
1163 "CURRENT DESCRIPTOR\n"
1164 "\tAddress : 0x%p\n"
1165 "\tDataBufferPtrHigh : 0x%08x\n"
1166 "\tDataBufferPtrLow : 0x%08x\n"
1167 "\tword2 : 0x%08x\n"
1168 "\tword3 : 0x%08x\n",
1170 CurDescPostCopy->DataBufferPtrHigh,
1171 CurDescPostCopy->DataBufferPtrLow,
1172 CurDescPostCopy->word2.value,
1173 CurDescPostCopy->word3.value);
1175 if (++pAdapter->TxRing.txDmaReadyToSend.bits.serv_req >=
1176 NUM_DESC_PER_RING_TX) {
1177 if (pAdapter->TxRing.txDmaReadyToSend.bits.
1179 pAdapter->TxRing.txDmaReadyToSend.value = 0;
1181 pAdapter->TxRing.txDmaReadyToSend.value = 0x400;
1185 DBG_TX(et131x_dbginfo, "Padding descriptor %d by %d bytes\n",
1186 //pAdapter->TxRing.txDmaReadyToSend.value,
1187 pAdapter->TxRing.txDmaReadyToSend.bits.serv_req,
1188 NIC_MIN_PACKET_SIZE - pMpTcb->PacketLength);
1191 spin_lock_irqsave(&pAdapter->TCBSendQLock, lockflags2);
1193 if (pAdapter->TxRing.CurrSendTail) {
1194 pAdapter->TxRing.CurrSendTail->Next = pMpTcb;
1196 pAdapter->TxRing.CurrSendHead = pMpTcb;
1199 pAdapter->TxRing.CurrSendTail = pMpTcb;
1201 DBG_ASSERT(pMpTcb->Next == NULL);
1203 pAdapter->TxRing.nBusySend++;
1205 spin_unlock_irqrestore(&pAdapter->TCBSendQLock, lockflags2);
1207 /* Write the new write pointer back to the device. */
1208 writel(pAdapter->TxRing.txDmaReadyToSend.value,
1209 &pAdapter->CSRAddress->txdma.service_request.value);
1211 #ifdef CONFIG_ET131X_DEBUG
1212 DumpDeviceBlock(DBG_TX_ON, pAdapter, 1);
1215 /* For Gig only, we use Tx Interrupt coalescing. Enable the software
1216 * timer to wake us up if this packet isn't followed by N more.
1218 if (pAdapter->uiLinkSpeed == TRUEPHY_SPEED_1000MBPS) {
1219 writel(pAdapter->RegistryTxTimeInterval * NANO_IN_A_MICRO,
1220 &pAdapter->CSRAddress->global.watchdog_timer);
1223 spin_unlock_irqrestore(&pAdapter->SendHWLock, lockflags1);
1225 DBG_TX_LEAVE(et131x_dbginfo);
1232 * et131x_free_send_packet - Recycle a MP_TCB, complete the packet if necessary
1233 * @pAdapter: pointer to our adapter
1234 * @pMpTcb: pointer to MP_TCB
1236 * Assumption - Send spinlock has been acquired
1238 __inline void et131x_free_send_packet(struct et131x_adapter *pAdapter, PMP_TCB pMpTcb)
1240 unsigned long lockflags;
1241 TX_DESC_ENTRY_t *desc = NULL;
1242 struct net_device_stats *stats = &pAdapter->net_stats;
1244 if (MP_TEST_FLAG(pMpTcb, fMP_DEST_BROAD)) {
1245 atomic_inc(&pAdapter->Stats.brdcstxmt);
1246 } else if (MP_TEST_FLAG(pMpTcb, fMP_DEST_MULTI)) {
1247 atomic_inc(&pAdapter->Stats.multixmt);
1249 atomic_inc(&pAdapter->Stats.unixmt);
1252 if (pMpTcb->Packet) {
1253 stats->tx_bytes += pMpTcb->Packet->len;
1255 /* Iterate through the TX descriptors on the ring
1256 * corresponding to this packet and unmap the fragments
1259 DBG_TX(et131x_dbginfo,
1260 "Unmap descriptors Here\n"
1263 "TCB PacketLength : %d\n"
1264 "TCB WrIndex.value : 0x%08x\n"
1265 "TCB WrIndex.bits.val : %d\n"
1266 "TCB WrIndex.value : 0x%08x\n"
1267 "TCB WrIndex.bits.val : %d\n",
1270 pMpTcb->PacketLength,
1271 pMpTcb->WrIndexStart.value,
1272 pMpTcb->WrIndexStart.bits.val,
1273 pMpTcb->WrIndex.value,
1274 pMpTcb->WrIndex.bits.val);
1278 (TX_DESC_ENTRY_t *) (pAdapter->TxRing.
1280 pMpTcb->WrIndexStart.bits.val);
1282 DBG_TX(et131x_dbginfo,
1283 "CURRENT DESCRIPTOR\n"
1284 "\tAddress : 0x%p\n"
1285 "\tDataBufferPtrHigh : 0x%08x\n"
1286 "\tDataBufferPtrLow : 0x%08x\n"
1287 "\tword2 : 0x%08x\n"
1288 "\tword3 : 0x%08x\n",
1290 desc->DataBufferPtrHigh,
1291 desc->DataBufferPtrLow,
1295 pci_unmap_single(pAdapter->pdev,
1296 desc->DataBufferPtrLow,
1297 desc->word2.value, PCI_DMA_TODEVICE);
1299 if (++pMpTcb->WrIndexStart.bits.val >=
1300 NUM_DESC_PER_RING_TX) {
1301 if (pMpTcb->WrIndexStart.bits.wrap) {
1302 pMpTcb->WrIndexStart.value = 0;
1304 pMpTcb->WrIndexStart.value = 0x400;
1308 while (desc != (pAdapter->TxRing.pTxDescRingVa +
1309 pMpTcb->WrIndex.bits.val));
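/* Sketch of the walk just performed: starting at WrIndexStart (the first
 * descriptor this packet used) the loop releases each descriptor's DMA
 * mapping until it reaches WrIndex (the last one), conceptually:
 *
 *	for (i = start; i != last; i = (i + 1) % ring_size)
 *		pci_unmap_single(pdev, ring[i].DataBufferPtrLow,
 *				 ring[i].word2.value, PCI_DMA_TODEVICE);
 *
 * (A hypothetical loop shown only to illustrate the traversal; the real code
 * above also maintains the 0x400 wrap flag while advancing.)
 */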
1311 DBG_TX(et131x_dbginfo,
1312 "Free Packet (SKB) : 0x%p\n", pMpTcb->Packet);
1314 dev_kfree_skb_any(pMpTcb->Packet);
1317 memset(pMpTcb, 0, sizeof(MP_TCB));
1319 /* Add the TCB to the Ready Q */
1320 spin_lock_irqsave(&pAdapter->TCBReadyQLock, lockflags);
1322 pAdapter->Stats.opackets++;
1324 if (pAdapter->TxRing.TCBReadyQueueTail) {
1325 pAdapter->TxRing.TCBReadyQueueTail->Next = pMpTcb;
1327 /* Apparently ready Q is empty. */
1328 pAdapter->TxRing.TCBReadyQueueHead = pMpTcb;
1331 pAdapter->TxRing.TCBReadyQueueTail = pMpTcb;
1333 spin_unlock_irqrestore(&pAdapter->TCBReadyQLock, lockflags);
1335 DBG_ASSERT(pAdapter->TxRing.nBusySend >= 0);
1339 * et131x_free_busy_send_packets - Free and complete the stopped active sends
1340 * @pAdapter: pointer to our adapter
1342 * Assumption - Send spinlock has been acquired
1344 void et131x_free_busy_send_packets(struct et131x_adapter *pAdapter)
1347 struct list_head *pEntry;
1348 struct sk_buff *pPacket = NULL;
1349 unsigned long lockflags;
1350 uint32_t FreeCounter = 0;
1352 DBG_ENTER(et131x_dbginfo);
1354 while (!list_empty(&pAdapter->TxRing.SendWaitQueue)) {
1355 spin_lock_irqsave(&pAdapter->SendWaitLock, lockflags);
1357 pAdapter->TxRing.nWaitSend--;
1358 spin_unlock_irqrestore(&pAdapter->SendWaitLock, lockflags);
1360 pEntry = pAdapter->TxRing.SendWaitQueue.next;
1365 pAdapter->TxRing.nWaitSend = 0;
1367 /* Any packets being sent? Check the first TCB on the send list */
1368 spin_lock_irqsave(&pAdapter->TCBSendQLock, lockflags);
1370 pMpTcb = pAdapter->TxRing.CurrSendHead;
1372 while ((pMpTcb != NULL) && (FreeCounter < NUM_TCB)) {
1373 PMP_TCB pNext = pMpTcb->Next;
1375 pAdapter->TxRing.CurrSendHead = pNext;
1377 if (pNext == NULL) {
1378 pAdapter->TxRing.CurrSendTail = NULL;
1381 pAdapter->TxRing.nBusySend--;
1383 spin_unlock_irqrestore(&pAdapter->TCBSendQLock, lockflags);
1385 DBG_VERBOSE(et131x_dbginfo, "pMpTcb = 0x%p\n", pMpTcb);
1388 MP_FREE_SEND_PACKET_FUN(pAdapter, pMpTcb);
1390 spin_lock_irqsave(&pAdapter->TCBSendQLock, lockflags);
1392 pMpTcb = pAdapter->TxRing.CurrSendHead;
1395 if (FreeCounter == NUM_TCB) {
1396 DBG_ERROR(et131x_dbginfo,
1397 "MpFreeBusySendPackets exitted loop for a bad reason\n");
1401 spin_unlock_irqrestore(&pAdapter->TCBSendQLock, lockflags);
1403 pAdapter->TxRing.nBusySend = 0;
1405 DBG_LEAVE(et131x_dbginfo);
1409 * et131x_handle_send_interrupt - Interrupt handler for sending processing
1410 * @pAdapter: pointer to our adapter
1412 * Re-claim the send resources, complete sends and get more to send from
1413 * the send wait queue.
1415 * Assumption - Send spinlock has been acquired
1417 void et131x_handle_send_interrupt(struct et131x_adapter *pAdapter)
1419 DBG_TX_ENTER(et131x_dbginfo);
1421 /* Mark as completed any packets which have been sent by the device. */
1422 et131x_update_tcb_list(pAdapter);
1424 /* If we queued any transmits because we didn't have any TCBs earlier,
1425 * dequeue and send those packets now, as long as we have free TCBs.
1427 et131x_check_send_wait_list(pAdapter);
1429 DBG_TX_LEAVE(et131x_dbginfo);
1433 * et131x_update_tcb_list - Helper routine for Send Interrupt handler
1434 * @pAdapter: pointer to our adapter
1436 * Re-claims the send resources and completes sends. Can also be called as
1437 * part of the NIC send routine when the "ServiceComplete" indication has been seen.
1440 static void et131x_update_tcb_list(struct et131x_adapter *pAdapter)
1442 unsigned long lockflags;
1443 DMA10W_t ServiceComplete;
1446 ServiceComplete.value =
1447 readl(&pAdapter->CSRAddress->txdma.NewServiceComplete.value);
1449 /* Has the ring wrapped? Process any descriptors that do not have
1450 * the same "wrap" indicator as the current completion indicator
1452 spin_lock_irqsave(&pAdapter->TCBSendQLock, lockflags);
1454 pMpTcb = pAdapter->TxRing.CurrSendHead;
1456 ServiceComplete.bits.wrap != pMpTcb->WrIndex.bits.wrap &&
1457 ServiceComplete.bits.val < pMpTcb->WrIndex.bits.val) {
1458 pAdapter->TxRing.nBusySend--;
1459 pAdapter->TxRing.CurrSendHead = pMpTcb->Next;
1460 if (pMpTcb->Next == NULL) {
1461 pAdapter->TxRing.CurrSendTail = NULL;
1464 spin_unlock_irqrestore(&pAdapter->TCBSendQLock, lockflags);
1465 MP_FREE_SEND_PACKET_FUN(pAdapter, pMpTcb);
1466 spin_lock_irqsave(&pAdapter->TCBSendQLock, lockflags);
1468 /* Goto the next packet */
1469 pMpTcb = pAdapter->TxRing.CurrSendHead;
1472 ServiceComplete.bits.wrap == pMpTcb->WrIndex.bits.wrap &&
1473 ServiceComplete.bits.val > pMpTcb->WrIndex.bits.val) {
1474 pAdapter->TxRing.nBusySend--;
1475 pAdapter->TxRing.CurrSendHead = pMpTcb->Next;
1476 if (pMpTcb->Next == NULL) {
1477 pAdapter->TxRing.CurrSendTail = NULL;
1480 spin_unlock_irqrestore(&pAdapter->TCBSendQLock, lockflags);
1481 MP_FREE_SEND_PACKET_FUN(pAdapter, pMpTcb);
1482 spin_lock_irqsave(&pAdapter->TCBSendQLock, lockflags);
1484 /* Goto the next packet */
1485 pMpTcb = pAdapter->TxRing.CurrSendHead;
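/* Completion test in words (sketch): the hardware's ServiceComplete index
 * chases txDmaReadyToSend around the ring, and a TCB is done once
 * ServiceComplete has moved past that TCB's WrIndex. That is what the two
 * loops above check:
 *
 *	wrap bits differ -> the hardware has already wrapped past us, so the
 *	                    TCB is done while its index is still below ours
 *	wrap bits equal  -> the TCB is done once its index is above ours
 */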
1488 /* Wake up the queue when we hit a low-water mark */
1489 if (pAdapter->TxRing.nBusySend <= (NUM_TCB / 3)) {
1490 netif_wake_queue(pAdapter->netdev);
1493 spin_unlock_irqrestore(&pAdapter->TCBSendQLock, lockflags);
1497 * et131x_check_send_wait_list - Helper routine for the interrupt handler
1498 * @pAdapter: pointer to our adapter
1500 * Takes packets from the send wait queue and posts them to the device (if TCB resources are available).
1503 static void et131x_check_send_wait_list(struct et131x_adapter *pAdapter)
1505 unsigned long lockflags;
1507 spin_lock_irqsave(&pAdapter->SendWaitLock, lockflags);
1509 while (!list_empty(&pAdapter->TxRing.SendWaitQueue) &&
1510 MP_TCB_RESOURCES_AVAILABLE(pAdapter)) {
1511 struct list_head *pEntry;
1513 DBG_VERBOSE(et131x_dbginfo, "Tx packets on the wait queue\n");
1515 pEntry = pAdapter->TxRing.SendWaitQueue.next;
1517 pAdapter->TxRing.nWaitSend--;
1519 DBG_WARNING(et131x_dbginfo,
1520 "MpHandleSendInterrupt - sent a queued pkt. Waiting %d\n",
1521 pAdapter->TxRing.nWaitSend);
1524 spin_unlock_irqrestore(&pAdapter->SendWaitLock, lockflags);