1 /*
2  * Agere Systems Inc.
3  * 10/100/1000 Base-T Ethernet Driver for the ET1310 and ET131x series MACs
4  *
5  * Copyright © 2005 Agere Systems Inc.
6  * All rights reserved.
7  *   http://www.agere.com
8  *
9  *------------------------------------------------------------------------------
10  *
11  * et1310_tx.c - Routines used to perform data transmission.
12  *
13  *------------------------------------------------------------------------------
14  *
15  * SOFTWARE LICENSE
16  *
17  * This software is provided subject to the following terms and conditions,
18  * which you should read carefully before using the software.  Using this
19  * software indicates your acceptance of these terms and conditions.  If you do
20  * not agree with these terms and conditions, do not use the software.
21  *
22  * Copyright © 2005 Agere Systems Inc.
23  * All rights reserved.
24  *
25  * Redistribution and use in source or binary forms, with or without
26  * modifications, are permitted provided that the following conditions are met:
27  *
28  * . Redistributions of source code must retain the above copyright notice, this
29  *    list of conditions and the following Disclaimer as comments in the code as
30  *    well as in the documentation and/or other materials provided with the
31  *    distribution.
32  *
33  * . Redistributions in binary form must reproduce the above copyright notice,
34  *    this list of conditions and the following Disclaimer in the documentation
35  *    and/or other materials provided with the distribution.
36  *
37  * . Neither the name of Agere Systems Inc. nor the names of the contributors
38  *    may be used to endorse or promote products derived from this software
39  *    without specific prior written permission.
40  *
41  * Disclaimer
42  *
43  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
44  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
45  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
46  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
47  * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
48  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
49  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
50  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
51  * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
52  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54  * DAMAGE.
55  *
56  */
57
58 #include "et131x_version.h"
59 #include "et131x_debug.h"
60 #include "et131x_defs.h"
61
62 #include <linux/pci.h>
63 #include <linux/init.h>
64 #include <linux/module.h>
65 #include <linux/types.h>
66 #include <linux/kernel.h>
67
68 #include <linux/sched.h>
69 #include <linux/ptrace.h>
70 #include <linux/slab.h>
71 #include <linux/ctype.h>
72 #include <linux/string.h>
73 #include <linux/timer.h>
74 #include <linux/interrupt.h>
75 #include <linux/in.h>
76 #include <linux/delay.h>
77 #include <asm/io.h>
78 #include <asm/system.h>
79 #include <asm/bitops.h>
80
81 #include <linux/netdevice.h>
82 #include <linux/etherdevice.h>
83 #include <linux/skbuff.h>
84 #include <linux/if_arp.h>
85 #include <linux/ioport.h>
86
87 #include "et1310_phy.h"
88 #include "et1310_pm.h"
89 #include "et1310_jagcore.h"
90
91 #include "et131x_adapter.h"
92 #include "et131x_initpci.h"
93 #include "et131x_isr.h"
94
95 #include "et1310_tx.h"
96
97 /* Data for debugging facilities */
98 #ifdef CONFIG_ET131X_DEBUG
99 extern dbg_info_t *et131x_dbginfo;
100 #endif /* CONFIG_ET131X_DEBUG */
101
102 static void et131x_update_tcb_list(struct et131x_adapter *pAdapter);
103 static void et131x_check_send_wait_list(struct et131x_adapter *pAdapter);
104 static inline void et131x_free_send_packet(struct et131x_adapter *pAdapter,
105                                            PMP_TCB pMpTcb);
106 static int et131x_send_packet(struct sk_buff *skb,
107                               struct et131x_adapter *pAdapter);
108 static int nic_send_packet(struct et131x_adapter *pAdapter, PMP_TCB pMpTcb);
109
110 /**
111  * et131x_tx_dma_memory_alloc - Allocate Tx DMA resources for the device
112  * @adapter: pointer to our private adapter structure
113  *
114  * Returns 0 on success and errno on failure (as defined in errno.h).
115  *
116  * Allocates memory that will be visible both to the device and to the CPU.
117  * The OS will pass us packets, pointers to which we will insert in the Tx
118  * Descriptor queue. The device will read this queue to find the packets in
119  * memory. The device will update the "status" in memory each time it xmits a
120  * packet.
121  */
122 int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
123 {
124         int desc_size = 0;
125         TX_RING_t *tx_ring = &adapter->TxRing;
126
127         DBG_ENTER(et131x_dbginfo);
128
129         /* Allocate memory for the TCBs (Transmit Control Blocks) */
130         adapter->TxRing.MpTcbMem = (MP_TCB *) kcalloc(NUM_TCB, sizeof(MP_TCB),
131                                                       GFP_ATOMIC | GFP_DMA);
132         if (!adapter->TxRing.MpTcbMem) {
133                 DBG_ERROR(et131x_dbginfo, "Cannot alloc memory for TCBs\n");
134                 DBG_LEAVE(et131x_dbginfo);
135                 return -ENOMEM;
136         }
137
138         /* Allocate enough memory for the Tx descriptor ring, and allocate
139          * some extra so that the ring can be aligned on a 4k boundary.
140          */
141         desc_size = (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;
142         tx_ring->pTxDescRingVa =
143             (PTX_DESC_ENTRY_t) pci_alloc_consistent(adapter->pdev, desc_size,
144                                                     &tx_ring->pTxDescRingPa);
145         if (!adapter->TxRing.pTxDescRingVa) {
146                 DBG_ERROR(et131x_dbginfo, "Cannot alloc memory for Tx Ring\n");
147                 DBG_LEAVE(et131x_dbginfo);
148                 return -ENOMEM;
149         }
150
151         /* Save physical address
152          *
153          * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
154          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
155          * are ever returned, make sure the high part is retrieved here before
156          * storing the adjusted address.
157          */
158         tx_ring->pTxDescRingAdjustedPa = tx_ring->pTxDescRingPa;
159
160         /* Align Tx Descriptor Ring on a 4k (0x1000) byte boundary */
161         et131x_align_allocated_memory(adapter,
162                                       &tx_ring->pTxDescRingAdjustedPa,
163                                       &tx_ring->TxDescOffset, 0x0FFF);
164
165         tx_ring->pTxDescRingVa += tx_ring->TxDescOffset;
166
167         /* Allocate memory for the Tx status block */
168         tx_ring->pTxStatusVa = pci_alloc_consistent(adapter->pdev,
169                                                     sizeof(TX_STATUS_BLOCK_t),
170                                                     &tx_ring->pTxStatusPa);
171         if (!adapter->TxRing.pTxStatusVa) {
172                 DBG_ERROR(et131x_dbginfo,
173                           "Cannot alloc memory for Tx status block\n");
174                 DBG_LEAVE(et131x_dbginfo);
175                 return -ENOMEM;
176         }
177
178         /* Allocate memory for a dummy buffer */
179         tx_ring->pTxDummyBlkVa = pci_alloc_consistent(adapter->pdev,
180                                                       NIC_MIN_PACKET_SIZE,
181                                                       &tx_ring->pTxDummyBlkPa);
182         if (!adapter->TxRing.pTxDummyBlkVa) {
183                 DBG_ERROR(et131x_dbginfo,
184                           "Cannot alloc memory for Tx dummy buffer\n");
185                 DBG_LEAVE(et131x_dbginfo);
186                 return -ENOMEM;
187         }
188
189         DBG_LEAVE(et131x_dbginfo);
190         return 0;
191 }
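/* Editor's note: a minimal sketch of the 4k-alignment arithmetic that
 * et131x_align_allocated_memory() is expected to perform at the call
 * site above (mask 0x0FFF, offset out-parameter). The helper below is
 * hypothetical and illustrative only; the real implementation lives
 * elsewhere in this driver.
 */
#if 0	/* illustrative sketch, not part of the driver */
static void example_align_to_4k(uint64_t *phys_addr, uint32_t *offset,
				uint64_t mask /* 0x0FFF for 4k */)
{
	/* Distance from *phys_addr up to the next (mask + 1) boundary */
	*offset = (uint32_t)(((*phys_addr + mask) & ~mask) - *phys_addr);

	/* Round the physical address up; the caller adds the same offset
	 * to the virtual address so VA and PA stay in step.
	 */
	*phys_addr += *offset;
}
#endif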
192
193 /**
194  * et131x_tx_dma_memory_free - Free all memory allocated within this module
195  * @adapter: pointer to our private adapter structure
198  */
199 void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
200 {
201         int desc_size = 0;
202
203         DBG_ENTER(et131x_dbginfo);
204
205         if (adapter->TxRing.pTxDescRingVa) {
206                 /* Free memory relating to Tx rings here */
207                 adapter->TxRing.pTxDescRingVa -= adapter->TxRing.TxDescOffset;
208
209                 desc_size =
210                     (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;
211
212                 pci_free_consistent(adapter->pdev,
213                                     desc_size,
214                                     adapter->TxRing.pTxDescRingVa,
215                                     adapter->TxRing.pTxDescRingPa);
216
217                 adapter->TxRing.pTxDescRingVa = NULL;
218         }
219
220         /* Free memory for the Tx status block */
221         if (adapter->TxRing.pTxStatusVa) {
222                 pci_free_consistent(adapter->pdev,
223                                     sizeof(TX_STATUS_BLOCK_t),
224                                     adapter->TxRing.pTxStatusVa,
225                                     adapter->TxRing.pTxStatusPa);
226
227                 adapter->TxRing.pTxStatusVa = NULL;
228         }
229
230         /* Free memory for the dummy buffer */
231         if (adapter->TxRing.pTxDummyBlkVa) {
232                 pci_free_consistent(adapter->pdev,
233                                     NIC_MIN_PACKET_SIZE,
234                                     adapter->TxRing.pTxDummyBlkVa,
235                                     adapter->TxRing.pTxDummyBlkPa);
236
237                 adapter->TxRing.pTxDummyBlkVa = NULL;
238         }
239
240         /* Free the memory for MP_TCB structures */
241         if (adapter->TxRing.MpTcbMem) {
242                 kfree(adapter->TxRing.MpTcbMem);
243                 adapter->TxRing.MpTcbMem = NULL;
244         }
245
246         DBG_LEAVE(et131x_dbginfo);
247 }
248
249 /**
250  * ConfigTxDmaRegs - Set up the tx dma section of the JAGCore.
251  * @pAdapter: pointer to our private adapter structure
252  */
253 void ConfigTxDmaRegs(struct et131x_adapter *pAdapter)
254 {
255         struct _TXDMA_t __iomem *pTxDma = &pAdapter->CSRAddress->txdma;
256
257         DBG_ENTER(et131x_dbginfo);
258
259         /* Load the hardware with the start of the transmit descriptor ring. */
260         writel((uint32_t) (pAdapter->TxRing.pTxDescRingAdjustedPa >> 32),
261                &pTxDma->pr_base_hi);
262         writel((uint32_t) pAdapter->TxRing.pTxDescRingAdjustedPa,
263                &pTxDma->pr_base_lo);
264
265         /* Initialise the transmit DMA engine */
266         writel(NUM_DESC_PER_RING_TX - 1, &pTxDma->pr_num_des.value);
267
268         /* Load the completion writeback physical address
269          *
270          * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
271          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
272          * are ever returned, make sure the high part is retrieved here before
273          * storing the adjusted address.
274          */
275         writel(0, &pTxDma->dma_wb_base_hi);
276         writel(pAdapter->TxRing.pTxStatusPa, &pTxDma->dma_wb_base_lo);
277
278         memset(pAdapter->TxRing.pTxStatusVa, 0, sizeof(TX_STATUS_BLOCK_t));
279
280         writel(0, &pTxDma->service_request.value);
281         pAdapter->TxRing.txDmaReadyToSend.value = 0;
282
283         DBG_LEAVE(et131x_dbginfo);
284 }
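/* Editor's note: a hedged sketch of the hi/lo register split used by
 * ConfigTxDmaRegs() above. pci_alloc_consistent() currently guarantees
 * SAC (32-bit) addresses, so the high half is effectively zero; if DAC
 * (64-bit) addresses were ever returned, both halves would be programmed
 * like this. The helper name is illustrative only.
 */
#if 0	/* illustrative sketch, not part of the driver */
static void example_write_dma_base(uint64_t pa, void __iomem *hi_reg,
				   void __iomem *lo_reg)
{
	writel((uint32_t)(pa >> 32), hi_reg);		/* upper 32 bits */
	writel((uint32_t)(pa & 0xffffffff), lo_reg);	/* lower 32 bits */
}
#endif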
285
286 /**
287  * et131x_tx_dma_disable - Stop Tx_DMA on the ET1310
288  * @pAdapter: pointer to our adapter structure
289  */
290 void et131x_tx_dma_disable(struct et131x_adapter *pAdapter)
291 {
292         DBG_ENTER(et131x_dbginfo);
293
294         /* Set up the transmit dma configuration register */
295         writel(0x101, &pAdapter->CSRAddress->txdma.csr.value);
296
297         DBG_LEAVE(et131x_dbginfo);
298 }
299
300 /**
301  * et131x_tx_dma_enable - Re-start Tx_DMA on the ET1310.
302  * @pAdapter: pointer to our adapter structure
303  *
304  * Mainly used after a return to the D0 (full-power) state from a lower state.
305  */
306 void et131x_tx_dma_enable(struct et131x_adapter *pAdapter)
307 {
308         DBG_ENTER(et131x_dbginfo);
309
310         if (pAdapter->RegistryPhyLoopbk) {
311                 /* TxDMA is disabled for loopback operation. */
312                 writel(0x101, &pAdapter->CSRAddress->txdma.csr.value);
313         } else {
314                 TXDMA_CSR_t csr = { 0 };
315
316                 /* Set up the transmit dma configuration register for normal
317                  * operation
318                  */
319                 csr.bits.sngl_epkt_mode = 1;
320                 csr.bits.halt = 0;
321                 csr.bits.cache_thrshld = pAdapter->RegistryDMACache;
322                 writel(csr.value, &pAdapter->CSRAddress->txdma.csr.value);
323         }
324
325         DBG_LEAVE(et131x_dbginfo);
326 }
327
328 /**
329  * et131x_init_send - Initialize send data structures
330  * @adapter: pointer to our private adapter structure
331  */
332 void et131x_init_send(struct et131x_adapter *adapter)
333 {
334         PMP_TCB pMpTcb;
335         uint32_t TcbCount;
336         TX_RING_t *tx_ring;
337
338         DBG_ENTER(et131x_dbginfo);
339
340         /* Setup some convenience pointers */
341         tx_ring = &adapter->TxRing;
342         pMpTcb = adapter->TxRing.MpTcbMem;
343
344         tx_ring->TCBReadyQueueHead = pMpTcb;
345
346         /* Go through and set up each TCB */
347         for (TcbCount = 0; TcbCount < NUM_TCB; TcbCount++) {
348                 memset(pMpTcb, 0, sizeof(MP_TCB));
349
350                 /* Set the link pointer in HW TCB to the next TCB in the
351                  * chain.  If this is the last TCB in the chain, also set the
352                  * tail pointer.
353                  */
354                 if (TcbCount < NUM_TCB - 1) {
355                         pMpTcb->Next = pMpTcb + 1;
356                 } else {
357                         tx_ring->TCBReadyQueueTail = pMpTcb;
358                         pMpTcb->Next = (PMP_TCB) NULL;
359                 }
360
361                 pMpTcb++;
362         }
363
364         /* Curr send queue should now be empty */
365         tx_ring->CurrSendHead = (PMP_TCB) NULL;
366         tx_ring->CurrSendTail = (PMP_TCB) NULL;
367
368         INIT_LIST_HEAD(&adapter->TxRing.SendWaitQueue);
369
370         DBG_LEAVE(et131x_dbginfo);
371 }
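/* Editor's note: et131x_init_send() above threads the TCB array into a
 * singly linked free list with head/tail pointers. A minimal sketch of
 * the pop operation that et131x_send_packet() later performs on that
 * list under the ready-queue spinlock; the helper name is illustrative,
 * while the field and lock names match this file.
 */
#if 0	/* illustrative sketch, not part of the driver */
static PMP_TCB example_pop_ready_tcb(struct et131x_adapter *adapter)
{
	unsigned long flags;
	PMP_TCB tcb;

	spin_lock_irqsave(&adapter->TCBReadyQLock, flags);

	tcb = adapter->TxRing.TCBReadyQueueHead;
	if (tcb) {
		adapter->TxRing.TCBReadyQueueHead = tcb->Next;
		/* Popping the last TCB empties the queue: clear the tail */
		if (adapter->TxRing.TCBReadyQueueHead == NULL)
			adapter->TxRing.TCBReadyQueueTail = NULL;
		tcb->Next = NULL;
	}

	spin_unlock_irqrestore(&adapter->TCBReadyQLock, flags);
	return tcb;
}
#endif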
372
373 /**
374  * et131x_send_packets - This function is called by the OS to send packets
375  * @skb: the packet(s) to send
376  * @netdev: device on which to TX the above packet(s)
377  *
378  * Return 0 in almost all cases; non-zero value in extreme hard failure only
379  */
380 int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
381 {
382         int status = 0;
383         struct et131x_adapter *pAdapter = NULL;
384
385         DBG_TX_ENTER(et131x_dbginfo);
386
387         pAdapter = netdev_priv(netdev);
388
389         /* Send these packets
390          *
391  * NOTE: The Linux Tx entry point is only given one packet at a time
392  * to Tx, so PacketCount and its associated array make no sense here
393          */
394
395         /* Queue is not empty or TCB is not available */
396         if (!list_empty(&pAdapter->TxRing.SendWaitQueue) ||
397             MP_TCB_RESOURCES_NOT_AVAILABLE(pAdapter)) {
398                 /* NOTE: If there's an error on send, no need to queue the
399                  * packet under Linux; if we just send an error up to the
400                  * netif layer, it will resend the skb to us.
401                  */
402                 DBG_VERBOSE(et131x_dbginfo, "TCB Resources Not Available\n");
403                 status = -ENOMEM;
404         } else {
405                 /* We need to see if the link is up; if it's not, make the
406                  * netif layer think we're good and drop the packet
407                  */
409                 if (MP_SHOULD_FAIL_SEND(pAdapter) || pAdapter->DriverNoPhyAccess
410                     || !netif_carrier_ok(netdev)) {
411                         DBG_VERBOSE(et131x_dbginfo,
412                                     "Can't Tx, Link is DOWN; drop the packet\n");
413
414                         dev_kfree_skb_any(skb);
415                         skb = NULL;
416
417                         pAdapter->net_stats.tx_dropped++;
418                 } else {
419                         status = et131x_send_packet(skb, pAdapter);
420
421                         if (status == -ENOMEM) {
422
423                                 /* NOTE: If there's an error on send, no need
424                                  * to queue the packet under Linux; if we just
425                                  * send an error up to the netif layer, it
426                                  * will resend the skb to us.
427                                  */
428                                 DBG_WARNING(et131x_dbginfo,
429                                             "Resources problem, Queue tx packet\n");
430                         } else if (status != 0) {
431                                 /* On any other error, make netif think we're
432                                  * OK and drop the packet
433                                  */
434                                 DBG_WARNING(et131x_dbginfo,
435                                             "General error, drop packet\n");
436
437                                 dev_kfree_skb_any(skb);
438                                 skb = NULL;
439
440                                 pAdapter->net_stats.tx_dropped++;
441                         }
442                 }
443         }
444
445         DBG_TX_LEAVE(et131x_dbginfo);
446         return status;
447 }
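/* Editor's note: et131x_send_packets() is invoked from the driver's
 * hard_start_xmit hook (in et131x_netdev.c, not shown here). A hedged
 * sketch of how such a caller could map the -ENOMEM return onto the
 * stack's requeue contract of this kernel generation; the wrapper name
 * is illustrative only.
 */
#if 0	/* illustrative sketch, not part of the driver */
static int example_hard_start_xmit(struct sk_buff *skb,
				   struct net_device *netdev)
{
	/* A non-zero return asks the stack to retry the skb later;
	 * et131x_send_packets() reports -ENOMEM when a TCB or ring
	 * slot is unavailable.
	 */
	if (et131x_send_packets(skb, netdev))
		return NETDEV_TX_BUSY;

	return NETDEV_TX_OK;
}
#endif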
448
449 /**
450  * et131x_send_packet - Do the work to send a packet
451  * @skb: the packet(s) to send
452  * @pAdapter: a pointer to the device's private adapter structure
453  *
454  * Return 0 in almost all cases; non-zero value in extreme hard failure only.
455  *
456  * Assumption: Send spinlock has been acquired
457  */
458 static int et131x_send_packet(struct sk_buff *skb,
459                               struct et131x_adapter *pAdapter)
460 {
461         int status = 0;
462         PMP_TCB pMpTcb = NULL;
463         uint16_t *pShBufVa;
464         unsigned long lockflags;
465
466         DBG_TX_ENTER(et131x_dbginfo);
467
468         /* Is our buffer scattered, or contiguous? */
469         if (skb_shinfo(skb)->nr_frags == 0) {
470                 DBG_TX(et131x_dbginfo, "Scattered buffer: NO\n");
471         } else {
472                 DBG_TX(et131x_dbginfo, "Scattered buffer: YES, Num Frags: %d\n",
473                        skb_shinfo(skb)->nr_frags);
474         }
475
476         /* All packets must have at least a MAC address and a protocol type */
477         if (skb->len < ETH_HLEN) {
478                 DBG_ERROR(et131x_dbginfo,
479                           "Packet size < ETH_HLEN (14 bytes)\n");
480                 DBG_LEAVE(et131x_dbginfo);
481                 return -EIO;
482         }
483
484         /* Get a TCB for this packet */
485         spin_lock_irqsave(&pAdapter->TCBReadyQLock, lockflags);
486
487         pMpTcb = pAdapter->TxRing.TCBReadyQueueHead;
488
489         if (pMpTcb == NULL) {
490                 spin_unlock_irqrestore(&pAdapter->TCBReadyQLock, lockflags);
491
492                 DBG_WARNING(et131x_dbginfo, "Can't obtain a TCB\n");
493                 DBG_TX_LEAVE(et131x_dbginfo);
494                 return -ENOMEM;
495         }
496
497         pAdapter->TxRing.TCBReadyQueueHead = pMpTcb->Next;
498
499         if (pAdapter->TxRing.TCBReadyQueueHead == NULL) {
500                 pAdapter->TxRing.TCBReadyQueueTail = NULL;
501         }
502
503         spin_unlock_irqrestore(&pAdapter->TCBReadyQLock, lockflags);
504
505         pMpTcb->PacketLength = skb->len;
506         pMpTcb->Packet = skb;
507
508         if ((skb->data != NULL) && ((skb->len - skb->data_len) >= 6)) {
509                 pShBufVa = (uint16_t *) skb->data;
510
511                 if ((pShBufVa[0] == 0xffff) &&
512                     (pShBufVa[1] == 0xffff) && (pShBufVa[2] == 0xffff)) {
513                         MP_SET_FLAG(pMpTcb, fMP_DEST_BROAD);
514                 } else if ((pShBufVa[0] & 0x3) == 0x0001) {
515                         MP_SET_FLAG(pMpTcb, fMP_DEST_MULTI);
516                 }
517         }
518
519         pMpTcb->Next = NULL;
520
521         /* Call the NIC specific send handler. */
522         status = nic_send_packet(pAdapter, pMpTcb);
525
526         if (status != 0) {
527                 spin_lock_irqsave(&pAdapter->TCBReadyQLock, lockflags);
528
529                 if (pAdapter->TxRing.TCBReadyQueueTail) {
530                         pAdapter->TxRing.TCBReadyQueueTail->Next = pMpTcb;
531                 } else {
532                         /* Apparently ready Q is empty. */
533                         pAdapter->TxRing.TCBReadyQueueHead = pMpTcb;
534                 }
535
536                 pAdapter->TxRing.TCBReadyQueueTail = pMpTcb;
537
538                 spin_unlock_irqrestore(&pAdapter->TCBReadyQLock, lockflags);
539
540                 DBG_TX_LEAVE(et131x_dbginfo);
541                 return status;
542         }
543
544         DBG_ASSERT(pAdapter->TxRing.nBusySend <= NUM_TCB);
545
546         DBG_TX_LEAVE(et131x_dbginfo);
547         return 0;
548 }
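/* Editor's note: the destination-class test above reads the first six
 * bytes of skb->data as three 16-bit words. An equivalent formulation
 * using the standard helpers from <linux/etherdevice.h> (already
 * included by this file), shown for comparison only; the helper name is
 * illustrative.
 */
#if 0	/* illustrative sketch, not part of the driver */
static void example_classify_dest(struct sk_buff *skb, PMP_TCB pMpTcb)
{
	uint8_t *dest = skb->data;	/* destination MAC comes first */

	if (is_broadcast_ether_addr(dest))	/* ff:ff:ff:ff:ff:ff */
		MP_SET_FLAG(pMpTcb, fMP_DEST_BROAD);
	else if (is_multicast_ether_addr(dest))	/* I/G bit set */
		MP_SET_FLAG(pMpTcb, fMP_DEST_MULTI);
}
#endif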
549
550 /**
551  * nic_send_packet - NIC specific send handler for version B silicon.
552  * @pAdapter: pointer to our adapter
553  * @pMpTcb: pointer to MP_TCB
554  *
555  * Returns 0 or errno.
556  */
557 static int nic_send_packet(struct et131x_adapter *pAdapter, PMP_TCB pMpTcb)
558 {
559         uint32_t loopIndex;
560         TX_DESC_ENTRY_t CurDesc[24];
561         uint32_t FragmentNumber = 0;
562         uint32_t iThisCopy, iRemainder;
563         struct sk_buff *pPacket = pMpTcb->Packet;
564         uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
565         struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
566         unsigned long lockflags1, lockflags2;
567
568         DBG_TX_ENTER(et131x_dbginfo);
569
570         /* Part of the optimizations of this send routine restrict us to
571          * sending 24 fragments at a pass.  In practice we should never see
572          * more than 5 fragments.
573          *
574          * NOTE: The older version of this function (below) can handle any
575          * number of fragments. If needed, we can call this function,
576          * although it is less efficient.
577          */
578         if (FragListCount > 23) {
579                 DBG_TX_LEAVE(et131x_dbginfo);
580                 return -EIO;
581         }
582
583         memset(CurDesc, 0, sizeof(TX_DESC_ENTRY_t) * (FragListCount + 1));
584
585         for (loopIndex = 0; loopIndex < FragListCount; loopIndex++) {
586                 /* If there is something in this element, lets get a
587                  * descriptor from the ring and get the necessary data
588                  */
589                 if (loopIndex == 0) {
590                         /* If the fragments are smaller than a standard MTU,
591                          * then map them to a single descriptor in the Tx
592                          * Desc ring. However, if they're larger, as is
593                          * possible with support for jumbo packets, then
594                          * split them each across 2 descriptors.
595                          *
596                          * This will work until we determine why the hardware
597                          * doesn't seem to like large fragments.
598                          */
599                         if ((pPacket->len - pPacket->data_len) <= 1514) {
600                                 DBG_TX(et131x_dbginfo,
601                                        "Got packet of length %d, "
602                                        "filling desc entry %d, "
603                                        "TCB: 0x%p\n",
604                                        (pPacket->len - pPacket->data_len),
605                                        pAdapter->TxRing.txDmaReadyToSend.bits.val,
606                                        pMpTcb);
607
608                                 CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
609
610                                 CurDesc[FragmentNumber].word2.bits.length_in_bytes =
611                                     pPacket->len - pPacket->data_len;
613
614                                 /* NOTE: Here, the dma_addr_t returned from
615                                  * pci_map_single() is implicitly cast as a
616                                  * uint32_t. Although dma_addr_t can be
617                                  * 64-bit, the address returned by
618                                  * pci_map_single() is always 32-bit
619                                  * addressable (as defined by the pci/dma
620                                  * subsystem)
621                                  */
622                                 CurDesc[FragmentNumber++].DataBufferPtrLow =
623                                     pci_map_single(pAdapter->pdev,
624                                                    pPacket->data,
625                                                    pPacket->len -
626                                                    pPacket->data_len,
627                                                    PCI_DMA_TODEVICE);
628                         } else {
629                                 DBG_TX(et131x_dbginfo,
630                                        "Got packet of length %d, "
631                                        "filling desc entry %d, "
632                                        "TCB: 0x%p\n",
633                                        (pPacket->len - pPacket->data_len),
634                                        pAdapter->TxRing.txDmaReadyToSend.bits.val,
635                                        pMpTcb);
636
637                                 CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
638
639                                 CurDesc[FragmentNumber].word2.bits.length_in_bytes =
640                                     (pPacket->len - pPacket->data_len) / 2;
642
643                                 /* NOTE: Here, the dma_addr_t returned from
644                                  * pci_map_single() is implicitly cast as a
645                                  * uint32_t. Although dma_addr_t can be
646                                  * 64-bit, the address returned by
647                                  * pci_map_single() is always 32-bit
648                                  * addressable (as defined by the pci/dma
649                                  * subsystem)
650                                  */
651                                 CurDesc[FragmentNumber++].DataBufferPtrLow =
652                                     pci_map_single(pAdapter->pdev,
653                                                    pPacket->data,
654                                                    ((pPacket->len -
655                                                      pPacket->data_len) / 2),
656                                                    PCI_DMA_TODEVICE);
657                                 CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
658
659                                 CurDesc[FragmentNumber].word2.bits.length_in_bytes =
660                                     (pPacket->len - pPacket->data_len) / 2;
662
663                                 /* NOTE: Here, the dma_addr_t returned from
664                                  * pci_map_single() is implicitly cast as a
665                                  * uint32_t. Although dma_addr_t can be
666                                  * 64-bit, the address returned by
667                                  * pci_map_single() is always 32-bit
668                                  * addressable (as defined by the pci/dma
669                                  * subsystem)
670                                  */
671                                 CurDesc[FragmentNumber++].DataBufferPtrLow =
672                                     pci_map_single(pAdapter->pdev,
673                                                    pPacket->data +
674                                                    ((pPacket->len -
675                                                      pPacket->data_len) / 2),
676                                                    ((pPacket->len -
677                                                      pPacket->data_len) / 2),
678                                                    PCI_DMA_TODEVICE);
679                         }
680                 } else {
681                         DBG_TX(et131x_dbginfo,
682                                "Got fragment of length %d, "
683                                "filling desc entry %d, "
684                                "TCB: 0x%p\n",
685                                pFragList[loopIndex - 1].size,
686                                pAdapter->TxRing.txDmaReadyToSend.bits.val,
687                                pMpTcb);
688
689                         CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
690
691                         CurDesc[FragmentNumber].word2.bits.length_in_bytes =
692                             pFragList[loopIndex - 1].size;
693
694                         /* NOTE: Here, the dma_addr_t returned from
695                          * pci_map_page() is implicitly cast as a uint32_t.
696                          * Although dma_addr_t can be 64-bit, the address
697                          * returned by pci_map_page() is always 32-bit
698                          * addressable (as defined by the pci/dma subsystem)
699                          */
700                         CurDesc[FragmentNumber++].DataBufferPtrLow =
701                             pci_map_page(pAdapter->pdev,
702                                          pFragList[loopIndex - 1].page,
703                                          pFragList[loopIndex - 1].page_offset,
704                                          pFragList[loopIndex - 1].size,
705                                          PCI_DMA_TODEVICE);
706                 }
707         }
708
709         if (FragmentNumber == 0) {
710                 DBG_WARNING(et131x_dbginfo, "No. frags is 0\n");
711                 return -EIO;
712         }
713
714         if (pAdapter->uiLinkSpeed == TRUEPHY_SPEED_1000MBPS) {
715                 if (++pAdapter->TxRing.TxPacketsSinceLastinterrupt ==
716                     pAdapter->RegistryTxNumBuffers) {
717                         CurDesc[FragmentNumber - 1].word3.value = 0x5;
718                         pAdapter->TxRing.TxPacketsSinceLastinterrupt = 0;
719                 } else {
720                         CurDesc[FragmentNumber - 1].word3.value = 0x1;
721                 }
722         } else {
723                 CurDesc[FragmentNumber - 1].word3.value = 0x5;
724         }
725
726         CurDesc[0].word3.bits.f = 1;
727
728         pMpTcb->WrIndexStart = pAdapter->TxRing.txDmaReadyToSend;
729         pMpTcb->PacketStaleCount = 0;
730
731         spin_lock_irqsave(&pAdapter->SendHWLock, lockflags1);
732
733         iThisCopy =
734             NUM_DESC_PER_RING_TX - pAdapter->TxRing.txDmaReadyToSend.bits.val;
735
736         if (iThisCopy >= FragmentNumber) {
737                 iRemainder = 0;
738                 iThisCopy = FragmentNumber;
739         } else {
740                 iRemainder = FragmentNumber - iThisCopy;
741         }
742
743         memcpy(pAdapter->TxRing.pTxDescRingVa +
744                pAdapter->TxRing.txDmaReadyToSend.bits.val, CurDesc,
745                sizeof(TX_DESC_ENTRY_t) * iThisCopy);
746
747         pAdapter->TxRing.txDmaReadyToSend.bits.val += iThisCopy;
748
749         if ((pAdapter->TxRing.txDmaReadyToSend.bits.val == 0) ||
750             (pAdapter->TxRing.txDmaReadyToSend.bits.val ==
751              NUM_DESC_PER_RING_TX)) {
752                 if (pAdapter->TxRing.txDmaReadyToSend.bits.wrap) {
753                         pAdapter->TxRing.txDmaReadyToSend.value = 0;
754                 } else {
755                         pAdapter->TxRing.txDmaReadyToSend.value = 0x400;
756                 }
757         }
758
759         if (iRemainder) {
760                 memcpy(pAdapter->TxRing.pTxDescRingVa,
761                        CurDesc + iThisCopy,
762                        sizeof(TX_DESC_ENTRY_t) * iRemainder);
763
764                 pAdapter->TxRing.txDmaReadyToSend.bits.val += iRemainder;
765         }
766
767         if (pAdapter->TxRing.txDmaReadyToSend.bits.val == 0) {
768                 if (pAdapter->TxRing.txDmaReadyToSend.value) {
769                         pMpTcb->WrIndex.value = NUM_DESC_PER_RING_TX - 1;
770                 } else {
771                         pMpTcb->WrIndex.value =
772                             0x400 | (NUM_DESC_PER_RING_TX - 1);
773                 }
774         } else {
775                 pMpTcb->WrIndex.value =
776                     pAdapter->TxRing.txDmaReadyToSend.value - 1;
777         }
778
779         spin_lock_irqsave(&pAdapter->TCBSendQLock, lockflags2);
780
781         if (pAdapter->TxRing.CurrSendTail) {
782                 pAdapter->TxRing.CurrSendTail->Next = pMpTcb;
783         } else {
784                 pAdapter->TxRing.CurrSendHead = pMpTcb;
785         }
786
787         pAdapter->TxRing.CurrSendTail = pMpTcb;
788
789         DBG_ASSERT(pMpTcb->Next == NULL);
790
791         pAdapter->TxRing.nBusySend++;
792
793         spin_unlock_irqrestore(&pAdapter->TCBSendQLock, lockflags2);
794
795         /* Write the new write pointer back to the device. */
796         writel(pAdapter->TxRing.txDmaReadyToSend.value,
797                &pAdapter->CSRAddress->txdma.service_request.value);
798
799         /* For Gig only, we use Tx Interrupt coalescing.  Enable the software
800          * timer to wake us up if this packet isn't followed by N more.
801          */
802         if (pAdapter->uiLinkSpeed == TRUEPHY_SPEED_1000MBPS) {
803                 writel(pAdapter->RegistryTxTimeInterval * NANO_IN_A_MICRO,
804                        &pAdapter->CSRAddress->global.watchdog_timer);
805         }
806
807         spin_unlock_irqrestore(&pAdapter->SendHWLock, lockflags1);
808
809         DBG_TX_LEAVE(et131x_dbginfo);
810         return 0;
811 }
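/* Editor's note: txDmaReadyToSend packs a descriptor index in its low
 * bits with a wrap flag at 0x400, as implied by the constants used in
 * nic_send_packet() above; the flag flips each time the index runs past
 * the end of the ring, so producer and consumer can tell a full ring
 * from an empty one. A minimal sketch of that advance, assuming a
 * 10-bit index field; the helper name is illustrative only.
 */
#if 0	/* illustrative sketch, not part of the driver */
static uint32_t example_advance_ring_index(uint32_t value, uint32_t count,
					   uint32_t ring_size)
{
	uint32_t idx = (value & 0x3ff) + count;	/* bits 0-9: index */
	uint32_t wrap = value & 0x400;		/* bit 10: wrap flag */

	if (idx >= ring_size) {			/* ran off the end... */
		idx -= ring_size;		/* ...wrap the index */
		wrap ^= 0x400;			/* ...and flip the flag */
	}

	return wrap | idx;
}
#endif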
812
813 /*
814  * NOTE: For now, keep this older version of NICSendPacket around for
815  * reference, even though it's not used
816  */
817 #if 0
818
819 /**
820  * NICSendPacket - NIC specific send handler.
821  * @pAdapter: pointer to our adapter
822  * @pMpTcb: pointer to MP_TCB
823  *
824  * Returns 0 on success, errno on failure.
825  *
826  * This version of the send routine is designed for version A silicon.
827  * Assumption - Send spinlock has been acquired.
828  */
829 static int nic_send_packet(struct et131x_adapter *pAdapter, PMP_TCB pMpTcb)
830 {
831         uint32_t loopIndex, fragIndex, loopEnd;
832         uint32_t iSplitFirstElement = 0;
833         uint32_t SegmentSize = 0;
834         TX_DESC_ENTRY_t CurDesc;
835         TX_DESC_ENTRY_t *CurDescPostCopy = NULL;
836         uint32_t SlotsAvailable;
837         DMA10W_t ServiceComplete;
838         unsigned long lockflags1, lockflags2;
839         struct sk_buff *pPacket = pMpTcb->Packet;
840         uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
841         struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
842
843         DBG_TX_ENTER(et131x_dbginfo);
844
845         ServiceComplete.value =
846                 readl(&pAdapter->CSRAddress->txdma.NewServiceComplete.value);
847
848         /*
849          * Attempt to fix TWO hardware bugs:
850          * 1)  NEVER write an odd number of descriptors.
851          * 2)  If packet length is less than NIC_MIN_PACKET_SIZE, then pad the
852          *     packet to NIC_MIN_PACKET_SIZE bytes by adding a new last
853          *     descriptor IN HALF DUPLEX MODE ONLY
854          * NOTE that (2) interacts with (1).  If the packet is less than
855          * NIC_MIN_PACKET_SIZE bytes then we will append a descriptor.
856          * Therefore if it is even now, it will eventually end up odd, and
857          * so will need adjusting.
858          *
859          * VLAN tags get involved since VLAN tags add another one or two
860          * segments.
861          */
862         DBG_TX(et131x_dbginfo,
863                "pMpTcb->PacketLength: %d\n", pMpTcb->PacketLength);
864
865         if ((pAdapter->uiDuplexMode == 0)
866             && (pMpTcb->PacketLength < NIC_MIN_PACKET_SIZE)) {
867                 DBG_TX(et131x_dbginfo,
868                        "HALF DUPLEX mode AND len < MIN_PKT_SIZE\n");
869                 if ((FragListCount & 0x1) == 0) {
870                         DBG_TX(et131x_dbginfo,
871                                "Even number of descs, split 1st elem\n");
872                         iSplitFirstElement = 1;
874                         SegmentSize = (pPacket->len - pPacket->data_len) / 2;
875                 }
876         } else if (FragListCount & 0x1) {
877                 DBG_TX(et131x_dbginfo, "Odd number of descs, split 1st elem\n");
878
879                 iSplitFirstElement = 1;
881                 SegmentSize = (pPacket->len - pPacket->data_len) / 2;
882         }
883
884         spin_lock_irqsave(&pAdapter->SendHWLock, lockflags1);
885
886         if (pAdapter->TxRing.txDmaReadyToSend.bits.serv_req_wrap ==
887             ServiceComplete.bits.serv_cpl_wrap) {
888                 /* The ring hasn't wrapped.  Slots available should be
889                  * (RING_SIZE) -  the difference between the two pointers.
890                  */
891                 SlotsAvailable = NUM_DESC_PER_RING_TX -
892                     (pAdapter->TxRing.txDmaReadyToSend.bits.serv_req -
893                      ServiceComplete.bits.serv_cpl);
894         } else {
895                 /* The ring has wrapped.  Slots available should be the
896                  * difference between the two pointers.
897                  */
898                 SlotsAvailable = ServiceComplete.bits.serv_cpl -
899                     pAdapter->TxRing.txDmaReadyToSend.bits.serv_req;
900         }
901
902         if ((FragListCount + iSplitFirstElement) > SlotsAvailable) {
903                 DBG_WARNING(et131x_dbginfo,
904                             "Not Enough Space in Tx Desc Ring\n");
905                 spin_unlock_irqrestore(&pAdapter->SendHWLock, lockflags1);
906                 return -ENOMEM;
907         }
908
909         loopEnd = (FragListCount) + iSplitFirstElement;
910         fragIndex = 0;
911
912         DBG_TX(et131x_dbginfo,
913                "TCB           : 0x%p\n"
914                "Packet (SKB)  : 0x%p\t Packet->len: %d\t Packet->data_len: %d\n"
915                "FragListCount : %d\t iSplitFirstElement: %d\t loopEnd:%d\n",
916                pMpTcb,
917                pPacket, pPacket->len, pPacket->data_len,
918                FragListCount, iSplitFirstElement, loopEnd);
919
920         for (loopIndex = 0; loopIndex < loopEnd; loopIndex++) {
921                 if (loopIndex > iSplitFirstElement) {
922                         fragIndex++;
923                 }
924
925                 DBG_TX(et131x_dbginfo,
926                        "In loop, loopIndex: %d\t fragIndex: %d\n", loopIndex,
927                        fragIndex);
928
929                 /* If there is something in this element, let's get a
930                  * descriptor from the ring and get the necessary data
931                  */
932                 DBG_TX(et131x_dbginfo,
933                        "Packet Length %d,"
934                        "filling desc entry %d\n",
935                        pPacket->len,
936                        pAdapter->TxRing.txDmaReadyToSend.bits.serv_req);
937
938                 // NOTE - Should we do a paranoia check here to make sure the fragment
939                 // actually has a length? It's HIGHLY unlikely the fragment would
940                 // contain no data...
941                 if (1) {
942                         // NOTE - Currently always getting 32-bit addrs, and dma_addr_t is
943                         //        only 32-bit, so leave "high" ptr value out for now
944                         CurDesc.DataBufferPtrHigh = 0;
945
946                         CurDesc.word2.value = 0;
947                         CurDesc.word3.value = 0;
948
949                         if (fragIndex == 0) {
950                                 if (iSplitFirstElement) {
951                                         DBG_TX(et131x_dbginfo,
952                                                "Split first element: YES\n");
953
954                                         if (loopIndex == 0) {
955                                                 DBG_TX(et131x_dbginfo,
956                                                        "Got fragment of length %d, fragIndex: %d\n",
957                                                        pPacket->len -
958                                                        pPacket->data_len,
959                                                        fragIndex);
960                                                 DBG_TX(et131x_dbginfo,
961                                                        "SegmentSize: %d\n",
962                                                        SegmentSize);
963
964                                                 CurDesc.word2.bits.length_in_bytes =
965                                                     SegmentSize;
966                                                 CurDesc.DataBufferPtrLow =
967                                                     pci_map_single(pAdapter->pdev,
968                                                                    pPacket->data,
969                                                                    SegmentSize,
970                                                                    PCI_DMA_TODEVICE);
971                                                 DBG_TX(et131x_dbginfo,
972                                                        "pci_map_single() returns: 0x%08x\n",
973                                                        CurDesc.DataBufferPtrLow);
978                                         } else {
979                                                 DBG_TX(et131x_dbginfo,
980                                                        "Got fragment of length %d, fragIndex: %d\n",
981                                                        pPacket->len -
982                                                        pPacket->data_len,
983                                                        fragIndex);
984                                                 DBG_TX(et131x_dbginfo,
985                                                        "Leftover Size: %d\n",
986                                                        (pPacket->len -
987                                                         pPacket->data_len -
988                                                         SegmentSize));
989
990                                                 CurDesc.word2.bits.length_in_bytes =
991                                                     (pPacket->len -
992                                                      pPacket->data_len -
993                                                      SegmentSize);
994                                                 CurDesc.DataBufferPtrLow =
995                                                     pci_map_single(pAdapter->pdev,
996                                                                    pPacket->data +
997                                                                    SegmentSize,
998                                                                    pPacket->len -
999                                                                    pPacket->data_len -
1000                                                                    SegmentSize,
1001                                                                    PCI_DMA_TODEVICE);
1002                                                 DBG_TX(et131x_dbginfo,
1003                                                        "pci_map_single() returns: 0x%08x\n",
1004                                                        CurDesc.DataBufferPtrLow);
1011                                         }
1012                                 } else {
1013                                         DBG_TX(et131x_dbginfo,
1014                                                "Split first element: NO\n");
1015
1016                                         CurDesc.word2.bits.length_in_bytes =
1017                                             pPacket->len - pPacket->data_len;
1018
1019                                         CurDesc.DataBufferPtrLow =
1020                                             pci_map_single(pAdapter->pdev,
1021                                                            pPacket->data,
1022                                                            (pPacket->len -
1023                                                             pPacket->data_len),
1024                                                            PCI_DMA_TODEVICE);
1025                                         DBG_TX(et131x_dbginfo,
1026                                                "pci_map_single() returns: 0x%08x\n",
1027                                                CurDesc.DataBufferPtrLow);
1028                                 }
1029                         } else {
1030
1031                                 CurDesc.word2.bits.length_in_bytes =
1032                                     pFragList[fragIndex - 1].size;
1033                                 CurDesc.DataBufferPtrLow =
1034                                     pci_map_page(pAdapter->pdev,
1035                                                  pFragList[fragIndex - 1].page,
1036                                                  pFragList[fragIndex - 1].page_offset,
1038                                                  pFragList[fragIndex - 1].size,
1039                                                  PCI_DMA_TODEVICE);
1040                                 DBG_TX(et131x_dbginfo,
1041                                        "pci_map_page() returns: 0x%08x\n",
1042                                        CurDesc.DataBufferPtrLow);
1043                         }
1044
1045                         if (loopIndex == 0) {
1046                                 /* This is the first descriptor of the packet
1047                                  *
1048                                  * Set the "f" bit to indicate this is the
1049                                  * first descriptor in the packet.
1050                                  */
1051                                 DBG_TX(et131x_dbginfo,
1052                                        "This is our FIRST descriptor\n");
1053                                 CurDesc.word3.bits.f = 1;
1054
1055                                 pMpTcb->WrIndexStart =
1056                                     pAdapter->TxRing.txDmaReadyToSend;
1057                         }
1058
1059                         if ((loopIndex == (loopEnd - 1)) &&
1060                             (pAdapter->uiDuplexMode ||
1061                              (pMpTcb->PacketLength >= NIC_MIN_PACKET_SIZE))) {
1062                                 /* This is the Last descriptor of the packet */
1063                                 DBG_TX(et131x_dbginfo,
1064                                        "THIS is our LAST descriptor\n");
1065
1066                                 if (pAdapter->uiLinkSpeed ==
1067                                     TRUEPHY_SPEED_1000MBPS) {
1068                                         if (++pAdapter->TxRing.TxPacketsSinceLastinterrupt >=
1069                                             pAdapter->RegistryTxNumBuffers) {
1070                                                 CurDesc.word3.value = 0x5;
1071                                                 pAdapter->TxRing.TxPacketsSinceLastinterrupt = 0;
1075                                         } else {
1076                                                 CurDesc.word3.value = 0x1;
1077                                         }
1078                                 } else {
1079                                         CurDesc.word3.value = 0x5;
1080                                 }
1081
1082                                 /* Following index will be used during freeing
1083                                  * of packet
1084                                  */
1085                                 pMpTcb->WrIndex =
1086                                     pAdapter->TxRing.txDmaReadyToSend;
1087                                 pMpTcb->PacketStaleCount = 0;
1088                         }
1089
1090                         /* Copy the descriptor (filled above) into the
1091                          * descriptor ring at the next free entry.  Advance
1092                          * the "next free entry" variable
1093                          */
1094                         memcpy(pAdapter->TxRing.pTxDescRingVa +
1095                                pAdapter->TxRing.txDmaReadyToSend.bits.serv_req,
1096                                &CurDesc, sizeof(TX_DESC_ENTRY_t));
1097
1098                         CurDescPostCopy =
1099                             pAdapter->TxRing.pTxDescRingVa +
1100                             pAdapter->TxRing.txDmaReadyToSend.bits.serv_req;
1101
1102                         DBG_TX(et131x_dbginfo,
1103                                "CURRENT DESCRIPTOR\n"
1104                                "\tAddress           : 0x%p\n"
1105                                "\tDataBufferPtrHigh : 0x%08x\n"
1106                                "\tDataBufferPtrLow  : 0x%08x\n"
1107                                "\tword2             : 0x%08x\n"
1108                                "\tword3             : 0x%08x\n",
1109                                CurDescPostCopy,
1110                                CurDescPostCopy->DataBufferPtrHigh,
1111                                CurDescPostCopy->DataBufferPtrLow,
1112                                CurDescPostCopy->word2.value,
1113                                CurDescPostCopy->word3.value);
1114
1115                         if (++pAdapter->TxRing.txDmaReadyToSend.bits.serv_req >=
1116                             NUM_DESC_PER_RING_TX) {
1117                                 if (pAdapter->TxRing.txDmaReadyToSend.bits.serv_req_wrap) {
1118                                         pAdapter->TxRing.txDmaReadyToSend.value = 0;
1119                                 } else {
1120                                         pAdapter->TxRing.txDmaReadyToSend.value = 0x400;
1121                                 }
1122                         }
1126                 }
1127         }
1128
1129         if (pAdapter->uiDuplexMode == 0 &&
1130             pMpTcb->PacketLength < NIC_MIN_PACKET_SIZE) {
1131                 // NOTE - Same 32/64-bit issue as above...
1132                 CurDesc.DataBufferPtrHigh = 0x0;
1133                 CurDesc.DataBufferPtrLow = pAdapter->TxRing.pTxDummyBlkPa;
1134                 CurDesc.word2.value = 0;
1135
1136                 if (pAdapter->uiLinkSpeed == TRUEPHY_SPEED_1000MBPS) {
1137                         if (++pAdapter->TxRing.TxPacketsSinceLastinterrupt >=
1138                             pAdapter->RegistryTxNumBuffers) {
1139                                 CurDesc.word3.value = 0x5;
1140                                 pAdapter->TxRing.TxPacketsSinceLastinterrupt =
1141                                     0;
1142                         } else {
1143                                 CurDesc.word3.value = 0x1;
1144                         }
1145                 } else {
1146                         CurDesc.word3.value = 0x5;
1147                 }
1148
1149                 CurDesc.word2.bits.length_in_bytes =
1150                     NIC_MIN_PACKET_SIZE - pMpTcb->PacketLength;
1151
1152                 pMpTcb->WrIndex = pAdapter->TxRing.txDmaReadyToSend;
1153
1154                 memcpy(pAdapter->TxRing.pTxDescRingVa +
1155                        pAdapter->TxRing.txDmaReadyToSend.bits.serv_req,
1156                        &CurDesc, sizeof(TX_DESC_ENTRY_t));
1157
1158                 CurDescPostCopy =
1159                     pAdapter->TxRing.pTxDescRingVa +
1160                     pAdapter->TxRing.txDmaReadyToSend.bits.serv_req;
1161
1162                 DBG_TX(et131x_dbginfo,
1163                        "CURRENT DESCRIPTOR\n"
1164                        "\tAddress           : 0x%p\n"
1165                        "\tDataBufferPtrHigh : 0x%08x\n"
1166                        "\tDataBufferPtrLow  : 0x%08x\n"
1167                        "\tword2             : 0x%08x\n"
1168                        "\tword3             : 0x%08x\n",
1169                        CurDescPostCopy,
1170                        CurDescPostCopy->DataBufferPtrHigh,
1171                        CurDescPostCopy->DataBufferPtrLow,
1172                        CurDescPostCopy->word2.value,
1173                        CurDescPostCopy->word3.value);
1174
1175                 if (++pAdapter->TxRing.txDmaReadyToSend.bits.serv_req >=
1176                     NUM_DESC_PER_RING_TX) {
1177                         if (pAdapter->TxRing.txDmaReadyToSend.bits.serv_req_wrap) {
1179                                 pAdapter->TxRing.txDmaReadyToSend.value = 0;
1180                         } else {
1181                                 pAdapter->TxRing.txDmaReadyToSend.value = 0x400;
1182                         }
1183                 }
1184
1185                 DBG_TX(et131x_dbginfo, "Padding descriptor %d by %d bytes\n",
1187                        pAdapter->TxRing.txDmaReadyToSend.bits.serv_req,
1188                        NIC_MIN_PACKET_SIZE - pMpTcb->PacketLength);
1189         }
1190
	spin_lock_irqsave(&pAdapter->TCBSendQLock, lockflags2);

	if (pAdapter->TxRing.CurrSendTail) {
		pAdapter->TxRing.CurrSendTail->Next = pMpTcb;
	} else {
		pAdapter->TxRing.CurrSendHead = pMpTcb;
	}

	pAdapter->TxRing.CurrSendTail = pMpTcb;

	DBG_ASSERT(pMpTcb->Next == NULL);

	pAdapter->TxRing.nBusySend++;

	spin_unlock_irqrestore(&pAdapter->TCBSendQLock, lockflags2);

	/* Write the new write pointer back to the device. */
	writel(pAdapter->TxRing.txDmaReadyToSend.value,
	       &pAdapter->CSRAddress->txdma.service_request.value);

#ifdef CONFIG_ET131X_DEBUG
	DumpDeviceBlock(DBG_TX_ON, pAdapter, 1);
#endif

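	/* RegistryTxTimeInterval is presumably in microseconds and the
	 * watchdog register in nanoseconds, given the NANO_IN_A_MICRO
	 * scaling applied below.
	 */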
	/* For Gig only, we use Tx Interrupt coalescing.  Enable the software
	 * timer to wake us up if this packet isn't followed by N more.
	 */
	if (pAdapter->uiLinkSpeed == TRUEPHY_SPEED_1000MBPS) {
		writel(pAdapter->RegistryTxTimeInterval * NANO_IN_A_MICRO,
		       &pAdapter->CSRAddress->global.watchdog_timer);
	}

	spin_unlock_irqrestore(&pAdapter->SendHWLock, lockflags1);

	DBG_TX_LEAVE(et131x_dbginfo);
	return 0;
}

#endif

/**
 * et131x_free_send_packet - Recycle a MP_TCB, complete the packet if necessary
 * @pAdapter: pointer to our adapter
 * @pMpTcb: pointer to MP_TCB
 *
 * Assumption - Send spinlock has been acquired
 */
inline void et131x_free_send_packet(struct et131x_adapter *pAdapter,
				    PMP_TCB pMpTcb)
{
	unsigned long lockflags;
	TX_DESC_ENTRY_t *desc = NULL;
	struct net_device_stats *stats = &pAdapter->net_stats;

	if (MP_TEST_FLAG(pMpTcb, fMP_DEST_BROAD)) {
		atomic_inc(&pAdapter->Stats.brdcstxmt);
	} else if (MP_TEST_FLAG(pMpTcb, fMP_DEST_MULTI)) {
		atomic_inc(&pAdapter->Stats.multixmt);
	} else {
		atomic_inc(&pAdapter->Stats.unixmt);
	}

	if (pMpTcb->Packet) {
		stats->tx_bytes += pMpTcb->Packet->len;

		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and unmap the fragments
		 * they point to
		 */
		DBG_TX(et131x_dbginfo,
		       "Unmap descriptors Here\n"
		       "TCB                       : 0x%p\n"
		       "TCB Next                  : 0x%p\n"
		       "TCB PacketLength          : %d\n"
		       "TCB WrIndexStart.value    : 0x%08x\n"
		       "TCB WrIndexStart.bits.val : %d\n"
		       "TCB WrIndex.value         : 0x%08x\n"
		       "TCB WrIndex.bits.val      : %d\n",
		       pMpTcb,
		       pMpTcb->Next,
		       pMpTcb->PacketLength,
		       pMpTcb->WrIndexStart.value,
		       pMpTcb->WrIndexStart.bits.val,
		       pMpTcb->WrIndex.value,
		       pMpTcb->WrIndex.bits.val);

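		/* Walk the ring from the packet's first descriptor
		 * (WrIndexStart) through its last (WrIndex), releasing the
		 * DMA mapping each descriptor holds before the skb itself
		 * is freed below.
		 */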
		do {
			desc = (TX_DESC_ENTRY_t *)(pAdapter->TxRing.pTxDescRingVa +
						   pMpTcb->WrIndexStart.bits.val);

			DBG_TX(et131x_dbginfo,
			       "CURRENT DESCRIPTOR\n"
			       "\tAddress           : 0x%p\n"
			       "\tDataBufferPtrHigh : 0x%08x\n"
			       "\tDataBufferPtrLow  : 0x%08x\n"
			       "\tword2             : 0x%08x\n"
			       "\tword3             : 0x%08x\n",
			       desc,
			       desc->DataBufferPtrHigh,
			       desc->DataBufferPtrLow,
			       desc->word2.value,
			       desc->word3.value);

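			/* Release the streaming mapping set up at send
			 * time; DataBufferPtrLow carries the bus address
			 * handed to the hardware and word2 the fragment
			 * length.
			 */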
			pci_unmap_single(pAdapter->pdev,
					 desc->DataBufferPtrLow,
					 desc->word2.value, PCI_DMA_TODEVICE);

			if (++pMpTcb->WrIndexStart.bits.val >=
			    NUM_DESC_PER_RING_TX) {
				if (pMpTcb->WrIndexStart.bits.wrap) {
					pMpTcb->WrIndexStart.value = 0;
				} else {
					pMpTcb->WrIndexStart.value = 0x400;
				}
			}
		} while (desc != (pAdapter->TxRing.pTxDescRingVa +
				  pMpTcb->WrIndex.bits.val));

		DBG_TX(et131x_dbginfo,
		       "Free Packet (SKB)   : 0x%p\n", pMpTcb->Packet);

		dev_kfree_skb_any(pMpTcb->Packet);
	}

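	/* Scrub the TCB before recycling it; this also clears ->Next, which
	 * the ready-queue append below relies on being NULL.
	 */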
	memset(pMpTcb, 0, sizeof(MP_TCB));

	/* Add the TCB to the Ready Q */
	spin_lock_irqsave(&pAdapter->TCBReadyQLock, lockflags);

	pAdapter->Stats.opackets++;

	if (pAdapter->TxRing.TCBReadyQueueTail) {
		pAdapter->TxRing.TCBReadyQueueTail->Next = pMpTcb;
	} else {
		/* Apparently ready Q is empty. */
		pAdapter->TxRing.TCBReadyQueueHead = pMpTcb;
	}

	pAdapter->TxRing.TCBReadyQueueTail = pMpTcb;

	spin_unlock_irqrestore(&pAdapter->TCBReadyQLock, lockflags);

	DBG_ASSERT(pAdapter->TxRing.nBusySend >= 0);
}

/**
 * et131x_free_busy_send_packets - Free and complete the stopped active sends
 * @pAdapter: pointer to our adapter
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_free_busy_send_packets(struct et131x_adapter *pAdapter)
{
	PMP_TCB pMpTcb;
	struct list_head *pEntry;
	unsigned long lockflags;
	uint32_t FreeCounter = 0;

	DBG_ENTER(et131x_dbginfo);

	/* Drain the send wait queue.  The entry must actually be unlinked
	 * here, or a non-empty queue would spin this loop forever; the
	 * queued packets themselves are owned elsewhere and are not freed
	 * at this point.
	 */
	while (!list_empty(&pAdapter->TxRing.SendWaitQueue)) {
		spin_lock_irqsave(&pAdapter->SendWaitLock, lockflags);

		pEntry = pAdapter->TxRing.SendWaitQueue.next;
		list_del(pEntry);

		pAdapter->TxRing.nWaitSend--;
		spin_unlock_irqrestore(&pAdapter->SendWaitLock, lockflags);
	}

	pAdapter->TxRing.nWaitSend = 0;

	/* Any packets being sent? Check the first TCB on the send list */
	spin_lock_irqsave(&pAdapter->TCBSendQLock, lockflags);

	pMpTcb = pAdapter->TxRing.CurrSendHead;

	while ((pMpTcb != NULL) && (FreeCounter < NUM_TCB)) {
		PMP_TCB pNext = pMpTcb->Next;

		pAdapter->TxRing.CurrSendHead = pNext;

		if (pNext == NULL) {
			pAdapter->TxRing.CurrSendTail = NULL;
		}

		pAdapter->TxRing.nBusySend--;

		spin_unlock_irqrestore(&pAdapter->TCBSendQLock, lockflags);

		DBG_VERBOSE(et131x_dbginfo, "pMpTcb = 0x%p\n", pMpTcb);

		FreeCounter++;
		MP_FREE_SEND_PACKET_FUN(pAdapter, pMpTcb);

		spin_lock_irqsave(&pAdapter->TCBSendQLock, lockflags);

		pMpTcb = pAdapter->TxRing.CurrSendHead;
	}

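	/* Freeing NUM_TCB TCBs without emptying the send list means the
	 * list was corrupted (most likely circular), so give up loudly.
	 */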
	if (FreeCounter == NUM_TCB) {
		DBG_ERROR(et131x_dbginfo,
			  "et131x_free_busy_send_packets exited loop for a bad reason\n");
		BUG();
	}

	spin_unlock_irqrestore(&pAdapter->TCBSendQLock, lockflags);

	pAdapter->TxRing.nBusySend = 0;

	DBG_LEAVE(et131x_dbginfo);
}

/**
 * et131x_handle_send_interrupt - Interrupt handler for sending processing
 * @pAdapter: pointer to our adapter
 *
 * Re-claim the send resources, complete sends and get more to send from
 * the send wait queue.
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_handle_send_interrupt(struct et131x_adapter *pAdapter)
{
	DBG_TX_ENTER(et131x_dbginfo);

	/* Mark as completed any packets which have been sent by the device. */
	et131x_update_tcb_list(pAdapter);

	/* If we queued any transmits because we didn't have any TCBs earlier,
	 * dequeue and send those packets now, as long as we have free TCBs.
	 */
	et131x_check_send_wait_list(pAdapter);

	DBG_TX_LEAVE(et131x_dbginfo);
}

/**
 * et131x_update_tcb_list - Helper routine for Send Interrupt handler
 * @pAdapter: pointer to our adapter
 *
 * Re-claims the send resources and completes sends.  Can also be called as
 * part of the NIC send routine when the "ServiceComplete" indication has
 * wrapped.
 */
static void et131x_update_tcb_list(struct et131x_adapter *pAdapter)
{
	unsigned long lockflags;
	DMA10W_t ServiceComplete;
	PMP_TCB pMpTcb;

	ServiceComplete.value =
	    readl(&pAdapter->CSRAddress->txdma.NewServiceComplete.value);

	/* Has the ring wrapped?  Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
	spin_lock_irqsave(&pAdapter->TCBSendQLock, lockflags);

	pMpTcb = pAdapter->TxRing.CurrSendHead;
	while (pMpTcb &&
	       ServiceComplete.bits.wrap != pMpTcb->WrIndex.bits.wrap &&
	       ServiceComplete.bits.val < pMpTcb->WrIndex.bits.val) {
		pAdapter->TxRing.nBusySend--;
		pAdapter->TxRing.CurrSendHead = pMpTcb->Next;
		if (pMpTcb->Next == NULL) {
			pAdapter->TxRing.CurrSendTail = NULL;
		}

		spin_unlock_irqrestore(&pAdapter->TCBSendQLock, lockflags);
		MP_FREE_SEND_PACKET_FUN(pAdapter, pMpTcb);
		spin_lock_irqsave(&pAdapter->TCBSendQLock, lockflags);

		/* Goto the next packet */
		pMpTcb = pAdapter->TxRing.CurrSendHead;
	}
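	/* Now process any descriptors that share the completion index's
	 * wrap state, i.e. everything the hardware has finished on the
	 * current revolution of the ring.
	 */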
	while (pMpTcb &&
	       ServiceComplete.bits.wrap == pMpTcb->WrIndex.bits.wrap &&
	       ServiceComplete.bits.val > pMpTcb->WrIndex.bits.val) {
		pAdapter->TxRing.nBusySend--;
		pAdapter->TxRing.CurrSendHead = pMpTcb->Next;
		if (pMpTcb->Next == NULL) {
			pAdapter->TxRing.CurrSendTail = NULL;
		}

		spin_unlock_irqrestore(&pAdapter->TCBSendQLock, lockflags);
		MP_FREE_SEND_PACKET_FUN(pAdapter, pMpTcb);
		spin_lock_irqsave(&pAdapter->TCBSendQLock, lockflags);

		/* Goto the next packet */
		pMpTcb = pAdapter->TxRing.CurrSendHead;
	}

	/* Wake up the queue when we hit a low-water mark */
	if (pAdapter->TxRing.nBusySend <= (NUM_TCB / 3)) {
		netif_wake_queue(pAdapter->netdev);
	}

	spin_unlock_irqrestore(&pAdapter->TCBSendQLock, lockflags);
}

/**
 * et131x_check_send_wait_list - Helper routine for the interrupt handler
 * @pAdapter: pointer to our adapter
 *
 * Takes packets from the send wait queue and posts them to the device (if
 * room available).
 */
static void et131x_check_send_wait_list(struct et131x_adapter *pAdapter)
{
	unsigned long lockflags;

	spin_lock_irqsave(&pAdapter->SendWaitLock, lockflags);

	while (!list_empty(&pAdapter->TxRing.SendWaitQueue) &&
	       MP_TCB_RESOURCES_AVAILABLE(pAdapter)) {
		struct list_head *pEntry;

		DBG_VERBOSE(et131x_dbginfo, "Tx packets on the wait queue\n");

		/* Unlink the entry so this loop can make progress; note
		 * that nothing here actually re-submits the dequeued
		 * packet to the hardware.
		 */
		pEntry = pAdapter->TxRing.SendWaitQueue.next;
		list_del(pEntry);

		pAdapter->TxRing.nWaitSend--;

		DBG_WARNING(et131x_dbginfo,
			    "et131x_check_send_wait_list - dequeued a waiting pkt. Waiting %d\n",
			    pAdapter->TxRing.nWaitSend);
	}

	spin_unlock_irqrestore(&pAdapter->SendWaitLock, lockflags);
}