Staging: sxg: Locking related changes. Fix locking levels
[linux-2.6] / drivers / staging / sxg / sxg.c
1 /**************************************************************************
2  *
3  * Copyright (C) 2000-2008 Alacritech, Inc.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above
12  *    copyright notice, this list of conditions and the following
13  *    disclaimer in the documentation and/or other materials provided
14  *    with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL ALACRITECH, INC. OR
20  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
23  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
26  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * The views and conclusions contained in the software and documentation
30  * are those of the authors and should not be interpreted as representing
31  * official policies, either expressed or implied, of Alacritech, Inc.
32  *
33  **************************************************************************/
34
35 /*
36  * FILENAME: sxg.c
37  *
38  * The SXG driver for Alacritech's 10Gbe products.
39  *
40  * NOTE: This is the standard, non-accelerated version of Alacritech's
41  *       IS-NIC driver.
42  */
43
44 #include <linux/kernel.h>
45 #include <linux/string.h>
46 #include <linux/errno.h>
47 #include <linux/module.h>
48 #include <linux/moduleparam.h>
49 #include <linux/ioport.h>
50 #include <linux/slab.h>
51 #include <linux/interrupt.h>
52 #include <linux/timer.h>
53 #include <linux/pci.h>
54 #include <linux/spinlock.h>
55 #include <linux/init.h>
56 #include <linux/netdevice.h>
57 #include <linux/etherdevice.h>
58 #include <linux/ethtool.h>
59 #include <linux/skbuff.h>
60 #include <linux/delay.h>
61 #include <linux/types.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/mii.h>
64
65 #define SLIC_GET_STATS_ENABLED          0
66 #define LINUX_FREES_ADAPTER_RESOURCES   1
67 #define SXG_OFFLOAD_IP_CHECKSUM         0
68 #define SXG_POWER_MANAGEMENT_ENABLED    0
69 #define VPCI                            0
70 #define ATK_DEBUG                       1
71
72 #include "sxg_os.h"
73 #include "sxghw.h"
74 #include "sxghif.h"
75 #include "sxg.h"
76 #include "sxgdbg.h"
77
78 #include "sxgphycode.h"
79 #include "saharadbgdownload.h"
80
81 static int sxg_allocate_buffer_memory(struct adapter_t *adapter, u32 Size,
82                                       enum sxg_buffer_type BufferType);
83 static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
84                                                 void *RcvBlock,
85                                                 dma_addr_t PhysicalAddress,
86                                                 u32 Length);
87 static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
88                                              struct sxg_scatter_gather *SxgSgl,
89                                              dma_addr_t PhysicalAddress,
90                                              u32 Length);
91
92 static void sxg_mcast_init_crc32(void);
93 static int sxg_entry_open(struct net_device *dev);
94 static int sxg_entry_halt(struct net_device *dev);
95 static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
96 static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev);
97 static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb);
98 static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
99                                 struct sxg_scatter_gather *SxgSgl);
100
101 static void sxg_handle_interrupt(struct adapter_t *adapter);
102 static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId);
103 static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId);
104 static void sxg_complete_slow_send(struct adapter_t *adapter, int irq_context);
105 static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
106                                         struct sxg_event *Event);
107 static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus);
108 static bool sxg_mac_filter(struct adapter_t *adapter,
109                            struct ether_header *EtherHdr, ushort length);
110
111 static struct net_device_stats *sxg_get_stats(struct net_device * dev);
112 void sxg_free_resources(struct adapter_t *adapter);
113 void sxg_free_rcvblocks(struct adapter_t *adapter);
114 void sxg_free_sgl_buffers(struct adapter_t *adapter);
115 void sxg_unmap_resources(struct adapter_t *adapter);
116 void sxg_free_mcast_addrs(struct adapter_t *adapter);
117 void sxg_collect_statistics(struct adapter_t *adapter);
118
119 #define XXXTODO 0
120
121 static int sxg_mac_set_address(struct net_device *dev, void *ptr);
122 static void sxg_mcast_set_list(struct net_device *dev);
123
124 static void sxg_adapter_set_hwaddr(struct adapter_t *adapter);
125
126 static void sxg_unmap_mmio_space(struct adapter_t *adapter);
127
128 static int sxg_initialize_adapter(struct adapter_t *adapter);
129 static void sxg_stock_rcv_buffers(struct adapter_t *adapter);
130 static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
131                                            unsigned char Index);
132 static int sxg_initialize_link(struct adapter_t *adapter);
133 static int sxg_phy_init(struct adapter_t *adapter);
134 static void sxg_link_event(struct adapter_t *adapter);
135 static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter);
136 static void sxg_link_state(struct adapter_t *adapter,
137                                 enum SXG_LINK_STATE LinkState);
138 static int sxg_write_mdio_reg(struct adapter_t *adapter,
139                               u32 DevAddr, u32 RegAddr, u32 Value);
140 static int sxg_read_mdio_reg(struct adapter_t *adapter,
141                              u32 DevAddr, u32 RegAddr, u32 *pValue);
142
143 static unsigned int sxg_first_init = 1;
144 static char *sxg_banner =
145     "Alacritech SLIC Technology(tm) Server and Storage \
146          10Gbe Accelerator (Non-Accelerated)\n";
147
148 static int sxg_debug = 1;
149 static int debug = -1;
150 static struct net_device *head_netdevice = NULL;
151
152 static struct sxgbase_driver sxg_global = {
153         .dynamic_intagg = 1,
154 };
155 static int intagg_delay = 100;
156 static u32 dynamic_intagg = 0;
157
158 char sxg_driver_name[] = "sxg";
159 #define DRV_AUTHOR      "Alacritech, Inc. Engineering"
160 #define DRV_DESCRIPTION                                                 \
161         "Alacritech SLIC Techonology(tm) Non-Accelerated 10Gbe Driver"
162 #define DRV_COPYRIGHT                                                   \
163         "Copyright 2000-2008 Alacritech, Inc.  All rights reserved."
164
165 MODULE_AUTHOR(DRV_AUTHOR);
166 MODULE_DESCRIPTION(DRV_DESCRIPTION);
167 MODULE_LICENSE("GPL");
168
169 module_param(dynamic_intagg, int, 0);
170 MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting");
171 module_param(intagg_delay, int, 0);
172 MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay");
173
174 static struct pci_device_id sxg_pci_tbl[] __devinitdata = {
175         {PCI_DEVICE(SXG_VENDOR_ID, SXG_DEVICE_ID)},
176         {0,}
177 };
178
179 MODULE_DEVICE_TABLE(pci, sxg_pci_tbl);
180
/*
 * sxg_reg32_write - Write a 32-bit value to a mapped adapter register.
 *
 * @reg:   mapped (__iomem) register address to write
 * @value: 32-bit value to write
 * @flush: when true, issue a full memory barrier after the write so it
 *         is not reordered against subsequent accesses
 */
static inline void sxg_reg32_write(void __iomem *reg, u32 value, bool flush)
{
	writel(value, reg);
	if (flush)
		mb();
}
187
188 static inline void sxg_reg64_write(struct adapter_t *adapter, void __iomem *reg,
189                                    u64 value, u32 cpu)
190 {
191         u32 value_high = (u32) (value >> 32);
192         u32 value_low = (u32) (value & 0x00000000FFFFFFFF);
193         unsigned long flags;
194
195         spin_lock_irqsave(&adapter->Bit64RegLock, flags);
196         writel(value_high, (void __iomem *)(&adapter->UcodeRegs[cpu].Upper));
197         writel(value_low, reg);
198         spin_unlock_irqrestore(&adapter->Bit64RegLock, flags);
199 }
200
201 static void sxg_init_driver(void)
202 {
203         if (sxg_first_init) {
204                 DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n",
205                           __func__, jiffies);
206                 sxg_first_init = 0;
207                 spin_lock_init(&sxg_global.driver_lock);
208         }
209 }
210
211 static void sxg_dbg_macaddrs(struct adapter_t *adapter)
212 {
213         DBG_ERROR("  (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
214                   adapter->netdev->name, adapter->currmacaddr[0],
215                   adapter->currmacaddr[1], adapter->currmacaddr[2],
216                   adapter->currmacaddr[3], adapter->currmacaddr[4],
217                   adapter->currmacaddr[5]);
218         DBG_ERROR("  (%s) mac  %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
219                   adapter->netdev->name, adapter->macaddr[0],
220                   adapter->macaddr[1], adapter->macaddr[2],
221                   adapter->macaddr[3], adapter->macaddr[4],
222                   adapter->macaddr[5]);
223         return;
224 }
225
226 /* SXG Globals */
227 static struct sxg_driver SxgDriver;
228
229 #ifdef  ATKDBG
230 static struct sxg_trace_buffer LSxgTraceBuffer;
231 #endif /* ATKDBG */
232 static struct sxg_trace_buffer *SxgTraceBuffer = NULL;
233
234 /*
235  * sxg_download_microcode
236  *
237  * Download Microcode to Sahara adapter
238  *
239  * Arguments -
240  *              adapter         - A pointer to our adapter structure
241  *              UcodeSel        - microcode file selection
242  *
243  * Return
244  *      int
245  */
246 static bool sxg_download_microcode(struct adapter_t *adapter,
247                                                 enum SXG_UCODE_SEL UcodeSel)
248 {
249         struct sxg_hw_regs *HwRegs = adapter->HwRegs;
250         u32 Section;
251         u32 ThisSectionSize;
252         u32 *Instruction = NULL;
253         u32 BaseAddress, AddressOffset, Address;
254         /* u32 Failure; */
255         u32 ValueRead;
256         u32 i;
257         u32 numSections = 0;
258         u32 sectionSize[16];
259         u32 sectionStart[16];
260
261         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod",
262                   adapter, 0, 0, 0);
263         DBG_ERROR("sxg: %s ENTER\n", __func__);
264
265         switch (UcodeSel) {
266         case SXG_UCODE_SAHARA:  /* Sahara operational ucode */
267                 numSections = SNumSections;
268                 for (i = 0; i < numSections; i++) {
269                         sectionSize[i] = SSectionSize[i];
270                         sectionStart[i] = SSectionStart[i];
271                 }
272                 break;
273         default:
274                 printk(KERN_ERR KBUILD_MODNAME
275                        ": Woah, big error with the microcode!\n");
276                 break;
277         }
278
279         DBG_ERROR("sxg: RESET THE CARD\n");
280         /* First, reset the card */
281         WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
282
283         /*
284          * Download each section of the microcode as specified in
285          * its download file.  The *download.c file is generated using
286          * the saharaobjtoc facility which converts the metastep .obj
287          * file to a .c file which contains a two dimentional array.
288          */
289         for (Section = 0; Section < numSections; Section++) {
290                 DBG_ERROR("sxg: SECTION # %d\n", Section);
291                 switch (UcodeSel) {
292                 case SXG_UCODE_SAHARA:
293                         Instruction = (u32 *) & SaharaUCode[Section][0];
294                         break;
295                 default:
296                         ASSERT(0);
297                         break;
298                 }
299                 BaseAddress = sectionStart[Section];
300                 /* Size in instructions */
301                 ThisSectionSize = sectionSize[Section] / 12;
302                 for (AddressOffset = 0; AddressOffset < ThisSectionSize;
303                      AddressOffset++) {
304                         Address = BaseAddress + AddressOffset;
305                         ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0);
306                         /* Write instruction bits 31 - 0 */
307                         WRITE_REG(HwRegs->UcodeDataLow, *Instruction, FLUSH);
308                         /* Write instruction bits 63-32 */
309                         WRITE_REG(HwRegs->UcodeDataMiddle, *(Instruction + 1),
310                                   FLUSH);
311                         /* Write instruction bits 95-64 */
312                         WRITE_REG(HwRegs->UcodeDataHigh, *(Instruction + 2),
313                                   FLUSH);
314                         /* Write instruction address with the WRITE bit set */
315                         WRITE_REG(HwRegs->UcodeAddr,
316                                   (Address | MICROCODE_ADDRESS_WRITE), FLUSH);
317                         /*
318                          * Sahara bug in the ucode download logic - the write to DataLow
319                          * for the next instruction could get corrupted.  To avoid this,
320                          * write to DataLow again for this instruction (which may get
321                          * corrupted, but it doesn't matter), then increment the address
322                          * and write the data for the next instruction to DataLow.  That
323                          * write should succeed.
324                          */
325                         WRITE_REG(HwRegs->UcodeDataLow, *Instruction, TRUE);
326                         /* Advance 3 u32S to start of next instruction */
327                         Instruction += 3;
328                 }
329         }
330         /*
331          * Now repeat the entire operation reading the instruction back and
332          * checking for parity errors
333          */
334         for (Section = 0; Section < numSections; Section++) {
335                 DBG_ERROR("sxg: check SECTION # %d\n", Section);
336                 switch (UcodeSel) {
337                 case SXG_UCODE_SAHARA:
338                         Instruction = (u32 *) & SaharaUCode[Section][0];
339                         break;
340                 default:
341                         ASSERT(0);
342                         break;
343                 }
344                 BaseAddress = sectionStart[Section];
345                 /* Size in instructions */
346                 ThisSectionSize = sectionSize[Section] / 12;
347                 for (AddressOffset = 0; AddressOffset < ThisSectionSize;
348                      AddressOffset++) {
349                         Address = BaseAddress + AddressOffset;
350                         /* Write the address with the READ bit set */
351                         WRITE_REG(HwRegs->UcodeAddr,
352                                   (Address | MICROCODE_ADDRESS_READ), FLUSH);
353                         /* Read it back and check parity bit. */
354                         READ_REG(HwRegs->UcodeAddr, ValueRead);
355                         if (ValueRead & MICROCODE_ADDRESS_PARITY) {
356                                 DBG_ERROR("sxg: %s PARITY ERROR\n",
357                                           __func__);
358
359                                 return FALSE;   /* Parity error */
360                         }
361                         ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address);
362                         /* Read the instruction back and compare */
363                         READ_REG(HwRegs->UcodeDataLow, ValueRead);
364                         if (ValueRead != *Instruction) {
365                                 DBG_ERROR("sxg: %s MISCOMPARE LOW\n",
366                                           __func__);
367                                 return FALSE;   /* Miscompare */
368                         }
369                         READ_REG(HwRegs->UcodeDataMiddle, ValueRead);
370                         if (ValueRead != *(Instruction + 1)) {
371                                 DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n",
372                                           __func__);
373                                 return FALSE;   /* Miscompare */
374                         }
375                         READ_REG(HwRegs->UcodeDataHigh, ValueRead);
376                         if (ValueRead != *(Instruction + 2)) {
377                                 DBG_ERROR("sxg: %s MISCOMPARE HIGH\n",
378                                           __func__);
379                                 return FALSE;   /* Miscompare */
380                         }
381                         /* Advance 3 u32S to start of next instruction */
382                         Instruction += 3;
383                 }
384         }
385
386         /* Everything OK, Go. */
387         WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH);
388
389         /*
390          * Poll the CardUp register to wait for microcode to initialize
391          * Give up after 10,000 attemps (500ms).
392          */
393         for (i = 0; i < 10000; i++) {
394                 udelay(50);
395                 READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead);
396                 if (ValueRead == 0xCAFE) {
397                         DBG_ERROR("sxg: %s BOO YA 0xCAFE\n", __func__);
398                         break;
399                 }
400         }
401         if (i == 10000) {
402                 DBG_ERROR("sxg: %s TIMEOUT\n", __func__);
403
404                 return FALSE;   /* Timeout */
405         }
406         /*
407          * Now write the LoadSync register.  This is used to
408          * synchronize with the card so it can scribble on the memory
409          * that contained 0xCAFE from the "CardUp" step above
410          */
411         if (UcodeSel == SXG_UCODE_SAHARA) {
412                 WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH);
413         }
414
415         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd",
416                   adapter, 0, 0, 0);
417         DBG_ERROR("sxg: %s EXIT\n", __func__);
418
419         return (TRUE);
420 }
421
422 /*
423  * sxg_allocate_resources - Allocate memory and locks
424  *
425  * Arguments -
426  *      adapter - A pointer to our adapter structure
427  *
428  * Return - int
429  */
430 static int sxg_allocate_resources(struct adapter_t *adapter)
431 {
432         int status;
433         u32 i;
434         u32 RssIds, IsrCount;
435         /* struct sxg_xmt_ring  *XmtRing; */
436         /* struct sxg_rcv_ring  *RcvRing; */
437
438         DBG_ERROR("%s ENTER\n", __func__);
439
440         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes",
441                   adapter, 0, 0, 0);
442
443         /* Windows tells us how many CPUs it plans to use for */
444         /* RSS */
445         RssIds = SXG_RSS_CPU_COUNT(adapter);
446         IsrCount = adapter->MsiEnabled ? RssIds : 1;
447
448         DBG_ERROR("%s Setup the spinlocks\n", __func__);
449
450         /* Allocate spinlocks and initialize listheads first. */
451         spin_lock_init(&adapter->RcvQLock);
452         spin_lock_init(&adapter->SglQLock);
453         spin_lock_init(&adapter->XmtZeroLock);
454         spin_lock_init(&adapter->Bit64RegLock);
455         spin_lock_init(&adapter->AdapterLock);
456         atomic_set(&adapter->pending_allocations, 0);
457
458         DBG_ERROR("%s Setup the lists\n", __func__);
459
460         InitializeListHead(&adapter->FreeRcvBuffers);
461         InitializeListHead(&adapter->FreeRcvBlocks);
462         InitializeListHead(&adapter->AllRcvBlocks);
463         InitializeListHead(&adapter->FreeSglBuffers);
464         InitializeListHead(&adapter->AllSglBuffers);
465
466         /*
467          * Mark these basic allocations done.  This flags essentially
468          * tells the SxgFreeResources routine that it can grab spinlocks
469          * and reference listheads.
470          */
471         adapter->BasicAllocations = TRUE;
472         /*
473          * Main allocation loop.  Start with the maximum supported by
474          * the microcode and back off if memory allocation
475          * fails.  If we hit a minimum, fail.
476          */
477
478         for (;;) {
479                 DBG_ERROR("%s Allocate XmtRings size[%x]\n", __func__,
480                           (unsigned int)(sizeof(struct sxg_xmt_ring) * 1));
481
482                 /*
483                  * Start with big items first - receive and transmit rings.
484                  * At the moment I'm going to keep the ring size fixed and
485                  * adjust the TCBs if we fail.  Later we might
486                  * consider reducing the ring size as well..
487                  */
488                 adapter->XmtRings = pci_alloc_consistent(adapter->pcidev,
489                                                  sizeof(struct sxg_xmt_ring) *
490                                                  1,
491                                                  &adapter->PXmtRings);
492                 DBG_ERROR("%s XmtRings[%p]\n", __func__, adapter->XmtRings);
493
494                 if (!adapter->XmtRings) {
495                         goto per_tcb_allocation_failed;
496                 }
497                 memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1);
498
499                 DBG_ERROR("%s Allocate RcvRings size[%x]\n", __func__,
500                           (unsigned int)(sizeof(struct sxg_rcv_ring) * 1));
501                 adapter->RcvRings =
502                     pci_alloc_consistent(adapter->pcidev,
503                                          sizeof(struct sxg_rcv_ring) * 1,
504                                          &adapter->PRcvRings);
505                 DBG_ERROR("%s RcvRings[%p]\n", __func__, adapter->RcvRings);
506                 if (!adapter->RcvRings) {
507                         goto per_tcb_allocation_failed;
508                 }
509                 memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1);
510                 adapter->ucode_stats = kzalloc(sizeof(struct sxg_ucode_stats), GFP_ATOMIC);
511                 adapter->pucode_stats = pci_map_single(adapter->pcidev,
512                                                 adapter->ucode_stats,
513                                                 sizeof(struct sxg_ucode_stats),
514                                                 PCI_DMA_FROMDEVICE);
515 //              memset(adapter->ucode_stats, 0, sizeof(struct sxg_ucode_stats));
516                 break;
517
518               per_tcb_allocation_failed:
519                 /* an allocation failed.  Free any successful allocations. */
520                 if (adapter->XmtRings) {
521                         pci_free_consistent(adapter->pcidev,
522                                             sizeof(struct sxg_xmt_ring) * 1,
523                                             adapter->XmtRings,
524                                             adapter->PXmtRings);
525                         adapter->XmtRings = NULL;
526                 }
527                 if (adapter->RcvRings) {
528                         pci_free_consistent(adapter->pcidev,
529                                             sizeof(struct sxg_rcv_ring) * 1,
530                                             adapter->RcvRings,
531                                             adapter->PRcvRings);
532                         adapter->RcvRings = NULL;
533                 }
534                 /* Loop around and try again.... */
535                 if (adapter->ucode_stats) {
536                         pci_unmap_single(adapter->pcidev,
537                                         sizeof(struct sxg_ucode_stats),
538                                         adapter->pucode_stats, PCI_DMA_FROMDEVICE);
539                         adapter->ucode_stats = NULL;
540                 }
541
542         }
543
544         DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__);
545         /* Initialize rcv zero and xmt zero rings */
546         SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
547         SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);
548
549         /* Sanity check receive data structure format */
550         /* ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
551                (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE)); */
552         ASSERT(sizeof(struct sxg_rcv_descriptor_block) ==
553                SXG_RCV_DESCRIPTOR_BLOCK_SIZE);
554
555         /*
556          * Allocate receive data buffers.  We allocate a block of buffers and
557          * a corresponding descriptor block at once.  See sxghw.h:SXG_RCV_BLOCK
558          */
559         for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS;
560                                 i += SXG_RCV_DESCRIPTORS_PER_BLOCK) {
561                         sxg_allocate_buffer_memory(adapter,
562                                 SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
563                                            SXG_BUFFER_TYPE_RCV);
564         }
565         /*
566          * NBL resource allocation can fail in the 'AllocateComplete' routine,
567          * which doesn't return status.  Make sure we got the number of buffers
568          * we requested
569          */
570         if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) {
571                 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
572                           adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
573                           0);
574                 return (STATUS_RESOURCES);
575         }
576
577         DBG_ERROR("%s Allocate EventRings size[%x]\n", __func__,
578                   (unsigned int)(sizeof(struct sxg_event_ring) * RssIds));
579
580         /* Allocate event queues. */
581         adapter->EventRings = pci_alloc_consistent(adapter->pcidev,
582                                            sizeof(struct sxg_event_ring) *
583                                            RssIds,
584                                            &adapter->PEventRings);
585
586         if (!adapter->EventRings) {
587                 /* Caller will call SxgFreeAdapter to clean up above
588                  * allocations */
589                 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8",
590                           adapter, SXG_MAX_ENTRIES, 0, 0);
591                 status = STATUS_RESOURCES;
592                 goto per_tcb_allocation_failed;
593         }
594         memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds);
595
596         DBG_ERROR("%s Allocate ISR size[%x]\n", __func__, IsrCount);
597         /* Allocate ISR */
598         adapter->Isr = pci_alloc_consistent(adapter->pcidev,
599                                             IsrCount, &adapter->PIsr);
600         if (!adapter->Isr) {
601                 /* Caller will call SxgFreeAdapter to clean up above
602                  * allocations */
603                 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9",
604                           adapter, SXG_MAX_ENTRIES, 0, 0);
605                 status = STATUS_RESOURCES;
606                 goto per_tcb_allocation_failed;
607         }
608         memset(adapter->Isr, 0, sizeof(u32) * IsrCount);
609
610         DBG_ERROR("%s Allocate shared XMT ring zero index location size[%x]\n",
611                   __func__, (unsigned int)sizeof(u32));
612
613         /* Allocate shared XMT ring zero index location */
614         adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev,
615                                                          sizeof(u32),
616                                                          &adapter->
617                                                          PXmtRingZeroIndex);
618         if (!adapter->XmtRingZeroIndex) {
619                 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF10",
620                           adapter, SXG_MAX_ENTRIES, 0, 0);
621                 status = STATUS_RESOURCES;
622                 goto per_tcb_allocation_failed;
623         }
624         memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));
625
626         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS",
627                   adapter, SXG_MAX_ENTRIES, 0, 0);
628
629         DBG_ERROR("%s EXIT\n", __func__);
630         return (STATUS_SUCCESS);
631 }
632
633 /*
634  * sxg_config_pci -
635  *
636  * Set up PCI Configuration space
637  *
638  * Arguments -
639  *              pcidev                  - A pointer to our adapter structure
640  */
641 static void sxg_config_pci(struct pci_dev *pcidev)
642 {
643         u16 pci_command;
644         u16 new_command;
645
646         pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
647         DBG_ERROR("sxg: %s  PCI command[%4.4x]\n", __func__, pci_command);
648         /* Set the command register */
649         new_command = pci_command | (
650                                      /* Memory Space Enable */
651                                      PCI_COMMAND_MEMORY |
652                                      /* Bus master enable */
653                                      PCI_COMMAND_MASTER |
654                                      /* Memory write and invalidate */
655                                      PCI_COMMAND_INVALIDATE |
656                                      /* Parity error response */
657                                      PCI_COMMAND_PARITY |
658                                      /* System ERR */
659                                      PCI_COMMAND_SERR |
660                                      /* Fast back-to-back */
661                                      PCI_COMMAND_FAST_BACK);
662         if (pci_command != new_command) {
663                 DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n",
664                           __func__, pci_command, new_command);
665                 pci_write_config_word(pcidev, PCI_COMMAND, new_command);
666         }
667 }
668
669 /*
670  * sxg_read_config
671  *      @adapter : Pointer to the adapter structure for the card
672  * This function will read the configuration data from EEPROM/FLASH
673  */
674 static inline int sxg_read_config(struct adapter_t *adapter)
675 {
676         /* struct sxg_config    data; */
677         struct sw_cfg_data      *data;
678         dma_addr_t              p_addr;
679         unsigned long           status;
680         unsigned long           i;
681
682         data = pci_alloc_consistent(adapter->pcidev,
683                                         sizeof(struct sw_cfg_data), &p_addr);
684         if(!data) {
685                 /*
686                  * We cant get even this much memory. Raise a hell
687                  * Get out of here
688                  */
689                 printk(KERN_ERR"%s : Could not allocate memory for reading \
690                                  EEPROM\n", __FUNCTION__);
691                 return -ENOMEM;
692         }
693
694         WRITE_REG(adapter->UcodeRegs[0].ConfigStat, SXG_CFG_TIMEOUT, TRUE);
695
696         WRITE_REG64(adapter, adapter->UcodeRegs[0].Config, p_addr, 0);
697         for(i=0; i<1000; i++) {
698                 READ_REG(adapter->UcodeRegs[0].ConfigStat, status);
699                 if (status != SXG_CFG_TIMEOUT)
700                         break;
701                 mdelay(1);                      /* Do we really need this */
702         }
703
704         switch(status) {
705         /* Config read from EEPROM succeeded */
706         case SXG_CFG_LOAD_EEPROM:
707         /* Config read from Flash succeeded */
708         case SXG_CFG_LOAD_FLASH:
709         /* Copy the MAC address to adapter structure */
710                 /* TODO: We are not doing the remaining part : FRU,
711                  * etc
712                  */
713                 memcpy(adapter->macaddr, data->MacAddr[0].MacAddr,
714                               sizeof(struct sxg_config_mac));
715                 break;
716         case SXG_CFG_TIMEOUT:
717         case SXG_CFG_LOAD_INVALID:
718         case SXG_CFG_LOAD_ERROR:
719         default:        /* Fix default handler later */
720                 printk(KERN_WARNING"%s  : We could not read the config \
721                         word. Status = %ld\n", __FUNCTION__, status);
722                 break;
723         }
724         pci_free_consistent(adapter->pcidev, sizeof(struct sw_cfg_data), data,
725                                 p_addr);
726         if (adapter->netdev) {
727                 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
728                 memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
729         }
730         printk("LINSYS : These are the new MAC address\n");
731         sxg_dbg_macaddrs(adapter);
732
733         return status;
734 }
735
736 static int sxg_entry_probe(struct pci_dev *pcidev,
737                            const struct pci_device_id *pci_tbl_entry)
738 {
739         static int did_version = 0;
740         int err;
741         struct net_device *netdev;
742         struct adapter_t *adapter;
743         void __iomem *memmapped_ioaddr;
744         u32 status = 0;
745         ulong mmio_start = 0;
746         ulong mmio_len = 0;
747
748         DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n",
749                   __func__, jiffies, smp_processor_id());
750
751         /* Initialize trace buffer */
752 #ifdef ATKDBG
753         SxgTraceBuffer = &LSxgTraceBuffer;
754         SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY);
755 #endif
756
757         sxg_global.dynamic_intagg = dynamic_intagg;
758
759         err = pci_enable_device(pcidev);
760
761         DBG_ERROR("Call pci_enable_device(%p)  status[%x]\n", pcidev, err);
762         if (err) {
763                 return err;
764         }
765
766         if (sxg_debug > 0 && did_version++ == 0) {
767                 printk(KERN_INFO "%s\n", sxg_banner);
768                 printk(KERN_INFO "%s\n", SXG_DRV_VERSION);
769         }
770
771         if (!(err = pci_set_dma_mask(pcidev, DMA_64BIT_MASK))) {
772                 DBG_ERROR("pci_set_dma_mask(DMA_64BIT_MASK) successful\n");
773         } else {
774                 if ((err = pci_set_dma_mask(pcidev, DMA_32BIT_MASK))) {
775                         DBG_ERROR
776                             ("No usable DMA configuration, aborting  err[%x]\n",
777                              err);
778                         return err;
779                 }
780                 DBG_ERROR("pci_set_dma_mask(DMA_32BIT_MASK) successful\n");
781         }
782
783         DBG_ERROR("Call pci_request_regions\n");
784
785         err = pci_request_regions(pcidev, sxg_driver_name);
786         if (err) {
787                 DBG_ERROR("pci_request_regions FAILED err[%x]\n", err);
788                 return err;
789         }
790
791         DBG_ERROR("call pci_set_master\n");
792         pci_set_master(pcidev);
793
794         DBG_ERROR("call alloc_etherdev\n");
795         netdev = alloc_etherdev(sizeof(struct adapter_t));
796         if (!netdev) {
797                 err = -ENOMEM;
798                 goto err_out_exit_sxg_probe;
799         }
800         DBG_ERROR("alloc_etherdev for slic netdev[%p]\n", netdev);
801
802         SET_NETDEV_DEV(netdev, &pcidev->dev);
803
804         pci_set_drvdata(pcidev, netdev);
805         adapter = netdev_priv(netdev);
806         adapter->netdev = netdev;
807         adapter->pcidev = pcidev;
808
809         mmio_start = pci_resource_start(pcidev, 0);
810         mmio_len = pci_resource_len(pcidev, 0);
811
812         DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
813                   mmio_start, mmio_len);
814
815         memmapped_ioaddr = ioremap(mmio_start, mmio_len);
816         DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
817                   memmapped_ioaddr);
818         if (!memmapped_ioaddr) {
819                 DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
820                           __func__, mmio_len, mmio_start);
821                 goto err_out_free_mmio_region;
822         }
823
824         DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, start[%lx] \
825               len[%lx], IRQ %d.\n", __func__, memmapped_ioaddr, mmio_start,
826                                               mmio_len, pcidev->irq);
827
828         adapter->HwRegs = (void *)memmapped_ioaddr;
829         adapter->base_addr = memmapped_ioaddr;
830
831         mmio_start = pci_resource_start(pcidev, 2);
832         mmio_len = pci_resource_len(pcidev, 2);
833
834         DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
835                   mmio_start, mmio_len);
836
837         memmapped_ioaddr = ioremap(mmio_start, mmio_len);
838         DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
839                   memmapped_ioaddr);
840         if (!memmapped_ioaddr) {
841                 DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
842                           __func__, mmio_len, mmio_start);
843                 goto err_out_free_mmio_region;
844         }
845
846         DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, "
847                   "start[%lx] len[%lx], IRQ %d.\n", __func__,
848                   memmapped_ioaddr, mmio_start, mmio_len, pcidev->irq);
849
850         adapter->UcodeRegs = (void *)memmapped_ioaddr;
851
852         adapter->State = SXG_STATE_INITIALIZING;
853         /*
854          * Maintain a list of all adapters anchored by
855          * the global SxgDriver structure.
856          */
857         adapter->Next = SxgDriver.Adapters;
858         SxgDriver.Adapters = adapter;
859         adapter->AdapterID = ++SxgDriver.AdapterID;
860
861         /* Initialize CRC table used to determine multicast hash */
862         sxg_mcast_init_crc32();
863
864         adapter->JumboEnabled = FALSE;
865         adapter->RssEnabled = FALSE;
866         if (adapter->JumboEnabled) {
867                 adapter->FrameSize = JUMBOMAXFRAME;
868                 adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
869         } else {
870                 adapter->FrameSize = ETHERMAXFRAME;
871                 adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
872         }
873
874         /*
875          *    status = SXG_READ_EEPROM(adapter);
876          *    if (!status) {
877          *        goto sxg_init_bad;
878          *    }
879          */
880
881         DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__);
882         sxg_config_pci(pcidev);
883         DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __func__);
884
885         DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __func__);
886         sxg_init_driver();
887         DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __func__);
888
889         adapter->vendid = pci_tbl_entry->vendor;
890         adapter->devid = pci_tbl_entry->device;
891         adapter->subsysid = pci_tbl_entry->subdevice;
892         adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
893         adapter->functionnumber = (pcidev->devfn & 0x7);
894         adapter->memorylength = pci_resource_len(pcidev, 0);
895         adapter->irq = pcidev->irq;
896         adapter->next_netdevice = head_netdevice;
897         head_netdevice = netdev;
898         adapter->port = 0;      /*adapter->functionnumber; */
899
900         /* Allocate memory and other resources */
901         DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __func__);
902         status = sxg_allocate_resources(adapter);
903         DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n",
904                   __func__, status);
905         if (status != STATUS_SUCCESS) {
906                 goto err_out_unmap;
907         }
908
909         DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
910         if (sxg_download_microcode(adapter, SXG_UCODE_SAHARA)) {
911                 DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
912                           __func__);
913                 sxg_read_config(adapter);
914                 sxg_adapter_set_hwaddr(adapter);
915         } else {
916                 adapter->state = ADAPT_FAIL;
917                 adapter->linkstate = LINK_DOWN;
918                 DBG_ERROR("sxg_download_microcode FAILED status[%x]\n", status);
919         }
920
921         netdev->base_addr = (unsigned long)adapter->base_addr;
922         netdev->irq = adapter->irq;
923         netdev->open = sxg_entry_open;
924         netdev->stop = sxg_entry_halt;
925         netdev->hard_start_xmit = sxg_send_packets;
926         netdev->do_ioctl = sxg_ioctl;
927 #if XXXTODO
928         netdev->set_mac_address = sxg_mac_set_address;
929 #endif
930         netdev->get_stats = sxg_get_stats;
931         netdev->set_multicast_list = sxg_mcast_set_list;
932         SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops);
933
934         strcpy(netdev->name, "eth%d");
935         /*  strcpy(netdev->name, pci_name(pcidev)); */
936         if ((err = register_netdev(netdev))) {
937                 DBG_ERROR("Cannot register net device, aborting. %s\n",
938                           netdev->name);
939                 goto err_out_unmap;
940         }
941
942         DBG_ERROR
943             ("sxg: %s addr 0x%lx, irq %d, MAC addr \
944                 %02X:%02X:%02X:%02X:%02X:%02X\n",
945              netdev->name, netdev->base_addr, pcidev->irq, netdev->dev_addr[0],
946              netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3],
947              netdev->dev_addr[4], netdev->dev_addr[5]);
948
949         /* sxg_init_bad: */
950         ASSERT(status == FALSE);
951         /* sxg_free_adapter(adapter); */
952
953         DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__,
954                   status, jiffies, smp_processor_id());
955         return status;
956
957       err_out_unmap:
958         iounmap((void *)memmapped_ioaddr);
959
960       err_out_free_mmio_region:
961         release_mem_region(mmio_start, mmio_len);
962
963       err_out_exit_sxg_probe:
964
965         DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __func__, jiffies,
966                   smp_processor_id());
967
968         return -ENODEV;
969 }
970
971 /*
972  * LINE BASE Interrupt routines..
973  *
974  * sxg_disable_interrupt
975  *
976  * DisableInterrupt Handler
977  *
978  * Arguments:
979  *
980  *   adapter:   Our adapter structure
981  *
982  * Return Value:
983  *      None.
984  */
985 static void sxg_disable_interrupt(struct adapter_t *adapter)
986 {
987         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr",
988                   adapter, adapter->InterruptsEnabled, 0, 0);
989         /* For now, RSS is disabled with line based interrupts */
990         ASSERT(adapter->RssEnabled == FALSE);
991         ASSERT(adapter->MsiEnabled == FALSE);
992         /* Turn off interrupts by writing to the icr register. */
993         WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE);
994
995         adapter->InterruptsEnabled = 0;
996
997         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDisIntr",
998                   adapter, adapter->InterruptsEnabled, 0, 0);
999 }
1000
1001 /*
1002  * sxg_enable_interrupt
1003  *
1004  * EnableInterrupt Handler
1005  *
1006  * Arguments:
1007  *
1008  *   adapter:   Our adapter structure
1009  *
1010  * Return Value:
1011  *      None.
1012  */
1013 static void sxg_enable_interrupt(struct adapter_t *adapter)
1014 {
1015         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr",
1016                   adapter, adapter->InterruptsEnabled, 0, 0);
1017         /* For now, RSS is disabled with line based interrupts */
1018         ASSERT(adapter->RssEnabled == FALSE);
1019         ASSERT(adapter->MsiEnabled == FALSE);
1020         /* Turn on interrupts by writing to the icr register. */
1021         WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);
1022
1023         adapter->InterruptsEnabled = 1;
1024
1025         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XEnIntr",
1026                   adapter, 0, 0, 0);
1027 }
1028
1029 /*
1030  * sxg_isr - Process an line-based interrupt
1031  *
1032  * Arguments:
1033  *              Context         - Our adapter structure
1034  *              QueueDefault    - Output parameter to queue to default CPU
1035  *              TargetCpus      - Output bitmap to schedule DPC's
1036  *
1037  * Return Value: TRUE if our interrupt
1038  */
1039 static irqreturn_t sxg_isr(int irq, void *dev_id)
1040 {
1041         struct net_device *dev = (struct net_device *) dev_id;
1042         struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
1043
1044         if(adapter->state != ADAPT_UP)
1045                 return IRQ_NONE;
1046         adapter->Stats.NumInts++;
1047         if (adapter->Isr[0] == 0) {
1048                 /*
1049                  * The SLIC driver used to experience a number of spurious
1050                  * interrupts due to the delay associated with the masking of
1051                  * the interrupt (we'd bounce back in here).  If we see that
1052                  * again with Sahara,add a READ_REG of the Icr register after
1053                  * the WRITE_REG below.
1054                  */
1055                 adapter->Stats.FalseInts++;
1056                 return IRQ_NONE;
1057         }
1058         /*
1059          * Move the Isr contents and clear the value in
1060          * shared memory, and mask interrupts
1061          */
1062         adapter->IsrCopy[0] = adapter->Isr[0];
1063         adapter->Isr[0] = 0;
1064         WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);
1065         /* ASSERT(adapter->IsrDpcsPending == 0); */
1066 #if XXXTODO                     /* RSS Stuff */
1067         /*
1068          * If RSS is enabled and the ISR specifies SXG_ISR_EVENT, then
1069          * schedule DPC's based on event queues.
1070          */
1071         if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) {
1072                 for (i = 0;
1073                      i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
1074                      i++) {
1075                         struct sxg_event_ring *EventRing =
1076                                                 &adapter->EventRings[i];
1077                         struct sxg_event *Event =
1078                             &EventRing->Ring[adapter->NextEvent[i]];
1079                         unsigned char Cpu =
1080                             adapter->RssSystemInfo->RssIdToCpu[i];
1081                         if (Event->Status & EVENT_STATUS_VALID) {
1082                                 adapter->IsrDpcsPending++;
1083                                 CpuMask |= (1 << Cpu);
1084                         }
1085                 }
1086         }
1087         /*
1088          * Now, either schedule the CPUs specified by the CpuMask,
1089          * or queue default
1090          */
1091         if (CpuMask) {
1092                 *QueueDefault = FALSE;
1093         } else {
1094                 adapter->IsrDpcsPending = 1;
1095                 *QueueDefault = TRUE;
1096         }
1097         *TargetCpus = CpuMask;
1098 #endif
1099         /*  There are no DPCs in Linux, so call the handler now */
1100         sxg_handle_interrupt(adapter);
1101
1102         return IRQ_HANDLED;
1103 }
1104
/* Debug counter used to rate-limit the log output of the first ~20 calls */
int debug_inthandler = 0;

/*
 * sxg_handle_interrupt - Bottom half of sxg_isr().
 *
 * Processes the event queue, processes the snapshotted ISR bits, clears
 * the ISR copy and re-arms the card by writing the new ISR value back to
 * the Isr register (which also re-enables interrupts).
 *
 *   adapter:   Our adapter structure
 */
static void sxg_handle_interrupt(struct adapter_t *adapter)
{
	/* unsigned char           RssId   = 0; */
	u32 NewIsr;

	/* Log only the first 20 invocations to avoid flooding the log */
	if (++debug_inthandler  < 20) {
		DBG_ERROR("Enter sxg_handle_interrupt ISR[%x]\n",
			  adapter->IsrCopy[0]);
	}
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr",
		  adapter, adapter->IsrCopy[0], 0, 0);
	/* For now, RSS is disabled with line based interrupts */
	ASSERT(adapter->RssEnabled == FALSE);
	ASSERT(adapter->MsiEnabled == FALSE);
	ASSERT(adapter->IsrCopy[0]);

	/* Always process the event queue. */
	sxg_process_event_queue(adapter,
				(adapter->RssEnabled ? /*RssId */ 0 : 0));

#if XXXTODO			/* RSS stuff */
	if (--adapter->IsrDpcsPending) {
		/* We're done. */
		ASSERT(adapter->RssEnabled);
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend",
			  adapter, 0, 0, 0);
		return;
	}
#endif
	/* Last (or only) DPC processes the ISR and clears the interrupt. */
	NewIsr = sxg_process_isr(adapter, 0);
	/* Reenable interrupts */
	adapter->IsrCopy[0] = 0;
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr",
		  adapter, NewIsr, 0, 0);

	if (debug_inthandler < 20) {
		DBG_ERROR
		    ("Exit sxg_handle_interrupt2 after enabling interrupt\n");
	}

	/* Writing the Isr register re-arms the interrupt on the card */
	WRITE_REG(adapter->UcodeRegs[0].Isr, NewIsr, TRUE);

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XHndlInt",
		  adapter, 0, 0, 0);
}
1153
1154 /*
1155  * sxg_process_isr - Process an interrupt.  Called from the line-based and
1156  *                      message based interrupt DPC routines
1157  *
1158  * Arguments:
1159  *              adapter                 - Our adapter structure
1160  *              Queue                   - The ISR that needs processing
1161  *
1162  * Return Value:
1163  *      None
1164  */
1165 static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
1166 {
1167         u32 Isr = adapter->IsrCopy[MessageId];
1168         u32 NewIsr = 0;
1169
1170         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr",
1171                   adapter, Isr, 0, 0);
1172
1173         DBG_ERROR("%s: Entering with %d ISR value\n", __FUNCTION__, Isr);
1174         /* Error */
1175         if (Isr & SXG_ISR_ERR) {
1176                 if (Isr & SXG_ISR_PDQF) {
1177                         adapter->Stats.PdqFull++;
1178                         DBG_ERROR("%s: SXG_ISR_ERR  PDQF!!\n", __func__);
1179                 }
1180                 /* No host buffer */
1181                 if (Isr & SXG_ISR_RMISS) {
1182                         /*
1183                          * There is a bunch of code in the SLIC driver which
1184                          * attempts to process more receive events per DPC
1185                          * if we start to fall behind.  We'll probablyd
1186                          * need to do something similar here, but hold
1187                          * off for now.  I don't want to make the code more
1188                          * complicated than strictly needed.
1189                          */
1190                         adapter->Stats.RcvNoBuffer++;
1191                         adapter->stats.rx_missed_errors++;
1192                         if (adapter->Stats.RcvNoBuffer < 5) {
1193                                 DBG_ERROR("%s: SXG_ISR_ERR  RMISS!!\n",
1194                                           __func__);
1195                         }
1196                 }
1197                 /* Card crash */
1198                 if (Isr & SXG_ISR_DEAD) {
1199                         /*
1200                          * Set aside the crash info and set the adapter state
1201                          * to RESET
1202                          */
1203                         adapter->CrashCpu = (unsigned char)
1204                                 ((Isr & SXG_ISR_CPU) >> SXG_ISR_CPU_SHIFT);
1205                         adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH);
1206                         adapter->Dead = TRUE;
1207                         DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__,
1208                                   adapter->CrashLocation, adapter->CrashCpu);
1209                 }
1210                 /* Event ring full */
1211                 if (Isr & SXG_ISR_ERFULL) {
1212                         /*
1213                          * Same issue as RMISS, really.  This means the
1214                          * host is falling behind the card.  Need to increase
1215                          * event ring size, process more events per interrupt,
1216                          * and/or reduce/remove interrupt aggregation.
1217                          */
1218                         adapter->Stats.EventRingFull++;
1219                         DBG_ERROR("%s: SXG_ISR_ERR  EVENT RING FULL!!\n",
1220                                   __func__);
1221                 }
1222                 /* Transmit drop - no DRAM buffers or XMT error */
1223                 if (Isr & SXG_ISR_XDROP) {
1224                         adapter->Stats.XmtDrops++;
1225                         adapter->Stats.XmtErrors++;
1226                         DBG_ERROR("%s: SXG_ISR_ERR  XDROP!!\n", __func__);
1227                 }
1228         }
1229         /* Slowpath send completions */
1230         if (Isr & SXG_ISR_SPSEND) {
1231                 sxg_complete_slow_send(adapter, 1);
1232         }
1233         /* Dump */
1234         if (Isr & SXG_ISR_UPC) {
1235                 /* Maybe change when debug is added.. */
1236                 ASSERT(adapter->DumpCmdRunning);
1237                 adapter->DumpCmdRunning = FALSE;
1238         }
1239         /* Link event */
1240         if (Isr & SXG_ISR_LINK) {
1241                 sxg_link_event(adapter);
1242         }
1243         /* Debug - breakpoint hit */
1244         if (Isr & SXG_ISR_BREAK) {
1245                 /*
1246                  * At the moment AGDB isn't written to support interactive
1247                  * debug sessions.  When it is, this interrupt will be used to
1248                  * signal AGDB that it has hit a breakpoint.  For now, ASSERT.
1249                  */
1250                 ASSERT(0);
1251         }
1252         /* Heartbeat response */
1253         if (Isr & SXG_ISR_PING) {
1254                 adapter->PingOutstanding = FALSE;
1255         }
1256         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XProcIsr",
1257                   adapter, Isr, NewIsr, 0);
1258
1259         return (NewIsr);
1260 }
1261
1262 /*
1263  * sxg_process_event_queue - Process our event queue
1264  *
1265  * Arguments:
1266  *              - adapter       - Adapter structure
1267  *              - RssId         - The event queue requiring processing
1268  *
1269  * Return Value:
1270  *      None.
1271  */
1272 static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId)
1273 {
1274         struct sxg_event_ring *EventRing = &adapter->EventRings[RssId];
1275         struct sxg_event *Event = &EventRing->Ring[adapter->NextEvent[RssId]];
1276         u32 EventsProcessed = 0, Batches = 0;
1277         u32 num_skbs = 0;
1278         struct sk_buff *skb;
1279 #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1280         struct sk_buff *prev_skb = NULL;
1281         struct sk_buff *IndicationList[SXG_RCV_ARRAYSIZE];
1282         u32 Index;
1283         struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
1284 #endif
1285         u32 ReturnStatus = 0;
1286
1287         ASSERT((adapter->State == SXG_STATE_RUNNING) ||
1288                (adapter->State == SXG_STATE_PAUSING) ||
1289                (adapter->State == SXG_STATE_PAUSED) ||
1290                (adapter->State == SXG_STATE_HALTING));
1291         /*
1292          * We may still have unprocessed events on the queue if
1293          * the card crashed.  Don't process them.
1294          */
1295         if (adapter->Dead) {
1296                 return (0);
1297         }
1298         /*
1299          *  In theory there should only be a single processor that
1300          * accesses this queue, and only at interrupt-DPC time.  So/
1301          * we shouldn't need a lock for any of this.
1302          */
1303         while (Event->Status & EVENT_STATUS_VALID) {
1304                 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event",
1305                           Event, Event->Code, Event->Status,
1306                           adapter->NextEvent);
1307                 switch (Event->Code) {
1308                 case EVENT_CODE_BUFFERS:
1309                         /* struct sxg_ring_info Head & Tail == unsigned char */
1310                         ASSERT(!(Event->CommandIndex & 0xFF00));
1311                         sxg_complete_descriptor_blocks(adapter,
1312                                                        Event->CommandIndex);
1313                         break;
1314                 case EVENT_CODE_SLOWRCV:
1315                         --adapter->RcvBuffersOnCard;
1316                         if ((skb = sxg_slow_receive(adapter, Event))) {
1317                                 u32 rx_bytes;
1318 #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1319                                 /* Add it to our indication list */
1320                                 SXG_ADD_RCV_PACKET(adapter, skb, prev_skb,
1321                                                    IndicationList, num_skbs);
1322                                 /*
1323                                  * Linux, we just pass up each skb to the
1324                                  * protocol above at this point, there is no
1325                                  * capability of an indication list.
1326                                  */
1327 #else
1328                                 /* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */
1329                                 /* (rcvbuf->length & IRHDDR_FLEN_MSK); */
1330                                 rx_bytes = Event->Length;
1331                                 adapter->stats.rx_packets++;
1332                                 adapter->stats.rx_bytes += rx_bytes;
1333 #if SXG_OFFLOAD_IP_CHECKSUM
1334                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1335 #endif
1336                                 skb->dev = adapter->netdev;
1337                                 netif_rx(skb);
1338 #endif
1339                         }
1340                         break;
1341                 default:
1342                         DBG_ERROR("%s: ERROR  Invalid EventCode %d\n",
1343                                   __func__, Event->Code);
1344                 /* ASSERT(0); */
1345                 }
1346                 /*
1347                  * See if we need to restock card receive buffers.
1348                  * There are two things to note here:
1349                  *  First - This test is not SMP safe.  The
1350                  *    adapter->BuffersOnCard field is protected via atomic
1351                  *    interlocked calls, but we do not protect it with respect
1352                  *    to these tests.  The only way to do that is with a lock,
1353                  *    and I don't want to grab a lock every time we adjust the
1354                  *    BuffersOnCard count.  Instead, we allow the buffer
1355                  *    replenishment to be off once in a while. The worst that
1356                  *    can happen is the card is given on more-or-less descriptor
1357                  *    block than the arbitrary value we've chosen. No big deal
1358                  *    In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard
1359                  *    is adjusted.
1360                  *  Second - We expect this test to rarely
1361                  *    evaluate to true.  We attempt to refill descriptor blocks
1362                  *    as they are returned to us (sxg_complete_descriptor_blocks)
1363                  *    so The only time this should evaluate to true is when
1364                  *    sxg_complete_descriptor_blocks failed to allocate
1365                  *    receive buffers.
1366                  */
1367                 if (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
1368                         sxg_stock_rcv_buffers(adapter);
1369                 }
1370                 /*
1371                  * It's more efficient to just set this to zero.
1372                  * But clearing the top bit saves potential debug info...
1373                  */
1374                 Event->Status &= ~EVENT_STATUS_VALID;
1375                 /* Advance to the next event */
1376                 SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE);
1377                 Event = &EventRing->Ring[adapter->NextEvent[RssId]];
1378                 EventsProcessed++;
1379                 if (EventsProcessed == EVENT_RING_BATCH) {
1380                         /* Release a batch of events back to the card */
1381                         WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1382                                   EVENT_RING_BATCH, FALSE);
1383                         EventsProcessed = 0;
1384                         /*
1385                          * If we've processed our batch limit, break out of the
1386                          * loop and return SXG_ISR_EVENT to arrange for us to
1387                          * be called again
1388                          */
1389                         if (Batches++ == EVENT_BATCH_LIMIT) {
1390                                 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1391                                           TRACE_NOISY, "EvtLimit", Batches,
1392                                           adapter->NextEvent, 0, 0);
1393                                 ReturnStatus = SXG_ISR_EVENT;
1394                                 break;
1395                         }
1396                 }
1397         }
1398 #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1399         /* Indicate any received dumb-nic frames */
1400         SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs);
1401 #endif
1402         /* Release events back to the card. */
1403         if (EventsProcessed) {
1404                 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1405                           EventsProcessed, FALSE);
1406         }
1407         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XPrcEvnt",
1408                   Batches, EventsProcessed, adapter->NextEvent, num_skbs);
1409
1410         return (ReturnStatus);
1411 }
1412
1413 /*
1414  * sxg_complete_slow_send - Complete slowpath or dumb-nic sends
1415  *
1416  * Arguments -
1417  *      adapter         - A pointer to our adapter structure
1418  *      irq_context     - An integer to denote if we are in interrupt context
1419  * Return
1420  *      None
1421  */
1422 static void sxg_complete_slow_send(struct adapter_t *adapter, int irq_context)
1423 {
1424         struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
1425         struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
1426         u32 *ContextType;
1427         struct sxg_cmd *XmtCmd;
1428         unsigned long flags;
1429         unsigned long sgl_flags;
1430         unsigned int processed_count = 0;
1431
1432         /*
1433          * NOTE - This lock is dropped and regrabbed in this loop.
1434          * This means two different processors can both be running/
1435          * through this loop. Be *very* careful.
1436          */
1437         if(irq_context) {
1438                 if(!spin_trylock(&adapter->XmtZeroLock))
1439                         goto lock_busy;
1440         }
1441         else
1442                 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
1443
1444         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
1445                   adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1446
1447         while ((XmtRingInfo->Tail != *adapter->XmtRingZeroIndex)
1448                 && processed_count++ < SXG_COMPLETE_SLOW_SEND_LIMIT)  {
1449                 /*
1450                  * Locate the current Cmd (ring descriptor entry), and
1451                  * associated SGL, and advance the tail
1452                  */
1453                 SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType);
1454                 ASSERT(ContextType);
1455                 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1456                           XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0);
1457                 /* Clear the SGL field. */
1458                 XmtCmd->Sgl = 0;
1459
1460                 switch (*ContextType) {
1461                 case SXG_SGL_DUMB:
1462                         {
1463                                 struct sk_buff *skb;
1464                                 struct sxg_scatter_gather *SxgSgl =
1465                                         (struct sxg_scatter_gather *)ContextType;
1466                                 dma64_addr_t FirstSgeAddress;
1467                                 u32 FirstSgeLength;
1468
1469                                 /* Dumb-nic send.  Command context is the dumb-nic SGL */
1470                                 skb = (struct sk_buff *)ContextType;
1471                                 skb = SxgSgl->DumbPacket;
1472                                 FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress;
1473                                 FirstSgeLength = XmtCmd->Buffer.FirstSgeLength;
1474                                 /* Complete the send */
1475                                 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1476                                           TRACE_IMPORTANT, "DmSndCmp", skb, 0,
1477                                           0, 0);
1478                                 ASSERT(adapter->Stats.XmtQLen);
1479                                 adapter->Stats.XmtQLen--;/* within XmtZeroLock */
1480                                 adapter->Stats.XmtOk++;
1481                                 /*
1482                                  * Now drop the lock and complete the send
1483                                  * back to Microsoft.  We need to drop the lock
1484                                  * because Microsoft can come back with a
1485                                  * chimney send, which results in a double trip
1486                                  * in SxgTcpOuput
1487                                  */
1488                                 if(irq_context)
1489                                         spin_unlock(&adapter->XmtZeroLock);
1490                                 else
1491                                         spin_unlock_irqrestore(
1492                                                 &adapter->XmtZeroLock, flags);
1493
1494                                 SxgSgl->DumbPacket = NULL;
1495                                 SXG_COMPLETE_DUMB_SEND(adapter, skb,
1496                                                         FirstSgeAddress,
1497                                                         FirstSgeLength);
1498                                 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL,
1499                                                 irq_context);
1500                                 /* and reacquire.. */
1501                                 if(irq_context) {
1502                                         if(!spin_trylock(&adapter->XmtZeroLock))
1503                                                 goto lock_busy;
1504                                 }
1505                                 else
1506                                         spin_lock_irqsave(&adapter->XmtZeroLock, flags);
1507                         }
1508                         break;
1509                 default:
1510                         ASSERT(0);
1511                 }
1512         }
1513         if(irq_context)
1514                 spin_unlock(&adapter->XmtZeroLock);
1515         else
1516                 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
1517 lock_busy:
1518         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1519                   adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1520 }
1521
1522 /*
1523  * sxg_slow_receive
1524  *
1525  * Arguments -
1526  *      adapter         - A pointer to our adapter structure
1527  *      Event           - Receive event
1528  *
1529  * Return - skb
1530  */
static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
						struct sxg_event *Event)
{
	u32 BufferSize = adapter->ReceiveBufferSize;
	struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
	struct sk_buff *Packet;
	/* Call counter used to trigger periodic statistics collection. */
	static int read_counter = 0;

	/* The event's HostHandle is the receive buffer header we posted. */
	RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *) Event->HostHandle;
	/*
	 * Collect statistics roughly every 257th call: the condition
	 * first fires when the counter reaches 0x100, then resets it.
	 * NOTE(review): read_counter is static and unsynchronized;
	 * concurrent receive paths could race on it - confirm this
	 * path is serialized per adapter.
	 */
	if(read_counter++ & 0x100)
	{
		sxg_collect_statistics(adapter);
		read_counter = 0;
	}
	ASSERT(RcvDataBufferHdr);
	ASSERT(RcvDataBufferHdr->State == SXG_BUFFER_ONCARD);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event,
		  RcvDataBufferHdr, RcvDataBufferHdr->State,
		  /*RcvDataBufferHdr->VirtualAddress*/ 0);
	/* Drop rcv frames in non-running state */
	switch (adapter->State) {
	case SXG_STATE_RUNNING:
		break;
	case SXG_STATE_PAUSING:
	case SXG_STATE_PAUSED:
	case SXG_STATE_HALTING:
		goto drop;
	default:
		ASSERT(0);
		goto drop;
	}

	/*
	 * memcpy(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
	 *		RcvDataBufferHdr->VirtualAddress, Event->Length);
	 */

	/* Change buffer state to UPSTREAM */
	RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
	/* Hardware flagged a receive error: account for it and drop. */
	if (Event->Status & EVENT_STATUS_RCVERR) {
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError",
			  Event, Event->Status, Event->HostHandle, 0);
		/* XXXTODO - Remove this print later */
		DBG_ERROR("SXG: Receive error %x\n", *(u32 *)
			  SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr));
		sxg_process_rcv_error(adapter, *(u32 *)
				      SXG_RECEIVE_DATA_LOCATION
				      (RcvDataBufferHdr));
		goto drop;
	}
#if XXXTODO			/* VLAN stuff */
	/* If there's a VLAN tag, extract it and validate it */
	if (((struct ether_header *)
		(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->EtherType
							== ETHERTYPE_VLAN) {
		if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) !=
		    STATUS_SUCCESS) {
			SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY,
				  "BadVlan", Event,
				  SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
				  Event->Length, 0);
			goto drop;
		}
	}
#endif
	/* Dumb-nic frame.  See if it passes our mac filter and update stats */

	/*
	 * ASK if (!sxg_mac_filter(adapter,
	 *		SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
	 *		Event->Length)) {
	 *	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvFiltr",
	 * Event, SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
	 * Event->Length, 0);
	 *	goto drop;
	 * }
	 */

	/* Detach the skb from the buffer header and hand it up the stack. */
	Packet = RcvDataBufferHdr->SxgDumbRcvPacket;
	SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
	Packet->protocol = eth_type_trans(Packet, adapter->netdev);

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
		  RcvDataBufferHdr, Packet, Event->Length, 0);
	/* Lastly adjust the receive packet length. */
	RcvDataBufferHdr->SxgDumbRcvPacket = NULL;
	RcvDataBufferHdr->PhysicalAddress = NULL;
	/*
	 * Attach a fresh skb to this buffer header; only on success is
	 * the header returned (under RcvQLock) to the free queue for
	 * reposting to the card.
	 */
	SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
	if (RcvDataBufferHdr->skb)
	{
		spin_lock(&adapter->RcvQLock);
		SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
		// adapter->RcvBuffersOnCard ++;
		spin_unlock(&adapter->RcvQLock);
	}
	return (Packet);

      drop:
	/* Count the discard and recycle the buffer to the free queue. */
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DropRcv",
		  RcvDataBufferHdr, Event->Length, 0, 0);
	adapter->Stats.RcvDiscards++;
	spin_lock(&adapter->RcvQLock);
	SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
	spin_unlock(&adapter->RcvQLock);
	return (NULL);
}
1637
1638 /*
1639  * sxg_process_rcv_error - process receive error and update
1640  * stats
1641  *
1642  * Arguments:
1643  *              adapter         - Adapter structure
1644  *              ErrorStatus     - 4-byte receive error status
1645  *
1646  * Return Value         : None
1647  */
1648 static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus)
1649 {
1650         u32 Error;
1651
1652         adapter->Stats.RcvErrors++;
1653
1654         if (ErrorStatus & SXG_RCV_STATUS_TRANSPORT_ERROR) {
1655                 Error = ErrorStatus & SXG_RCV_STATUS_TRANSPORT_MASK;
1656                 switch (Error) {
1657                 case SXG_RCV_STATUS_TRANSPORT_CSUM:
1658                         adapter->Stats.TransportCsum++;
1659                         break;
1660                 case SXG_RCV_STATUS_TRANSPORT_UFLOW:
1661                         adapter->Stats.TransportUflow++;
1662                         break;
1663                 case SXG_RCV_STATUS_TRANSPORT_HDRLEN:
1664                         adapter->Stats.TransportHdrLen++;
1665                         break;
1666                 }
1667         }
1668         if (ErrorStatus & SXG_RCV_STATUS_NETWORK_ERROR) {
1669                 Error = ErrorStatus & SXG_RCV_STATUS_NETWORK_MASK;
1670                 switch (Error) {
1671                 case SXG_RCV_STATUS_NETWORK_CSUM:
1672                         adapter->Stats.NetworkCsum++;
1673                         break;
1674                 case SXG_RCV_STATUS_NETWORK_UFLOW:
1675                         adapter->Stats.NetworkUflow++;
1676                         break;
1677                 case SXG_RCV_STATUS_NETWORK_HDRLEN:
1678                         adapter->Stats.NetworkHdrLen++;
1679                         break;
1680                 }
1681         }
1682         if (ErrorStatus & SXG_RCV_STATUS_PARITY) {
1683                 adapter->Stats.Parity++;
1684         }
1685         if (ErrorStatus & SXG_RCV_STATUS_LINK_ERROR) {
1686                 Error = ErrorStatus & SXG_RCV_STATUS_LINK_MASK;
1687                 switch (Error) {
1688                 case SXG_RCV_STATUS_LINK_PARITY:
1689                         adapter->Stats.LinkParity++;
1690                         break;
1691                 case SXG_RCV_STATUS_LINK_EARLY:
1692                         adapter->Stats.LinkEarly++;
1693                         break;
1694                 case SXG_RCV_STATUS_LINK_BUFOFLOW:
1695                         adapter->Stats.LinkBufOflow++;
1696                         break;
1697                 case SXG_RCV_STATUS_LINK_CODE:
1698                         adapter->Stats.LinkCode++;
1699                         break;
1700                 case SXG_RCV_STATUS_LINK_DRIBBLE:
1701                         adapter->Stats.LinkDribble++;
1702                         break;
1703                 case SXG_RCV_STATUS_LINK_CRC:
1704                         adapter->Stats.LinkCrc++;
1705                         break;
1706                 case SXG_RCV_STATUS_LINK_OFLOW:
1707                         adapter->Stats.LinkOflow++;
1708                         break;
1709                 case SXG_RCV_STATUS_LINK_UFLOW:
1710                         adapter->Stats.LinkUflow++;
1711                         break;
1712                 }
1713         }
1714 }
1715
1716 /*
1717  * sxg_mac_filter
1718  *
1719  * Arguments:
1720  *              adapter         - Adapter structure
1721  *              pether          - Ethernet header
1722  *              length          - Frame length
1723  *
1724  * Return Value : TRUE if the frame is to be allowed
1725  */
1726 static bool sxg_mac_filter(struct adapter_t *adapter,
1727                 struct ether_header *EtherHdr, ushort length)
1728 {
1729         bool EqualAddr;
1730
1731         if (SXG_MULTICAST_PACKET(EtherHdr)) {
1732                 if (SXG_BROADCAST_PACKET(EtherHdr)) {
1733                         /* broadcast */
1734                         if (adapter->MacFilter & MAC_BCAST) {
1735                                 adapter->Stats.DumbRcvBcastPkts++;
1736                                 adapter->Stats.DumbRcvBcastBytes += length;
1737                                 adapter->Stats.DumbRcvPkts++;
1738                                 adapter->Stats.DumbRcvBytes += length;
1739                                 return (TRUE);
1740                         }
1741                 } else {
1742                         /* multicast */
1743                         if (adapter->MacFilter & MAC_ALLMCAST) {
1744                                 adapter->Stats.DumbRcvMcastPkts++;
1745                                 adapter->Stats.DumbRcvMcastBytes += length;
1746                                 adapter->Stats.DumbRcvPkts++;
1747                                 adapter->Stats.DumbRcvBytes += length;
1748                                 return (TRUE);
1749                         }
1750                         if (adapter->MacFilter & MAC_MCAST) {
1751                                 struct sxg_multicast_address *MulticastAddrs =
1752                                     adapter->MulticastAddrs;
1753                                 while (MulticastAddrs) {
1754                                         ETHER_EQ_ADDR(MulticastAddrs->Address,
1755                                                       EtherHdr->ether_dhost,
1756                                                       EqualAddr);
1757                                         if (EqualAddr) {
1758                                                 adapter->Stats.
1759                                                     DumbRcvMcastPkts++;
1760                                                 adapter->Stats.
1761                                                     DumbRcvMcastBytes += length;
1762                                                 adapter->Stats.DumbRcvPkts++;
1763                                                 adapter->Stats.DumbRcvBytes +=
1764                                                     length;
1765                                                 return (TRUE);
1766                                         }
1767                                         MulticastAddrs = MulticastAddrs->Next;
1768                                 }
1769                         }
1770                 }
1771         } else if (adapter->MacFilter & MAC_DIRECTED) {
1772                 /*
1773                  * Not broadcast or multicast.  Must be directed at us or
1774                  * the card is in promiscuous mode.  Either way, consider it
1775                  * ours if MAC_DIRECTED is set
1776                  */
1777                 adapter->Stats.DumbRcvUcastPkts++;
1778                 adapter->Stats.DumbRcvUcastBytes += length;
1779                 adapter->Stats.DumbRcvPkts++;
1780                 adapter->Stats.DumbRcvBytes += length;
1781                 return (TRUE);
1782         }
1783         if (adapter->MacFilter & MAC_PROMISC) {
1784                 /* Whatever it is, keep it. */
1785                 adapter->Stats.DumbRcvPkts++;
1786                 adapter->Stats.DumbRcvBytes += length;
1787                 return (TRUE);
1788         }
1789         adapter->Stats.RcvDiscards++;
1790         return (FALSE);
1791 }
1792
1793 static int sxg_register_interrupt(struct adapter_t *adapter)
1794 {
1795         if (!adapter->intrregistered) {
1796                 int retval;
1797
1798                 DBG_ERROR
1799                     ("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n",
1800                      __func__, adapter, adapter->netdev->irq, NR_IRQS);
1801
1802                 spin_unlock_irqrestore(&sxg_global.driver_lock,
1803                                        sxg_global.flags);
1804
1805                 retval = request_irq(adapter->netdev->irq,
1806                                      &sxg_isr,
1807                                      IRQF_SHARED,
1808                                      adapter->netdev->name, adapter->netdev);
1809
1810                 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
1811
1812                 if (retval) {
1813                         DBG_ERROR("sxg: request_irq (%s) FAILED [%x]\n",
1814                                   adapter->netdev->name, retval);
1815                         return (retval);
1816                 }
1817                 adapter->intrregistered = 1;
1818                 adapter->IntRegistered = TRUE;
1819                 /* Disable RSS with line-based interrupts */
1820                 adapter->MsiEnabled = FALSE;
1821                 adapter->RssEnabled = FALSE;
1822                 DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n",
1823                           __func__, adapter, adapter->netdev->irq);
1824         }
1825         return (STATUS_SUCCESS);
1826 }
1827
1828 static void sxg_deregister_interrupt(struct adapter_t *adapter)
1829 {
1830         DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __func__, adapter);
1831 #if XXXTODO
1832         slic_init_cleanup(adapter);
1833 #endif
1834         memset(&adapter->stats, 0, sizeof(struct net_device_stats));
1835         adapter->error_interrupts = 0;
1836         adapter->rcv_interrupts = 0;
1837         adapter->xmit_interrupts = 0;
1838         adapter->linkevent_interrupts = 0;
1839         adapter->upr_interrupts = 0;
1840         adapter->num_isrs = 0;
1841         adapter->xmit_completes = 0;
1842         adapter->rcv_broadcasts = 0;
1843         adapter->rcv_multicasts = 0;
1844         adapter->rcv_unicasts = 0;
1845         DBG_ERROR("sxg: %s EXIT\n", __func__);
1846 }
1847
1848 /*
1849  *  sxg_if_init
1850  *
1851  *  Perform initialization of our slic interface.
1852  *
1853  */
static int sxg_if_init(struct adapter_t *adapter)
{
	struct net_device *dev = adapter->netdev;
	int status = 0;

	DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d] flags[%x]\n",
		  __func__, adapter->netdev->name,
		  adapter->state,
		  adapter->linkstate, dev->flags);

	/* adapter should be down at this point */
	if (adapter->state != ADAPT_DOWN) {
		DBG_ERROR("sxg_if_init adapter->state != ADAPT_DOWN\n");
		return (-EIO);
	}
	ASSERT(adapter->linkstate == LINK_DOWN);

	/*
	 * Translate the net_device flags into our MAC filter options;
	 * MAC_DIRECTED is always enabled.  The flags snapshot is kept
	 * in devflags_prev.
	 */
	adapter->devflags_prev = dev->flags;
	adapter->macopts = MAC_DIRECTED;
	if (dev->flags) {
		DBG_ERROR("sxg: %s (%s) Set MAC options: ", __func__,
			  adapter->netdev->name);
		if (dev->flags & IFF_BROADCAST) {
			adapter->macopts |= MAC_BCAST;
			DBG_ERROR("BCAST ");
		}
		if (dev->flags & IFF_PROMISC) {
			adapter->macopts |= MAC_PROMISC;
			DBG_ERROR("PROMISC ");
		}
		if (dev->flags & IFF_ALLMULTI) {
			adapter->macopts |= MAC_ALLMCAST;
			DBG_ERROR("ALL_MCAST ");
		}
		if (dev->flags & IFF_MULTICAST) {
			adapter->macopts |= MAC_MCAST;
			DBG_ERROR("MCAST ");
		}
		DBG_ERROR("\n");
	}
	/* Hook up the interrupt handler; tear back down on failure. */
	status = sxg_register_interrupt(adapter);
	if (status != STATUS_SUCCESS) {
		DBG_ERROR("sxg_if_init: sxg_register_interrupt FAILED %x\n",
			  status);
		sxg_deregister_interrupt(adapter);
		return (status);
	}

	/* Everything succeeded; the interface is up. */
	adapter->state = ADAPT_UP;

	/*    clear any pending events, then enable interrupts */
	DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __func__);

	return (STATUS_SUCCESS);
}
1909
static int sxg_entry_open(struct net_device *dev)
{
	struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
	int status;

	ASSERT(adapter);
	DBG_ERROR("sxg: %s adapter->activated[%d]\n", __func__,
		  adapter->activated);
	DBG_ERROR
	    ("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n",
	     __func__, adapter->netdev->name, jiffies, smp_processor_id(),
	     adapter->netdev, adapter, adapter->port);

	/* Keep the transmit queue stopped during bring-up. */
	netif_stop_queue(adapter->netdev);

	/*
	 * The global driver lock is held across the whole bring-up.
	 * Note that sxg_register_interrupt() (reached via sxg_if_init())
	 * drops and retakes this lock around request_irq().
	 */
	spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
	if (!adapter->activated) {
		sxg_global.num_sxg_ports_active++;
		adapter->activated = 1;
	}
	/* Initialize the adapter */
	DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __func__);
	status = sxg_initialize_adapter(adapter);
	DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n",
		  __func__, status);

	if (status == STATUS_SUCCESS) {
		DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __func__);
		status = sxg_if_init(adapter);
		DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __func__,
			  status);
	}

	/* On any failure, undo the activation accounting before unlocking. */
	if (status != STATUS_SUCCESS) {
		if (adapter->activated) {
			sxg_global.num_sxg_ports_active--;
			adapter->activated = 0;
		}
		spin_unlock_irqrestore(&sxg_global.driver_lock,
				       sxg_global.flags);
		return (status);
	}
	DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __func__);

	/* Enable interrupts */
	SXG_ENABLE_ALL_INTERRUPTS(adapter);

	DBG_ERROR("sxg: %s EXIT\n", __func__);

	spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
	return STATUS_SUCCESS;
}
1962
1963 static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
1964 {
1965         struct net_device *dev = pci_get_drvdata(pcidev);
1966         u32 mmio_start = 0;
1967         unsigned int mmio_len = 0;
1968         struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
1969
1970         flush_scheduled_work();
1971
1972         /* Deallocate Resources */
1973         unregister_netdev(dev);
1974         sxg_free_resources(adapter);
1975
1976         ASSERT(adapter);
1977         DBG_ERROR("sxg: %s ENTER dev[%p] adapter[%p]\n", __func__, dev,
1978                   adapter);
1979
1980         mmio_start = pci_resource_start(pcidev, 0);
1981         mmio_len = pci_resource_len(pcidev, 0);
1982
1983         DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __func__,
1984                   mmio_start, mmio_len);
1985         release_mem_region(mmio_start, mmio_len);
1986
1987         mmio_start = pci_resource_start(pcidev, 2);
1988         mmio_len = pci_resource_len(pcidev, 2);
1989
1990         DBG_ERROR("sxg: %s rel_region(2) start[%x] len[%x]\n", __FUNCTION__,
1991                   mmio_start, mmio_len);
1992         release_mem_region(mmio_start, mmio_len);
1993
1994         pci_disable_device(pcidev);
1995
1996         DBG_ERROR("sxg: %s deallocate device\n", __func__);
1997         kfree(dev);
1998         DBG_ERROR("sxg: %s EXIT\n", __func__);
1999 }
2000
static int sxg_entry_halt(struct net_device *dev)
{
	struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);

	/* All adapter state transitions happen under the global lock. */
	spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
	DBG_ERROR("sxg: %s (%s) ENTER\n", __func__, dev->name);

	/* Stop transmits and mark the interface/link down. */
	netif_stop_queue(adapter->netdev);
	adapter->state = ADAPT_DOWN;
	adapter->linkstate = LINK_DOWN;
	adapter->devflags_prev = 0;
	DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n",
		  __func__, dev->name, adapter, adapter->state);

	DBG_ERROR("sxg: %s (%s) EXIT\n", __func__, dev->name);
	DBG_ERROR("sxg: %s EXIT\n", __func__);

	/* Disable interrupts */
	SXG_DISABLE_ALL_INTERRUPTS(adapter);

	spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);

	/* Interrupt deregistration is done outside the global lock. */
	sxg_deregister_interrupt(adapter);
	return (STATUS_SUCCESS);
}
2026
2027 static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2028 {
2029         ASSERT(rq);
2030 /*      DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev);*/
2031         switch (cmd) {
2032         case SIOCSLICSETINTAGG:
2033                 {
2034                         /* struct adapter_t *adapter = (struct adapter_t *)
2035                          * netdev_priv(dev);
2036                          */
2037                         u32 data[7];
2038                         u32 intagg;
2039
2040                         if (copy_from_user(data, rq->ifr_data, 28)) {
2041                                 DBG_ERROR("copy_from_user FAILED  getting \
2042                                          initial params\n");
2043                                 return -EFAULT;
2044                         }
2045                         intagg = data[0];
2046                         printk(KERN_EMERG
2047                                "%s: set interrupt aggregation to %d\n",
2048                                __func__, intagg);
2049                         return 0;
2050                 }
2051
2052         default:
2053                 /* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */
2054                 return -EOPNOTSUPP;
2055         }
2056         return 0;
2057 }
2058
2059 #define NORMAL_ETHFRAME     0
2060
2061 /*
2062  * sxg_send_packets - Send a skb packet
2063  *
2064  * Arguments:
2065  *                      skb - The packet to send
2066  *                      dev - Our linux net device that refs our adapter
2067  *
2068  * Return:
2069  *              0   regardless of outcome    XXXTODO refer to e1000 driver
2070  */
2071 static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev)
2072 {
2073         struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
2074         u32 status = STATUS_SUCCESS;
2075
2076         /*
2077          * DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __FUNCTION__,
2078          *        skb);
2079          */
2080         printk("ASK:sxg_send_packets: skb[%p]\n", skb);
2081
2082         /* Check the adapter state */
2083         switch (adapter->State) {
2084         case SXG_STATE_INITIALIZING:
2085         case SXG_STATE_HALTED:
2086         case SXG_STATE_SHUTDOWN:
2087                 ASSERT(0);      /* unexpected */
2088                 /* fall through */
2089         case SXG_STATE_RESETTING:
2090         case SXG_STATE_SLEEP:
2091         case SXG_STATE_BOOTDIAG:
2092         case SXG_STATE_DIAG:
2093         case SXG_STATE_HALTING:
2094                 status = STATUS_FAILURE;
2095                 break;
2096         case SXG_STATE_RUNNING:
2097                 if (adapter->LinkState != SXG_LINK_UP) {
2098                         status = STATUS_FAILURE;
2099                 }
2100                 break;
2101         default:
2102                 ASSERT(0);
2103                 status = STATUS_FAILURE;
2104         }
2105         if (status != STATUS_SUCCESS) {
2106                 goto xmit_fail;
2107         }
2108         /* send a packet */
2109         status = sxg_transmit_packet(adapter, skb);
2110         if (status == STATUS_SUCCESS) {
2111                 goto xmit_done;
2112         }
2113
2114       xmit_fail:
2115         /* reject & complete all the packets if they cant be sent */
2116         if (status != STATUS_SUCCESS) {
2117 #if XXXTODO
2118         /* sxg_send_packets_fail(adapter, skb, status); */
2119 #else
2120                 SXG_DROP_DUMB_SEND(adapter, skb);
2121                 adapter->stats.tx_dropped++;
2122                 return NETDEV_TX_BUSY;
2123 #endif
2124         }
2125         DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __func__,
2126                   status);
2127
2128       xmit_done:
2129         return NETDEV_TX_OK;
2130 }
2131
2132 /*
2133  * sxg_transmit_packet
2134  *
2135  * This function transmits a single packet.
2136  *
2137  * Arguments -
2138  *              adapter                 - Pointer to our adapter structure
2139  *      skb             - The packet to be sent
2140  *
2141  * Return - STATUS of send
2142  */
2143 static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb)
2144 {
2145         struct sxg_x64_sgl         *pSgl;
2146         struct sxg_scatter_gather  *SxgSgl;
2147         unsigned long sgl_flags;
2148         /* void *SglBuffer; */
2149         /* u32 SglBufferLength; */
2150
2151         /*
2152          * The vast majority of work is done in the shared
2153          * sxg_dumb_sgl routine.
2154          */
2155         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend",
2156                   adapter, skb, 0, 0);
2157
2158         /* Allocate a SGL buffer */
2159         SXG_GET_SGL_BUFFER(adapter, SxgSgl, 0);
2160         if (!SxgSgl) {
2161                 adapter->Stats.NoSglBuf++;
2162                 adapter->Stats.XmtErrors++;
2163                 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "SndPktF1",
2164                           adapter, skb, 0, 0);
2165                 return (STATUS_RESOURCES);
2166         }
2167         ASSERT(SxgSgl->adapter == adapter);
2168         /*SglBuffer = SXG_SGL_BUFFER(SxgSgl);
2169         SglBufferLength = SXG_SGL_BUF_SIZE; */
2170         SxgSgl->VlanTag.VlanTci = 0;
2171         SxgSgl->VlanTag.VlanTpid = 0;
2172         SxgSgl->Type = SXG_SGL_DUMB;
2173         SxgSgl->DumbPacket = skb;
2174         pSgl = NULL;
2175
2176         /* Call the common sxg_dumb_sgl routine to complete the send. */
2177         return (sxg_dumb_sgl(pSgl, SxgSgl));
2178 }
2179
2180 /*
2181  * sxg_dumb_sgl
2182  *
2183  * Arguments:
2184  *              pSgl     -
2185  *              SxgSgl   - struct sxg_scatter_gather
2186  *
2187  * Return Value:
2188  *      Status of send operation.
2189  */
2190 static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
2191                                 struct sxg_scatter_gather *SxgSgl)
2192 {
2193         struct adapter_t *adapter = SxgSgl->adapter;
2194         struct sk_buff *skb = SxgSgl->DumbPacket;
2195         /* For now, all dumb-nic sends go on RSS queue zero */
2196         struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
2197         struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
2198         struct sxg_cmd *XmtCmd = NULL;
2199         /* u32 Index = 0; */
2200         u32 DataLength = skb->len;
2201         /* unsigned int BufLen; */
2202         /* u32 SglOffset; */
2203         u64 phys_addr;
2204         unsigned long flags;
2205
2206         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
2207                   pSgl, SxgSgl, 0, 0);
2208
2209         /* Set aside a pointer to the sgl */
2210         SxgSgl->pSgl = pSgl;
2211
2212         /* Sanity check that our SGL format is as we expect. */
2213         ASSERT(sizeof(struct sxg_x64_sge) == sizeof(struct sxg_x64_sge));
2214         /* Shouldn't be a vlan tag on this frame */
2215         ASSERT(SxgSgl->VlanTag.VlanTci == 0);
2216         ASSERT(SxgSgl->VlanTag.VlanTpid == 0);
2217
2218         /*
2219          * From here below we work with the SGL placed in our
2220          * buffer.
2221          */
2222
2223         SxgSgl->Sgl.NumberOfElements = 1;
2224
2225         /* Grab the spinlock and acquire a command */
2226         spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2227         SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2228         if (XmtCmd == NULL) {
2229                 /*
2230                  * Call sxg_complete_slow_send to see if we can
2231                  * free up any XmtRingZero entries and then try again
2232                  */
2233
2234                 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2235                 sxg_complete_slow_send(adapter, 0);
2236                 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2237                 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2238                 if (XmtCmd == NULL) {
2239                         adapter->Stats.XmtZeroFull++;
2240                         goto abortcmd;
2241                 }
2242         }
2243         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd",
2244                   XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
2245         /* Update stats */
2246         adapter->Stats.DumbXmtPkts++;
2247         adapter->stats.tx_packets++;
2248         adapter->Stats.DumbXmtBytes += DataLength;
2249         adapter->stats.tx_bytes += DataLength;
2250 #if XXXTODO                     /* Stats stuff */
2251         if (SXG_MULTICAST_PACKET(EtherHdr)) {
2252                 if (SXG_BROADCAST_PACKET(EtherHdr)) {
2253                         adapter->Stats.DumbXmtBcastPkts++;
2254                         adapter->Stats.DumbXmtBcastBytes += DataLength;
2255                 } else {
2256                         adapter->Stats.DumbXmtMcastPkts++;
2257                         adapter->Stats.DumbXmtMcastBytes += DataLength;
2258                 }
2259         } else {
2260                 adapter->Stats.DumbXmtUcastPkts++;
2261                 adapter->Stats.DumbXmtUcastBytes += DataLength;
2262         }
2263 #endif
2264         /*
2265          * Fill in the command
2266          * Copy out the first SGE to the command and adjust for offset
2267          */
2268         phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
2269                            PCI_DMA_TODEVICE);
2270         memset(XmtCmd, '\0', sizeof(*XmtCmd));
2271         XmtCmd->Buffer.FirstSgeAddress = phys_addr;
2272         XmtCmd->Buffer.FirstSgeLength = DataLength;
2273         XmtCmd->Buffer.SgeOffset = 0;
2274         XmtCmd->Buffer.TotalLength = DataLength;
2275         XmtCmd->SgEntries = 1;
2276         XmtCmd->Flags = 0;
2277         /*
2278          * Advance transmit cmd descripter by 1.
2279          * NOTE - See comments in SxgTcpOutput where we write
2280          * to the XmtCmd register regarding CPU ID values and/or
2281          * multiple commands.
2282          */
2283         WRITE_REG(adapter->UcodeRegs[0].XmtCmd, 1, TRUE);
2284         adapter->Stats.XmtQLen++;       /* Stats within lock */
2285         spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2286         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
2287                   XmtCmd, pSgl, SxgSgl, 0);
2288         return  STATUS_SUCCESS;
2289
2290       abortcmd:
2291         /*
2292          * NOTE - Only jump to this label AFTER grabbing the
2293          * XmtZeroLock, and DO NOT DROP IT between the
2294          * command allocation and the following abort.
2295          */
2296         if (XmtCmd) {
2297                 SXG_ABORT_CMD(XmtRingInfo);
2298         }
2299         spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2300         return STATUS_FAILURE;
2301
2302 /*
2303  * failsgl:
2304  *      Jump to this label if failure occurs before the
2305  *      XmtZeroLock is grabbed
2306  */
2307         adapter->Stats.XmtErrors++;
2308         adapter->stats.tx_errors++;
2309         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
2310                   pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
2311         /* SxgSgl->DumbPacket is the skb */
2312         // SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket);
2313 }
2314
2315 /*
2316  * Link management functions
2317  *
2318  * sxg_initialize_link - Initialize the link stuff
2319  *
2320  * Arguments -
2321  *      adapter         - A pointer to our adapter structure
2322  *
2323  * Return
2324  *      status
2325  */
static int sxg_initialize_link(struct adapter_t *adapter)
{
	struct sxg_hw_regs *HwRegs = adapter->HwRegs;
	u32 Value;
	u32 ConfigData;
	u32 MaxFrame;
	int status;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink",
		  adapter, 0, 0, 0);

	/* Reset PHY and XGXS module */
	WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE);

	/* Reset transmit configuration register */
	WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE);

	/* Reset receive configuration register */
	WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE);

	/* Reset all MAC modules */
	WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE);

	/*
	 * Link address 0
	 * XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f)
	 * is stored with the first nibble (0a) in the byte 0
	 * of the Mac address.  Possibly reverse?
	 *
	 * NOTE(review): the u32/u16 loads below type-pun the macaddr
	 * byte array and assume it is suitably aligned — TODO confirm.
	 */
	Value = *(u32 *) adapter->macaddr;
	WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE);
	/* also write the MAC address to the MAC.  Endian is reversed. */
	WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE);
	Value = (*(u16 *) & adapter->macaddr[4] & 0x0000FFFF);
	WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE);
	/* endian swap for the MAC (put high bytes in bits [31:16], swapped) */
	Value = ntohl(Value);
	WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE);
	/* Link address 1 */
	WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE);
	WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE);
	/* Link address 2 */
	WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE);
	WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE);
	/* Link address 3 */
	WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE);
	WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE);

	/* Enable MAC modules */
	WRITE_REG(HwRegs->MacConfig0, 0, TRUE);

	/* Configure MAC */
	WRITE_REG(HwRegs->MacConfig1, (
					/* Allow sending of pause */
					AXGMAC_CFG1_XMT_PAUSE |
					/* Enable XMT */
					AXGMAC_CFG1_XMT_EN |
					/* Enable detection of pause */
					AXGMAC_CFG1_RCV_PAUSE |
					/* Enable receive */
					AXGMAC_CFG1_RCV_EN |
					/* short frame detection */
					AXGMAC_CFG1_SHORT_ASSERT |
					/* Verify frame length */
					AXGMAC_CFG1_CHECK_LEN |
					/* Generate FCS */
					AXGMAC_CFG1_GEN_FCS |
					/* Pad frames to 64 bytes */
					AXGMAC_CFG1_PAD_64),
				  TRUE);

	/* Set AXGMAC max frame length if jumbo.  Not needed for standard MTU */
	if (adapter->JumboEnabled) {
		WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE);
	}
	/*
	 * AMIIM Configuration Register -
	 * The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion
	 * (bottom bits) of this register is used to determine the MDC frequency
	 * as specified in the A-XGMAC Design Document. This value must not be
	 * zero.  The following value (62 or 0x3E) is based on our MAC transmit
	 * clock frequency (MTCLK) of 312.5 MHz. Given a maximum MDIO clock
	 * frequency of 2.5 MHz (see the PHY spec), we get:
	 *	312.5/(2*(X+1)) < 2.5  ==> X = 62.
	 * This value happens to be the default value for this register, so we
	 * really don't have to do this.
	 */
	WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE);

	/* Power up and enable PHY and XAUI/XGXS/Serdes logic */
	WRITE_REG(HwRegs->LinkStatus,
		  (LS_PHY_CLR_RESET |
		   LS_XGXS_ENABLE |
		   LS_XGXS_CTL | LS_PHY_CLK_EN | LS_ATTN_ALARM), TRUE);
	DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n");

	/*
	 * Per information given by Aeluros, wait 100 ms after removing reset.
	 * It's not enough to wait for the self-clearing reset bit in reg 0 to
	 * clear.
	 */
	mdelay(100);

	/* Verify the PHY has come up by checking that the Reset bit has
	 * cleared.
	 */
	status = sxg_read_mdio_reg(adapter,
				MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
				PHY_PMA_CONTROL1, /* PMA/PMD control register */
				&Value);
	DBG_ERROR("After sxg_read_mdio_reg Value[%x] fail=%x\n", Value,
					 (Value & PMA_CONTROL1_RESET));
	if (status != STATUS_SUCCESS)
		return (STATUS_FAILURE);
	if (Value & PMA_CONTROL1_RESET)	/* reset complete if bit is 0 */
		return (STATUS_FAILURE);

	/* The SERDES should be initialized by now - confirm */
	READ_REG(HwRegs->LinkStatus, Value);
	if (Value & LS_SERDES_DOWN)	/* verify SERDES is initialized */
		return (STATUS_FAILURE);

	/* The XAUI link should also be up - confirm */
	if (!(Value & LS_XAUI_LINK_UP))	/* verify XAUI link is up */
		return (STATUS_FAILURE);

	/* Initialize the PHY */
	status = sxg_phy_init(adapter);
	if (status != STATUS_SUCCESS)
		return (STATUS_FAILURE);

	/* Enable the Link Alarm */

	/* MIIM_DEV_PHY_PMA		- PHY PMA/PMD module
	 * LASI_CONTROL			- LASI control register
	 * LASI_CTL_LS_ALARM_ENABLE	- enable link alarm bit
	 */
	status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
				    LASI_CONTROL,
				    LASI_CTL_LS_ALARM_ENABLE);
	if (status != STATUS_SUCCESS)
		return (STATUS_FAILURE);

	/* XXXTODO - temporary - verify bit is set */

	/* MIIM_DEV_PHY_PMA		- PHY PMA/PMD module
	 * LASI_CONTROL			- LASI control register
	 */
	status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
				   LASI_CONTROL,
				   &Value);

	if (status != STATUS_SUCCESS)
		return (STATUS_FAILURE);
	if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) {
		/* Non-fatal: log and continue with alarm possibly disabled */
		DBG_ERROR("Error!  LASI Control Alarm Enable bit not set!\n");
	}
	/* Enable receive */
	MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME;
	ConfigData = (RCV_CONFIG_ENABLE |
		      RCV_CONFIG_ENPARSE |
		      RCV_CONFIG_RCVBAD |
		      RCV_CONFIG_RCVPAUSE |
		      RCV_CONFIG_TZIPV6 |
		      RCV_CONFIG_TZIPV4 |
		      RCV_CONFIG_HASH_16 |
		      RCV_CONFIG_SOCKET | RCV_CONFIG_BUFSIZE(MaxFrame));
	WRITE_REG(HwRegs->RcvConfig, ConfigData, TRUE);

	WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE);

	/* Mark the link as down.  We'll get a link event when it comes up. */
	sxg_link_state(adapter, SXG_LINK_DOWN);

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk",
		  adapter, 0, 0, 0);
	return (STATUS_SUCCESS);
}
2504
2505 /*
2506  * sxg_phy_init - Initialize the PHY
2507  *
2508  * Arguments -
2509  *      adapter         - A pointer to our adapter structure
2510  *
2511  * Return
2512  *      status
2513  */
2514 static int sxg_phy_init(struct adapter_t *adapter)
2515 {
2516         u32 Value;
2517         struct phy_ucode *p;
2518         int status;
2519
2520         DBG_ERROR("ENTER %s\n", __func__);
2521
2522         /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2523          * 0xC205 - PHY ID register (?)
2524          * &Value - XXXTODO - add def
2525          */
2526         status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2527                                    0xC205,
2528                                    &Value);
2529         if (status != STATUS_SUCCESS)
2530                 return (STATUS_FAILURE);
2531
2532         if (Value == 0x0012) {
2533                 /* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */
2534                 DBG_ERROR("AEL2005C PHY detected.  Downloading PHY \
2535                                  microcode.\n");
2536
2537                 /* Initialize AEL2005C PHY and download PHY microcode */
2538                 for (p = PhyUcode; p->Addr != 0xFFFF; p++) {
2539                         if (p->Addr == 0) {
2540                                 /* if address == 0, data == sleep time in ms */
2541                                 mdelay(p->Data);
2542                         } else {
2543                         /* write the given data to the specified address */
2544                                 status = sxg_write_mdio_reg(adapter,
2545                                                         MIIM_DEV_PHY_PMA,
2546                                                         /* PHY address */
2547                                                             p->Addr,
2548                                                         /* PHY data */
2549                                                             p->Data);
2550                                 if (status != STATUS_SUCCESS)
2551                                         return (STATUS_FAILURE);
2552                         }
2553                 }
2554         }
2555         DBG_ERROR("EXIT %s\n", __func__);
2556
2557         return (STATUS_SUCCESS);
2558 }
2559
2560 /*
2561  * sxg_link_event - Process a link event notification from the card
2562  *
2563  * Arguments -
2564  *      adapter         - A pointer to our adapter structure
2565  *
2566  * Return
2567  *      None
2568  */
static void sxg_link_event(struct adapter_t *adapter)
{
	struct sxg_hw_regs *HwRegs = adapter->HwRegs;
	enum SXG_LINK_STATE LinkState;
	int status;
	u32 Value;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt",
		  adapter, 0, 0, 0);
	DBG_ERROR("ENTER %s\n", __func__);

	/* Check the Link Status register.  We should have a Link Alarm. */
	READ_REG(HwRegs->LinkStatus, Value);
	if (Value & LS_LINK_ALARM) {
		/*
		 * We got a Link Status alarm.  First, pause to let the
		 * link state settle (it can bounce a number of times)
		 */
		mdelay(10);

		/* Now clear the alarm by reading the LASI status register. */
		/* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
		status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
					/* LASI status register */
					   LASI_STATUS,
					   &Value);
		if (status != STATUS_SUCCESS) {
			DBG_ERROR("Error reading LASI Status MDIO register!\n");
			sxg_link_state(adapter, SXG_LINK_DOWN);
		/* ASSERT(0); */
		}
		/*
		 * NOTE(review): when the read above fails we report link
		 * down but still fall through here, ASSERTing on and then
		 * re-querying with a possibly stale Value — confirm this
		 * fall-through is intended.
		 */
		ASSERT(Value & LASI_STATUS_LS_ALARM);

		/* Now get and set the link state */
		LinkState = sxg_get_link_state(adapter);
		sxg_link_state(adapter, LinkState);
		DBG_ERROR("SXG: Link Alarm occurred.  Link is %s\n",
			  ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
	} else {
		/*
		 * XXXTODO - Assuming Link Attention is only being generated
		 * for the Link Alarm pin (and not for a XAUI Link Status change)
		 * , then it's impossible to get here.  Yet we've gotten here
		 * twice (under extreme conditions - bouncing the link up and
		 * down many times a second). Needs further investigation.
		 */
		DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
		DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
		/* ASSERT(0); */
	}
	DBG_ERROR("EXIT %s\n", __func__);

}
2622
2623 /*
2624  * sxg_get_link_state - Determine if the link is up or down
2625  *
2626  * Arguments -
2627  *      adapter         - A pointer to our adapter structure
2628  *
2629  * Return
2630  *      Link State
2631  */
2632 static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter)
2633 {
2634         int status;
2635         u32 Value;
2636
2637         DBG_ERROR("ENTER %s\n", __func__);
2638
2639         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink",
2640                   adapter, 0, 0, 0);
2641
2642         /*
2643          * Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if
2644          * the following 3 bits (from 3 different MDIO registers) are all true.
2645          */
2646
2647         /* MIIM_DEV_PHY_PMA -  PHY PMA/PMD module */
2648         status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2649                                 /* PMA/PMD Receive Signal Detect register */
2650                                    PHY_PMA_RCV_DET,
2651                                    &Value);
2652         if (status != STATUS_SUCCESS)
2653                 goto bad;
2654
2655         /* If PMA/PMD receive signal detect is 0, then the link is down */
2656         if (!(Value & PMA_RCV_DETECT))
2657                 return (SXG_LINK_DOWN);
2658
2659         /* MIIM_DEV_PHY_PCS - PHY PCS module */
2660         status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS,
2661                                 /* PCS 10GBASE-R Status 1 register */
2662                                    PHY_PCS_10G_STATUS1,
2663                                    &Value);
2664         if (status != STATUS_SUCCESS)
2665                 goto bad;
2666
2667         /* If PCS is not locked to receive blocks, then the link is down */
2668         if (!(Value & PCS_10B_BLOCK_LOCK))
2669                 return (SXG_LINK_DOWN);
2670
2671         status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS,/* PHY XS module */
2672                                 /* XS Lane Status register */
2673                                    PHY_XS_LANE_STATUS,
2674                                    &Value);
2675         if (status != STATUS_SUCCESS)
2676                 goto bad;
2677
2678         /* If XS transmit lanes are not aligned, then the link is down */
2679         if (!(Value & XS_LANE_ALIGN))
2680                 return (SXG_LINK_DOWN);
2681
2682         /* All 3 bits are true, so the link is up */
2683         DBG_ERROR("EXIT %s\n", __func__);
2684
2685         return (SXG_LINK_UP);
2686
2687       bad:
2688         /* An error occurred reading an MDIO register. This shouldn't happen. */
2689         DBG_ERROR("Error reading an MDIO register!\n");
2690         ASSERT(0);
2691         return (SXG_LINK_DOWN);
2692 }
2693
2694 static void sxg_indicate_link_state(struct adapter_t *adapter,
2695                                     enum SXG_LINK_STATE LinkState)
2696 {
2697         if (adapter->LinkState == SXG_LINK_UP) {
2698                 DBG_ERROR("%s: LINK now UP, call netif_start_queue\n",
2699                           __func__);
2700                 netif_start_queue(adapter->netdev);
2701         } else {
2702                 DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n",
2703                           __func__);
2704                 netif_stop_queue(adapter->netdev);
2705         }
2706 }
2707
2708 /*
2709  * sxg_link_state - Set the link state and if necessary, indicate.
2710  *      This routine the central point of processing for all link state changes.
2711  *      Nothing else in the driver should alter the link state or perform
2712  *      link state indications
2713  *
2714  * Arguments -
2715  *      adapter         - A pointer to our adapter structure
2716  *      LinkState       - The link state
2717  *
2718  * Return
2719  *      None
2720  */
static void sxg_link_state(struct adapter_t *adapter,
				enum SXG_LINK_STATE LinkState)
{
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT",
		  adapter, LinkState, adapter->LinkState, adapter->State);

	DBG_ERROR("ENTER %s\n", __func__);

	/*
	 * Hold the adapter lock during this routine.  Maybe move
	 * the lock to the caller.
	 */
	/* IMP TODO : Check if we can survive without taking this lock */
	/*
	 * NOTE(review): with the AdapterLock calls commented out below,
	 * the compare-and-update of adapter->LinkState is unsynchronized;
	 * concurrent link events could race — confirm this is acceptable.
	 */
//	spin_lock(&adapter->AdapterLock);
	if (LinkState == adapter->LinkState) {
		/* Nothing changed.. */
//		spin_unlock(&adapter->AdapterLock);
		DBG_ERROR("EXIT #0 %s. Link status = %d\n",
					 __func__, LinkState);
		return;
	}
	/* Save the adapter state */
	adapter->LinkState = LinkState;

	/* Drop the lock and indicate link state */
//	spin_unlock(&adapter->AdapterLock);
	DBG_ERROR("EXIT #1 %s\n", __func__);

	/* Tell the stack to start/stop the queue for the new state */
	sxg_indicate_link_state(adapter, LinkState);
}
2751
2752 /*
2753  * sxg_write_mdio_reg - Write to a register on the MDIO bus
2754  *
2755  * Arguments -
2756  *      adapter         - A pointer to our adapter structure
2757  *  DevAddr     - MDIO device number being addressed
2758  *  RegAddr     - register address for the specified MDIO device
2759  *  Value               - value to write to the MDIO register
2760  *
2761  * Return
2762  *      status
2763  */
2764 static int sxg_write_mdio_reg(struct adapter_t *adapter,
2765                               u32 DevAddr, u32 RegAddr, u32 Value)
2766 {
2767         struct sxg_hw_regs *HwRegs = adapter->HwRegs;
2768         /* Address operation (written to MIIM field reg) */
2769         u32 AddrOp;
2770         /* Write operation (written to MIIM field reg) */
2771         u32 WriteOp;
2772         u32 Cmd;/* Command (written to MIIM command reg) */
2773         u32 ValueRead;
2774         u32 Timeout;
2775
2776         /* DBG_ERROR("ENTER %s\n", __func__); */
2777
2778         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
2779                   adapter, 0, 0, 0);
2780
2781         /* Ensure values don't exceed field width */
2782         DevAddr &= 0x001F;      /* 5-bit field */
2783         RegAddr &= 0xFFFF;      /* 16-bit field */
2784         Value &= 0xFFFF;        /* 16-bit field */
2785
2786         /* Set MIIM field register bits for an MIIM address operation */
2787         AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2788             (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2789             (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2790             (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
2791
2792         /* Set MIIM field register bits for an MIIM write operation */
2793         WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2794             (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2795             (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2796             (MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value;
2797
2798         /* Set MIIM command register bits to execute an MIIM command */
2799         Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
2800
2801         /* Reset the command register command bit (in case it's not 0) */
2802         WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2803
2804         /* MIIM write to set the address of the specified MDIO register */
2805         WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
2806
2807         /* Write to MIIM Command Register to execute to address operation */
2808         WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2809
2810         /* Poll AMIIM Indicator register to wait for completion */
2811         Timeout = SXG_LINK_TIMEOUT;
2812         do {
2813                 udelay(100);    /* Timeout in 100us units */
2814                 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2815                 if (--Timeout == 0) {
2816                         return (STATUS_FAILURE);
2817                 }
2818         } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2819
2820         /* Reset the command register command bit */
2821         WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2822
2823         /* MIIM write to set up an MDIO write operation */
2824         WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE);
2825
2826         /* Write to MIIM Command Register to execute the write operation */
2827         WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2828
2829         /* Poll AMIIM Indicator register to wait for completion */
2830         Timeout = SXG_LINK_TIMEOUT;
2831         do {
2832                 udelay(100);    /* Timeout in 100us units */
2833                 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2834                 if (--Timeout == 0) {
2835                         return (STATUS_FAILURE);
2836                 }
2837         } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2838
2839         /* DBG_ERROR("EXIT %s\n", __func__); */
2840
2841         return (STATUS_SUCCESS);
2842 }
2843
2844 /*
2845  * sxg_read_mdio_reg - Read a register on the MDIO bus
2846  *
2847  * Arguments -
2848  *      adapter         - A pointer to our adapter structure
2849  *  DevAddr     - MDIO device number being addressed
2850  *  RegAddr     - register address for the specified MDIO device
2851  *  pValue      - pointer to where to put data read from the MDIO register
2852  *
2853  * Return
2854  *      status
2855  */
2856 static int sxg_read_mdio_reg(struct adapter_t *adapter,
2857                              u32 DevAddr, u32 RegAddr, u32 *pValue)
2858 {
2859         struct sxg_hw_regs *HwRegs = adapter->HwRegs;
2860         u32 AddrOp;     /* Address operation (written to MIIM field reg) */
2861         u32 ReadOp;     /* Read operation (written to MIIM field reg) */
2862         u32 Cmd;        /* Command (written to MIIM command reg) */
2863         u32 ValueRead;
2864         u32 Timeout;
2865
2866         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
2867                   adapter, 0, 0, 0);
2868         DBG_ERROR("ENTER %s\n", __FUNCTION__);
2869
2870         /* Ensure values don't exceed field width */
2871         DevAddr &= 0x001F;      /* 5-bit field */
2872         RegAddr &= 0xFFFF;      /* 16-bit field */
2873
2874         /* Set MIIM field register bits for an MIIM address operation */
2875         AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2876             (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2877             (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2878             (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
2879
2880         /* Set MIIM field register bits for an MIIM read operation */
2881         ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2882             (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2883             (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2884             (MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT);
2885
2886         /* Set MIIM command register bits to execute an MIIM command */
2887         Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
2888
2889         /* Reset the command register command bit (in case it's not 0) */
2890         WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2891
2892         /* MIIM write to set the address of the specified MDIO register */
2893         WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
2894
2895         /* Write to MIIM Command Register to execute to address operation */
2896         WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2897
2898         /* Poll AMIIM Indicator register to wait for completion */
2899         Timeout = SXG_LINK_TIMEOUT;
2900         do {
2901                 udelay(100);    /* Timeout in 100us units */
2902                 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2903                 if (--Timeout == 0) {
2904             DBG_ERROR("EXIT %s with STATUS_FAILURE 1\n", __FUNCTION__);
2905
2906                         return (STATUS_FAILURE);
2907                 }
2908         } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2909
2910         /* Reset the command register command bit */
2911         WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2912
2913         /* MIIM write to set up an MDIO register read operation */
2914         WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE);
2915
2916         /* Write to MIIM Command Register to execute the read operation */
2917         WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2918
2919         /* Poll AMIIM Indicator register to wait for completion */
2920         Timeout = SXG_LINK_TIMEOUT;
2921         do {
2922                 udelay(100);    /* Timeout in 100us units */
2923                 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2924                 if (--Timeout == 0) {
2925             DBG_ERROR("EXIT %s with STATUS_FAILURE 2\n", __FUNCTION__);
2926
2927                         return (STATUS_FAILURE);
2928                 }
2929         } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2930
2931         /* Read the MDIO register data back from the field register */
2932         READ_REG(HwRegs->MacAmiimField, *pValue);
2933         *pValue &= 0xFFFF;      /* data is in the lower 16 bits */
2934
2935         DBG_ERROR("EXIT %s\n", __FUNCTION__);
2936
2937         return (STATUS_SUCCESS);
2938 }
2939
2940 /*
2941  * Functions to obtain the CRC corresponding to the destination mac address.
2942  * This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using
2943  * the polynomial:
2944  *   x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5
2945  *    + x^4 + x^2 + x^1.
2946  *
2947  * After the CRC for the 6 bytes is generated (but before the value is
2948  * complemented), we must then transpose the value and return bits 30-23.
2949  */
2950 static u32 sxg_crc_table[256];/* Table of CRC's for all possible byte values */
2951 static u32 sxg_crc_init;        /* Is table initialized */
2952
2953 /* Contruct the CRC32 table */
2954 static void sxg_mcast_init_crc32(void)
2955 {
2956         u32 c;                  /*  CRC shit reg */
2957         u32 e = 0;              /*  Poly X-or pattern */
2958         int i;                  /*  counter */
2959         int k;                  /*  byte being shifted into crc  */
2960
2961         static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 };
2962
2963         for (i = 0; i < sizeof(p) / sizeof(int); i++) {
2964                 e |= 1L << (31 - p[i]);
2965         }
2966
2967         for (i = 1; i < 256; i++) {
2968                 c = i;
2969                 for (k = 8; k; k--) {
2970                         c = c & 1 ? (c >> 1) ^ e : c >> 1;
2971                 }
2972                 sxg_crc_table[i] = c;
2973         }
2974 }
2975
2976 /*
2977  *  Return the MAC hast as described above.
2978  */
2979 static unsigned char sxg_mcast_get_mac_hash(char *macaddr)
2980 {
2981         u32 crc;
2982         char *p;
2983         int i;
2984         unsigned char machash = 0;
2985
2986         if (!sxg_crc_init) {
2987                 sxg_mcast_init_crc32();
2988                 sxg_crc_init = 1;
2989         }
2990
2991         crc = 0xFFFFFFFF;       /* Preload shift register, per crc-32 spec */
2992         for (i = 0, p = macaddr; i < 6; ++p, ++i) {
2993                 crc = (crc >> 8) ^ sxg_crc_table[(crc ^ *p) & 0xFF];
2994         }
2995
2996         /* Return bits 1-8, transposed */
2997         for (i = 1; i < 9; i++) {
2998                 machash |= (((crc >> i) & 1) << (8 - i));
2999         }
3000
3001         return (machash);
3002 }
3003
3004 static void sxg_mcast_set_mask(struct adapter_t *adapter)
3005 {
3006         struct sxg_ucode_regs *sxg_regs = adapter->UcodeRegs;
3007
3008         DBG_ERROR("%s ENTER (%s) macopts[%x] mask[%llx]\n", __func__,
3009                   adapter->netdev->name, (unsigned int)adapter->MacFilter,
3010                   adapter->MulticastMask);
3011
3012         if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
3013                 /*
3014                  * Turn on all multicast addresses. We have to do this for
3015                  * promiscuous mode as well as ALLMCAST mode.  It saves the
3016                  * Microcode from having keep state about the MAC configuration
3017                  */
3018                 /* DBG_ERROR("sxg: %s macopts = MAC_ALLMCAST | MAC_PROMISC\n
3019                  *                              SLUT MODE!!!\n",__func__);
3020                  */
3021                 WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
3022                 WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
3023                 /* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high \
3024                  * 0xFFFFFFFF\n",__func__, adapter->netdev->name);
3025                  */
3026
3027         } else {
3028                 /*
3029                  * Commit our multicast mast to the SLIC by writing to the
3030                  * multicast address mask registers
3031                  */
3032                 DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
3033                           __func__, adapter->netdev->name,
3034                           ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
3035                           ((ulong)
3036                            ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
3037
3038                 WRITE_REG(sxg_regs->McastLow,
3039                           (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
3040                 WRITE_REG(sxg_regs->McastHigh,
3041                           (u32) ((adapter->
3042                                   MulticastMask >> 32) & 0xFFFFFFFF), FLUSH);
3043         }
3044 }
3045
3046 /*
3047  *  Allocate a mcast_address structure to hold the multicast address.
3048  *  Link it in.
3049  */
3050 static int sxg_mcast_add_list(struct adapter_t *adapter, char *address)
3051 {
3052         struct mcast_address *mcaddr, *mlist;
3053         bool equaladdr;
3054
3055         /* Check to see if it already exists */
3056         mlist = adapter->mcastaddrs;
3057         while (mlist) {
3058                 ETHER_EQ_ADDR(mlist->address, address, equaladdr);
3059                 if (equaladdr) {
3060                         return (STATUS_SUCCESS);
3061                 }
3062                 mlist = mlist->next;
3063         }
3064
3065         /* Doesn't already exist.  Allocate a structure to hold it */
3066         mcaddr = kmalloc(sizeof(struct mcast_address), GFP_ATOMIC);
3067         if (mcaddr == NULL)
3068                 return 1;
3069
3070         memcpy(mcaddr->address, address, 6);
3071
3072         mcaddr->next = adapter->mcastaddrs;
3073         adapter->mcastaddrs = mcaddr;
3074
3075         return (STATUS_SUCCESS);
3076 }
3077
3078 static void sxg_mcast_set_bit(struct adapter_t *adapter, char *address)
3079 {
3080         unsigned char crcpoly;
3081
3082         /* Get the CRC polynomial for the mac address */
3083         crcpoly = sxg_mcast_get_mac_hash(address);
3084
3085         /*
3086          * We only have space on the SLIC for 64 entries.  Lop
3087          * off the top two bits. (2^6 = 64)
3088          */
3089         crcpoly &= 0x3F;
3090
3091         /* OR in the new bit into our 64 bit mask. */
3092         adapter->MulticastMask |= (u64) 1 << crcpoly;
3093 }
3094
3095 static void sxg_mcast_set_list(struct net_device *dev)
3096 {
3097         struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
3098
3099         ASSERT(adapter);
3100         if (dev->flags & IFF_PROMISC) {
3101                 adapter->MacFilter |= MAC_PROMISC;
3102         }
3103         //XXX handle other flags as well
3104         sxg_mcast_set_mask(adapter);
3105 }
3106
/*
 * sxg_unmap_mmio_space - Unmap the adapter's MMIO register space.
 *
 * Currently a no-op: the body below is a leftover from the port and is
 * compiled out; the actual unmapping happens in sxg_unmap_resources().
 */
static void sxg_unmap_mmio_space(struct adapter_t *adapter)
{
#if LINUX_FREES_ADAPTER_RESOURCES
/*
 *       if (adapter->Regs) {
 *              iounmap(adapter->Regs);
 *      }
 *      adapter->slic_regs = NULL;
 */
#endif
}
3118
3119 void sxg_free_sgl_buffers(struct adapter_t *adapter)
3120 {
3121         struct list_entry               *ple;
3122         struct sxg_scatter_gather       *Sgl;
3123
3124         while(!(IsListEmpty(&adapter->AllSglBuffers))) {
3125                 ple = RemoveHeadList(&adapter->AllSglBuffers);
3126                 Sgl = container_of(ple, struct sxg_scatter_gather, AllList);
3127                 kfree(Sgl);
3128                 adapter->AllSglBufferCount--;
3129         }
3130 }
3131
/*
 * sxg_free_rcvblocks - Free all receive blocks queued on AllRcvBlocks.
 *
 * For each block: free the per-descriptor receive packets contained in
 * it, then release the block's DMA-coherent memory.  Only legal while
 * the adapter is initializing or halting (asserted below).
 */
void sxg_free_rcvblocks(struct adapter_t *adapter)
{
        u32                             i;
        void                            *temp_RcvBlock;
        struct list_entry               *ple;
        struct sxg_rcv_block_hdr        *RcvBlockHdr;
        struct sxg_rcv_data_buffer_hdr  *RcvDataBufferHdr;
        ASSERT((adapter->state == SXG_STATE_INITIALIZING) ||
                    (adapter->state == SXG_STATE_HALTING));
        while(!(IsListEmpty(&adapter->AllRcvBlocks))) {

                 ple = RemoveHeadList(&adapter->AllRcvBlocks);
                 RcvBlockHdr = container_of(ple, struct sxg_rcv_block_hdr, AllList);

                if(RcvBlockHdr->VirtualAddress) {
                        temp_RcvBlock = RcvBlockHdr->VirtualAddress;

                        /*
                         * Walk the data-buffer headers packed at
                         * SXG_RCV_DATA_HDR_SIZE intervals and free each
                         * contained receive packet.
                         */
                        for(i=0; i< SXG_RCV_DESCRIPTORS_PER_BLOCK;
                             i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
                                RcvDataBufferHdr =
                                        (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
                                SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
                        }
                }

                /* Release the whole block's DMA-coherent allocation */
                pci_free_consistent(adapter->pcidev,
                                         SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
                                         RcvBlockHdr->VirtualAddress,
                                         RcvBlockHdr->PhysicalAddress);
                adapter->AllRcvBlockCount--;
        }
        ASSERT(adapter->AllRcvBlockCount == 0);
        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
                        adapter, 0, 0, 0);
}
3167 void sxg_free_mcast_addrs(struct adapter_t *adapter)
3168 {
3169         struct sxg_multicast_address    *address;
3170         while(adapter->MulticastAddrs) {
3171                 address = adapter->MulticastAddrs;
3172                 adapter->MulticastAddrs = address->Next;
3173                 kfree(address);
3174          }
3175
3176         adapter->MulticastMask= 0;
3177 }
3178
3179 void sxg_unmap_resources(struct adapter_t *adapter)
3180 {
3181         if(adapter->HwRegs) {
3182                 iounmap((void *)adapter->HwRegs);
3183          }
3184         if(adapter->UcodeRegs) {
3185                 iounmap((void *)adapter->UcodeRegs);
3186         }
3187
3188         ASSERT(adapter->AllRcvBlockCount == 0);
3189         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
3190                            adapter, 0, 0, 0);
3191 }
3192
3193
3194
3195 /*
3196  * sxg_free_resources - Free everything allocated in SxgAllocateResources
3197  *
3198  * Arguments -
3199  *      adapter         - A pointer to our adapter structure
3200  *
3201  * Return
3202  *      none
3203  */
3204 void sxg_free_resources(struct adapter_t *adapter)
3205 {
3206         u32 RssIds, IsrCount;
3207         u32 i;
3208         struct net_device *netdev = adapter->netdev;
3209         RssIds = SXG_RSS_CPU_COUNT(adapter);
3210         IsrCount = adapter->MsiEnabled ? RssIds : 1;
3211
3212         if (adapter->BasicAllocations == FALSE) {
3213                 /*
3214                  * No allocations have been made, including spinlocks,
3215                  * or listhead initializations.  Return.
3216                  */
3217                 return;
3218         }
3219
3220         /* Free Irq */
3221         free_irq(adapter->netdev->irq, netdev);
3222
3223
3224         if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
3225                 sxg_free_rcvblocks(adapter);
3226         }
3227         if (!(IsListEmpty(&adapter->AllSglBuffers))) {
3228                 sxg_free_sgl_buffers(adapter);
3229         }
3230
3231         if (adapter->XmtRingZeroIndex) {
3232                 pci_free_consistent(adapter->pcidev,
3233                                     sizeof(u32),
3234                                     adapter->XmtRingZeroIndex,
3235                                     adapter->PXmtRingZeroIndex);
3236         }
3237         if (adapter->Isr) {
3238                 pci_free_consistent(adapter->pcidev,
3239                                     sizeof(u32) * IsrCount,
3240                                     adapter->Isr, adapter->PIsr);
3241         }
3242
3243         if (adapter->EventRings) {
3244                 pci_free_consistent(adapter->pcidev,
3245                                     sizeof(struct sxg_event_ring) * RssIds,
3246                                     adapter->EventRings, adapter->PEventRings);
3247         }
3248         if (adapter->RcvRings) {
3249                 pci_free_consistent(adapter->pcidev,
3250                                    sizeof(struct sxg_rcv_ring) * 1,
3251                                    adapter->RcvRings,
3252                                    adapter->PRcvRings);
3253                 adapter->RcvRings = NULL;
3254         }
3255
3256         if(adapter->XmtRings) {
3257                 pci_free_consistent(adapter->pcidev,
3258                                             sizeof(struct sxg_xmt_ring) * 1,
3259                                             adapter->XmtRings,
3260                                             adapter->PXmtRings);
3261                         adapter->XmtRings = NULL;
3262         }
3263
3264         if (adapter->ucode_stats) {
3265                 pci_unmap_single(adapter->pcidev,
3266                                 sizeof(struct sxg_ucode_stats),
3267                                  adapter->pucode_stats, PCI_DMA_FROMDEVICE);
3268                 adapter->ucode_stats = NULL;
3269         }
3270
3271
3272         /* Unmap register spaces */
3273         sxg_unmap_resources(adapter);
3274
3275         sxg_free_mcast_addrs(adapter);
3276
3277         adapter->BasicAllocations = FALSE;
3278
3279 }
3280
3281 /*
3282  * sxg_allocate_complete -
3283  *
3284  * This routine is called when a memory allocation has completed.
3285  *
3286  * Arguments -
3287  *      struct adapter_t *      - Our adapter structure
3288  *      VirtualAddress  - Memory virtual address
3289  *      PhysicalAddress - Memory physical address
3290  *      Length          - Length of memory allocated (or 0)
3291  *      Context         - The type of buffer allocated
3292  *
3293  * Return
3294  *      None.
3295  */
3296 static void sxg_allocate_complete(struct adapter_t *adapter,
3297                                   void *VirtualAddress,
3298                                   dma_addr_t PhysicalAddress,
3299                                   u32 Length, enum sxg_buffer_type Context)
3300 {
3301         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocCmp",
3302                   adapter, VirtualAddress, Length, Context);
3303         ASSERT(atomic_read(&adapter->pending_allocations));
3304         atomic_dec(&adapter->pending_allocations);
3305
3306         switch (Context) {
3307
3308         case SXG_BUFFER_TYPE_RCV:
3309                 sxg_allocate_rcvblock_complete(adapter,
3310                                                VirtualAddress,
3311                                                PhysicalAddress, Length);
3312                 break;
3313         case SXG_BUFFER_TYPE_SGL:
3314                 sxg_allocate_sgl_buffer_complete(adapter, (struct sxg_scatter_gather *)
3315                                                  VirtualAddress,
3316                                                  PhysicalAddress, Length);
3317                 break;
3318         }
3319         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocCmp",
3320                   adapter, VirtualAddress, Length, Context);
3321 }
3322
3323 /*
3324  * sxg_allocate_buffer_memory - Shared memory allocation routine used for
3325  *              synchronous and asynchronous buffer allocations
3326  *
3327  * Arguments -
3328  *      adapter         - A pointer to our adapter structure
3329  *      Size            - block size to allocate
3330  *      BufferType      - Type of buffer to allocate
3331  *
3332  * Return
3333  *      int
3334  */
3335 static int sxg_allocate_buffer_memory(struct adapter_t *adapter,
3336                                       u32 Size, enum sxg_buffer_type BufferType)
3337 {
3338         int status;
3339         void *Buffer;
3340         dma_addr_t pBuffer;
3341
3342         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem",
3343                   adapter, Size, BufferType, 0);
3344         /*
3345          * Grab the adapter lock and check the state. If we're in anything other
3346          * than INITIALIZING or RUNNING state, fail.  This is to prevent
3347          * allocations in an improper driver state
3348          */
3349
3350         atomic_inc(&adapter->pending_allocations);
3351
3352         if(BufferType != SXG_BUFFER_TYPE_SGL)
3353                 Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
3354         else {
3355                 Buffer = kzalloc(Size, GFP_ATOMIC);
3356                 pBuffer = NULL;
3357         }
3358         if (Buffer == NULL) {
3359                 /*
3360                  * Decrement the AllocationsPending count while holding
3361                  * the lock.  Pause processing relies on this
3362                  */
3363                 atomic_dec(&adapter->pending_allocations);
3364                 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
3365                           adapter, Size, BufferType, 0);
3366                 return (STATUS_RESOURCES);
3367         }
3368         sxg_allocate_complete(adapter, Buffer, pBuffer, Size, BufferType);
3369         status = STATUS_SUCCESS;
3370
3371         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocMem",
3372                   adapter, Size, BufferType, status);
3373         return (status);
3374 }
3375
3376 /*
3377  * sxg_allocate_rcvblock_complete - Complete a receive descriptor
3378  *                                      block allocation
3379  *
3380  * Arguments -
3381  *      adapter                         - A pointer to our adapter structure
3382  *      RcvBlock                        - receive block virtual address
3383  *      PhysicalAddress         - Physical address
3384  *      Length                          - Memory length
3385  *
3386  * Return
3387  */
static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
                                           void *RcvBlock,
                                           dma_addr_t PhysicalAddress,
                                           u32 Length)
{
        u32 i;
        u32 BufferSize = adapter->ReceiveBufferSize;
        u64 Paddr;
        void *temp_RcvBlock;
        struct sxg_rcv_block_hdr *RcvBlockHdr;
        struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
        struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
        struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;

        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlRcvBlk",
                  adapter, RcvBlock, Length, 0);
        if (RcvBlock == NULL) {
                goto fail;
        }
        memset(RcvBlock, 0, Length);
        ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
               (BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
        ASSERT(Length == SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE));
        /*
         * First, initialize the contained pool of receive data buffers.
         * This initialization requires NBL/NB/MDL allocations, if any of them
         * fail, free the block and return without queueing the shared memory
         */
        //RcvDataBuffer = RcvBlock;
        /* Data-buffer headers are packed at SXG_RCV_DATA_HDR_SIZE strides */
        temp_RcvBlock = RcvBlock;
        for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
                 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
                RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *)
                                        temp_RcvBlock;
                /* For FREE macro assertion */
                RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
                SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
                if (RcvDataBufferHdr->SxgDumbRcvPacket == NULL)
                        goto fail;

        }

        /*
         * Place this entire block of memory on the AllRcvBlocks queue so it
         * can be free later
         */

        RcvBlockHdr = (struct sxg_rcv_block_hdr *) ((unsigned char *)RcvBlock +
                        SXG_RCV_BLOCK_HDR_OFFSET(SXG_RCV_DATA_HDR_SIZE));
        RcvBlockHdr->VirtualAddress = RcvBlock;
        RcvBlockHdr->PhysicalAddress = PhysicalAddress;
        /* RcvQLock guards the adapter's receive lists and counters */
        spin_lock(&adapter->RcvQLock);
        adapter->AllRcvBlockCount++;
        InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList);
        spin_unlock(&adapter->RcvQLock);

        /* Now free the contained receive data buffers that we
         * initialized above */
        temp_RcvBlock = RcvBlock;
        for (i = 0, Paddr = PhysicalAddress;
             i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
             i++, Paddr += SXG_RCV_DATA_HDR_SIZE,
             temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
                RcvDataBufferHdr =
                        (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
                spin_lock(&adapter->RcvQLock);
                SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
                spin_unlock(&adapter->RcvQLock);
        }

        /* Locate the descriptor block and put it on a separate free queue */
        /*
         * NOTE(review): Paddr has now advanced past the data-buffer
         * headers and is used as the descriptor block's physical
         * address below — presumably matching the layout the
         * SXG_RCV_DESCRIPTOR_BLOCK_*_OFFSET macros describe; confirm
         * against sxghw.h.
         */
        RcvDescriptorBlock =
            (struct sxg_rcv_descriptor_block *) ((unsigned char *)RcvBlock +
                                         SXG_RCV_DESCRIPTOR_BLOCK_OFFSET
                                         (SXG_RCV_DATA_HDR_SIZE));
        RcvDescriptorBlockHdr =
            (struct sxg_rcv_descriptor_block_hdr *) ((unsigned char *)RcvBlock +
                                             SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET
                                             (SXG_RCV_DATA_HDR_SIZE));
        RcvDescriptorBlockHdr->VirtualAddress = RcvDescriptorBlock;
        RcvDescriptorBlockHdr->PhysicalAddress = Paddr;
        spin_lock(&adapter->RcvQLock);
        SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, RcvDescriptorBlockHdr);
        spin_unlock(&adapter->RcvQLock);
        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlRBlk",
                  adapter, RcvBlock, Length, 0);
        return;
fail:
        /* Free any allocated resources */
        if (RcvBlock) {
                temp_RcvBlock = RcvBlock;
                for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
                     i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
                        RcvDataBufferHdr =
                            (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
                        SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
                }
                pci_free_consistent(adapter->pcidev,
                                    Length, RcvBlock, PhysicalAddress);
        }
        DBG_ERROR("%s: OUT OF RESOURCES\n", __func__);
        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail",
                  adapter, adapter->FreeRcvBufferCount,
                  adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount);
        adapter->Stats.NoMem++;
}
3494
3495 /*
3496  * sxg_allocate_sgl_buffer_complete - Complete a SGL buffer allocation
3497  *
3498  * Arguments -
3499  *      adapter                         - A pointer to our adapter structure
3500  *      SxgSgl                          - struct sxg_scatter_gather buffer
3501  *      PhysicalAddress         - Physical address
3502  *      Length                          - Memory length
3503  *
3504  * Return
3505  */
static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
                                             struct sxg_scatter_gather *SxgSgl,
                                             dma_addr_t PhysicalAddress,
                                             u32 Length)
{
        unsigned long sgl_flags;
        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlSglCmp",
                  adapter, SxgSgl, Length, 0);
        /*
         * In hard-irq context a plain spin_lock is used instead of the
         * irqsave variant — presumably because interrupts are already
         * disabled there; sgl_flags is only valid on the !in_irq() path.
         */
        if(!in_irq())
                spin_lock_irqsave(&adapter->SglQLock, sgl_flags);
        else
                spin_lock(&adapter->SglQLock);
        adapter->AllSglBufferCount++;
        /* PhysicalAddress; */
        SxgSgl->PhysicalAddress = PhysicalAddress;
        /* Initialize backpointer once */
        SxgSgl->adapter = adapter;
        InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
        if(!in_irq())
                spin_unlock_irqrestore(&adapter->SglQLock, sgl_flags);
        else
                spin_unlock(&adapter->SglQLock);
        /* Mark busy, then hand the buffer to the free-list macro */
        SxgSgl->State = SXG_BUFFER_BUSY;
        SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL, in_irq());
        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlSgl",
                  adapter, SxgSgl, Length, 0);
}
3533
3534
3535 static void sxg_adapter_set_hwaddr(struct adapter_t *adapter)
3536 {
3537         /*
3538          *  DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] \
3539          *  funct#[%d]\n", __func__, card->config_set,
3540          *  adapter->port, adapter->physport, adapter->functionnumber);
3541          *
3542          *  sxg_dbg_macaddrs(adapter);
3543          */
3544         /* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n",
3545          *                      __FUNCTION__);
3546          */
3547
3548         /* sxg_dbg_macaddrs(adapter); */
3549
3550         struct net_device * dev = adapter->netdev;
3551         if(!dev)
3552         {
3553                 printk("sxg: Dev is Null\n");
3554         }
3555
3556         DBG_ERROR("%s ENTER (%s)\n", __FUNCTION__, adapter->netdev->name);
3557
3558         if (netif_running(dev)) {
3559                 return -EBUSY;
3560         }
3561         if (!adapter) {
3562                 return -EBUSY;
3563         }
3564
3565         if (!(adapter->currmacaddr[0] ||
3566               adapter->currmacaddr[1] ||
3567               adapter->currmacaddr[2] ||
3568               adapter->currmacaddr[3] ||
3569               adapter->currmacaddr[4] || adapter->currmacaddr[5])) {
3570                 memcpy(adapter->currmacaddr, adapter->macaddr, 6);
3571         }
3572         if (adapter->netdev) {
3573                 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
3574                 memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
3575         }
3576         /* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */
3577         sxg_dbg_macaddrs(adapter);
3578
3579 }
3580
3581 #if XXXTODO
3582 static int sxg_mac_set_address(struct net_device *dev, void *ptr)
3583 {
3584         struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
3585         struct sockaddr *addr = ptr;
3586
3587         DBG_ERROR("%s ENTER (%s)\n", __func__, adapter->netdev->name);
3588
3589         if (netif_running(dev)) {
3590                 return -EBUSY;
3591         }
3592         if (!adapter) {
3593                 return -EBUSY;
3594         }
3595         DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
3596                   __func__, adapter->netdev->name, adapter->currmacaddr[0],
3597                   adapter->currmacaddr[1], adapter->currmacaddr[2],
3598                   adapter->currmacaddr[3], adapter->currmacaddr[4],
3599                   adapter->currmacaddr[5]);
3600         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3601         memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
3602         DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
3603                   __func__, adapter->netdev->name, adapter->currmacaddr[0],
3604                   adapter->currmacaddr[1], adapter->currmacaddr[2],
3605                   adapter->currmacaddr[3], adapter->currmacaddr[4],
3606                   adapter->currmacaddr[5]);
3607
3608         sxg_config_set(adapter, TRUE);
3609         return 0;
3610 }
3611 #endif
3612
3613 /*
3614  * SXG DRIVER FUNCTIONS  (below)
3615  *
3616  * sxg_initialize_adapter - Initialize adapter
3617  *
3618  * Arguments -
3619  *      adapter         - A pointer to our adapter structure
3620  *
3621  * Return - int
3622  */
/*
 * Program the microcode register file, stock the card with receive
 * buffers and bring the link up.  Returns STATUS_SUCCESS, or the
 * failure status from sxg_initialize_link().
 */
static int sxg_initialize_adapter(struct adapter_t *adapter)
{
	u32 RssIds, IsrCount;
	u32 i;
	int status;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt",
		  adapter, 0, 0, 0);

	/* Single RSS queue for now, so only UcodeRegs[0] is programmed below */
	RssIds = 1;		/*  XXXTODO  SXG_RSS_CPU_COUNT(adapter); */
	/* One ISR slot per RSS CPU when MSI is on, otherwise a single slot */
	IsrCount = adapter->MsiEnabled ? RssIds : 1;

	/*
	 * Sanity check SXG_UCODE_REGS structure definition to
	 * make sure the length is correct
	 */
	ASSERT(sizeof(struct sxg_ucode_regs) == SXG_REGISTER_SIZE_PER_CPU);

	/* Disable interrupts while the rings are being (re)programmed */
	SXG_DISABLE_ALL_INTERRUPTS(adapter);

	/* Set MTU - only the two supported frame sizes are legal here */
	ASSERT((adapter->FrameSize == ETHERMAXFRAME) ||
	       (adapter->FrameSize == JUMBOMAXFRAME));
	WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE);

	/* Set event ring base address and size */
	WRITE_REG64(adapter,
		    adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0);
	WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE);

	/* Per-ISR initialization */
	for (i = 0; i < IsrCount; i++) {
		u64 Addr;
		/* Set interrupt status pointer: one u32 per ISR slot */
		Addr = adapter->PIsr + (i * sizeof(u32));
		WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i);
	}

	/* XMT ring zero index */
	WRITE_REG64(adapter,
		    adapter->UcodeRegs[0].SPSendIndex,
		    adapter->PXmtRingZeroIndex, 0);

	/* Per-RSS initialization */
	for (i = 0; i < RssIds; i++) {
		/* Release all event ring entries to the Microcode */
		WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE,
			  TRUE);
	}

	/* Transmit ring base and size */
	WRITE_REG64(adapter,
		    adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0);
	WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE);

	/* Receive ring base and size */
	WRITE_REG64(adapter,
		    adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0);
	WRITE_REG(adapter->UcodeRegs[0].RcvSize, SXG_RCV_RING_SIZE, TRUE);

	/* Populate the card with receive buffers */
	sxg_stock_rcv_buffers(adapter);

	/*
	 * Initialize checksum offload capabilities.  At the moment we always
	 * enable IP and TCP receive checksums on the card. Depending on the
	 * checksum configuration specified by the user, we can choose to
	 * report or ignore the checksum information provided by the card.
	 */
	WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum,
		  SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE);

	/* Initialize the MAC, XAUI */
	DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __func__);
	status = sxg_initialize_link(adapter);
	DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __func__,
		  status);
	if (status != STATUS_SUCCESS) {
		return (status);
	}
	/*
	 * Initialize Dead to FALSE.
	 * SlicCheckForHang or SlicDumpThread will take it from here.
	 */
	adapter->Dead = FALSE;
	adapter->PingOutstanding = FALSE;
	adapter->State = SXG_STATE_RUNNING;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInit",
		  adapter, 0, 0, 0);
	return (STATUS_SUCCESS);
}
3716
3717 /*
3718  * sxg_fill_descriptor_block - Populate a descriptor block and give it to
3719  * the card.  The caller should hold the RcvQLock
3720  *
3721  * Arguments -
3722  *      adapter         - A pointer to our adapter structure
3723  *  RcvDescriptorBlockHdr       - Descriptor block to fill
3724  *
3725  * Return
3726  *      status
3727  */
3728 static int sxg_fill_descriptor_block(struct adapter_t *adapter,
3729              struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr)
3730 {
3731         u32 i;
3732         struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
3733         struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3734         struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
3735         struct sxg_cmd *RingDescriptorCmd;
3736         struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
3737
3738         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "FilBlk",
3739                   adapter, adapter->RcvBuffersOnCard,
3740                   adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
3741
3742         ASSERT(RcvDescriptorBlockHdr);
3743
3744         /*
3745          * If we don't have the resources to fill the descriptor block,
3746          * return failure
3747          */
3748         if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) ||
3749             SXG_RING_FULL(RcvRingInfo)) {
3750                 adapter->Stats.NoMem++;
3751                 return (STATUS_FAILURE);
3752         }
3753         /* Get a ring descriptor command */
3754         SXG_GET_CMD(RingZero,
3755                     RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr);
3756         ASSERT(RingDescriptorCmd);
3757         RcvDescriptorBlockHdr->State = SXG_BUFFER_ONCARD;
3758         RcvDescriptorBlock = (struct sxg_rcv_descriptor_block *)
3759                                  RcvDescriptorBlockHdr->VirtualAddress;
3760
3761         /* Fill in the descriptor block */
3762         for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
3763                 SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
3764                 ASSERT(RcvDataBufferHdr);
3765 //              ASSERT(RcvDataBufferHdr->SxgDumbRcvPacket);
3766                 if (!RcvDataBufferHdr->SxgDumbRcvPacket) {
3767                         SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr,
3768                                                 adapter->ReceiveBufferSize);
3769                         if(RcvDataBufferHdr->skb)
3770                                 RcvDataBufferHdr->SxgDumbRcvPacket =
3771                                                 RcvDataBufferHdr->skb;
3772                         else
3773                                 goto no_memory;
3774                 }
3775                 SXG_REINIATIALIZE_PACKET(RcvDataBufferHdr->SxgDumbRcvPacket);
3776                 RcvDataBufferHdr->State = SXG_BUFFER_ONCARD;
3777                 RcvDescriptorBlock->Descriptors[i].VirtualAddress =
3778                                             (void *)RcvDataBufferHdr;
3779
3780                 RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
3781                     RcvDataBufferHdr->PhysicalAddress;
3782         }
3783         /* Add the descriptor block to receive descriptor ring 0 */
3784         RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress;
3785
3786         /*
3787          * RcvBuffersOnCard is not protected via the receive lock (see
3788          * sxg_process_event_queue) We don't want to grap a lock every time a
3789          * buffer is returned to us, so we use atomic interlocked functions
3790          * instead.
3791          */
3792         adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;
3793
3794         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk",
3795                   RcvDescriptorBlockHdr,
3796                   RingDescriptorCmd, RcvRingInfo->Head, RcvRingInfo->Tail);
3797
3798         WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 1, true);
3799         SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlk",
3800                   adapter, adapter->RcvBuffersOnCard,
3801                   adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
3802         return (STATUS_SUCCESS);
3803 no_memory:
3804         return (-ENOMEM);
3805 }
3806
/*
 * sxg_stock_rcv_buffers - Stock the card with receive buffers
 *
 * Tops up the card until RcvBuffersOnCard reaches SXG_RCV_DATA_BUFFERS,
 * kicking off a new block allocation first if the free-buffer pool is
 * running low.  Takes RcvQLock internally, so the caller must NOT hold it.
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *
 * Return
 *	None
 */
static void sxg_stock_rcv_buffers(struct adapter_t *adapter)
{
	struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
		  adapter, adapter->RcvBuffersOnCard,
		  adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
	/*
	 * First, see if we've got less than our minimum threshold of
	 * receive buffers, there isn't an allocation in progress, and
	 * we haven't exceeded our maximum.. get another block of buffers
	 * None of this needs to be SMP safe.  It's round numbers.
	 */
	if ((adapter->FreeRcvBufferCount < SXG_MIN_RCV_DATA_BUFFERS) &&
	    (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
	    (atomic_read(&adapter->pending_allocations) == 0)) {
		sxg_allocate_buffer_memory(adapter,
					   SXG_RCV_BLOCK_SIZE
					   (SXG_RCV_DATA_HDR_SIZE),
					   SXG_BUFFER_TYPE_RCV);
	}
	/*
	 * Now grab the RcvQLock lock and proceed.
	 * NOTE(review): plain spin_lock, not _irqsave -- assumes this is
	 * never reached from hard-IRQ context; confirm against callers.
	 */
	spin_lock(&adapter->RcvQLock);
	while (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
		struct list_entry *_ple;

		/* Get a descriptor block from the free list, if any */
		RcvDescriptorBlockHdr = NULL;
		if (adapter->FreeRcvBlockCount) {
			_ple = RemoveHeadList(&adapter->FreeRcvBlocks);
			RcvDescriptorBlockHdr =
			    container_of(_ple, struct sxg_rcv_descriptor_block_hdr,
					 FreeList);
			adapter->FreeRcvBlockCount--;
			RcvDescriptorBlockHdr->State = SXG_BUFFER_BUSY;
		}

		if (RcvDescriptorBlockHdr == NULL) {
			/* Bail out.. */
			adapter->Stats.NoMem++;
			break;
		}
		/* Fill in the descriptor block and give it to the card */
		if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
		    STATUS_FAILURE) {
			/* Free the descriptor block */
			SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
						      RcvDescriptorBlockHdr);
			break;
		}
	}
	spin_unlock(&adapter->RcvQLock);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlks",
		  adapter, adapter->RcvBuffersOnCard,
		  adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
}
3872
/*
 * sxg_complete_descriptor_blocks - Return descriptor blocks that have been
 * completed by the microcode
 *
 * Walks the ring tail up toward the microcode's index, refilling each
 * completed descriptor block and handing it straight back to the card
 * (or freeing it if the refill fails).  Takes RcvQLock internally, so
 * the caller must NOT hold it.
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	Index		- Where the microcode is up to
 *
 * Return
 *	None
 */
static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
					   unsigned char Index)
{
	struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
	struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
	struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
	struct sxg_cmd *RingDescriptorCmd;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks",
		  adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);

	/*
	 * Now grab the RcvQLock lock and proceed.
	 * NOTE(review): plain spin_lock, not _irqsave -- assumes
	 * non-hard-IRQ context; confirm against callers.
	 */
	spin_lock(&adapter->RcvQLock);
	ASSERT(Index != RcvRingInfo->Tail);
	/*
	 * Stop a few entries short of the microcode's index
	 * (" > 3" keeps some slack between Tail and Index; presumably
	 * to avoid racing the microcode -- TODO confirm the rationale).
	 */
	while (sxg_ring_get_forward_diff(RcvRingInfo, Index,
					RcvRingInfo->Tail) > 3) {
		/*
		 * Locate the current Cmd (ring descriptor entry), and
		 * associated receive descriptor block, and advance
		 * the tail
		 */
		SXG_RETURN_CMD(RingZero,
			       RcvRingInfo,
			       RingDescriptorCmd, RcvDescriptorBlockHdr);
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlk",
			  RcvRingInfo->Head, RcvRingInfo->Tail,
			  RingDescriptorCmd, RcvDescriptorBlockHdr);

		/* Clear the SGL field */
		RingDescriptorCmd->Sgl = 0;
		/*
		 * Attempt to refill it and hand it right back to the
		 * card.  If we fail to refill it, free the descriptor block
		 * header.  The card will be restocked later via the
		 * RcvBuffersOnCard test
		 */
		if (sxg_fill_descriptor_block(adapter,
			 RcvDescriptorBlockHdr) == STATUS_FAILURE)
			SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
						      RcvDescriptorBlockHdr);
	}
	spin_unlock(&adapter->RcvQLock);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XCRBlks",
		  adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
}
3929
3930 /*
3931  * Read the statistics which the card has been maintaining.
3932  */
3933 void sxg_collect_statistics(struct adapter_t *adapter)
3934 {
3935         if(adapter->ucode_stats)
3936                 WRITE_REG64(adapter, adapter->UcodeRegs[0].GetUcodeStats, adapter->pucode_stats, 0);
3937         adapter->stats.rx_fifo_errors = adapter->ucode_stats->ERDrops;
3938         adapter->stats.rx_over_errors = adapter->ucode_stats->NBDrops;
3939         adapter->stats.tx_fifo_errors = adapter->ucode_stats->XDrops;
3940 }
3941
3942 static struct net_device_stats *sxg_get_stats(struct net_device * dev)
3943 {
3944         struct adapter_t *adapter = netdev_priv(dev);
3945
3946         sxg_collect_statistics(adapter);
3947         return (&adapter->stats);
3948 }
3949
/* PCI driver glue: binds sxg_entry_probe/sxg_entry_remove to the device
 * IDs in sxg_pci_tbl.  Power management hooks are compiled in only when
 * SXG_POWER_MANAGEMENT_ENABLED is set. */
static struct pci_driver sxg_driver = {
	.name = sxg_driver_name,
	.id_table = sxg_pci_tbl,
	.probe = sxg_entry_probe,
	.remove = sxg_entry_remove,
#if SXG_POWER_MANAGEMENT_ENABLED
	.suspend = sxgpm_suspend,
	.resume = sxgpm_resume,
#endif
	/* .shutdown   =     slic_shutdown,  MOOK_INVESTIGATE */
};
3961
/* Module entry point: run driver-wide setup, apply the "debug" module
 * parameter, and register the PCI driver. */
static int __init sxg_module_init(void)
{
	/* One-time driver-global initialization (see sxg_init_driver()) */
	sxg_init_driver();

	/* A non-negative "debug" module parameter overrides the default */
	if (debug >= 0)
		sxg_debug = debug;

	return pci_register_driver(&sxg_driver);
}
3971
/* Module exit point: unregister the PCI driver, which triggers
 * sxg_entry_remove() for every bound device. */
static void __exit sxg_module_cleanup(void)
{
	pci_unregister_driver(&sxg_driver);
}

module_init(sxg_module_init);
module_exit(sxg_module_cleanup);