/* b44.c: Broadcom 4400 device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2006 Broadcom Corporation.
 *
 * Distribute under GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME         "b44"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.01"
#define DRV_MODULE_RELDATE      "Jun 16, 2006"

#define B44_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU                     60
#define B44_MAX_MTU                     1500

#define B44_RX_RING_SIZE                512
#define B44_DEF_RX_RING_PENDING         200
#define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE                512
#define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_TX_RING_SIZE)

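/* Ring bookkeeping helpers.  tx_prod is where the driver queues new
 * descriptors and tx_cons is the next one to be reclaimed; the ring
 * holds at most tx_pending in-flight entries, so TX_BUFFS_AVAIL() is
 * the number of free slots, accounting for index wrap-around.
 */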
#define TX_RING_GAP(BP) \
        (B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)                                              \
        (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
          (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
          (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))

#define RX_PKT_OFFSET           30
#define RX_PKT_BUF_SZ           (1536 + RX_PKT_OFFSET + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE        0x400
#define B44_PATTERN_SIZE        0x80
#define B44_PMASK_BASE          0x600
#define B44_PMASK_SIZE          0x10
#define B44_MAX_PATTERNS        16
#define B44_ETHIPV6UDP_HLEN     62
#define B44_ETHIPV4UDP_HLEN     42

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
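/* The debug bits follow the standard NETIF_MSG_* values, so e.g.
 * "modprobe b44 b44_debug=0x7" enables only the DRV, PROBE and LINK
 * messages; leaving it at -1 selects B44_DEF_MSG_ENABLE.
 */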

static struct pci_device_id b44_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { }     /* terminate list with empty entry */
};

MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET          1
#define B44_FULL_RESET_SKIP_PHY 2
#define B44_PARTIAL_RESET       3

static void b44_init_hw(struct b44 *, int);

static int dma_desc_align_mask;
static int dma_desc_sync_size;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)      # x,
B44_STAT_REG_DECLARE
#undef _B44
};

static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
                                                dma_addr_t dma_base,
                                                unsigned long offset,
                                                enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(&pdev->dev, dma_base,
                                         offset & dma_desc_align_mask,
                                         dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
                                             dma_addr_t dma_base,
                                             unsigned long offset,
                                             enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
                                      offset & dma_desc_align_mask,
                                      dma_desc_sync_size, dir);
}

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
        return readl(bp->regs + reg);
}

static inline void bw32(const struct b44 *bp,
                        unsigned long reg, unsigned long val)
{
        writel(val, bp->regs + reg);
}

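/* Poll a device register every 10us until the given bit is set
 * (clear == 0) or cleared (clear == 1), giving up after "timeout"
 * iterations.  Returns 0 on success, -ENODEV on timeout.
 */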
static int b44_wait_bit(struct b44 *bp, unsigned long reg,
                        u32 bit, unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;
                if (!clear && (val & bit))
                        break;
                udelay(10);
        }
        if (i == timeout) {
                printk(KERN_ERR PFX "%s: BUG!  Timeout waiting for bit %08x of register "
                       "%lx to %s.\n",
                       bp->dev->name,
                       bit, reg,
                       (clear ? "clear" : "set"));
                return -ENODEV;
        }
        return 0;
}

/* Sonics SiliconBackplane support routines.  ROFL, you should see all the
 * buzz words used on this company's website :-)
 *
 * All of these routines must be invoked with bp->lock held and
 * interrupts disabled.
 */

#define SB_PCI_DMA             0x40000000      /* Client Mode PCI memory access space (1 GB) */
#define BCM4400_PCI_CORE_ADDR  0x18002000      /* Address of PCI core on BCM4400 cards */

static u32 ssb_get_core_rev(struct b44 *bp)
{
        return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}

static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
        u32 bar_orig, pci_rev, val;

        pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
        pci_rev = ssb_get_core_rev(bp);

        val = br32(bp, B44_SBINTVEC);
        val |= cores;
        bw32(bp, B44_SBINTVEC, val);

        val = br32(bp, SSB_PCI_TRANS_2);
        val |= SSB_PCI_PREF | SSB_PCI_BURST;
        bw32(bp, SSB_PCI_TRANS_2, val);

        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

        return pci_rev;
}

static void ssb_core_disable(struct b44 *bp)
{
        if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
                return;

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
        b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
        b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
                            SBTMSLOW_REJECT | SBTMSLOW_RESET));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}

static void ssb_core_reset(struct b44 *bp)
{
        u32 val;

        ssb_core_disable(bp);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);
        udelay(1);

        /* Clear SERR if set, this is a hw bug workaround.  */
        if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
                bw32(bp, B44_SBTMSHIGH, 0);

        val = br32(bp, B44_SBIMSTATE);
        if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
                bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);
        udelay(1);

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}

static int ssb_core_unit(struct b44 *bp)
{
#if 0
        u32 val = br32(bp, B44_SBADMATCH0);
        u32 type, base;

        type = val & SBADMATCH0_TYPE_MASK;
        switch (type) {
        case 0:
                base = val & SBADMATCH0_BS0_MASK;
                break;

        case 1:
                base = val & SBADMATCH0_BS1_MASK;
                break;

        case 2:
        default:
                base = val & SBADMATCH0_BS2_MASK;
                break;
        }
#endif
        return 0;
}

static int ssb_is_core_up(struct b44 *bp)
{
        return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
                == SBTMSLOW_CLOCK);
}

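/* Program one MAC address into the chip's CAM (receive address filter).
 * The six address bytes are packed into the DATA_HI/DATA_LO registers
 * and committed with a CAM_CTRL write; the BUSY bit falls when the
 * internal write has completed.
 */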
static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
        u32 val;

        val  = ((u32) data[2]) << 24;
        val |= ((u32) data[3]) << 16;
        val |= ((u32) data[4]) <<  8;
        val |= ((u32) data[5]) <<  0;
        bw32(bp, B44_CAM_DATA_LO, val);
        val = (CAM_DATA_HI_VALID |
               (((u32) data[0]) << 8) |
               (((u32) data[1]) << 0));
        bw32(bp, B44_CAM_DATA_HI, val);
        bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
                            (index << CAM_CTRL_INDEX_SHIFT)));
        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

static inline void __b44_disable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
        __b44_disable_ints(bp);

        /* Flush posted writes. */
        br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, bp->imask);
}

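/* MII access: a PHY read/write is built as a single MDIO management
 * frame in B44_MDIO_DATA (start bits, opcode, PHY and register
 * addresses, turnaround), then we wait for the EMAC to raise its
 * MII-done interrupt status bit.
 */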
static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
        int err;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}

static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
                             (val & MDIO_DATA_DATA)));
        return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

/* miilib interface */
/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
 * due to code existing before miilib use was added to this driver.
 * Someone should remove this artificial driver limitation in
 * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
 */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
        u32 val;
        struct b44 *bp = netdev_priv(dev);
        int rc = b44_readphy(bp, location, &val);
        if (rc)
                return 0xffffffff;
        return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
                         int val)
{
        struct b44 *bp = netdev_priv(dev);
        b44_writephy(bp, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
        u32 val;
        int err;

        err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
        if (err)
                return err;
        udelay(100);
        err = b44_readphy(bp, MII_BMCR, &val);
        if (!err) {
                if (val & BMCR_RESET) {
                        printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
                               bp->dev->name);
                        err = -ENODEV;
                }
        }

        return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
        u32 val;

        bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
        bp->flags |= pause_flags;

        val = br32(bp, B44_RXCONFIG);
        if (pause_flags & B44_FLAG_RX_PAUSE)
                val |= RXCONFIG_FLOW;
        else
                val &= ~RXCONFIG_FLOW;
        bw32(bp, B44_RXCONFIG, val);

        val = br32(bp, B44_MAC_FLOW);
        if (pause_flags & B44_FLAG_TX_PAUSE)
                val |= (MAC_FLOW_PAUSE_ENAB |
                        (0xc0 & MAC_FLOW_RX_HI_WATER));
        else
                val &= ~MAC_FLOW_PAUSE_ENAB;
        bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
        u32 pause_enab = 0;

        /* The driver supports only rx pause by default because
           the b44 mac tx pause mechanism generates excessive
           pause frames.
           Use ethtool to turn on b44 tx pause if necessary.
         */
        if ((local & ADVERTISE_PAUSE_CAP) &&
            (local & ADVERTISE_PAUSE_ASYM)) {
                if ((remote & LPA_PAUSE_ASYM) &&
                    !(remote & LPA_PAUSE_CAP))
                        pause_enab |= B44_FLAG_RX_PAUSE;
        }

        __b44_set_flow_ctrl(bp, pause_enab);
}

static int b44_setup_phy(struct b44 *bp)
{
        u32 val;
        int err;

        if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
                                val & MII_ALEDCTRL_ALLMSK)) != 0)
                goto out;
        if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
                                val | MII_TLEDCTRL_ENABLE)) != 0)
                goto out;

        if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
                u32 adv = ADVERTISE_CSMA;

                if (bp->flags & B44_FLAG_ADV_10HALF)
                        adv |= ADVERTISE_10HALF;
                if (bp->flags & B44_FLAG_ADV_10FULL)
                        adv |= ADVERTISE_10FULL;
                if (bp->flags & B44_FLAG_ADV_100HALF)
                        adv |= ADVERTISE_100HALF;
                if (bp->flags & B44_FLAG_ADV_100FULL)
                        adv |= ADVERTISE_100FULL;

                if (bp->flags & B44_FLAG_PAUSE_AUTO)
                        adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

                if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
                        goto out;
                if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
                                                       BMCR_ANRESTART))) != 0)
                        goto out;
        } else {
                u32 bmcr;

                if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
                        goto out;
                bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
                if (bp->flags & B44_FLAG_100_BASE_T)
                        bmcr |= BMCR_SPEED100;
                if (bp->flags & B44_FLAG_FULL_DUPLEX)
                        bmcr |= BMCR_FULLDPLX;
                if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
                        goto out;

                /* Since we will not be negotiating there is no safe way
                 * to determine if the link partner supports flow control
                 * or not.  So just disable it completely in this case.
                 */
                b44_set_flow_ctrl(bp, 0, 0);
        }

out:
        return err;
}

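/* The chip's MIB counters are laid out as one 32-bit register per
 * statistic, in the same order as the fields of bp->hw_stats, so the
 * accumulation below simply walks both in parallel.  The counters are
 * configured clear-on-read (see MIB_CTRL_CLR_ON_READ), hence the "+=".
 */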
static void b44_stats_update(struct b44 *bp)
{
        unsigned long reg;
        u32 *val;

        val = &bp->hw_stats.tx_good_octets;
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }

        /* Pad */
        reg += 8*4UL;

        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }
}

static void b44_link_report(struct b44 *bp)
{
        if (!netif_carrier_ok(bp->dev)) {
                printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
        } else {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
                       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

                printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
                       "%s for RX.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
                       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
        }
}

static void b44_check_phy(struct b44 *bp)
{
        u32 bmsr, aux;

        if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
            !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
            (bmsr != 0xffff)) {
                if (aux & MII_AUXCTRL_SPEED)
                        bp->flags |= B44_FLAG_100_BASE_T;
                else
                        bp->flags &= ~B44_FLAG_100_BASE_T;
                if (aux & MII_AUXCTRL_DUPLEX)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
                else
                        bp->flags &= ~B44_FLAG_FULL_DUPLEX;

                if (!netif_carrier_ok(bp->dev) &&
                    (bmsr & BMSR_LSTATUS)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        u32 local_adv, remote_adv;

                        if (bp->flags & B44_FLAG_FULL_DUPLEX)
                                val |= TX_CTRL_DUPLEX;
                        else
                                val &= ~TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);

                        if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
                            !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
                            !b44_readphy(bp, MII_LPA, &remote_adv))
                                b44_set_flow_ctrl(bp, local_adv, remote_adv);

                        /* Link now up */
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
                        /* Link now down */
                        netif_carrier_off(bp->dev);
                        b44_link_report(bp);
                }

                if (bmsr & BMSR_RFAULT)
                        printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
                               bp->dev->name);
                if (bmsr & BMSR_JCD)
                        printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
                               bp->dev->name);
        }
}

static void b44_timer(unsigned long __opaque)
{
        struct b44 *bp = (struct b44 *) __opaque;

        spin_lock_irq(&bp->lock);

        b44_check_phy(bp);

        b44_stats_update(bp);

        spin_unlock_irq(&bp->lock);

        mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

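/* Reclaim completed TX descriptors: DMATX_STAT tells us how far the
 * chip has progressed through the ring; everything between tx_cons and
 * that point can be unmapped and its skb freed.
 */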
static void b44_tx(struct b44 *bp)
{
        u32 cur, cons;

        cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);

        /* XXX needs updating when NETIF_F_SG is supported */
        for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
                struct ring_info *rp = &bp->tx_buffers[cons];
                struct sk_buff *skb = rp->skb;

                BUG_ON(skb == NULL);

                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 skb->len,
                                 PCI_DMA_TODEVICE);
                rp->skb = NULL;
                dev_kfree_skb_irq(skb);
        }

        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
                netif_wake_queue(bp->dev);

        bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a "struct rx_header" 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *dp;
        struct ring_info *src_map, *map;
        struct rx_header *rh;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int dest_idx;
        u32 ctrl;

        src_map = NULL;
        if (src_idx >= 0)
                src_map = &bp->rx_buffers[src_idx];
        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        map = &bp->rx_buffers[dest_idx];
        skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
        if (skb == NULL)
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data,
                                 RX_PKT_BUF_SZ,
                                 PCI_DMA_FROMDEVICE);

        /* Hardware bug work-around, the chip is unable to do PCI DMA
           to/from anything above 1GB :-( */
        if (dma_mapping_error(mapping) ||
                mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
                /* Sigh... */
                if (!dma_mapping_error(mapping))
                        pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,
                                         PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
                skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                mapping = pci_map_single(bp->pdev, skb->data,
                                         RX_PKT_BUF_SZ,
                                         PCI_DMA_FROMDEVICE);
                if (dma_mapping_error(mapping) ||
                        mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
                        if (!dma_mapping_error(mapping))
                                pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,
                                                 PCI_DMA_FROMDEVICE);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
        }

        rh = (struct rx_header *) skb->data;
        skb_reserve(skb, RX_PKT_OFFSET);

        rh->len = 0;
        rh->flags = 0;

        map->skb = skb;
        pci_unmap_addr_set(map, mapping, mapping);

        if (src_map != NULL)
                src_map->skb = NULL;

        ctrl  = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET));
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        dp = &bp->rx_ring[dest_idx];
        dp->ctrl = cpu_to_le32(ctrl);
        dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dp),
                                             DMA_BIDIRECTIONAL);

        return RX_PKT_BUF_SZ;
}

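/* Reuse an already-mapped RX buffer in place: the descriptor and
 * mapping from src_idx are copied over to dest_idx and the rx_header
 * is cleared so the chip can fill the same buffer again.  Much cheaper
 * than allocating and mapping a fresh skb for dropped or copied frames.
 */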
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        struct rx_header *rh;
        int dest_idx;
        __le32 ctrl;

        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        dest_desc = &bp->rx_ring[dest_idx];
        dest_map = &bp->rx_buffers[dest_idx];
        src_desc = &bp->rx_ring[src_idx];
        src_map = &bp->rx_buffers[src_idx];

        dest_map->skb = src_map->skb;
        rh = (struct rx_header *) src_map->skb->data;
        rh->len = 0;
        rh->flags = 0;
        pci_unmap_addr_set(dest_map, mapping,
                           pci_unmap_addr(src_map, mapping));

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
                                          src_idx * sizeof(*src_desc),
                                          DMA_BIDIRECTIONAL);

        ctrl = src_desc->ctrl;
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= cpu_to_le32(DESC_CTRL_EOT);
        else
                ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

        dest_desc->ctrl = ctrl;
        dest_desc->addr = src_desc->addr;

        src_map->skb = NULL;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dest_desc),
                                             DMA_BIDIRECTIONAL);

        pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr),
                                       RX_PKT_BUF_SZ,
                                       PCI_DMA_FROMDEVICE);
}

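/* RX processing: walk the ring from rx_cons up to the chip's current
 * descriptor (DMARX_STAT).  Large frames are passed up in their own
 * buffer and a fresh skb takes their ring slot; small frames are
 * copied into a new skb so the ring buffer can be recycled in place.
 */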
static int b44_rx(struct b44 *bp, int budget)
{
        int received;
        u32 cons, prod;

        received = 0;
        prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
        prod /= sizeof(struct dma_desc);
        cons = bp->rx_cons;

        while (cons != prod && budget > 0) {
                struct ring_info *rp = &bp->rx_buffers[cons];
                struct sk_buff *skb = rp->skb;
                dma_addr_t map = pci_unmap_addr(rp, mapping);
                struct rx_header *rh;
                u16 len;

                pci_dma_sync_single_for_cpu(bp->pdev, map,
                                            RX_PKT_BUF_SZ,
                                            PCI_DMA_FROMDEVICE);
                rh = (struct rx_header *) skb->data;
                len = le16_to_cpu(rh->len);
                if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
                    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
                drop_it:
                        b44_recycle_rx(bp, cons, bp->rx_prod);
                drop_it_no_recycle:
                        bp->stats.rx_dropped++;
                        goto next_pkt;
                }

                if (len == 0) {
                        int i = 0;

                        do {
                                udelay(2);
                                barrier();
                                len = le16_to_cpu(rh->len);
                        } while (len == 0 && i++ < 5);
                        if (len == 0)
                                goto drop_it;
                }

                /* Omit CRC. */
                len -= 4;

                if (len > RX_COPY_THRESHOLD) {
                        int skb_size;
                        skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
                        if (skb_size < 0)
                                goto drop_it;
                        pci_unmap_single(bp->pdev, map,
                                         skb_size, PCI_DMA_FROMDEVICE);
                        /* Leave out rx_header */
                        skb_put(skb, len + RX_PKT_OFFSET);
                        skb_pull(skb, RX_PKT_OFFSET);
                } else {
                        struct sk_buff *copy_skb;

                        b44_recycle_rx(bp, cons, bp->rx_prod);
                        copy_skb = dev_alloc_skb(len + 2);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        /* DMA sync done above, copy just the actual packet */
                        skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
                                                         copy_skb->data, len);
                        skb = copy_skb;
                }
                skb->ip_summed = CHECKSUM_NONE;
                skb->protocol = eth_type_trans(skb, bp->dev);
                netif_receive_skb(skb);
                bp->dev->last_rx = jiffies;
                received++;
                budget--;
        next_pkt:
                bp->rx_prod = (bp->rx_prod + 1) &
                        (B44_RX_RING_SIZE - 1);
                cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
        }

        bp->rx_cons = cons;
        bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

        return received;
}

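/* NAPI poll callback.  TX completion and error recovery run under
 * bp->lock; RX runs unlocked against bp->istat, which was saved by the
 * interrupt handler before further interrupts were masked.
 */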
static int b44_poll(struct napi_struct *napi, int budget)
{
        struct b44 *bp = container_of(napi, struct b44, napi);
        struct net_device *netdev = bp->dev;
        int work_done;

        spin_lock_irq(&bp->lock);

        if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
                /* spin_lock(&bp->tx_lock); */
                b44_tx(bp);
                /* spin_unlock(&bp->tx_lock); */
        }
        spin_unlock_irq(&bp->lock);

        work_done = 0;
        if (bp->istat & ISTAT_RX)
                work_done += b44_rx(bp, budget);

        if (bp->istat & ISTAT_ERRORS) {
                unsigned long flags;

                spin_lock_irqsave(&bp->lock, flags);
                b44_halt(bp);
                b44_init_rings(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
                netif_wake_queue(bp->dev);
                spin_unlock_irqrestore(&bp->lock, flags);
                work_done = 0;
        }

        if (work_done < budget) {
                netif_rx_complete(netdev, napi);
                b44_enable_ints(bp);
        }

        return work_done;
}

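/* Interrupt handler: acknowledge the status bits we care about, stash
 * them in bp->istat for the poll routine, mask further interrupts and
 * hand the rest of the work to NAPI.
 */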
static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct b44 *bp = netdev_priv(dev);
        u32 istat, imask;
        int handled = 0;

        spin_lock(&bp->lock);

        istat = br32(bp, B44_ISTAT);
        imask = br32(bp, B44_IMASK);

        /* The interrupt mask register controls which interrupt bits
         * will actually raise an interrupt to the CPU when set by hw/firmware,
         * but doesn't mask off the bits.
         */
        istat &= imask;
        if (istat) {
                handled = 1;

                if (unlikely(!netif_running(dev))) {
                        printk(KERN_INFO "%s: late interrupt.\n", dev->name);
                        goto irq_ack;
                }

                if (netif_rx_schedule_prep(dev, &bp->napi)) {
                        /* NOTE: These writes are posted by the readback of
                         *       the ISTAT register below.
                         */
                        bp->istat = istat;
                        __b44_disable_ints(bp);
                        __netif_rx_schedule(dev, &bp->napi);
                } else {
                        printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
                               dev->name);
                }

irq_ack:
                bw32(bp, B44_ISTAT, istat);
                br32(bp, B44_ISTAT);
        }
        spin_unlock(&bp->lock);
        return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
               dev->name);

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        netif_wake_queue(dev);
}

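/* hard_start_xmit: map the skb for DMA, falling back to a GFP_DMA
 * bounce buffer when the mapping lands above the chip's 30-bit DMA
 * limit, then fill in the next TX descriptor and kick the DMA engine
 * by advancing B44_DMATX_PTR.
 */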
static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int rc = NETDEV_TX_OK;
        dma_addr_t mapping;
        u32 len, entry, ctrl;

        len = skb->len;
        spin_lock_irq(&bp->lock);

        /* This is a hard error, log it. */
        if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
                       dev->name);
                goto err_out;
        }

        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
                struct sk_buff *bounce_skb;

                /* Chip can't handle DMA to/from >1GB, use bounce buffer */
                if (!dma_mapping_error(mapping))
                        pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);

                bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb)
                        goto err_out;

                mapping = pci_map_single(bp->pdev, bounce_skb->data,
                                         len, PCI_DMA_TODEVICE);
                if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
                        if (!dma_mapping_error(mapping))
                                pci_unmap_single(bp->pdev, mapping,
                                                 len, PCI_DMA_TODEVICE);
                        dev_kfree_skb_any(bounce_skb);
                        goto err_out;
                }

                skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }

        entry = bp->tx_prod;
        bp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

        ctrl  = (len & DESC_CTRL_LEN);
        ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
        if (entry == (B44_TX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
        bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
                                             entry * sizeof(bp->tx_ring[0]),
                                             DMA_TO_DEVICE);

        entry = NEXT_TX(entry);

        bp->tx_prod = entry;

        wmb();

        bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_BUGGY_TXPTR)
                bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

        dev->trans_start = jiffies;

out_unlock:
        spin_unlock_irq(&bp->lock);

        return rc;

err_out:
        rc = NETDEV_TX_BUSY;
        goto out_unlock;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
        struct b44 *bp = netdev_priv(dev);

        if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                dev->mtu = new_mtu;
                return 0;
        }

        spin_lock_irq(&bp->lock);
        b44_halt(bp);
        dev->mtu = new_mtu;
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
        struct ring_info *rp;
        int i;

        for (i = 0; i < B44_RX_RING_SIZE; i++) {
                rp = &bp->rx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 RX_PKT_BUF_SZ,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }

        /* XXX needs changes once NETIF_F_SG is set... */
        for (i = 0; i < B44_TX_RING_SIZE; i++) {
                rp = &bp->tx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 rp->skb->len,
                                 PCI_DMA_TODEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
        int i;

        b44_free_rings(bp);

        memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
        memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
                                           DMA_TABLE_BYTES,
                                           PCI_DMA_BIDIRECTIONAL);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
                                           DMA_TABLE_BYTES,
                                           PCI_DMA_TODEVICE);

        for (i = 0; i < bp->rx_pending; i++) {
                if (b44_alloc_rx_skb(bp, -1, i) < 0)
                        break;
        }
}

/*
 * Must not be invoked with interrupt sources disabled; the
 * hardware must already be shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
        kfree(bp->rx_buffers);
        bp->rx_buffers = NULL;
        kfree(bp->tx_buffers);
        bp->tx_buffers = NULL;
        if (bp->rx_ring) {
                if (bp->flags & B44_FLAG_RX_RING_HACK) {
                        dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
                                         DMA_TABLE_BYTES,
                                         DMA_BIDIRECTIONAL);
                        kfree(bp->rx_ring);
                } else
                        pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
                                            bp->rx_ring, bp->rx_ring_dma);
                bp->rx_ring = NULL;
                bp->flags &= ~B44_FLAG_RX_RING_HACK;
        }
        if (bp->tx_ring) {
                if (bp->flags & B44_FLAG_TX_RING_HACK) {
                        dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
                                         DMA_TABLE_BYTES,
                                         DMA_TO_DEVICE);
                        kfree(bp->tx_ring);
                } else
                        pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
                                            bp->tx_ring, bp->tx_ring_dma);
                bp->tx_ring = NULL;
                bp->flags &= ~B44_FLAG_TX_RING_HACK;
        }
}

/*
 * Must not be invoked with interrupt sources disabled; the
 * hardware must already be shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp)
{
        int size;

        size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
        bp->rx_buffers = kzalloc(size, GFP_KERNEL);
        if (!bp->rx_buffers)
                goto out_err;

        size = B44_TX_RING_SIZE * sizeof(struct ring_info);
        bp->tx_buffers = kzalloc(size, GFP_KERNEL);
        if (!bp->tx_buffers)
                goto out_err;

        size = DMA_TABLE_BYTES;
        bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
        if (!bp->rx_ring) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *rx_ring;
                dma_addr_t rx_ring_dma;

                rx_ring = kzalloc(size, GFP_KERNEL);
                if (!rx_ring)
                        goto out_err;

                rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_BIDIRECTIONAL);

                if (dma_mapping_error(rx_ring_dma) ||
                        rx_ring_dma + size > DMA_30BIT_MASK) {
                        kfree(rx_ring);
                        goto out_err;
                }

                bp->rx_ring = rx_ring;
                bp->rx_ring_dma = rx_ring_dma;
                bp->flags |= B44_FLAG_RX_RING_HACK;
        }

        bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
        if (!bp->tx_ring) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *tx_ring;
                dma_addr_t tx_ring_dma;

                tx_ring = kzalloc(size, GFP_KERNEL);
                if (!tx_ring)
                        goto out_err;

                tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_TO_DEVICE);

                if (dma_mapping_error(tx_ring_dma) ||
                        tx_ring_dma + size > DMA_30BIT_MASK) {
                        kfree(tx_ring);
                        goto out_err;
                }

                bp->tx_ring = tx_ring;
                bp->tx_ring_dma = tx_ring_dma;
                bp->flags |= B44_FLAG_TX_RING_HACK;
        }

        return 0;

out_err:
        b44_free_consistent(bp);
        return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
        unsigned long reg;

        bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
                br32(bp, reg);
        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
                br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
        if (ssb_is_core_up(bp)) {
                bw32(bp, B44_RCV_LAZY, 0);
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
                bw32(bp, B44_DMATX_CTRL, 0);
                bp->tx_prod = bp->tx_cons = 0;
                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);
                }
                bw32(bp, B44_DMARX_CTRL, 0);
                bp->rx_prod = bp->rx_cons = 0;
        } else {
                ssb_pci_setup(bp, (bp->core_unit == 0 ?
                                   SBINTVEC_ENET0 :
                                   SBINTVEC_ENET1));
        }

        ssb_core_reset(bp);

        b44_clear_stats(bp);

        /* Make PHY accessible. */
        bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                             (0x0d & MDIO_CTRL_MAXF_MASK)));
        br32(bp, B44_MDIO_CTRL);

        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                br32(bp, B44_ENET_CTRL);
                bp->flags &= ~B44_FLAG_INTERNAL_PHY;
        } else {
                u32 val = br32(bp, B44_DEVCTRL);

                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        br32(bp, B44_DEVCTRL);
                        udelay(100);
                }
                bp->flags |= B44_FLAG_INTERNAL_PHY;
        }
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
        b44_disable_ints(bp);
        b44_chip_reset(bp);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
        bw32(bp, B44_CAM_CTRL, 0);
        if (!(bp->dev->flags & IFF_PROMISC)) {
                u32 val;

                __b44_cam_write(bp, bp->dev->dev_addr, 0);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
        }
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
        struct b44 *bp = netdev_priv(dev);
        struct sockaddr *addr = p;

        if (netif_running(dev))
                return -EBUSY;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        spin_lock_irq(&bp->lock);
        __b44_set_mac_addr(bp);
        spin_unlock_irq(&bp->lock);

        return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
        u32 val;

        b44_chip_reset(bp);
        if (reset_kind == B44_FULL_RESET) {
                b44_phy_reset(bp);
                b44_setup_phy(bp);
        }

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too.  */
        __b44_set_rx_mode(bp->dev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
        if (reset_kind == B44_PARTIAL_RESET) {
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
        } else {
                bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
                bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
                bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

                bw32(bp, B44_DMARX_PTR, bp->rx_pending);
                bp->rx_prod = bp->rx_pending;

                bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        }

        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}

static int b44_open(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int err;

        err = b44_alloc_consistent(bp);
        if (err)
                goto out;

        napi_enable(&bp->napi);

        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        b44_check_phy(bp);

        err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
        if (unlikely(err < 0)) {
                napi_disable(&bp->napi);
                b44_chip_reset(bp);
                b44_free_rings(bp);
                b44_free_consistent(bp);
                goto out;
        }

        init_timer(&bp->timer);
        bp->timer.expires = jiffies + HZ;
        bp->timer.data = (unsigned long) bp;
        bp->timer.function = b44_timer;
        add_timer(&bp->timer);

        b44_enable_ints(bp);
        netif_start_queue(dev);
out:
        return err;
}

#if 0
/*static*/ void b44_dump_state(struct b44 *bp)
{
        u32 val32, val32_2, val32_3, val32_4, val32_5;
        u16 val16;

        pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
        printk("DEBUG: PCI status [%04x]\n", val16);

}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        b44_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

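/* Write a pattern (or pattern mask) into the chip's wakeup filter
 * table, one 32-bit word at a time through the FILT_ADDR/FILT_DATA
 * register pair.
 */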
static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
        u32 i;
        u32 *pattern = (u32 *) pp;

        for (i = 0; i < bytes; i += sizeof(u32)) {
                bw32(bp, B44_FILT_ADDR, table_offset + i);
                bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
        }
}

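/* Build a Wake-on-LAN magic packet pattern at the given payload offset:
 * six bytes of 0xff followed by the MAC address repeated (up to
 * B44_MAX_PATTERNS times, as far as the pattern buffer allows), setting
 * the matching bits in the byte mask.  Returns the pattern length minus
 * one, which is how the WKUP_LEN register expects it.
 */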
1475 static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1476 {
1477         int magicsync = 6;
1478         int k, j, len = offset;
1479         int ethaddr_bytes = ETH_ALEN;
1480
1481         memset(ppattern + offset, 0xff, magicsync);
1482         for (j = 0; j < magicsync; j++)
1483                 set_bit(len++, (unsigned long *) pmask);
1484
1485         for (j = 0; j < B44_MAX_PATTERNS; j++) {
1486                 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1487                         ethaddr_bytes = ETH_ALEN;
1488                 else
1489                         ethaddr_bytes = B44_PATTERN_SIZE - len;
1490                 if (ethaddr_bytes <=0)
1491                         break;
1492                 for (k = 0; k< ethaddr_bytes; k++) {
1493                         ppattern[offset + magicsync +
1494                                 (j * ETH_ALEN) + k] = macaddr[k];
1495                         len++;
1496                         set_bit(len, (unsigned long *) pmask);
1497                 }
1498         }
1499         return len - 1;
1500 }
1501
1502 /* Setup magic packet patterns in the b44 WOL
1503  * pattern matching filter.
1504  */
1505 static void b44_setup_pseudo_magicp(struct b44 *bp)
1506 {
1507
1508         u32 val;
1509         int plen0, plen1, plen2;
1510         u8 *pwol_pattern;
1511         u8 pwol_mask[B44_PMASK_SIZE];
1512
1513         pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1514         if (!pwol_pattern) {
1515                 printk(KERN_ERR PFX "Memory not available for WOL\n");
1516                 return;
1517         }
1518
1519         /* Ipv4 magic packet pattern - pattern 0.*/
1520         memset(pwol_mask, 0, B44_PMASK_SIZE);
1521         plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1522                                   B44_ETHIPV4UDP_HLEN);
1523
1524         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1525         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1526
1527         /* Raw Ethernet II magic packet pattern - pattern 1 */
1528         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1529         memset(pwol_mask, 0, B44_PMASK_SIZE);
1530         plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1531                                   ETH_HLEN);
1532
1533         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1534                        B44_PATTERN_BASE + B44_PATTERN_SIZE);
1535         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1536                        B44_PMASK_BASE + B44_PMASK_SIZE);
1537
1538         /* IPv6 magic packet pattern - pattern 2 */
1539         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1540         memset(pwol_mask, 0, B44_PMASK_SIZE);
1541         plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1542                                   B44_ETHIPV6UDP_HLEN);
1543
1544         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1545                        B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1546         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1547                        B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1548
1549         kfree(pwol_pattern);
1550
1551         /* set these patterns' lengths: one less than each real length */
1552         val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1553         bw32(bp, B44_WKUP_LEN, val);
1554
1555         /* enable wakeup pattern matching */
1556         val = br32(bp, B44_DEVCTRL);
1557         bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1558
1559 }
1560
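/* Arm the chip for wake-on-LAN.  B0 and later cores have native magic
 * packet matching (DEVCTRL_MPM), so the MAC address is programmed into
 * the chip directly; earlier cores fall back to the pseudo-magic
 * pattern filters above.  The PE bits set in SBTMSLOW and the PCI PM
 * register at the end presumably enable the core's and the PCI
 * function's wakeup power states.
 */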
1561 static void b44_setup_wol(struct b44 *bp)
1562 {
1563         u32 val;
1564         u16 pmval;
1565
1566         bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1567
1568         if (bp->flags & B44_FLAG_B0_ANDLATER) {
1569
1570                 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1571
1572                 val = bp->dev->dev_addr[2] << 24 |
1573                         bp->dev->dev_addr[3] << 16 |
1574                         bp->dev->dev_addr[4] << 8 |
1575                         bp->dev->dev_addr[5];
1576                 bw32(bp, B44_ADDR_LO, val);
1577
1578                 val = bp->dev->dev_addr[0] << 8 |
1579                         bp->dev->dev_addr[1];
1580                 bw32(bp, B44_ADDR_HI, val);
1581
1582                 val = br32(bp, B44_DEVCTRL);
1583                 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1584
1585         } else {
1586                 b44_setup_pseudo_magicp(bp);
1587         }
1588
1589         val = br32(bp, B44_SBTMSLOW);
1590         bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
1591
1592         pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
1593         pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
1594
1595 }
1596
1597 static int b44_close(struct net_device *dev)
1598 {
1599         struct b44 *bp = netdev_priv(dev);
1600
1601         netif_stop_queue(dev);
1602
1603         napi_disable(&bp->napi);
1604
1605         del_timer_sync(&bp->timer);
1606
1607         spin_lock_irq(&bp->lock);
1608
1609 #if 0
1610         b44_dump_state(bp);
1611 #endif
1612         b44_halt(bp);
1613         b44_free_rings(bp);
1614         netif_carrier_off(dev);
1615
1616         spin_unlock_irq(&bp->lock);
1617
1618         free_irq(dev->irq, dev);
1619
1620         if (bp->flags & B44_FLAG_WOL_ENABLE) {
1621                 b44_init_hw(bp, B44_PARTIAL_RESET);
1622                 b44_setup_wol(bp);
1623         }
1624
1625         b44_free_consistent(bp);
1626
1627         return 0;
1628 }
1629
1630 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1631 {
1632         struct b44 *bp = netdev_priv(dev);
1633         struct net_device_stats *nstat = &bp->stats;
1634         struct b44_hw_stats *hwstat = &bp->hw_stats;
1635
1636         /* Convert HW stats into netdevice stats. */
1637         nstat->rx_packets = hwstat->rx_pkts;
1638         nstat->tx_packets = hwstat->tx_pkts;
1639         nstat->rx_bytes   = hwstat->rx_octets;
1640         nstat->tx_bytes   = hwstat->tx_octets;
1641         nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1642                              hwstat->tx_oversize_pkts +
1643                              hwstat->tx_underruns +
1644                              hwstat->tx_excessive_cols +
1645                              hwstat->tx_late_cols);
1646         nstat->multicast  = hwstat->rx_multicast_pkts;
1647         nstat->collisions = hwstat->tx_total_cols;
1648
1649         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1650                                    hwstat->rx_undersize);
1651         nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1652         nstat->rx_frame_errors  = hwstat->rx_align_errs;
1653         nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1654         nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1655                                    hwstat->rx_oversize_pkts +
1656                                    hwstat->rx_missed_pkts +
1657                                    hwstat->rx_crc_align_errs +
1658                                    hwstat->rx_undersize +
1659                                    hwstat->rx_crc_errs +
1660                                    hwstat->rx_align_errs +
1661                                    hwstat->rx_symbol_errs);
1662
1663         nstat->tx_aborted_errors = hwstat->tx_underruns;
1664 #if 0
1665         /* Carrier lost counter seems to be broken for some devices */
1666         nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1667 #endif
1668
1669         return nstat;
1670 }
1671
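/* Load multicast addresses into the receive CAM.  Entry 0 holds the
 * unicast MAC (presumably written by __b44_set_mac_addr), so multicast
 * entries start at index 1; __b44_set_rx_mode zeroes the rest of the
 * 64-entry table.  Returns the next free CAM index.
 */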
1672 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1673 {
1674         struct dev_mc_list *mclist;
1675         int i, num_ents;
1676
1677         num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1678         mclist = dev->mc_list;
1679         for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1680                 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1681         }
1682         return i + 1;
1683 }
1684
1685 static void __b44_set_rx_mode(struct net_device *dev)
1686 {
1687         struct b44 *bp = netdev_priv(dev);
1688         u32 val;
1689
1690         val = br32(bp, B44_RXCONFIG);
1691         val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1692         if (dev->flags & IFF_PROMISC) {
1693                 val |= RXCONFIG_PROMISC;
1694                 bw32(bp, B44_RXCONFIG, val);
1695         } else {
1696                 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1697                 int i = 1;
1698
1699                 __b44_set_mac_addr(bp);
1700
1701                 if ((dev->flags & IFF_ALLMULTI) ||
1702                     (dev->mc_count > B44_MCAST_TABLE_SIZE))
1703                         val |= RXCONFIG_ALLMULTI;
1704                 else
1705                         i = __b44_load_mcast(bp, dev);
1706
1707                 for (; i < 64; i++)
1708                         __b44_cam_write(bp, zero, i);
1709
1710                 bw32(bp, B44_RXCONFIG, val);
1711                 val = br32(bp, B44_CAM_CTRL);
1712                 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1713         }
1714 }
1715
1716 static void b44_set_rx_mode(struct net_device *dev)
1717 {
1718         struct b44 *bp = netdev_priv(dev);
1719
1720         spin_lock_irq(&bp->lock);
1721         __b44_set_rx_mode(dev);
1722         spin_unlock_irq(&bp->lock);
1723 }
1724
1725 static u32 b44_get_msglevel(struct net_device *dev)
1726 {
1727         struct b44 *bp = netdev_priv(dev);
1728         return bp->msg_enable;
1729 }
1730
1731 static void b44_set_msglevel(struct net_device *dev, u32 value)
1732 {
1733         struct b44 *bp = netdev_priv(dev);
1734         bp->msg_enable = value;
1735 }
1736
1737 static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1738 {
1739         struct b44 *bp = netdev_priv(dev);
1740         struct pci_dev *pci_dev = bp->pdev;
1741
1742         strcpy(info->driver, DRV_MODULE_NAME);
1743         strcpy(info->version, DRV_MODULE_VERSION);
1744         strcpy(info->bus_info, pci_name(pci_dev));
1745 }
1746
1747 static int b44_nway_reset(struct net_device *dev)
1748 {
1749         struct b44 *bp = netdev_priv(dev);
1750         u32 bmcr;
1751         int r;
1752
1753         spin_lock_irq(&bp->lock);
1754         b44_readphy(bp, MII_BMCR, &bmcr);
1755         b44_readphy(bp, MII_BMCR, &bmcr);
1756         r = -EINVAL;
1757         if (bmcr & BMCR_ANENABLE) {
1758                 b44_writephy(bp, MII_BMCR,
1759                              bmcr | BMCR_ANRESTART);
1760                 r = 0;
1761         }
1762         spin_unlock_irq(&bp->lock);
1763
1764         return r;
1765 }
1766
1767 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1768 {
1769         struct b44 *bp = netdev_priv(dev);
1770
1771         cmd->supported = (SUPPORTED_Autoneg);
1772         cmd->supported |= (SUPPORTED_100baseT_Half |
1773                           SUPPORTED_100baseT_Full |
1774                           SUPPORTED_10baseT_Half |
1775                           SUPPORTED_10baseT_Full |
1776                           SUPPORTED_MII);
1777
1778         cmd->advertising = 0;
1779         if (bp->flags & B44_FLAG_ADV_10HALF)
1780                 cmd->advertising |= ADVERTISED_10baseT_Half;
1781         if (bp->flags & B44_FLAG_ADV_10FULL)
1782                 cmd->advertising |= ADVERTISED_10baseT_Full;
1783         if (bp->flags & B44_FLAG_ADV_100HALF)
1784                 cmd->advertising |= ADVERTISED_100baseT_Half;
1785         if (bp->flags & B44_FLAG_ADV_100FULL)
1786                 cmd->advertising |= ADVERTISED_100baseT_Full;
1787         cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1788         cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1789                 SPEED_100 : SPEED_10;
1790         cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1791                 DUPLEX_FULL : DUPLEX_HALF;
1792         cmd->port = 0;
1793         cmd->phy_address = bp->phy_addr;
1794         cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1795                 XCVR_INTERNAL : XCVR_EXTERNAL;
1796         cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1797                 AUTONEG_DISABLE : AUTONEG_ENABLE;
1798         if (cmd->autoneg == AUTONEG_ENABLE)
1799                 cmd->advertising |= ADVERTISED_Autoneg;
1800         if (!netif_running(dev)) {
1801                 cmd->speed = 0;
1802                 cmd->duplex = 0xff;
1803         }
1804         cmd->maxtxpkt = 0;
1805         cmd->maxrxpkt = 0;
1806         return 0;
1807 }
1808
1809 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1810 {
1811         struct b44 *bp = netdev_priv(dev);
1812
1813         /* We do not support gigabit. */
1814         if (cmd->autoneg == AUTONEG_ENABLE) {
1815                 if (cmd->advertising &
1816                     (ADVERTISED_1000baseT_Half |
1817                      ADVERTISED_1000baseT_Full))
1818                         return -EINVAL;
1819         } else if ((cmd->speed != SPEED_100 &&
1820                     cmd->speed != SPEED_10) ||
1821                    (cmd->duplex != DUPLEX_HALF &&
1822                     cmd->duplex != DUPLEX_FULL)) {
1823                 return -EINVAL;
1824         }
1825
1826         spin_lock_irq(&bp->lock);
1827
1828         if (cmd->autoneg == AUTONEG_ENABLE) {
1829                 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1830                                B44_FLAG_100_BASE_T |
1831                                B44_FLAG_FULL_DUPLEX |
1832                                B44_FLAG_ADV_10HALF |
1833                                B44_FLAG_ADV_10FULL |
1834                                B44_FLAG_ADV_100HALF |
1835                                B44_FLAG_ADV_100FULL);
1836                 if (cmd->advertising == 0) {
1837                         bp->flags |= (B44_FLAG_ADV_10HALF |
1838                                       B44_FLAG_ADV_10FULL |
1839                                       B44_FLAG_ADV_100HALF |
1840                                       B44_FLAG_ADV_100FULL);
1841                 } else {
1842                         if (cmd->advertising & ADVERTISED_10baseT_Half)
1843                                 bp->flags |= B44_FLAG_ADV_10HALF;
1844                         if (cmd->advertising & ADVERTISED_10baseT_Full)
1845                                 bp->flags |= B44_FLAG_ADV_10FULL;
1846                         if (cmd->advertising & ADVERTISED_100baseT_Half)
1847                                 bp->flags |= B44_FLAG_ADV_100HALF;
1848                         if (cmd->advertising & ADVERTISED_100baseT_Full)
1849                                 bp->flags |= B44_FLAG_ADV_100FULL;
1850                 }
1851         } else {
1852                 bp->flags |= B44_FLAG_FORCE_LINK;
1853                 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1854                 if (cmd->speed == SPEED_100)
1855                         bp->flags |= B44_FLAG_100_BASE_T;
1856                 if (cmd->duplex == DUPLEX_FULL)
1857                         bp->flags |= B44_FLAG_FULL_DUPLEX;
1858         }
1859
1860         if (netif_running(dev))
1861                 b44_setup_phy(bp);
1862
1863         spin_unlock_irq(&bp->lock);
1864
1865         return 0;
1866 }
1867
1868 static void b44_get_ringparam(struct net_device *dev,
1869                               struct ethtool_ringparam *ering)
1870 {
1871         struct b44 *bp = netdev_priv(dev);
1872
1873         ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1874         ering->rx_pending = bp->rx_pending;
1875         ering->tx_max_pending = B44_TX_RING_SIZE - 1;
1876         ering->tx_pending = bp->tx_pending;
1877 }
1878
1879 static int b44_set_ringparam(struct net_device *dev,
1880                              struct ethtool_ringparam *ering)
1881 {
1882         struct b44 *bp = netdev_priv(dev);
1883
1884         if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1885             (ering->rx_mini_pending != 0) ||
1886             (ering->rx_jumbo_pending != 0) ||
1887             (ering->tx_pending > B44_TX_RING_SIZE - 1))
1888                 return -EINVAL;
1889
1890         spin_lock_irq(&bp->lock);
1891
1892         bp->rx_pending = ering->rx_pending;
1893         bp->tx_pending = ering->tx_pending;
1894
1895         b44_halt(bp);
1896         b44_init_rings(bp);
1897         b44_init_hw(bp, B44_FULL_RESET);
1898         netif_wake_queue(bp->dev);
1899         spin_unlock_irq(&bp->lock);
1900
1901         b44_enable_ints(bp);
1902
1903         return 0;
1904 }
1905
1906 static void b44_get_pauseparam(struct net_device *dev,
1907                                 struct ethtool_pauseparam *epause)
1908 {
1909         struct b44 *bp = netdev_priv(dev);
1910
1911         epause->autoneg =
1912                 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1913         epause->rx_pause =
1914                 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1915         epause->tx_pause =
1916                 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1917 }
1918
1919 static int b44_set_pauseparam(struct net_device *dev,
1920                                 struct ethtool_pauseparam *epause)
1921 {
1922         struct b44 *bp = netdev_priv(dev);
1923
1924         spin_lock_irq(&bp->lock);
1925         if (epause->autoneg)
1926                 bp->flags |= B44_FLAG_PAUSE_AUTO;
1927         else
1928                 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1929         if (epause->rx_pause)
1930                 bp->flags |= B44_FLAG_RX_PAUSE;
1931         else
1932                 bp->flags &= ~B44_FLAG_RX_PAUSE;
1933         if (epause->tx_pause)
1934                 bp->flags |= B44_FLAG_TX_PAUSE;
1935         else
1936                 bp->flags &= ~B44_FLAG_TX_PAUSE;
1937         if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1938                 b44_halt(bp);
1939                 b44_init_rings(bp);
1940                 b44_init_hw(bp, B44_FULL_RESET);
1941         } else {
1942                 __b44_set_flow_ctrl(bp, bp->flags);
1943         }
1944         spin_unlock_irq(&bp->lock);
1945
1946         b44_enable_ints(bp);
1947
1948         return 0;
1949 }
1950
1951 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1952 {
1953         switch (stringset) {
1954         case ETH_SS_STATS:
1955                 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1956                 break;
1957         }
1958 }
1959
1960 static int b44_get_stats_count(struct net_device *dev)
1961 {
1962         return ARRAY_SIZE(b44_gstrings);
1963 }
1964
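/* Note: the walk below assumes the b44_hw_stats counters are laid out
 * as a contiguous array of u32s starting at tx_good_octets, in the same
 * order as b44_gstrings (see b44.h).
 */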
1965 static void b44_get_ethtool_stats(struct net_device *dev,
1966                                   struct ethtool_stats *stats, u64 *data)
1967 {
1968         struct b44 *bp = netdev_priv(dev);
1969         u32 *val = &bp->hw_stats.tx_good_octets;
1970         u32 i;
1971
1972         spin_lock_irq(&bp->lock);
1973
1974         b44_stats_update(bp);
1975
1976         for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1977                 *data++ = *val++;
1978
1979         spin_unlock_irq(&bp->lock);
1980 }
1981
1982 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1983 {
1984         struct b44 *bp = netdev_priv(dev);
1985
1986         wol->supported = WAKE_MAGIC;
1987         if (bp->flags & B44_FLAG_WOL_ENABLE)
1988                 wol->wolopts = WAKE_MAGIC;
1989         else
1990                 wol->wolopts = 0;
1991         memset(&wol->sopass, 0, sizeof(wol->sopass));
1992 }
1993
1994 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1995 {
1996         struct b44 *bp = netdev_priv(dev);
1997
1998         spin_lock_irq(&bp->lock);
1999         if (wol->wolopts & WAKE_MAGIC)
2000                 bp->flags |= B44_FLAG_WOL_ENABLE;
2001         else
2002                 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2003         spin_unlock_irq(&bp->lock);
2004
2005         return 0;
2006 }
2007
2008 static const struct ethtool_ops b44_ethtool_ops = {
2009         .get_drvinfo            = b44_get_drvinfo,
2010         .get_settings           = b44_get_settings,
2011         .set_settings           = b44_set_settings,
2012         .nway_reset             = b44_nway_reset,
2013         .get_link               = ethtool_op_get_link,
2014         .get_wol                = b44_get_wol,
2015         .set_wol                = b44_set_wol,
2016         .get_ringparam          = b44_get_ringparam,
2017         .set_ringparam          = b44_set_ringparam,
2018         .get_pauseparam         = b44_get_pauseparam,
2019         .set_pauseparam         = b44_set_pauseparam,
2020         .get_msglevel           = b44_get_msglevel,
2021         .set_msglevel           = b44_set_msglevel,
2022         .get_strings            = b44_get_strings,
2023         .get_stats_count        = b44_get_stats_count,
2024         .get_ethtool_stats      = b44_get_ethtool_stats,
2025 };
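/* These ops back the standard ethtool(8) interface.  An illustrative
 * mapping, assuming the device came up as eth0:
 *
 *	ethtool eth0                    -> b44_get_settings
 *	ethtool -s eth0 speed 100 duplex full autoneg off
 *	                                -> b44_set_settings
 *	ethtool -G eth0 rx 200          -> b44_set_ringparam
 *	ethtool -A eth0 autoneg on      -> b44_set_pauseparam
 *	ethtool -s eth0 wol g           -> b44_set_wol
 *	ethtool -S eth0                 -> b44_get_ethtool_stats
 */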
2026
2027 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2028 {
2029         struct mii_ioctl_data *data = if_mii(ifr);
2030         struct b44 *bp = netdev_priv(dev);
2031         int err = -EINVAL;
2032
2033         if (!netif_running(dev))
2034                 goto out;
2035
2036         spin_lock_irq(&bp->lock);
2037         err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2038         spin_unlock_irq(&bp->lock);
2039 out:
2040         return err;
2041 }
2042
2043 /* Read 128-bytes of EEPROM. */
2044 static int b44_read_eeprom(struct b44 *bp, u8 *data)
2045 {
2046         long i;
2047         __le16 *ptr = (__le16 *) data;
2048
2049         for (i = 0; i < 128; i += 2)
2050                 ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
2051
2052         return 0;
2053 }
2054
2055 static int __devinit b44_get_invariants(struct b44 *bp)
2056 {
2057         u8 eeprom[128];
2058         int err;
2059
2060         err = b44_read_eeprom(bp, &eeprom[0]);
2061         if (err)
2062                 goto out;
2063
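        /* The EEPROM appears to store the MAC address as byte-swapped
         * 16-bit words, hence the pairwise swizzle below.
         */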
2064         bp->dev->dev_addr[0] = eeprom[79];
2065         bp->dev->dev_addr[1] = eeprom[78];
2066         bp->dev->dev_addr[2] = eeprom[81];
2067         bp->dev->dev_addr[3] = eeprom[80];
2068         bp->dev->dev_addr[4] = eeprom[83];
2069         bp->dev->dev_addr[5] = eeprom[82];
2070
2071         if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2072                 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
2073                 return -EINVAL;
2074         }
2075
2076         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2077
2078         bp->phy_addr = eeprom[90] & 0x1f;
2079
2080         bp->imask = IMASK_DEF;
2081
2082         bp->core_unit = ssb_core_unit(bp);
2083         bp->dma_offset = SB_PCI_DMA;
2084
2085         /* XXX - really required?
2086            bp->flags |= B44_FLAG_BUGGY_TXPTR;
2087          */
2088
2089         if (ssb_get_core_rev(bp) >= 7)
2090                 bp->flags |= B44_FLAG_B0_ANDLATER;
2091
2092 out:
2093         return err;
2094 }
2095
2096 static int __devinit b44_init_one(struct pci_dev *pdev,
2097                                   const struct pci_device_id *ent)
2098 {
2099         static int b44_version_printed = 0;
2100         unsigned long b44reg_base, b44reg_len;
2101         struct net_device *dev;
2102         struct b44 *bp;
2103         int err, i;
2104
2105         if (b44_version_printed++ == 0)
2106                 printk(KERN_INFO "%s", version);
2107
2108         err = pci_enable_device(pdev);
2109         if (err) {
2110                 dev_err(&pdev->dev, "Cannot enable PCI device, "
2111                        "aborting.\n");
2112                 return err;
2113         }
2114
2115         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2116                 dev_err(&pdev->dev,
2117                         "Cannot find proper PCI device "
2118                        "base address, aborting.\n");
2119                 err = -ENODEV;
2120                 goto err_out_disable_pdev;
2121         }
2122
2123         err = pci_request_regions(pdev, DRV_MODULE_NAME);
2124         if (err) {
2125                 dev_err(&pdev->dev,
2126                         "Cannot obtain PCI resources, aborting.\n");
2127                 goto err_out_disable_pdev;
2128         }
2129
2130         pci_set_master(pdev);
2131
2132         err = pci_set_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2133         if (err) {
2134                 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2135                 goto err_out_free_res;
2136         }
2137
2138         err = pci_set_consistent_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2139         if (err) {
2140                 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2141                 goto err_out_free_res;
2142         }
2143
2144         b44reg_base = pci_resource_start(pdev, 0);
2145         b44reg_len = pci_resource_len(pdev, 0);
2146
2147         dev = alloc_etherdev(sizeof(*bp));
2148         if (!dev) {
2149                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
2150                 err = -ENOMEM;
2151                 goto err_out_free_res;
2152         }
2153
2154         SET_NETDEV_DEV(dev, &pdev->dev);
2155
2156         /* No interesting netdevice features in this card... */
2157         dev->features |= 0;
2158
2159         bp = netdev_priv(dev);
2160         bp->pdev = pdev;
2161         bp->dev = dev;
2162
2163         bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2164
2165         spin_lock_init(&bp->lock);
2166
2167         bp->regs = ioremap(b44reg_base, b44reg_len);
2168         if (!bp->regs) {
2169                 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
2170                 err = -ENOMEM;
2171                 goto err_out_free_dev;
2172         }
2173
2174         bp->rx_pending = B44_DEF_RX_RING_PENDING;
2175         bp->tx_pending = B44_DEF_TX_RING_PENDING;
2176
2177         dev->open = b44_open;
2178         dev->stop = b44_close;
2179         dev->hard_start_xmit = b44_start_xmit;
2180         dev->get_stats = b44_get_stats;
2181         dev->set_multicast_list = b44_set_rx_mode;
2182         dev->set_mac_address = b44_set_mac_addr;
2183         dev->do_ioctl = b44_ioctl;
2184         dev->tx_timeout = b44_tx_timeout;
2185         netif_napi_add(dev, &bp->napi, b44_poll, 64);
2186         dev->watchdog_timeo = B44_TX_TIMEOUT;
2187 #ifdef CONFIG_NET_POLL_CONTROLLER
2188         dev->poll_controller = b44_poll_controller;
2189 #endif
2190         dev->change_mtu = b44_change_mtu;
2191         dev->irq = pdev->irq;
2192         SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2193
2194         netif_carrier_off(dev);
2195
2196         err = b44_get_invariants(bp);
2197         if (err) {
2198                 dev_err(&pdev->dev,
2199                         "Problem fetching invariants of chip, aborting.\n");
2200                 goto err_out_iounmap;
2201         }
2202
2203         bp->mii_if.dev = dev;
2204         bp->mii_if.mdio_read = b44_mii_read;
2205         bp->mii_if.mdio_write = b44_mii_write;
2206         bp->mii_if.phy_id = bp->phy_addr;
2207         bp->mii_if.phy_id_mask = 0x1f;
2208         bp->mii_if.reg_num_mask = 0x1f;
2209
2210         /* By default, advertise all speed/duplex settings. */
2211         bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2212                       B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2213
2214         /* By default, auto-negotiate PAUSE. */
2215         bp->flags |= B44_FLAG_PAUSE_AUTO;
2216
2217         err = register_netdev(dev);
2218         if (err) {
2219                 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2220                 goto err_out_iounmap;
2221         }
2222
2223         pci_set_drvdata(pdev, dev);
2224
2225         pci_save_state(bp->pdev);
2226
2227         /* Chip reset provides power to the b44 MAC & PCI cores, which
2228          * is necessary for MAC register access.
2229          */
2230         b44_chip_reset(bp);
2231
2232         printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2233         for (i = 0; i < 6; i++)
2234                 printk("%2.2x%c", dev->dev_addr[i],
2235                        i == 5 ? '\n' : ':');
2236
2237         return 0;
2238
2239 err_out_iounmap:
2240         iounmap(bp->regs);
2241
2242 err_out_free_dev:
2243         free_netdev(dev);
2244
2245 err_out_free_res:
2246         pci_release_regions(pdev);
2247
2248 err_out_disable_pdev:
2249         pci_disable_device(pdev);
2250         pci_set_drvdata(pdev, NULL);
2251         return err;
2252 }
2253
2254 static void __devexit b44_remove_one(struct pci_dev *pdev)
2255 {
2256         struct net_device *dev = pci_get_drvdata(pdev);
2257         struct b44 *bp = netdev_priv(dev);
2258
2259         unregister_netdev(dev);
2260         iounmap(bp->regs);
2261         free_netdev(dev);
2262         pci_release_regions(pdev);
2263         pci_disable_device(pdev);
2264         pci_set_drvdata(pdev, NULL);
2265 }
2266
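/* PCI power management hooks.  Suspend mirrors b44_close: halt the
 * chip, release the rings and the IRQ, and, if WOL is enabled, re-init
 * just enough of the hardware (B44_PARTIAL_RESET) to arm wakeup before
 * the device loses power.  Resume redoes the b44_open-time setup.
 */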
2267 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2268 {
2269         struct net_device *dev = pci_get_drvdata(pdev);
2270         struct b44 *bp = netdev_priv(dev);
2271
2272         if (!netif_running(dev))
2273                  return 0;
2274
2275         del_timer_sync(&bp->timer);
2276
2277         spin_lock_irq(&bp->lock);
2278
2279         b44_halt(bp);
2280         netif_carrier_off(bp->dev);
2281         netif_device_detach(bp->dev);
2282         b44_free_rings(bp);
2283
2284         spin_unlock_irq(&bp->lock);
2285
2286         free_irq(dev->irq, dev);
2287         if (bp->flags & B44_FLAG_WOL_ENABLE) {
2288                 b44_init_hw(bp, B44_PARTIAL_RESET);
2289                 b44_setup_wol(bp);
2290         }
2291         pci_disable_device(pdev);
2292         return 0;
2293 }
2294
2295 static int b44_resume(struct pci_dev *pdev)
2296 {
2297         struct net_device *dev = pci_get_drvdata(pdev);
2298         struct b44 *bp = netdev_priv(dev);
2299         int rc = 0;
2300
2301         pci_restore_state(pdev);
2302         rc = pci_enable_device(pdev);
2303         if (rc) {
2304                 printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
2305                         dev->name);
2306                 return rc;
2307         }
2308
2309         pci_set_master(pdev);
2310
2311         if (!netif_running(dev))
2312                 return 0;
2313
2314         rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2315         if (rc) {
2316                 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2317                 pci_disable_device(pdev);
2318                 return rc;
2319         }
2320
2321         spin_lock_irq(&bp->lock);
2322
2323         b44_init_rings(bp);
2324         b44_init_hw(bp, B44_FULL_RESET);
2325         netif_device_attach(bp->dev);
2326         spin_unlock_irq(&bp->lock);
2327
2328         b44_enable_ints(bp);
2329         netif_wake_queue(dev);
2330
2331         mod_timer(&bp->timer, jiffies + 1);
2332
2333         return 0;
2334 }
2335
2336 static struct pci_driver b44_driver = {
2337         .name           = DRV_MODULE_NAME,
2338         .id_table       = b44_pci_tbl,
2339         .probe          = b44_init_one,
2340         .remove         = __devexit_p(b44_remove_one),
2341         .suspend        = b44_suspend,
2342         .resume         = b44_resume,
2343 };
2344
2345 static int __init b44_init(void)
2346 {
2347         unsigned int dma_desc_align_size = dma_get_cache_alignment();
2348
2349         /* Set up parameters for syncing RX/TX DMA descriptors */
2350         dma_desc_align_mask = ~(dma_desc_align_size - 1);
2351         dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
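        /* Illustrative example: with a 32-byte cache line,
         * dma_desc_align_size = 32, so dma_desc_align_mask = 0xffffffe0
         * and dma_desc_sync_size = max(32, sizeof(struct dma_desc)).
         */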
2352
2353         return pci_register_driver(&b44_driver);
2354 }
2355
2356 static void __exit b44_cleanup(void)
2357 {
2358         pci_unregister_driver(&b44_driver);
2359 }
2360
2361 module_init(b44_init);
2362 module_exit(b44_cleanup);
2363