/* b44.c: Broadcom 4400 device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2006 Broadcom Corporation.
 *
 * Distribute under GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME         "b44"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.01"
#define DRV_MODULE_RELDATE      "Jun 16, 2006"

#define B44_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU                     60
#define B44_MAX_MTU                     1500

#define B44_RX_RING_SIZE                512
#define B44_DEF_RX_RING_PENDING         200
#define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE                512
#define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_TX_RING_SIZE)
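/* The chip can only DMA to/from the low 1GB of the address space
 * (30-bit mask); see the bounce-buffer handling further down.
 */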
#define B44_DMA_MASK 0x3fffffff

#define TX_RING_GAP(BP) \
        (B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)                                              \
        (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
          (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
          (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))

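/* Worked example for the ring accounting above (illustrative only):
 * with B44_TX_RING_SIZE = 512, tx_pending = 511, tx_cons = 10 and
 * tx_prod = 500, the producer has not wrapped past the consumer
 * (cons <= prod), so TX_BUFFS_AVAIL = 10 + 511 - 500 = 21 free
 * descriptors.  NEXT_TX relies on B44_TX_RING_SIZE being a power of
 * two, so the increment wraps with a simple mask instead of a modulo.
 */
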
#define RX_PKT_BUF_SZ           (1536 + bp->rx_offset + 64)
#define TX_PKT_BUF_SZ           (B44_MAX_MTU + ETH_HLEN + 8)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE        0x400
#define B44_PATTERN_SIZE        0x80
#define B44_PMASK_BASE          0x600
#define B44_PMASK_SIZE          0x10
#define B44_MAX_PATTERNS        16
#define B44_ETHIPV6UDP_HLEN     62
#define B44_ETHIPV4UDP_HLEN     42

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");

static struct pci_device_id b44_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { }     /* terminate list with empty entry */
};

MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET          1
#define B44_FULL_RESET_SKIP_PHY 2
#define B44_PARTIAL_RESET       3

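/* Reset granularity for b44_init_hw() below (summary inferred from
 * its code paths; illustrative, not normative): a full reset also
 * resets and reconfigures the PHY, the SKIP_PHY variant reprograms
 * the MAC and DMA engines while leaving PHY state alone, and a
 * partial reset reprograms the MAC but only re-enables the receive
 * DMA engine, leaving the ring addresses untouched.
 */
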
static void b44_init_hw(struct b44 *, int);

static int dma_desc_align_mask;
static int dma_desc_sync_size;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)      # x,
B44_STAT_REG_DECLARE
#undef _B44
};

static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
                                                dma_addr_t dma_base,
                                                unsigned long offset,
                                                enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(&pdev->dev, dma_base,
                                         offset & dma_desc_align_mask,
                                         dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
                                             dma_addr_t dma_base,
                                             unsigned long offset,
                                             enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
                                      offset & dma_desc_align_mask,
                                      dma_desc_sync_size, dir);
}

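/* Note on the two helpers above: when a descriptor ring lives in
 * streaming DMA memory (the RX/TX "ring hack" cases set up in
 * b44_alloc_consistent()), each descriptor must be synced explicitly
 * whenever ownership passes between CPU and device.  Rounding the
 * offset down with dma_desc_align_mask and syncing
 * dma_desc_sync_size bytes (both configured elsewhere in the driver)
 * keeps each sync within one aligned descriptor window.
 */
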
static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
        return readl(bp->regs + reg);
}

static inline void bw32(const struct b44 *bp,
                        unsigned long reg, unsigned long val)
{
        writel(val, bp->regs + reg);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
                        u32 bit, unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;
                if (!clear && (val & bit))
                        break;
                udelay(10);
        }
        if (i == timeout) {
                printk(KERN_ERR PFX "%s: BUG!  Timeout waiting for bit %08x of register "
                       "%lx to %s.\n",
                       bp->dev->name,
                       bit, reg,
                       (clear ? "clear" : "set"));
                return -ENODEV;
        }
        return 0;
}

/* Sonics SiliconBackplane support routines.  ROFL, you should see all the
 * buzz words used on this company's website :-)
 *
 * All of these routines must be invoked with bp->lock held and
 * interrupts disabled.
 */

#define SB_PCI_DMA             0x40000000      /* Client Mode PCI memory access space (1 GB) */
#define BCM4400_PCI_CORE_ADDR  0x18002000      /* Address of PCI core on BCM4400 cards */

static u32 ssb_get_core_rev(struct b44 *bp)
{
        return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}

static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
        u32 bar_orig, pci_rev, val;

        pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
        pci_rev = ssb_get_core_rev(bp);

        val = br32(bp, B44_SBINTVEC);
        val |= cores;
        bw32(bp, B44_SBINTVEC, val);

        val = br32(bp, SSB_PCI_TRANS_2);
        val |= SSB_PCI_PREF | SSB_PCI_BURST;
        bw32(bp, SSB_PCI_TRANS_2, val);

        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

        return pci_rev;
}

static void ssb_core_disable(struct b44 *bp)
{
        if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
                return;

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
        b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
        b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
                            SBTMSLOW_REJECT | SBTMSLOW_RESET));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}

static void ssb_core_reset(struct b44 *bp)
{
        u32 val;

        ssb_core_disable(bp);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);
        udelay(1);

        /* Clear SERR if set, this is a hw bug workaround.  */
        if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
                bw32(bp, B44_SBTMSHIGH, 0);

        val = br32(bp, B44_SBIMSTATE);
        if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
                bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);
        udelay(1);

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}

static int ssb_core_unit(struct b44 *bp)
{
#if 0
        u32 val = br32(bp, B44_SBADMATCH0);
        u32 type, base;

        type = val & SBADMATCH0_TYPE_MASK;
        switch (type) {
        case 0:
                base = val & SBADMATCH0_BS0_MASK;
                break;

        case 1:
                base = val & SBADMATCH0_BS1_MASK;
                break;

        case 2:
        default:
                base = val & SBADMATCH0_BS2_MASK;
                break;
        }
#endif
        return 0;
}

static int ssb_is_core_up(struct b44 *bp)
{
        return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
                == SBTMSLOW_CLOCK);
}

static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
        u32 val;

        val  = ((u32) data[2]) << 24;
        val |= ((u32) data[3]) << 16;
        val |= ((u32) data[4]) <<  8;
        val |= ((u32) data[5]) <<  0;
        bw32(bp, B44_CAM_DATA_LO, val);
        val = (CAM_DATA_HI_VALID |
               (((u32) data[0]) << 8) |
               (((u32) data[1]) << 0));
        bw32(bp, B44_CAM_DATA_HI, val);
        bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
                            (index << CAM_CTRL_INDEX_SHIFT)));
        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

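/* CAM register layout as programmed by __b44_cam_write() above, for
 * a MAC address aa:bb:cc:dd:ee:ff (illustrative):
 *
 *   B44_CAM_DATA_HI = CAM_DATA_HI_VALID | (aa << 8) | bb
 *   B44_CAM_DATA_LO = (cc << 24) | (dd << 16) | (ee << 8) | ff
 *
 * The two most significant octets share the HI word with the valid
 * bit; the remaining four octets fill the LO word.
 */
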
static inline void __b44_disable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
        __b44_disable_ints(bp);

        /* Flush posted writes. */
        br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, bp->imask);
}

static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
        int err;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}

static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
                             (val & MDIO_DATA_DATA)));
        return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

/* miilib interface */
/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
 * due to code existing before miilib use was added to this driver.
 * Someone should remove this artificial driver limitation in
 * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
 */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
        u32 val;
        struct b44 *bp = netdev_priv(dev);
        int rc = b44_readphy(bp, location, &val);
        if (rc)
                return 0xffffffff;
        return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
                         int val)
{
        struct b44 *bp = netdev_priv(dev);
        b44_writephy(bp, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
        u32 val;
        int err;

        err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
        if (err)
                return err;
        udelay(100);
        err = b44_readphy(bp, MII_BMCR, &val);
        if (!err) {
                if (val & BMCR_RESET) {
                        printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
                               bp->dev->name);
                        err = -ENODEV;
                }
        }

        return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
        u32 val;

        bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
        bp->flags |= pause_flags;

        val = br32(bp, B44_RXCONFIG);
        if (pause_flags & B44_FLAG_RX_PAUSE)
                val |= RXCONFIG_FLOW;
        else
                val &= ~RXCONFIG_FLOW;
        bw32(bp, B44_RXCONFIG, val);

        val = br32(bp, B44_MAC_FLOW);
        if (pause_flags & B44_FLAG_TX_PAUSE)
                val |= (MAC_FLOW_PAUSE_ENAB |
                        (0xc0 & MAC_FLOW_RX_HI_WATER));
        else
                val &= ~MAC_FLOW_PAUSE_ENAB;
        bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
        u32 pause_enab = 0;

        /* The driver supports only rx pause by default because
           the b44 mac tx pause mechanism generates excessive
           pause frames.
           Use ethtool to turn on b44 tx pause if necessary.
         */
        if ((local & ADVERTISE_PAUSE_CAP) &&
            (local & ADVERTISE_PAUSE_ASYM)) {
                if ((remote & LPA_PAUSE_ASYM) &&
                    !(remote & LPA_PAUSE_CAP))
                        pause_enab |= B44_FLAG_RX_PAUSE;
        }

        __b44_set_flow_ctrl(bp, pause_enab);
}

static int b44_setup_phy(struct b44 *bp)
{
        u32 val;
        int err;

        if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
                                val & MII_ALEDCTRL_ALLMSK)) != 0)
                goto out;
        if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
                                val | MII_TLEDCTRL_ENABLE)) != 0)
                goto out;

        if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
                u32 adv = ADVERTISE_CSMA;

                if (bp->flags & B44_FLAG_ADV_10HALF)
                        adv |= ADVERTISE_10HALF;
                if (bp->flags & B44_FLAG_ADV_10FULL)
                        adv |= ADVERTISE_10FULL;
                if (bp->flags & B44_FLAG_ADV_100HALF)
                        adv |= ADVERTISE_100HALF;
                if (bp->flags & B44_FLAG_ADV_100FULL)
                        adv |= ADVERTISE_100FULL;

                if (bp->flags & B44_FLAG_PAUSE_AUTO)
                        adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

                if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
                        goto out;
                if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
                                                       BMCR_ANRESTART))) != 0)
                        goto out;
        } else {
                u32 bmcr;

                if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
                        goto out;
                bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
                if (bp->flags & B44_FLAG_100_BASE_T)
                        bmcr |= BMCR_SPEED100;
                if (bp->flags & B44_FLAG_FULL_DUPLEX)
                        bmcr |= BMCR_FULLDPLX;
                if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
                        goto out;

                /* Since we will not be negotiating there is no safe way
                 * to determine if the link partner supports flow control
                 * or not.  So just disable it completely in this case.
                 */
                b44_set_flow_ctrl(bp, 0, 0);
        }

out:
        return err;
}

static void b44_stats_update(struct b44 *bp)
{
        unsigned long reg;
        u32 *val;

        val = &bp->hw_stats.tx_good_octets;
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }

        /* Pad */
        reg += 8*4UL;

        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }
}

static void b44_link_report(struct b44 *bp)
{
        if (!netif_carrier_ok(bp->dev)) {
                printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
        } else {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
                       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

                printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
                       "%s for RX.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
                       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
        }
}

static void b44_check_phy(struct b44 *bp)
{
        u32 bmsr, aux;

        if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
            !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
            (bmsr != 0xffff)) {
                if (aux & MII_AUXCTRL_SPEED)
                        bp->flags |= B44_FLAG_100_BASE_T;
                else
                        bp->flags &= ~B44_FLAG_100_BASE_T;
                if (aux & MII_AUXCTRL_DUPLEX)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
                else
                        bp->flags &= ~B44_FLAG_FULL_DUPLEX;

                if (!netif_carrier_ok(bp->dev) &&
                    (bmsr & BMSR_LSTATUS)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        u32 local_adv, remote_adv;

                        if (bp->flags & B44_FLAG_FULL_DUPLEX)
                                val |= TX_CTRL_DUPLEX;
                        else
                                val &= ~TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);

                        if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
                            !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
                            !b44_readphy(bp, MII_LPA, &remote_adv))
                                b44_set_flow_ctrl(bp, local_adv, remote_adv);

                        /* Link now up */
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
                        /* Link now down */
                        netif_carrier_off(bp->dev);
                        b44_link_report(bp);
                }

                if (bmsr & BMSR_RFAULT)
                        printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
                               bp->dev->name);
                if (bmsr & BMSR_JCD)
                        printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
                               bp->dev->name);
        }
}

static void b44_timer(unsigned long __opaque)
{
        struct b44 *bp = (struct b44 *) __opaque;

        spin_lock_irq(&bp->lock);

        b44_check_phy(bp);

        b44_stats_update(bp);

        spin_unlock_irq(&bp->lock);

        bp->timer.expires = jiffies + HZ;
        add_timer(&bp->timer);
}

static void b44_tx(struct b44 *bp)
{
        u32 cur, cons;

        cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);

        /* XXX needs updating when NETIF_F_SG is supported */
        for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
                struct ring_info *rp = &bp->tx_buffers[cons];
                struct sk_buff *skb = rp->skb;

                BUG_ON(skb == NULL);

                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 skb->len,
                                 PCI_DMA_TODEVICE);
                rp->skb = NULL;
                dev_kfree_skb_irq(skb);
        }

        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
                netif_wake_queue(bp->dev);

        bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a "struct rx_header" 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
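/* In other words (illustrative, taking bp->rx_offset to be the 30
 * bytes mentioned above):
 *
 *   mapped buffer:   [ room for struct rx_header | packet data ... ]
 *   descriptor addr:  mapped base + bp->rx_offset
 *
 * so the address programmed into the descriptor is where packet data
 * lands and the chip deposits its status header just in front of it.
 */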
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *dp;
        struct ring_info *src_map, *map;
        struct rx_header *rh;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int dest_idx;
        u32 ctrl;

        src_map = NULL;
        if (src_idx >= 0)
                src_map = &bp->rx_buffers[src_idx];
        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        map = &bp->rx_buffers[dest_idx];
        skb = dev_alloc_skb(RX_PKT_BUF_SZ);
        if (skb == NULL)
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data,
                                 RX_PKT_BUF_SZ,
                                 PCI_DMA_FROMDEVICE);

        /* Hardware bug work-around, the chip is unable to do PCI DMA
           to/from anything above 1GB :-( */
        if (dma_mapping_error(mapping) ||
                mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
                /* Sigh... */
                if (!dma_mapping_error(mapping))
                        pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
                skb = __dev_alloc_skb(RX_PKT_BUF_SZ, GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                mapping = pci_map_single(bp->pdev, skb->data,
                                         RX_PKT_BUF_SZ,
                                         PCI_DMA_FROMDEVICE);
                if (dma_mapping_error(mapping) ||
                        mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
                        if (!dma_mapping_error(mapping))
                                pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
        }

        skb->dev = bp->dev;
        skb_reserve(skb, bp->rx_offset);

        rh = (struct rx_header *)
                (skb->data - bp->rx_offset);
        rh->len = 0;
        rh->flags = 0;

        map->skb = skb;
        pci_unmap_addr_set(map, mapping, mapping);

        if (src_map != NULL)
                src_map->skb = NULL;

        ctrl  = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        dp = &bp->rx_ring[dest_idx];
        dp->ctrl = cpu_to_le32(ctrl);
        dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dp),
                                             DMA_BIDIRECTIONAL);

        return RX_PKT_BUF_SZ;
}

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        struct rx_header *rh;
        int dest_idx;
        u32 ctrl;

        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        dest_desc = &bp->rx_ring[dest_idx];
        dest_map = &bp->rx_buffers[dest_idx];
        src_desc = &bp->rx_ring[src_idx];
        src_map = &bp->rx_buffers[src_idx];

        dest_map->skb = src_map->skb;
        rh = (struct rx_header *) src_map->skb->data;
        rh->len = 0;
        rh->flags = 0;
        pci_unmap_addr_set(dest_map, mapping,
                           pci_unmap_addr(src_map, mapping));

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
                                          src_idx * sizeof(*src_desc),
                                          DMA_BIDIRECTIONAL);

        ctrl = src_desc->ctrl;
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= cpu_to_le32(DESC_CTRL_EOT);
        else
                ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

        dest_desc->ctrl = ctrl;
        dest_desc->addr = src_desc->addr;

        src_map->skb = NULL;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dest_desc),
                                             DMA_BIDIRECTIONAL);

        pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr),
                                       RX_PKT_BUF_SZ,
                                       PCI_DMA_FROMDEVICE);
}

static int b44_rx(struct b44 *bp, int budget)
{
        int received;
        u32 cons, prod;

        received = 0;
        prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
        prod /= sizeof(struct dma_desc);
        cons = bp->rx_cons;

        while (cons != prod && budget > 0) {
                struct ring_info *rp = &bp->rx_buffers[cons];
                struct sk_buff *skb = rp->skb;
                dma_addr_t map = pci_unmap_addr(rp, mapping);
                struct rx_header *rh;
                u16 len;

                pci_dma_sync_single_for_cpu(bp->pdev, map,
                                            RX_PKT_BUF_SZ,
                                            PCI_DMA_FROMDEVICE);
                rh = (struct rx_header *) skb->data;
                len = le16_to_cpu(rh->len);
                if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
                    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
                drop_it:
                        b44_recycle_rx(bp, cons, bp->rx_prod);
                drop_it_no_recycle:
                        bp->stats.rx_dropped++;
                        goto next_pkt;
                }

                if (len == 0) {
                        int i = 0;

                        do {
                                udelay(2);
                                barrier();
                                len = le16_to_cpu(rh->len);
                        } while (len == 0 && i++ < 5);
                        if (len == 0)
                                goto drop_it;
                }

                /* Omit CRC. */
                len -= 4;

                if (len > RX_COPY_THRESHOLD) {
                        int skb_size;
                        skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
                        if (skb_size < 0)
                                goto drop_it;
                        pci_unmap_single(bp->pdev, map,
                                         skb_size, PCI_DMA_FROMDEVICE);
                        /* Leave out rx_header */
                        skb_put(skb, len + bp->rx_offset);
                        skb_pull(skb, bp->rx_offset);
                } else {
                        struct sk_buff *copy_skb;

                        b44_recycle_rx(bp, cons, bp->rx_prod);
                        copy_skb = dev_alloc_skb(len + 2);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        copy_skb->dev = bp->dev;
                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        /* DMA sync done above, copy just the actual packet */
                        memcpy(copy_skb->data, skb->data + bp->rx_offset, len);

                        skb = copy_skb;
                }
                skb->ip_summed = CHECKSUM_NONE;
                skb->protocol = eth_type_trans(skb, bp->dev);
                netif_receive_skb(skb);
                bp->dev->last_rx = jiffies;
                received++;
                budget--;
        next_pkt:
                bp->rx_prod = (bp->rx_prod + 1) &
                        (B44_RX_RING_SIZE - 1);
                cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
        }

        bp->rx_cons = cons;
        bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

        return received;
}

static int b44_poll(struct net_device *netdev, int *budget)
{
        struct b44 *bp = netdev_priv(netdev);
        int done;

        spin_lock_irq(&bp->lock);

        if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
                /* spin_lock(&bp->tx_lock); */
                b44_tx(bp);
                /* spin_unlock(&bp->tx_lock); */
        }
        spin_unlock_irq(&bp->lock);

        done = 1;
        if (bp->istat & ISTAT_RX) {
                int orig_budget = *budget;
                int work_done;

                if (orig_budget > netdev->quota)
                        orig_budget = netdev->quota;

                work_done = b44_rx(bp, orig_budget);

                *budget -= work_done;
                netdev->quota -= work_done;

                if (work_done >= orig_budget)
                        done = 0;
        }

        if (bp->istat & ISTAT_ERRORS) {
                unsigned long flags;

                spin_lock_irqsave(&bp->lock, flags);
                b44_halt(bp);
                b44_init_rings(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
                netif_wake_queue(bp->dev);
                spin_unlock_irqrestore(&bp->lock, flags);
                done = 1;
        }

        if (done) {
                netif_rx_complete(netdev);
                b44_enable_ints(bp);
        }

        return (done ? 0 : 1);
}

static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct b44 *bp = netdev_priv(dev);
        u32 istat, imask;
        int handled = 0;

        spin_lock(&bp->lock);

        istat = br32(bp, B44_ISTAT);
        imask = br32(bp, B44_IMASK);

        /* The interrupt mask register controls which interrupt bits
         * will actually raise an interrupt to the CPU when set by hw/firmware,
         * but doesn't mask off the bits.
         */
        istat &= imask;
        if (istat) {
                handled = 1;

                if (unlikely(!netif_running(dev))) {
                        printk(KERN_INFO "%s: late interrupt.\n", dev->name);
                        goto irq_ack;
                }

                if (netif_rx_schedule_prep(dev)) {
                        /* NOTE: These writes are posted by the readback of
                         *       the ISTAT register below.
                         */
                        bp->istat = istat;
                        __b44_disable_ints(bp);
                        __netif_rx_schedule(dev);
                } else {
                        printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
                               dev->name);
                }

irq_ack:
                bw32(bp, B44_ISTAT, istat);
                br32(bp, B44_ISTAT);
        }
        spin_unlock(&bp->lock);
        return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
               dev->name);

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        netif_wake_queue(dev);
}

static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        struct sk_buff *bounce_skb;
        int rc = NETDEV_TX_OK;
        dma_addr_t mapping;
        u32 len, entry, ctrl;

        len = skb->len;
        spin_lock_irq(&bp->lock);

        /* This is a hard error, log it. */
        if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
                       dev->name);
                goto err_out;
        }

        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
                /* Chip can't handle DMA to/from >1GB, use bounce buffer */
                if (!dma_mapping_error(mapping))
                        pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);

                bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
                                             GFP_ATOMIC|GFP_DMA);
                if (!bounce_skb)
                        goto err_out;

                mapping = pci_map_single(bp->pdev, bounce_skb->data,
                                         len, PCI_DMA_TODEVICE);
                if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
                        if (!dma_mapping_error(mapping))
                                pci_unmap_single(bp->pdev, mapping,
                                         len, PCI_DMA_TODEVICE);
                        dev_kfree_skb_any(bounce_skb);
                        goto err_out;
                }

                memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }

        entry = bp->tx_prod;
        bp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

        ctrl  = (len & DESC_CTRL_LEN);
        ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
        if (entry == (B44_TX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
        bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
                                             entry * sizeof(bp->tx_ring[0]),
                                             DMA_TO_DEVICE);

        entry = NEXT_TX(entry);

        bp->tx_prod = entry;

        wmb();

        bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_BUGGY_TXPTR)
                bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

        dev->trans_start = jiffies;

out_unlock:
        spin_unlock_irq(&bp->lock);

        return rc;

err_out:
        rc = NETDEV_TX_BUSY;
        goto out_unlock;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
        struct b44 *bp = netdev_priv(dev);

        if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                dev->mtu = new_mtu;
                return 0;
        }

        spin_lock_irq(&bp->lock);
        b44_halt(bp);
        dev->mtu = new_mtu;
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
        struct ring_info *rp;
        int i;

        for (i = 0; i < B44_RX_RING_SIZE; i++) {
                rp = &bp->rx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 RX_PKT_BUF_SZ,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }

        /* XXX needs changes once NETIF_F_SG is set... */
        for (i = 0; i < B44_TX_RING_SIZE; i++) {
                rp = &bp->tx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 rp->skb->len,
                                 PCI_DMA_TODEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
        int i;

        b44_free_rings(bp);

        memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
        memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
                                           DMA_TABLE_BYTES,
                                           DMA_BIDIRECTIONAL);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
                                           DMA_TABLE_BYTES,
                                           DMA_TO_DEVICE);

        for (i = 0; i < bp->rx_pending; i++) {
                if (b44_alloc_rx_skb(bp, -1, i) < 0)
                        break;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
        kfree(bp->rx_buffers);
        bp->rx_buffers = NULL;
        kfree(bp->tx_buffers);
        bp->tx_buffers = NULL;
        if (bp->rx_ring) {
                if (bp->flags & B44_FLAG_RX_RING_HACK) {
                        dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
                                         DMA_TABLE_BYTES,
                                         DMA_BIDIRECTIONAL);
                        kfree(bp->rx_ring);
                } else
                        pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
                                            bp->rx_ring, bp->rx_ring_dma);
                bp->rx_ring = NULL;
                bp->flags &= ~B44_FLAG_RX_RING_HACK;
        }
        if (bp->tx_ring) {
                if (bp->flags & B44_FLAG_TX_RING_HACK) {
                        dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
                                         DMA_TABLE_BYTES,
                                         DMA_TO_DEVICE);
                        kfree(bp->tx_ring);
                } else
                        pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
                                            bp->tx_ring, bp->tx_ring_dma);
                bp->tx_ring = NULL;
                bp->flags &= ~B44_FLAG_TX_RING_HACK;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp)
{
        int size;

        size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
        bp->rx_buffers = kzalloc(size, GFP_KERNEL);
        if (!bp->rx_buffers)
                goto out_err;

        size = B44_TX_RING_SIZE * sizeof(struct ring_info);
        bp->tx_buffers = kzalloc(size, GFP_KERNEL);
        if (!bp->tx_buffers)
                goto out_err;

        size = DMA_TABLE_BYTES;
        bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
        if (!bp->rx_ring) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *rx_ring;
                dma_addr_t rx_ring_dma;

                rx_ring = kzalloc(size, GFP_KERNEL);
                if (!rx_ring)
                        goto out_err;

                rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_BIDIRECTIONAL);

                if (dma_mapping_error(rx_ring_dma) ||
                        rx_ring_dma + size > B44_DMA_MASK) {
                        kfree(rx_ring);
                        goto out_err;
                }

                bp->rx_ring = rx_ring;
                bp->rx_ring_dma = rx_ring_dma;
                bp->flags |= B44_FLAG_RX_RING_HACK;
        }

        bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
        if (!bp->tx_ring) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *tx_ring;
                dma_addr_t tx_ring_dma;

                tx_ring = kzalloc(size, GFP_KERNEL);
                if (!tx_ring)
                        goto out_err;

                tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_TO_DEVICE);

                if (dma_mapping_error(tx_ring_dma) ||
                        tx_ring_dma + size > B44_DMA_MASK) {
                        kfree(tx_ring);
                        goto out_err;
                }

                bp->tx_ring = tx_ring;
                bp->tx_ring_dma = tx_ring_dma;
                bp->flags |= B44_FLAG_TX_RING_HACK;
        }

        return 0;

out_err:
        b44_free_consistent(bp);
        return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
        unsigned long reg;

        bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
                br32(bp, reg);
        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
                br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
        if (ssb_is_core_up(bp)) {
                bw32(bp, B44_RCV_LAZY, 0);
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
                bw32(bp, B44_DMATX_CTRL, 0);
                bp->tx_prod = bp->tx_cons = 0;
                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);
                }
                bw32(bp, B44_DMARX_CTRL, 0);
                bp->rx_prod = bp->rx_cons = 0;
        } else {
                ssb_pci_setup(bp, (bp->core_unit == 0 ?
                                   SBINTVEC_ENET0 :
                                   SBINTVEC_ENET1));
        }

        ssb_core_reset(bp);

        b44_clear_stats(bp);

        /* Make PHY accessible. */
        bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                             (0x0d & MDIO_CTRL_MAXF_MASK)));
        br32(bp, B44_MDIO_CTRL);

        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                br32(bp, B44_ENET_CTRL);
                bp->flags &= ~B44_FLAG_INTERNAL_PHY;
        } else {
                u32 val = br32(bp, B44_DEVCTRL);

                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        br32(bp, B44_DEVCTRL);
                        udelay(100);
                }
                bp->flags |= B44_FLAG_INTERNAL_PHY;
        }
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
        b44_disable_ints(bp);
        b44_chip_reset(bp);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
        bw32(bp, B44_CAM_CTRL, 0);
        if (!(bp->dev->flags & IFF_PROMISC)) {
                u32 val;

                __b44_cam_write(bp, bp->dev->dev_addr, 0);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
        }
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
        struct b44 *bp = netdev_priv(dev);
        struct sockaddr *addr = p;

        if (netif_running(dev))
                return -EBUSY;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        spin_lock_irq(&bp->lock);
        __b44_set_mac_addr(bp);
        spin_unlock_irq(&bp->lock);

        return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
        u32 val;

        b44_chip_reset(bp);
        if (reset_kind == B44_FULL_RESET) {
                b44_phy_reset(bp);
                b44_setup_phy(bp);
        }

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too.  */
        __b44_set_rx_mode(bp->dev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
        if (reset_kind == B44_PARTIAL_RESET) {
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
        } else {
                bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
                bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
                bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

                bw32(bp, B44_DMARX_PTR, bp->rx_pending);
                bp->rx_prod = bp->rx_pending;

                bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        }

        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}

static int b44_open(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int err;

        err = b44_alloc_consistent(bp);
        if (err)
                goto out;

        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        b44_check_phy(bp);

        err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
        if (unlikely(err < 0)) {
                b44_chip_reset(bp);
                b44_free_rings(bp);
                b44_free_consistent(bp);
                goto out;
        }

        init_timer(&bp->timer);
        bp->timer.expires = jiffies + HZ;
        bp->timer.data = (unsigned long) bp;
        bp->timer.function = b44_timer;
        add_timer(&bp->timer);

        b44_enable_ints(bp);
        netif_start_queue(dev);
out:
        return err;
}

#if 0
/*static*/ void b44_dump_state(struct b44 *bp)
{
        u32 val32, val32_2, val32_3, val32_4, val32_5;
        u16 val16;

        pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
        printk("DEBUG: PCI status [%04x]\n", val16);
}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        b44_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
        u32 i;
        u32 *pattern = (u32 *) pp;

        for (i = 0; i < bytes; i += sizeof(u32)) {
                bw32(bp, B44_FILT_ADDR, table_offset + i);
                bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
        }
}

static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
        int magicsync = 6;
        int k, j, len = offset;
        int ethaddr_bytes = ETH_ALEN;

        memset(ppattern + offset, 0xff, magicsync);
        for (j = 0; j < magicsync; j++)
                set_bit(len++, (unsigned long *) pmask);

        for (j = 0; j < B44_MAX_PATTERNS; j++) {
                if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
                        ethaddr_bytes = ETH_ALEN;
                else
                        ethaddr_bytes = B44_PATTERN_SIZE - len;
                if (ethaddr_bytes <= 0)
                        break;
                for (k = 0; k < ethaddr_bytes; k++) {
                        ppattern[offset + magicsync +
                                (j * ETH_ALEN) + k] = macaddr[k];
                        len++;
                        set_bit(len, (unsigned long *) pmask);
                }
        }
        return len - 1;
}

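/* For reference, the pattern built above follows the classic magic
 * packet payload: six 0xff synchronization bytes followed by the
 * interface MAC address repeated up to B44_MAX_PATTERNS times, as
 * limited by the B44_PATTERN_SIZE buffer, with pmask marking which
 * bytes the hardware must actually compare.  (Illustrative summary
 * of b44_magic_pattern() above, not a hardware specification.)
 */
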
1515 /* Setup magic packet patterns in the b44 WOL
1516  * pattern matching filter.
1517  */
1518 static void b44_setup_pseudo_magicp(struct b44 *bp)
1519 {
1520
1521         u32 val;
1522         int plen0, plen1, plen2;
1523         u8 *pwol_pattern;
1524         u8 pwol_mask[B44_PMASK_SIZE];
1525
1526         pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1527         if (!pwol_pattern) {
1528                 printk(KERN_ERR PFX "Memory not available for WOL\n");
1529                 return;
1530         }
1531
1532         /* Ipv4 magic packet pattern - pattern 0.*/
1533         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1534         memset(pwol_mask, 0, B44_PMASK_SIZE);
1535         plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1536                                   B44_ETHIPV4UDP_HLEN);
1537
1538         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1539         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1540
1541         /* Raw ethernet II magic packet pattern - pattern 1 */
1542         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1543         memset(pwol_mask, 0, B44_PMASK_SIZE);
1544         plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1545                                   ETH_HLEN);
1546
1547         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1548                        B44_PATTERN_BASE + B44_PATTERN_SIZE);
1549         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1550                        B44_PMASK_BASE + B44_PMASK_SIZE);
1551
1552         /* Ipv6 magic packet pattern - pattern 2 */
1553         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1554         memset(pwol_mask, 0, B44_PMASK_SIZE);
1555         plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1556                                   B44_ETHIPV6UDP_HLEN);
1557
1558         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1559                        B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1560         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1561                        B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1562
1563         kfree(pwol_pattern);
1564
1565         /* set these pattern's lengths: one less than each real length */
1566         val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1567         bw32(bp, B44_WKUP_LEN, val);
1568
1569         /* enable wakeup pattern matching */
1570         val = br32(bp, B44_DEVCTRL);
1571         bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1572
1573 }
1574
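/* Arm the chip for wake-on-LAN before suspending.  B0 and later cores
 * can match real magic packets against the station address in hardware
 * (DEVCTRL_MPM); older cores fall back to the pseudo magic-packet
 * pattern filter built above.
 */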
static void b44_setup_wol(struct b44 *bp)
{
        u32 val;
        u16 pmval;

        bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

        if (bp->flags & B44_FLAG_B0_ANDLATER) {
                bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

                val = bp->dev->dev_addr[2] << 24 |
                        bp->dev->dev_addr[3] << 16 |
                        bp->dev->dev_addr[4] << 8 |
                        bp->dev->dev_addr[5];
                bw32(bp, B44_ADDR_LO, val);

                val = bp->dev->dev_addr[0] << 8 |
                        bp->dev->dev_addr[1];
                bw32(bp, B44_ADDR_HI, val);

                val = br32(bp, B44_DEVCTRL);
                bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
        } else {
                b44_setup_pseudo_magicp(bp);
        }

        val = br32(bp, B44_SBTMSLOW);
        bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);

        pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
        pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
}

static int b44_close(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        netif_stop_queue(dev);

        netif_poll_disable(dev);

        del_timer_sync(&bp->timer);

        spin_lock_irq(&bp->lock);

#if 0
        b44_dump_state(bp);
#endif
        b44_halt(bp);
        b44_free_rings(bp);
        netif_carrier_off(dev);

        spin_unlock_irq(&bp->lock);

        free_irq(dev->irq, dev);

        netif_poll_enable(dev);

        if (bp->flags & B44_FLAG_WOL_ENABLE) {
                b44_init_hw(bp, B44_PARTIAL_RESET);
                b44_setup_wol(bp);
        }

        b44_free_consistent(bp);

        return 0;
}

static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        struct net_device_stats *nstat = &bp->stats;
        struct b44_hw_stats *hwstat = &bp->hw_stats;

        /* Convert HW stats into netdevice stats. */
        nstat->rx_packets = hwstat->rx_pkts;
        nstat->tx_packets = hwstat->tx_pkts;
        nstat->rx_bytes   = hwstat->rx_octets;
        nstat->tx_bytes   = hwstat->tx_octets;
        nstat->tx_errors  = (hwstat->tx_jabber_pkts +
                             hwstat->tx_oversize_pkts +
                             hwstat->tx_underruns +
                             hwstat->tx_excessive_cols +
                             hwstat->tx_late_cols);
        /* net_device_stats.multicast counts received multicasts, so
         * report the RX counter rather than the TX one.
         */
        nstat->multicast  = hwstat->rx_multicast_pkts;
        nstat->collisions = hwstat->tx_total_cols;

        nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
                                   hwstat->rx_undersize);
        nstat->rx_over_errors   = hwstat->rx_missed_pkts;
        nstat->rx_frame_errors  = hwstat->rx_align_errs;
        nstat->rx_crc_errors    = hwstat->rx_crc_errs;
        nstat->rx_errors        = (hwstat->rx_jabber_pkts +
                                   hwstat->rx_oversize_pkts +
                                   hwstat->rx_missed_pkts +
                                   hwstat->rx_crc_align_errs +
                                   hwstat->rx_undersize +
                                   hwstat->rx_crc_errs +
                                   hwstat->rx_align_errs +
                                   hwstat->rx_symbol_errs);

        nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
        /* Carrier lost counter seems to be broken for some devices */
        nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif

        return nstat;
}

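/* Load the multicast list into the receive CAM.  Slot 0 holds the
 * station address (written by __b44_set_mac_addr), so multicast
 * entries start at slot 1; the return value is the first unused slot.
 */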
static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
        struct dev_mc_list *mclist;
        int i, num_ents;

        num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
        mclist = dev->mc_list;
        for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
                __b44_cam_write(bp, mclist->dmi_addr, i + 1);
        }
        return i + 1;
}

static void __b44_set_rx_mode(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        u32 val;

        val = br32(bp, B44_RXCONFIG);
        val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
        if (dev->flags & IFF_PROMISC) {
                val |= RXCONFIG_PROMISC;
                bw32(bp, B44_RXCONFIG, val);
        } else {
                unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
                int i = 0;

                __b44_set_mac_addr(bp);

                if ((dev->flags & IFF_ALLMULTI) ||
                    (dev->mc_count > B44_MCAST_TABLE_SIZE))
                        val |= RXCONFIG_ALLMULTI;
                else
                        i = __b44_load_mcast(bp, dev);

                for (; i < 64; i++)
                        __b44_cam_write(bp, zero, i);

                bw32(bp, B44_RXCONFIG, val);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
        }
}

static void b44_set_rx_mode(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        spin_lock_irq(&bp->lock);
        __b44_set_rx_mode(dev);
        spin_unlock_irq(&bp->lock);
}

static u32 b44_get_msglevel(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        return bp->msg_enable;
}

static void b44_set_msglevel(struct net_device *dev, u32 value)
{
        struct b44 *bp = netdev_priv(dev);
        bp->msg_enable = value;
}

static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct b44 *bp = netdev_priv(dev);
        struct pci_dev *pci_dev = bp->pdev;

        strcpy(info->driver, DRV_MODULE_NAME);
        strcpy(info->version, DRV_MODULE_VERSION);
        strcpy(info->bus_info, pci_name(pci_dev));
}

static int b44_nway_reset(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        u32 bmcr;
        int r;

        spin_lock_irq(&bp->lock);
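        /* BMCR is read twice below; on some PHYs the first read can
         * return stale data, so only the second value is trusted
         * (presumably why the driver reads it back-to-back).
         */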
        b44_readphy(bp, MII_BMCR, &bmcr);
        b44_readphy(bp, MII_BMCR, &bmcr);
        r = -EINVAL;
        if (bmcr & BMCR_ANENABLE) {
                b44_writephy(bp, MII_BMCR,
                             bmcr | BMCR_ANRESTART);
                r = 0;
        }
        spin_unlock_irq(&bp->lock);

        return r;
}

static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct b44 *bp = netdev_priv(dev);

        cmd->supported = (SUPPORTED_Autoneg);
        cmd->supported |= (SUPPORTED_100baseT_Half |
                           SUPPORTED_100baseT_Full |
                           SUPPORTED_10baseT_Half |
                           SUPPORTED_10baseT_Full |
                           SUPPORTED_MII);

        cmd->advertising = 0;
        if (bp->flags & B44_FLAG_ADV_10HALF)
                cmd->advertising |= ADVERTISED_10baseT_Half;
        if (bp->flags & B44_FLAG_ADV_10FULL)
                cmd->advertising |= ADVERTISED_10baseT_Full;
        if (bp->flags & B44_FLAG_ADV_100HALF)
                cmd->advertising |= ADVERTISED_100baseT_Half;
        if (bp->flags & B44_FLAG_ADV_100FULL)
                cmd->advertising |= ADVERTISED_100baseT_Full;
        cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
        cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
                SPEED_100 : SPEED_10;
        cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
                DUPLEX_FULL : DUPLEX_HALF;
        cmd->port = 0;
        cmd->phy_address = bp->phy_addr;
        cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
                XCVR_INTERNAL : XCVR_EXTERNAL;
        cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
                AUTONEG_DISABLE : AUTONEG_ENABLE;
        if (cmd->autoneg == AUTONEG_ENABLE)
                cmd->advertising |= ADVERTISED_Autoneg;
        if (!netif_running(dev)) {
                cmd->speed = 0;
                cmd->duplex = 0xff;
        }
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}

static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct b44 *bp = netdev_priv(dev);

        /* We do not support gigabit. */
        if (cmd->autoneg == AUTONEG_ENABLE) {
                if (cmd->advertising &
                    (ADVERTISED_1000baseT_Half |
                     ADVERTISED_1000baseT_Full))
                        return -EINVAL;
        } else if ((cmd->speed != SPEED_100 &&
                    cmd->speed != SPEED_10) ||
                   (cmd->duplex != DUPLEX_HALF &&
                    cmd->duplex != DUPLEX_FULL)) {
                return -EINVAL;
        }

        spin_lock_irq(&bp->lock);

        if (cmd->autoneg == AUTONEG_ENABLE) {
                bp->flags &= ~(B44_FLAG_FORCE_LINK |
                               B44_FLAG_100_BASE_T |
                               B44_FLAG_FULL_DUPLEX |
                               B44_FLAG_ADV_10HALF |
                               B44_FLAG_ADV_10FULL |
                               B44_FLAG_ADV_100HALF |
                               B44_FLAG_ADV_100FULL);
                if (cmd->advertising == 0) {
                        bp->flags |= (B44_FLAG_ADV_10HALF |
                                      B44_FLAG_ADV_10FULL |
                                      B44_FLAG_ADV_100HALF |
                                      B44_FLAG_ADV_100FULL);
                } else {
                        if (cmd->advertising & ADVERTISED_10baseT_Half)
                                bp->flags |= B44_FLAG_ADV_10HALF;
                        if (cmd->advertising & ADVERTISED_10baseT_Full)
                                bp->flags |= B44_FLAG_ADV_10FULL;
                        if (cmd->advertising & ADVERTISED_100baseT_Half)
                                bp->flags |= B44_FLAG_ADV_100HALF;
                        if (cmd->advertising & ADVERTISED_100baseT_Full)
                                bp->flags |= B44_FLAG_ADV_100FULL;
                }
        } else {
                bp->flags |= B44_FLAG_FORCE_LINK;
                bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
                if (cmd->speed == SPEED_100)
                        bp->flags |= B44_FLAG_100_BASE_T;
                if (cmd->duplex == DUPLEX_FULL)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
        }

        if (netif_running(dev))
                b44_setup_phy(bp);

        spin_unlock_irq(&bp->lock);

        return 0;
}

static void b44_get_ringparam(struct net_device *dev,
                              struct ethtool_ringparam *ering)
{
        struct b44 *bp = netdev_priv(dev);

        ering->rx_max_pending = B44_RX_RING_SIZE - 1;
        ering->rx_pending = bp->rx_pending;

        /* XXX ethtool lacks a tx_max_pending, oops... */
}

static int b44_set_ringparam(struct net_device *dev,
                             struct ethtool_ringparam *ering)
{
        struct b44 *bp = netdev_priv(dev);

        if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
            (ering->rx_mini_pending != 0) ||
            (ering->rx_jumbo_pending != 0) ||
            (ering->tx_pending > B44_TX_RING_SIZE - 1))
                return -EINVAL;

        spin_lock_irq(&bp->lock);

        bp->rx_pending = ering->rx_pending;
        bp->tx_pending = ering->tx_pending;

        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);
        netif_wake_queue(bp->dev);
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}

static void b44_get_pauseparam(struct net_device *dev,
                               struct ethtool_pauseparam *epause)
{
        struct b44 *bp = netdev_priv(dev);

        epause->autoneg =
                (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
        epause->rx_pause =
                (bp->flags & B44_FLAG_RX_PAUSE) != 0;
        epause->tx_pause =
                (bp->flags & B44_FLAG_TX_PAUSE) != 0;
}

static int b44_set_pauseparam(struct net_device *dev,
                              struct ethtool_pauseparam *epause)
{
        struct b44 *bp = netdev_priv(dev);

        spin_lock_irq(&bp->lock);
        if (epause->autoneg)
                bp->flags |= B44_FLAG_PAUSE_AUTO;
        else
                bp->flags &= ~B44_FLAG_PAUSE_AUTO;
        if (epause->rx_pause)
                bp->flags |= B44_FLAG_RX_PAUSE;
        else
                bp->flags &= ~B44_FLAG_RX_PAUSE;
        if (epause->tx_pause)
                bp->flags |= B44_FLAG_TX_PAUSE;
        else
                bp->flags &= ~B44_FLAG_TX_PAUSE;
        if (bp->flags & B44_FLAG_PAUSE_AUTO) {
                b44_halt(bp);
                b44_init_rings(bp);
                b44_init_hw(bp, B44_FULL_RESET);
        } else {
                __b44_set_flow_ctrl(bp, bp->flags);
        }
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}

static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
                break;
        }
}

static int b44_get_stats_count(struct net_device *dev)
{
        return ARRAY_SIZE(b44_gstrings);
}

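/* Report the MIB counters in b44_gstrings order.  This walk relies on
 * struct b44_hw_stats laying out its u32 counters contiguously,
 * starting at tx_good_octets, in exactly the b44_gstrings order.
 */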
static void b44_get_ethtool_stats(struct net_device *dev,
                                  struct ethtool_stats *stats, u64 *data)
{
        struct b44 *bp = netdev_priv(dev);
        u32 *val = &bp->hw_stats.tx_good_octets;
        u32 i;

        spin_lock_irq(&bp->lock);

        b44_stats_update(bp);

        for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
                *data++ = *val++;

        spin_unlock_irq(&bp->lock);
}

static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct b44 *bp = netdev_priv(dev);

        wol->supported = WAKE_MAGIC;
        if (bp->flags & B44_FLAG_WOL_ENABLE)
                wol->wolopts = WAKE_MAGIC;
        else
                wol->wolopts = 0;
        memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct b44 *bp = netdev_priv(dev);

        spin_lock_irq(&bp->lock);
        if (wol->wolopts & WAKE_MAGIC)
                bp->flags |= B44_FLAG_WOL_ENABLE;
        else
                bp->flags &= ~B44_FLAG_WOL_ENABLE;
        spin_unlock_irq(&bp->lock);

        return 0;
}

static const struct ethtool_ops b44_ethtool_ops = {
        .get_drvinfo            = b44_get_drvinfo,
        .get_settings           = b44_get_settings,
        .set_settings           = b44_set_settings,
        .nway_reset             = b44_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_wol                = b44_get_wol,
        .set_wol                = b44_set_wol,
        .get_ringparam          = b44_get_ringparam,
        .set_ringparam          = b44_set_ringparam,
        .get_pauseparam         = b44_get_pauseparam,
        .set_pauseparam         = b44_set_pauseparam,
        .get_msglevel           = b44_get_msglevel,
        .set_msglevel           = b44_set_msglevel,
        .get_strings            = b44_get_strings,
        .get_stats_count        = b44_get_stats_count,
        .get_ethtool_stats      = b44_get_ethtool_stats,
        .get_perm_addr          = ethtool_op_get_perm_addr,
};

static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct b44 *bp = netdev_priv(dev);
        int err = -EINVAL;

        if (!netif_running(dev))
                goto out;

        spin_lock_irq(&bp->lock);
        err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
        spin_unlock_irq(&bp->lock);
out:
        return err;
}

/* Read 128 bytes of EEPROM. */
static int b44_read_eeprom(struct b44 *bp, u8 *data)
{
        long i;
        u16 *ptr = (u16 *) data;

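        /* readw() converts the little-endian register to CPU order;
         * cpu_to_le16() undoes that, so the buffer ends up holding the
         * raw EEPROM bytes regardless of host endianness.
         */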
        for (i = 0; i < 128; i += 2)
                ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));

        return 0;
}

static int __devinit b44_get_invariants(struct b44 *bp)
{
        u8 eeprom[128];
        int err;

        err = b44_read_eeprom(bp, &eeprom[0]);
        if (err)
                goto out;

        bp->dev->dev_addr[0] = eeprom[79];
        bp->dev->dev_addr[1] = eeprom[78];
        bp->dev->dev_addr[2] = eeprom[81];
        bp->dev->dev_addr[3] = eeprom[80];
        bp->dev->dev_addr[4] = eeprom[83];
        bp->dev->dev_addr[5] = eeprom[82];

        if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
                printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
                return -EINVAL;
        }

        memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);

        bp->phy_addr = eeprom[90] & 0x1f;

        /* With this, plus the rx_header prepended to the data by the
         * hardware, we'll land the ethernet header on a 2-byte boundary.
         */
        bp->rx_offset = 30;
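        /* 30 is presumably the 28-byte rx_header plus a 2-byte pad
         * (NET_IP_ALIGN style), which leaves the IP header 4-byte
         * aligned.
         */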

        bp->imask = IMASK_DEF;

        bp->core_unit = ssb_core_unit(bp);
        bp->dma_offset = SB_PCI_DMA;

        /* XXX - really required?
           bp->flags |= B44_FLAG_BUGGY_TXPTR;
         */

        if (ssb_get_core_rev(bp) >= 7)
                bp->flags |= B44_FLAG_B0_ANDLATER;

out:
        return err;
}

static int __devinit b44_init_one(struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
{
        static int b44_version_printed = 0;
        unsigned long b44reg_base, b44reg_len;
        struct net_device *dev;
        struct b44 *bp;
        int err, i;

        if (b44_version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device, "
                        "aborting.\n");
                return err;
        }

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                dev_err(&pdev->dev,
                        "Cannot find proper PCI device "
                        "base address, aborting.\n");
                err = -ENODEV;
                goto err_out_disable_pdev;
        }

        err = pci_request_regions(pdev, DRV_MODULE_NAME);
        if (err) {
                dev_err(&pdev->dev,
                        "Cannot obtain PCI resources, aborting.\n");
                goto err_out_disable_pdev;
        }

        pci_set_master(pdev);

        err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
        if (err) {
                dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
                goto err_out_free_res;
        }

        err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
        if (err) {
                dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
                goto err_out_free_res;
        }
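        /* B44_DMA_MASK is only 30 bits (0x3fffffff): the core cannot
         * DMA above 1 GB, so both streaming and coherent buffers must
         * be allocated below that boundary.
         */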

        b44reg_base = pci_resource_start(pdev, 0);
        b44reg_len = pci_resource_len(pdev, 0);

        dev = alloc_etherdev(sizeof(*bp));
        if (!dev) {
                dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
                err = -ENOMEM;
                goto err_out_free_res;
        }

        SET_MODULE_OWNER(dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

        /* No interesting netdevice features in this card... */
        dev->features |= 0;

        bp = netdev_priv(dev);
        bp->pdev = pdev;
        bp->dev = dev;

        bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

        spin_lock_init(&bp->lock);

        bp->regs = ioremap(b44reg_base, b44reg_len);
        if (!bp->regs) {
                dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
                err = -ENOMEM;
                goto err_out_free_dev;
        }

        bp->rx_pending = B44_DEF_RX_RING_PENDING;
        bp->tx_pending = B44_DEF_TX_RING_PENDING;

        dev->open = b44_open;
        dev->stop = b44_close;
        dev->hard_start_xmit = b44_start_xmit;
        dev->get_stats = b44_get_stats;
        dev->set_multicast_list = b44_set_rx_mode;
        dev->set_mac_address = b44_set_mac_addr;
        dev->do_ioctl = b44_ioctl;
        dev->tx_timeout = b44_tx_timeout;
        dev->poll = b44_poll;
        dev->weight = 64;
        dev->watchdog_timeo = B44_TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = b44_poll_controller;
#endif
        dev->change_mtu = b44_change_mtu;
        dev->irq = pdev->irq;
        SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

        netif_carrier_off(dev);

        err = b44_get_invariants(bp);
        if (err) {
                dev_err(&pdev->dev,
                        "Problem fetching invariants of chip, aborting.\n");
                goto err_out_iounmap;
        }

        bp->mii_if.dev = dev;
        bp->mii_if.mdio_read = b44_mii_read;
        bp->mii_if.mdio_write = b44_mii_write;
        bp->mii_if.phy_id = bp->phy_addr;
        bp->mii_if.phy_id_mask = 0x1f;
        bp->mii_if.reg_num_mask = 0x1f;

        /* By default, advertise all speed/duplex settings. */
        bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
                      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

        /* By default, auto-negotiate PAUSE. */
        bp->flags |= B44_FLAG_PAUSE_AUTO;

        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
                goto err_out_iounmap;
        }

        pci_set_drvdata(pdev, dev);

        pci_save_state(bp->pdev);

        /* Chip reset provides power to the b44 MAC & PCI cores, which
         * is necessary for MAC register access.
         */
        b44_chip_reset(bp);

        printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
        for (i = 0; i < 6; i++)
                printk("%2.2x%c", dev->dev_addr[i],
                       i == 5 ? '\n' : ':');

        return 0;

err_out_iounmap:
        iounmap(bp->regs);

err_out_free_dev:
        free_netdev(dev);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

static void __devexit b44_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct b44 *bp = netdev_priv(dev);

        unregister_netdev(dev);
        iounmap(bp->regs);
        free_netdev(dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

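/* Power-management hooks.  Suspend halts the MAC and, when WOL is
 * enabled, re-arms the wakeup logic via a partial reset; resume then
 * repeats essentially the same bring-up that b44_open performs.
 */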
static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct b44 *bp = netdev_priv(dev);

        if (!netif_running(dev))
                return 0;

        del_timer_sync(&bp->timer);

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        netif_carrier_off(bp->dev);
        netif_device_detach(bp->dev);
        b44_free_rings(bp);

        spin_unlock_irq(&bp->lock);

        free_irq(dev->irq, dev);
        if (bp->flags & B44_FLAG_WOL_ENABLE) {
                b44_init_hw(bp, B44_PARTIAL_RESET);
                b44_setup_wol(bp);
        }
        pci_disable_device(pdev);
        return 0;
}

static int b44_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct b44 *bp = netdev_priv(dev);
        int rc = 0;

        pci_restore_state(pdev);
        rc = pci_enable_device(pdev);
        if (rc) {
                printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
                       dev->name);
                return rc;
        }

        pci_set_master(pdev);

        if (!netif_running(dev))
                return 0;

        rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
        if (rc) {
                printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
                pci_disable_device(pdev);
                return rc;
        }

        spin_lock_irq(&bp->lock);

        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);
        netif_device_attach(bp->dev);
        spin_unlock_irq(&bp->lock);

        bp->timer.expires = jiffies + HZ;
        add_timer(&bp->timer);

        b44_enable_ints(bp);
        netif_wake_queue(dev);
        return 0;
}

static struct pci_driver b44_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = b44_pci_tbl,
        .probe          = b44_init_one,
        .remove         = __devexit_p(b44_remove_one),
        .suspend        = b44_suspend,
        .resume         = b44_resume,
};

static int __init b44_init(void)
{
        unsigned int dma_desc_align_size = dma_get_cache_alignment();

        /* Set up parameters for syncing RX/TX DMA descriptors */
        dma_desc_align_mask = ~(dma_desc_align_size - 1);
        dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
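        /* For example, a 32-byte cache line gives an align mask of
         * ~31UL, and descriptors are then synced in chunks of
         * max(32, sizeof(struct dma_desc)) bytes.
         */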

        return pci_register_driver(&b44_driver);
}

static void __exit b44_cleanup(void)
{
        pci_unregister_driver(&b44_driver);
}

module_init(b44_init);
module_exit(b44_cleanup);
