/* b44.c: Broadcom 4400 device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2006 Broadcom Corporation.
 *
 * Distribute under GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME         "b44"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.01"
#define DRV_MODULE_RELDATE      "Jun 16, 2006"

#define B44_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU                     60
#define B44_MAX_MTU                     1500

#define B44_RX_RING_SIZE                512
#define B44_DEF_RX_RING_PENDING         200
#define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE                512
#define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_TX_RING_SIZE)
#define B44_DMA_MASK 0x3fffffff

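/* Ring occupancy arithmetic.  With tx_pending at its default of 511 on a
 * 512-entry ring, TX_RING_GAP is the one slot deliberately kept empty so
 * that tx_prod == tx_cons unambiguously means "ring empty".  Worked
 * example: tx_cons = 5, tx_prod = 10, tx_pending = 511 gives
 * 5 + 511 - 10 = 506 free descriptors.
 */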
#define TX_RING_GAP(BP) \
        (B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)                                              \
        (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
          (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
          (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))

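/* Each RX buffer holds a maximum-size frame (1536 bytes) plus the headroom
 * in which the chip deposits its rx_header (bp->rx_offset); the trailing
 * 64 bytes appear to be alignment slack, though their exact role is not
 * spelled out here.
 */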
#define RX_PKT_BUF_SZ           (1536 + bp->rx_offset + 64)
#define TX_PKT_BUF_SZ           (B44_MAX_MTU + ETH_HLEN + 8)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE        0x400
#define B44_PATTERN_SIZE        0x80
#define B44_PMASK_BASE          0x600
#define B44_PMASK_SIZE          0x10
#define B44_MAX_PATTERNS        16
#define B44_ETHIPV6UDP_HLEN     62
#define B44_ETHIPV4UDP_HLEN     42

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");

static struct pci_device_id b44_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { }     /* terminate list with empty entry */
};

MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);
static void b44_init_hw(struct b44 *, int);

static int dma_desc_align_mask;
static int dma_desc_sync_size;

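/* ethtool statistics names.  The _B44() stringification below turns each
 * register name listed in B44_STAT_REG_DECLARE (see b44.h) into its string
 * form, so this name table stays in lockstep with the counter list.
 */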
static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)      # x,
B44_STAT_REG_DECLARE
#undef _B44
};

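/* When a descriptor ring could not be obtained from pci_alloc_consistent()
 * (the RING_HACK case below), it lives in a streaming DMA mapping and
 * individual descriptors must be synced explicitly.  These helpers sync
 * one aligned, descriptor-sized window of the ring.
 */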
static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
                                                dma_addr_t dma_base,
                                                unsigned long offset,
                                                enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(&pdev->dev, dma_base,
                                         offset & dma_desc_align_mask,
                                         dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
                                             dma_addr_t dma_base,
                                             unsigned long offset,
                                             enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
                                      offset & dma_desc_align_mask,
                                      dma_desc_sync_size, dir);
}

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
        return readl(bp->regs + reg);
}

static inline void bw32(const struct b44 *bp,
                        unsigned long reg, unsigned long val)
{
        writel(val, bp->regs + reg);
}

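/* Poll a register until the given bit clears (clear != 0) or sets,
 * delaying 10 usec per iteration; a timeout argument of 100 is therefore
 * roughly 1 msec of polling.
 */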
static int b44_wait_bit(struct b44 *bp, unsigned long reg,
                        u32 bit, unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;
                if (!clear && (val & bit))
                        break;
                udelay(10);
        }
        if (i == timeout) {
                printk(KERN_ERR PFX "%s: BUG!  Timeout waiting for bit %08x of register "
                       "%lx to %s.\n",
                       bp->dev->name,
                       bit, reg,
                       (clear ? "clear" : "set"));
                return -ENODEV;
        }
        return 0;
}

/* Sonics SiliconBackplane support routines.  ROFL, you should see all the
 * buzz words used on this company's website :-)
 *
 * All of these routines must be invoked with bp->lock held and
 * interrupts disabled.
 */

#define SB_PCI_DMA             0x40000000      /* Client Mode PCI memory access space (1 GB) */
#define BCM4400_PCI_CORE_ADDR  0x18002000      /* Address of PCI core on BCM4400 cards */

static u32 ssb_get_core_rev(struct b44 *bp)
{
        return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}

static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
        u32 bar_orig, pci_rev, val;

        pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
        pci_rev = ssb_get_core_rev(bp);

        val = br32(bp, B44_SBINTVEC);
        val |= cores;
        bw32(bp, B44_SBINTVEC, val);

        val = br32(bp, SSB_PCI_TRANS_2);
        val |= SSB_PCI_PREF | SSB_PCI_BURST;
        bw32(bp, SSB_PCI_TRANS_2, val);

        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

        return pci_rev;
}

static void ssb_core_disable(struct b44 *bp)
{
        if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
                return;

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
        b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
        b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
                            SBTMSLOW_REJECT | SBTMSLOW_RESET));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}

static void ssb_core_reset(struct b44 *bp)
{
        u32 val;

        ssb_core_disable(bp);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);
        udelay(1);

        /* Clear SERR if set, this is a hw bug workaround.  */
        if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
                bw32(bp, B44_SBTMSHIGH, 0);

        val = br32(bp, B44_SBIMSTATE);
        if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
                bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);
        udelay(1);

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}

static int ssb_core_unit(struct b44 *bp)
{
#if 0
        u32 val = br32(bp, B44_SBADMATCH0);
        u32 type, base;

        type = val & SBADMATCH0_TYPE_MASK;
        switch (type) {
        case 0:
                base = val & SBADMATCH0_BS0_MASK;
                break;

        case 1:
                base = val & SBADMATCH0_BS1_MASK;
                break;

        case 2:
        default:
                base = val & SBADMATCH0_BS2_MASK;
                break;
        }
#endif
        return 0;
}

static int ssb_is_core_up(struct b44 *bp)
{
        return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
                == SBTMSLOW_CLOCK);
}

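/* Write one MAC address into the CAM (perfect match filter).  The low
 * data word carries address bytes 2-5, the high word carries bytes 0-1
 * plus the valid bit, then the write is latched via B44_CAM_CTRL.
 */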
static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
        u32 val;

        val  = ((u32) data[2]) << 24;
        val |= ((u32) data[3]) << 16;
        val |= ((u32) data[4]) <<  8;
        val |= ((u32) data[5]) <<  0;
        bw32(bp, B44_CAM_DATA_LO, val);
        val = (CAM_DATA_HI_VALID |
               (((u32) data[0]) << 8) |
               (((u32) data[1]) << 0));
        bw32(bp, B44_CAM_DATA_HI, val);
        bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
                            (index << CAM_CTRL_INDEX_SHIFT)));
        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

static inline void __b44_disable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
        __b44_disable_ints(bp);

        /* Flush posted writes. */
        br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, bp->imask);
}

static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
        int err;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}

static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
                             (val & MDIO_DATA_DATA)));
        return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

/* miilib interface */
/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
 * due to code existing before miilib use was added to this driver.
 * Someone should remove this artificial driver limitation in
 * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
 */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
        u32 val;
        struct b44 *bp = netdev_priv(dev);
        int rc = b44_readphy(bp, location, &val);
        if (rc)
                return 0xffffffff;
        return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
                          int val)
{
        struct b44 *bp = netdev_priv(dev);
        b44_writephy(bp, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
        u32 val;
        int err;

        err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
        if (err)
                return err;
        udelay(100);
        err = b44_readphy(bp, MII_BMCR, &val);
        if (!err) {
                if (val & BMCR_RESET) {
                        printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
                               bp->dev->name);
                        err = -ENODEV;
                }
        }

        return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
        u32 val;

        bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
        bp->flags |= pause_flags;

        val = br32(bp, B44_RXCONFIG);
        if (pause_flags & B44_FLAG_RX_PAUSE)
                val |= RXCONFIG_FLOW;
        else
                val &= ~RXCONFIG_FLOW;
        bw32(bp, B44_RXCONFIG, val);

        val = br32(bp, B44_MAC_FLOW);
        if (pause_flags & B44_FLAG_TX_PAUSE)
                val |= (MAC_FLOW_PAUSE_ENAB |
                        (0xc0 & MAC_FLOW_RX_HI_WATER));
        else
                val &= ~MAC_FLOW_PAUSE_ENAB;
        bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
        u32 pause_enab = 0;

        /* The driver supports only rx pause by default because
           the b44 mac tx pause mechanism generates excessive
           pause frames.
           Use ethtool to turn on b44 tx pause if necessary.
         */
        if ((local & ADVERTISE_PAUSE_CAP) &&
            (local & ADVERTISE_PAUSE_ASYM)) {
                if ((remote & LPA_PAUSE_ASYM) &&
                    !(remote & LPA_PAUSE_CAP))
                        pause_enab |= B44_FLAG_RX_PAUSE;
        }

        __b44_set_flow_ctrl(bp, pause_enab);
}

static int b44_setup_phy(struct b44 *bp)
{
        u32 val;
        int err;

        if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
                                val & MII_ALEDCTRL_ALLMSK)) != 0)
                goto out;
        if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
                                val | MII_TLEDCTRL_ENABLE)) != 0)
                goto out;

        if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
                u32 adv = ADVERTISE_CSMA;

                if (bp->flags & B44_FLAG_ADV_10HALF)
                        adv |= ADVERTISE_10HALF;
                if (bp->flags & B44_FLAG_ADV_10FULL)
                        adv |= ADVERTISE_10FULL;
                if (bp->flags & B44_FLAG_ADV_100HALF)
                        adv |= ADVERTISE_100HALF;
                if (bp->flags & B44_FLAG_ADV_100FULL)
                        adv |= ADVERTISE_100FULL;

                if (bp->flags & B44_FLAG_PAUSE_AUTO)
                        adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

                if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
                        goto out;
                if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
                                                       BMCR_ANRESTART))) != 0)
                        goto out;
        } else {
                u32 bmcr;

                if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
                        goto out;
                bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
                if (bp->flags & B44_FLAG_100_BASE_T)
                        bmcr |= BMCR_SPEED100;
                if (bp->flags & B44_FLAG_FULL_DUPLEX)
                        bmcr |= BMCR_FULLDPLX;
                if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
                        goto out;

                /* Since we will not be negotiating there is no safe way
                 * to determine if the link partner supports flow control
                 * or not.  So just disable it completely in this case.
                 */
                b44_set_flow_ctrl(bp, 0, 0);
        }

out:
        return err;
}

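/* Accumulate the clear-on-read MIB counters into bp->hw_stats.  Note that
 * this walks hw_stats with a bare u32 pointer, so it relies on the counter
 * fields being declared in exactly the hardware register order.
 */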
static void b44_stats_update(struct b44 *bp)
{
        unsigned long reg;
        u32 *val;

        val = &bp->hw_stats.tx_good_octets;
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }

        /* Pad */
        reg += 8*4UL;

        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }
}

static void b44_link_report(struct b44 *bp)
{
        if (!netif_carrier_ok(bp->dev)) {
                printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
        } else {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
                       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

                printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
                       "%s for RX.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
                       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
        }
}

static void b44_check_phy(struct b44 *bp)
{
        u32 bmsr, aux;

        if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
            !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
            (bmsr != 0xffff)) {
                if (aux & MII_AUXCTRL_SPEED)
                        bp->flags |= B44_FLAG_100_BASE_T;
                else
                        bp->flags &= ~B44_FLAG_100_BASE_T;
                if (aux & MII_AUXCTRL_DUPLEX)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
                else
                        bp->flags &= ~B44_FLAG_FULL_DUPLEX;

                if (!netif_carrier_ok(bp->dev) &&
                    (bmsr & BMSR_LSTATUS)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        u32 local_adv, remote_adv;

                        if (bp->flags & B44_FLAG_FULL_DUPLEX)
                                val |= TX_CTRL_DUPLEX;
                        else
                                val &= ~TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);

                        if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
                            !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
                            !b44_readphy(bp, MII_LPA, &remote_adv))
                                b44_set_flow_ctrl(bp, local_adv, remote_adv);

                        /* Link now up */
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
                        /* Link now down */
                        netif_carrier_off(bp->dev);
                        b44_link_report(bp);
                }

                if (bmsr & BMSR_RFAULT)
                        printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
                               bp->dev->name);
                if (bmsr & BMSR_JCD)
                        printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
                               bp->dev->name);
        }
}

static void b44_timer(unsigned long __opaque)
{
        struct b44 *bp = (struct b44 *) __opaque;

        spin_lock_irq(&bp->lock);

        b44_check_phy(bp);

        b44_stats_update(bp);

        spin_unlock_irq(&bp->lock);

        bp->timer.expires = jiffies + HZ;
        add_timer(&bp->timer);
}

static void b44_tx(struct b44 *bp)
{
        u32 cur, cons;

        cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);

        /* XXX needs updating when NETIF_F_SG is supported */
        for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
                struct ring_info *rp = &bp->tx_buffers[cons];
                struct sk_buff *skb = rp->skb;

                BUG_ON(skb == NULL);

                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 skb->len,
                                 PCI_DMA_TODEVICE);
                rp->skb = NULL;
                dev_kfree_skb_irq(skb);
        }

        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
                netif_wake_queue(bp->dev);

        bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a "struct rx_header" 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *dp;
        struct ring_info *src_map, *map;
        struct rx_header *rh;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int dest_idx;
        u32 ctrl;

        src_map = NULL;
        if (src_idx >= 0)
                src_map = &bp->rx_buffers[src_idx];
        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        map = &bp->rx_buffers[dest_idx];
        skb = dev_alloc_skb(RX_PKT_BUF_SZ);
        if (skb == NULL)
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data,
                                 RX_PKT_BUF_SZ,
                                 PCI_DMA_FROMDEVICE);

        /* Hardware bug work-around, the chip is unable to do PCI DMA
           to/from anything above 1GB :-( */
        if (dma_mapping_error(mapping) ||
            mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
                /* Sigh... */
                if (!dma_mapping_error(mapping))
                        pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,
                                         PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
                skb = __dev_alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                mapping = pci_map_single(bp->pdev, skb->data,
                                         RX_PKT_BUF_SZ,
                                         PCI_DMA_FROMDEVICE);
                if (dma_mapping_error(mapping) ||
                    mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
                        if (!dma_mapping_error(mapping))
                                pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,
                                                 PCI_DMA_FROMDEVICE);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
        }

        skb->dev = bp->dev;
        skb_reserve(skb, bp->rx_offset);

        rh = (struct rx_header *)
                (skb->data - bp->rx_offset);
        rh->len = 0;
        rh->flags = 0;

        map->skb = skb;
        pci_unmap_addr_set(map, mapping, mapping);

        if (src_map != NULL)
                src_map->skb = NULL;

        ctrl  = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        dp = &bp->rx_ring[dest_idx];
        dp->ctrl = cpu_to_le32(ctrl);
        dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dp),
                                             DMA_BIDIRECTIONAL);

        return RX_PKT_BUF_SZ;
}

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        struct rx_header *rh;
        int dest_idx;
        u32 ctrl;

        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        dest_desc = &bp->rx_ring[dest_idx];
        dest_map = &bp->rx_buffers[dest_idx];
        src_desc = &bp->rx_ring[src_idx];
        src_map = &bp->rx_buffers[src_idx];

        dest_map->skb = src_map->skb;
        rh = (struct rx_header *) src_map->skb->data;
        rh->len = 0;
        rh->flags = 0;
        pci_unmap_addr_set(dest_map, mapping,
                           pci_unmap_addr(src_map, mapping));

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
                                          src_idx * sizeof(*src_desc),
                                          DMA_BIDIRECTIONAL);

        ctrl = src_desc->ctrl;
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= cpu_to_le32(DESC_CTRL_EOT);
        else
                ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

        dest_desc->ctrl = ctrl;
        dest_desc->addr = src_desc->addr;

        src_map->skb = NULL;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dest_desc),
                                             DMA_BIDIRECTIONAL);

        pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr),
                                       RX_PKT_BUF_SZ,
                                       PCI_DMA_FROMDEVICE);
}

static int b44_rx(struct b44 *bp, int budget)
{
        int received;
        u32 cons, prod;

        received = 0;
        prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
        prod /= sizeof(struct dma_desc);
        cons = bp->rx_cons;

        while (cons != prod && budget > 0) {
                struct ring_info *rp = &bp->rx_buffers[cons];
                struct sk_buff *skb = rp->skb;
                dma_addr_t map = pci_unmap_addr(rp, mapping);
                struct rx_header *rh;
                u16 len;

                pci_dma_sync_single_for_cpu(bp->pdev, map,
                                            RX_PKT_BUF_SZ,
                                            PCI_DMA_FROMDEVICE);
                rh = (struct rx_header *) skb->data;
                len = le16_to_cpu(rh->len);
                if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
                    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
                drop_it:
                        b44_recycle_rx(bp, cons, bp->rx_prod);
                drop_it_no_recycle:
                        bp->stats.rx_dropped++;
                        goto next_pkt;
                }

                if (len == 0) {
                        int i = 0;

                        do {
                                udelay(2);
                                barrier();
                                len = le16_to_cpu(rh->len);
                        } while (len == 0 && i++ < 5);
                        if (len == 0)
                                goto drop_it;
                }

                /* Omit CRC. */
                len -= 4;

                if (len > RX_COPY_THRESHOLD) {
                        int skb_size;
                        skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
                        if (skb_size < 0)
                                goto drop_it;
                        pci_unmap_single(bp->pdev, map,
                                         skb_size, PCI_DMA_FROMDEVICE);
                        /* Leave out rx_header */
                        skb_put(skb, len + bp->rx_offset);
                        skb_pull(skb, bp->rx_offset);
                } else {
                        struct sk_buff *copy_skb;

                        b44_recycle_rx(bp, cons, bp->rx_prod);
                        copy_skb = dev_alloc_skb(len + 2);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        copy_skb->dev = bp->dev;
                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        /* DMA sync done above, copy just the actual packet */
                        memcpy(copy_skb->data, skb->data + bp->rx_offset, len);

                        skb = copy_skb;
                }
                skb->ip_summed = CHECKSUM_NONE;
                skb->protocol = eth_type_trans(skb, bp->dev);
                netif_receive_skb(skb);
                bp->dev->last_rx = jiffies;
                received++;
                budget--;
        next_pkt:
                bp->rx_prod = (bp->rx_prod + 1) &
                        (B44_RX_RING_SIZE - 1);
                cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
        }

        bp->rx_cons = cons;
        bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

        return received;
}

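/* Old-style NAPI poll: *budget and netdev->quota are decremented by the
 * amount of RX work done; returning 0 means "done, interrupts re-enabled",
 * returning 1 keeps the device on the poll list.
 */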
static int b44_poll(struct net_device *netdev, int *budget)
{
        struct b44 *bp = netdev_priv(netdev);
        int done;

        spin_lock_irq(&bp->lock);

        if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
                /* spin_lock(&bp->tx_lock); */
                b44_tx(bp);
                /* spin_unlock(&bp->tx_lock); */
        }
        spin_unlock_irq(&bp->lock);

        done = 1;
        if (bp->istat & ISTAT_RX) {
                int orig_budget = *budget;
                int work_done;

                if (orig_budget > netdev->quota)
                        orig_budget = netdev->quota;

                work_done = b44_rx(bp, orig_budget);

                *budget -= work_done;
                netdev->quota -= work_done;

                if (work_done >= orig_budget)
                        done = 0;
        }

        if (bp->istat & ISTAT_ERRORS) {
                spin_lock_irq(&bp->lock);
                b44_halt(bp);
                b44_init_rings(bp);
                b44_init_hw(bp, 1);
                netif_wake_queue(bp->dev);
                spin_unlock_irq(&bp->lock);
                done = 1;
        }

        if (done) {
                netif_rx_complete(netdev);
                b44_enable_ints(bp);
        }

        return (done ? 0 : 1);
}

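/* The IRQ line may be shared, so only claim it (handled = 1) when one of
 * our unmasked status bits is set.  RX/TX work is deferred to b44_poll();
 * this handler just latches istat, masks the chip, and schedules NAPI.
 */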
static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct b44 *bp = netdev_priv(dev);
        u32 istat, imask;
        int handled = 0;

        spin_lock(&bp->lock);

        istat = br32(bp, B44_ISTAT);
        imask = br32(bp, B44_IMASK);

        /* The interrupt mask register controls which interrupt bits
         * will actually raise an interrupt to the CPU when set by hw/firmware,
         * but doesn't mask off the bits.
         */
        istat &= imask;
        if (istat) {
                handled = 1;

                if (unlikely(!netif_running(dev))) {
                        printk(KERN_INFO "%s: late interrupt.\n", dev->name);
                        goto irq_ack;
                }

                if (netif_rx_schedule_prep(dev)) {
                        /* NOTE: These writes are posted by the readback of
                         *       the ISTAT register below.
                         */
                        bp->istat = istat;
                        __b44_disable_ints(bp);
                        __netif_rx_schedule(dev);
                } else {
                        printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
                               dev->name);
                }

irq_ack:
                bw32(bp, B44_ISTAT, istat);
                br32(bp, B44_ISTAT);
        }
        spin_unlock(&bp->lock);
        return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
               dev->name);

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp, 1);

        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        netif_wake_queue(dev);
}

static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        struct sk_buff *bounce_skb;
        int rc = NETDEV_TX_OK;
        dma_addr_t mapping;
        u32 len, entry, ctrl;

        len = skb->len;
        spin_lock_irq(&bp->lock);

        /* This is a hard error, log it. */
        if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
                       dev->name);
                goto err_out;
        }

        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
                /* Chip can't handle DMA to/from >1GB, use bounce buffer */
                if (!dma_mapping_error(mapping))
                        pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);

                bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
                                             GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb)
                        goto err_out;

                mapping = pci_map_single(bp->pdev, bounce_skb->data,
                                         len, PCI_DMA_TODEVICE);
                if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
                        if (!dma_mapping_error(mapping))
                                pci_unmap_single(bp->pdev, mapping,
                                                 len, PCI_DMA_TODEVICE);
                        dev_kfree_skb_any(bounce_skb);
                        goto err_out;
                }

                memcpy(skb_put(bounce_skb, len), skb->data, len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }

        entry = bp->tx_prod;
        bp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

        ctrl  = (len & DESC_CTRL_LEN);
        ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
        if (entry == (B44_TX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
        bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
                                             entry * sizeof(bp->tx_ring[0]),
                                             DMA_TO_DEVICE);

        entry = NEXT_TX(entry);

        bp->tx_prod = entry;

        wmb();

        bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_BUGGY_TXPTR)
                bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

        dev->trans_start = jiffies;

out_unlock:
        spin_unlock_irq(&bp->lock);

        return rc;

err_out:
        rc = NETDEV_TX_BUSY;
        goto out_unlock;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
        struct b44 *bp = netdev_priv(dev);

        if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                dev->mtu = new_mtu;
                return 0;
        }

        spin_lock_irq(&bp->lock);
        b44_halt(bp);
        dev->mtu = new_mtu;
        b44_init_rings(bp);
        b44_init_hw(bp, 1);
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the network stack, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
        struct ring_info *rp;
        int i;

        for (i = 0; i < B44_RX_RING_SIZE; i++) {
                rp = &bp->rx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 RX_PKT_BUF_SZ,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }

        /* XXX needs changes once NETIF_F_SG is set... */
        for (i = 0; i < B44_TX_RING_SIZE; i++) {
                rp = &bp->tx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 rp->skb->len,
                                 PCI_DMA_TODEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the network stack, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
        int i;

        b44_free_rings(bp);

        memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
        memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
                                           DMA_TABLE_BYTES,
                                           DMA_BIDIRECTIONAL);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
                                           DMA_TABLE_BYTES,
                                           DMA_TO_DEVICE);

        for (i = 0; i < bp->rx_pending; i++) {
                if (b44_alloc_rx_skb(bp, -1, i) < 0)
                        break;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
        kfree(bp->rx_buffers);
        bp->rx_buffers = NULL;
        kfree(bp->tx_buffers);
        bp->tx_buffers = NULL;
        if (bp->rx_ring) {
                if (bp->flags & B44_FLAG_RX_RING_HACK) {
                        dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
                                         DMA_TABLE_BYTES,
                                         DMA_BIDIRECTIONAL);
                        kfree(bp->rx_ring);
                } else
                        pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
                                            bp->rx_ring, bp->rx_ring_dma);
                bp->rx_ring = NULL;
                bp->flags &= ~B44_FLAG_RX_RING_HACK;
        }
        if (bp->tx_ring) {
                if (bp->flags & B44_FLAG_TX_RING_HACK) {
                        dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
                                         DMA_TABLE_BYTES,
                                         DMA_TO_DEVICE);
                        kfree(bp->tx_ring);
                } else
                        pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
                                            bp->tx_ring, bp->tx_ring_dma);
                bp->tx_ring = NULL;
                bp->flags &= ~B44_FLAG_TX_RING_HACK;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp)
{
        int size;

        size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
        bp->rx_buffers = kzalloc(size, GFP_KERNEL);
        if (!bp->rx_buffers)
                goto out_err;

        size = B44_TX_RING_SIZE * sizeof(struct ring_info);
        bp->tx_buffers = kzalloc(size, GFP_KERNEL);
        if (!bp->tx_buffers)
                goto out_err;

        size = DMA_TABLE_BYTES;
        bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
        if (!bp->rx_ring) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *rx_ring;
                dma_addr_t rx_ring_dma;

                rx_ring = kzalloc(size, GFP_KERNEL);
                if (!rx_ring)
                        goto out_err;

                rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_BIDIRECTIONAL);

                if (dma_mapping_error(rx_ring_dma) ||
                    rx_ring_dma + size > B44_DMA_MASK) {
                        kfree(rx_ring);
                        goto out_err;
                }

                bp->rx_ring = rx_ring;
                bp->rx_ring_dma = rx_ring_dma;
                bp->flags |= B44_FLAG_RX_RING_HACK;
        }

        bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
        if (!bp->tx_ring) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *tx_ring;
                dma_addr_t tx_ring_dma;

                tx_ring = kzalloc(size, GFP_KERNEL);
                if (!tx_ring)
                        goto out_err;

                tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_TO_DEVICE);

                if (dma_mapping_error(tx_ring_dma) ||
                    tx_ring_dma + size > B44_DMA_MASK) {
                        kfree(tx_ring);
                        goto out_err;
                }

                bp->tx_ring = tx_ring;
                bp->tx_ring_dma = tx_ring_dma;
                bp->flags |= B44_FLAG_TX_RING_HACK;
        }

        return 0;

out_err:
        b44_free_consistent(bp);
        return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
        unsigned long reg;

        bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
                br32(bp, reg);
        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
                br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
        if (ssb_is_core_up(bp)) {
                bw32(bp, B44_RCV_LAZY, 0);
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
                bw32(bp, B44_DMATX_CTRL, 0);
                bp->tx_prod = bp->tx_cons = 0;
                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);
                }
                bw32(bp, B44_DMARX_CTRL, 0);
                bp->rx_prod = bp->rx_cons = 0;
        } else {
                ssb_pci_setup(bp, (bp->core_unit == 0 ?
                                   SBINTVEC_ENET0 :
                                   SBINTVEC_ENET1));
        }

        ssb_core_reset(bp);

        b44_clear_stats(bp);

        /* Make PHY accessible. */
        bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                             (0x0d & MDIO_CTRL_MAXF_MASK)));
        br32(bp, B44_MDIO_CTRL);

        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                br32(bp, B44_ENET_CTRL);
                bp->flags &= ~B44_FLAG_INTERNAL_PHY;
        } else {
                u32 val = br32(bp, B44_DEVCTRL);

                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        br32(bp, B44_DEVCTRL);
                        udelay(100);
                }
                bp->flags |= B44_FLAG_INTERNAL_PHY;
        }
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
        b44_disable_ints(bp);
        b44_chip_reset(bp);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
        bw32(bp, B44_CAM_CTRL, 0);
        if (!(bp->dev->flags & IFF_PROMISC)) {
                u32 val;

                __b44_cam_write(bp, bp->dev->dev_addr, 0);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
        }
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
        struct b44 *bp = netdev_priv(dev);
        struct sockaddr *addr = p;

        if (netif_running(dev))
                return -EBUSY;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        spin_lock_irq(&bp->lock);
        __b44_set_mac_addr(bp);
        spin_unlock_irq(&bp->lock);

        return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int full_reset)
{
        u32 val;

        b44_chip_reset(bp);
        if (full_reset) {
                b44_phy_reset(bp);
                b44_setup_phy(bp);
        }

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too.  */
        __b44_set_rx_mode(bp->dev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
        if (full_reset) {
                bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
                bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
                bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

                bw32(bp, B44_DMARX_PTR, bp->rx_pending);
                bp->rx_prod = bp->rx_pending;

                bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        } else {
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
        }

        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}

static int b44_open(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int err;

        err = b44_alloc_consistent(bp);
        if (err)
                goto out;

        b44_init_rings(bp);
        b44_init_hw(bp, 1);

        b44_check_phy(bp);

        err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
        if (unlikely(err < 0)) {
                b44_chip_reset(bp);
                b44_free_rings(bp);
                b44_free_consistent(bp);
                goto out;
        }

        init_timer(&bp->timer);
        bp->timer.expires = jiffies + HZ;
        bp->timer.data = (unsigned long) bp;
        bp->timer.function = b44_timer;
        add_timer(&bp->timer);

        b44_enable_ints(bp);
        netif_start_queue(dev);
out:
        return err;
}

#if 0
/*static*/ void b44_dump_state(struct b44 *bp)
{
        u32 val32, val32_2, val32_3, val32_4, val32_5;
        u16 val16;

        pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
        printk("DEBUG: PCI status [%04x]\n", val16);
}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        b44_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

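/* Copy a pattern (or pattern mask) into the chip's filter memory one
 * 32-bit word at a time, via the indirect FILT_ADDR/FILT_DATA register
 * pair.
 */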
static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
        u32 i;
        u32 *pattern = (u32 *) pp;

        for (i = 0; i < bytes; i += sizeof(u32)) {
                bw32(bp, B44_FILT_ADDR, table_offset + i);
                bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
        }
}

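/* Build a Wake-on-LAN "magic packet" match: six 0xff sync bytes followed
 * by as many repetitions of the MAC address as fit in the pattern buffer,
 * setting the corresponding bits in the byte mask.  Returns the final
 * pattern length minus one, the encoding B44_WKUP_LEN expects.
 */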
static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
        int magicsync = 6;
        int k, j, len = offset;
        int ethaddr_bytes = ETH_ALEN;

        memset(ppattern + offset, 0xff, magicsync);
        for (j = 0; j < magicsync; j++)
                set_bit(len++, (unsigned long *) pmask);

        for (j = 0; j < B44_MAX_PATTERNS; j++) {
                if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
                        ethaddr_bytes = ETH_ALEN;
                else
                        ethaddr_bytes = B44_PATTERN_SIZE - len;
                if (ethaddr_bytes <= 0)
                        break;
                for (k = 0; k < ethaddr_bytes; k++) {
                        ppattern[offset + magicsync +
                                (j * ETH_ALEN) + k] = macaddr[k];
                        len++;
                        set_bit(len, (unsigned long *) pmask);
                }
        }
        return len - 1;
}

1508 /* Setup magic packet patterns in the b44 WOL
1509  * pattern matching filter.
1510  */
1511 static void b44_setup_pseudo_magicp(struct b44 *bp)
1512 {
1513
1514         u32 val;
1515         int plen0, plen1, plen2;
1516         u8 *pwol_pattern;
1517         u8 pwol_mask[B44_PMASK_SIZE];
1518
1519         pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1520         if (!pwol_pattern) {
1521                 printk(KERN_ERR PFX "Memory not available for WOL\n");
1522                 return;
1523         }
1524
1525         /* Ipv4 magic packet pattern - pattern 0.*/
1526         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1527         memset(pwol_mask, 0, B44_PMASK_SIZE);
1528         plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1529                                   B44_ETHIPV4UDP_HLEN);
1530
1531         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1532         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1533
1534         /* Raw Ethernet II magic packet pattern - pattern 1 */
1535         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1536         memset(pwol_mask, 0, B44_PMASK_SIZE);
1537         plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1538                                   ETH_HLEN);
1539
1540         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1541                        B44_PATTERN_BASE + B44_PATTERN_SIZE);
1542         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1543                        B44_PMASK_BASE + B44_PMASK_SIZE);
1544
1545         /* IPv6 magic packet pattern - pattern 2 */
1546         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1547         memset(pwol_mask, 0, B44_PMASK_SIZE);
1548         plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1549                                   B44_ETHIPV6UDP_HLEN);
1550
1551         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1552                        B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1553         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1554                        B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1555
1556         kfree(pwol_pattern);
1557
1558         /* set these patterns' lengths: one less than each real length */
1559         val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1560         bw32(bp, B44_WKUP_LEN, val);
1561
1562         /* enable wakeup pattern matching */
1563         val = br32(bp, B44_DEVCTRL);
1564         bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1565
1566 }
1567
1568 static void b44_setup_wol(struct b44 *bp)
1569 {
1570         u32 val;
1571         u16 pmval;
1572
1573         bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1574
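        /* B0 and later cores have native magic-packet matching (MPM)
         * keyed to the station address; older cores emulate it with
         * the pseudo-magic pattern filter below.
         */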
1575         if (bp->flags & B44_FLAG_B0_ANDLATER) {
1576
1577                 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1578
1579                 val = bp->dev->dev_addr[2] << 24 |
1580                         bp->dev->dev_addr[3] << 16 |
1581                         bp->dev->dev_addr[4] << 8 |
1582                         bp->dev->dev_addr[5];
1583                 bw32(bp, B44_ADDR_LO, val);
1584
1585                 val = bp->dev->dev_addr[0] << 8 |
1586                         bp->dev->dev_addr[1];
1587                 bw32(bp, B44_ADDR_HI, val);
1588
1589                 val = br32(bp, B44_DEVCTRL);
1590                 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1591
1592         } else {
1593                 b44_setup_pseudo_magicp(bp);
1594         }
1595
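        /* Set the power/PME enable bits in the backplane core and in
         * the PCI power-management registers so the chip can raise a
         * wake event while suspended.
         */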
1596         val = br32(bp, B44_SBTMSLOW);
1597         bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
1598
1599         pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
1600         pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
1601
1602 }
1603
1604 static int b44_close(struct net_device *dev)
1605 {
1606         struct b44 *bp = netdev_priv(dev);
1607
1608         netif_stop_queue(dev);
1609
1610         netif_poll_disable(dev);
1611
1612         del_timer_sync(&bp->timer);
1613
1614         spin_lock_irq(&bp->lock);
1615
1616 #if 0
1617         b44_dump_state(bp);
1618 #endif
1619         b44_halt(bp);
1620         b44_free_rings(bp);
1621         netif_carrier_off(dev);
1622
1623         spin_unlock_irq(&bp->lock);
1624
1625         free_irq(dev->irq, dev);
1626
1627         netif_poll_enable(dev);
1628
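        /* If WOL is armed, bring the MAC back up with a partial
         * hardware init (second argument 0), just enough to program
         * the wakeup logic before the device powers down.
         */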
1629         if (bp->flags & B44_FLAG_WOL_ENABLE) {
1630                 b44_init_hw(bp, 0);
1631                 b44_setup_wol(bp);
1632         }
1633
1634         b44_free_consistent(bp);
1635
1636         return 0;
1637 }
1638
1639 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1640 {
1641         struct b44 *bp = netdev_priv(dev);
1642         struct net_device_stats *nstat = &bp->stats;
1643         struct b44_hw_stats *hwstat = &bp->hw_stats;
1644
1645         /* Convert HW stats into netdevice stats. */
1646         nstat->rx_packets = hwstat->rx_pkts;
1647         nstat->tx_packets = hwstat->tx_pkts;
1648         nstat->rx_bytes   = hwstat->rx_octets;
1649         nstat->tx_bytes   = hwstat->tx_octets;
1650         nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1651                              hwstat->tx_oversize_pkts +
1652                              hwstat->tx_underruns +
1653                              hwstat->tx_excessive_cols +
1654                              hwstat->tx_late_cols);
1655         nstat->multicast  = hwstat->rx_multicast_pkts; /* RX counter per netdev stats */
1656         nstat->collisions = hwstat->tx_total_cols;
1657
1658         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1659                                    hwstat->rx_undersize);
1660         nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1661         nstat->rx_frame_errors  = hwstat->rx_align_errs;
1662         nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1663         nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1664                                    hwstat->rx_oversize_pkts +
1665                                    hwstat->rx_missed_pkts +
1666                                    hwstat->rx_crc_align_errs +
1667                                    hwstat->rx_undersize +
1668                                    hwstat->rx_crc_errs +
1669                                    hwstat->rx_align_errs +
1670                                    hwstat->rx_symbol_errs);
1671
1672         nstat->tx_aborted_errors = hwstat->tx_underruns;
1673 #if 0
1674         /* Carrier lost counter seems to be broken for some devices */
1675         nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1676 #endif
1677
1678         return nstat;
1679 }
1680
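/* Load the multicast list into the CAM.  Slot 0 holds the unicast
 * station address, so multicast entries start at slot 1; the return
 * value is the next free CAM slot.
 */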
1681 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1682 {
1683         struct dev_mc_list *mclist;
1684         int i, num_ents;
1685
1686         num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1687         mclist = dev->mc_list;
1688         for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1689                 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1690         }
1691         return i + 1;
1692 }
1693
1694 static void __b44_set_rx_mode(struct net_device *dev)
1695 {
1696         struct b44 *bp = netdev_priv(dev);
1697         u32 val;
1698
1699         val = br32(bp, B44_RXCONFIG);
1700         val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1701         if (dev->flags & IFF_PROMISC) {
1702                 val |= RXCONFIG_PROMISC;
1703                 bw32(bp, B44_RXCONFIG, val);
1704         } else {
1705                 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1706                 int i = 0;
1707
1708                 __b44_set_mac_addr(bp);
1709
1710                 if ((dev->flags & IFF_ALLMULTI) ||
1711                     (dev->mc_count > B44_MCAST_TABLE_SIZE))
1712                         val |= RXCONFIG_ALLMULTI;
1713                 else
1714                         i = __b44_load_mcast(bp, dev);
1715
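                /* zero out any unused entries of the 64-slot CAM */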
1716                 for (; i < 64; i++)
1717                         __b44_cam_write(bp, zero, i);
1718
1719                 bw32(bp, B44_RXCONFIG, val);
1720                 val = br32(bp, B44_CAM_CTRL);
1721                 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1722         }
1723 }
1724
1725 static void b44_set_rx_mode(struct net_device *dev)
1726 {
1727         struct b44 *bp = netdev_priv(dev);
1728
1729         spin_lock_irq(&bp->lock);
1730         __b44_set_rx_mode(dev);
1731         spin_unlock_irq(&bp->lock);
1732 }
1733
1734 static u32 b44_get_msglevel(struct net_device *dev)
1735 {
1736         struct b44 *bp = netdev_priv(dev);
1737         return bp->msg_enable;
1738 }
1739
1740 static void b44_set_msglevel(struct net_device *dev, u32 value)
1741 {
1742         struct b44 *bp = netdev_priv(dev);
1743         bp->msg_enable = value;
1744 }
1745
1746 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1747 {
1748         struct b44 *bp = netdev_priv(dev);
1749         struct pci_dev *pci_dev = bp->pdev;
1750
1751         strcpy(info->driver, DRV_MODULE_NAME);
1752         strcpy(info->version, DRV_MODULE_VERSION);
1753         strcpy(info->bus_info, pci_name(pci_dev));
1754 }
1755
1756 static int b44_nway_reset(struct net_device *dev)
1757 {
1758         struct b44 *bp = netdev_priv(dev);
1759         u32 bmcr;
1760         int r;
1761
1762         spin_lock_irq(&bp->lock);
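        /* BMCR is read twice here, presumably because the first read
         * of a PHY register can return stale data on some PHYs.
         */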
1763         b44_readphy(bp, MII_BMCR, &bmcr);
1764         b44_readphy(bp, MII_BMCR, &bmcr);
1765         r = -EINVAL;
1766         if (bmcr & BMCR_ANENABLE) {
1767                 b44_writephy(bp, MII_BMCR,
1768                              bmcr | BMCR_ANRESTART);
1769                 r = 0;
1770         }
1771         spin_unlock_irq(&bp->lock);
1772
1773         return r;
1774 }
1775
1776 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1777 {
1778         struct b44 *bp = netdev_priv(dev);
1779
1780         cmd->supported = (SUPPORTED_Autoneg);
1781         cmd->supported |= (SUPPORTED_100baseT_Half |
1782                            SUPPORTED_100baseT_Full |
1783                            SUPPORTED_10baseT_Half |
1784                            SUPPORTED_10baseT_Full |
1785                            SUPPORTED_MII);
1786
1787         cmd->advertising = 0;
1788         if (bp->flags & B44_FLAG_ADV_10HALF)
1789                 cmd->advertising |= ADVERTISED_10baseT_Half;
1790         if (bp->flags & B44_FLAG_ADV_10FULL)
1791                 cmd->advertising |= ADVERTISED_10baseT_Full;
1792         if (bp->flags & B44_FLAG_ADV_100HALF)
1793                 cmd->advertising |= ADVERTISED_100baseT_Half;
1794         if (bp->flags & B44_FLAG_ADV_100FULL)
1795                 cmd->advertising |= ADVERTISED_100baseT_Full;
1796         cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1797         cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1798                 SPEED_100 : SPEED_10;
1799         cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1800                 DUPLEX_FULL : DUPLEX_HALF;
1801         cmd->port = 0;
1802         cmd->phy_address = bp->phy_addr;
1803         cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1804                 XCVR_INTERNAL : XCVR_EXTERNAL;
1805         cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1806                 AUTONEG_DISABLE : AUTONEG_ENABLE;
1807         if (cmd->autoneg == AUTONEG_ENABLE)
1808                 cmd->advertising |= ADVERTISED_Autoneg;
1809         if (!netif_running(dev)) {
1810                 cmd->speed = 0;
1811                 cmd->duplex = 0xff;
1812         }
1813         cmd->maxtxpkt = 0;
1814         cmd->maxrxpkt = 0;
1815         return 0;
1816 }
1817
1818 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1819 {
1820         struct b44 *bp = netdev_priv(dev);
1821
1822         /* We do not support gigabit. */
1823         if (cmd->autoneg == AUTONEG_ENABLE) {
1824                 if (cmd->advertising &
1825                     (ADVERTISED_1000baseT_Half |
1826                      ADVERTISED_1000baseT_Full))
1827                         return -EINVAL;
1828         } else if ((cmd->speed != SPEED_100 &&
1829                     cmd->speed != SPEED_10) ||
1830                    (cmd->duplex != DUPLEX_HALF &&
1831                     cmd->duplex != DUPLEX_FULL)) {
1832                 return -EINVAL;
1833         }
1834
1835         spin_lock_irq(&bp->lock);
1836
1837         if (cmd->autoneg == AUTONEG_ENABLE) {
1838                 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1839                                B44_FLAG_100_BASE_T |
1840                                B44_FLAG_FULL_DUPLEX |
1841                                B44_FLAG_ADV_10HALF |
1842                                B44_FLAG_ADV_10FULL |
1843                                B44_FLAG_ADV_100HALF |
1844                                B44_FLAG_ADV_100FULL);
1845                 if (cmd->advertising == 0) {
1846                         bp->flags |= (B44_FLAG_ADV_10HALF |
1847                                       B44_FLAG_ADV_10FULL |
1848                                       B44_FLAG_ADV_100HALF |
1849                                       B44_FLAG_ADV_100FULL);
1850                 } else {
1851                         if (cmd->advertising & ADVERTISED_10baseT_Half)
1852                                 bp->flags |= B44_FLAG_ADV_10HALF;
1853                         if (cmd->advertising & ADVERTISED_10baseT_Full)
1854                                 bp->flags |= B44_FLAG_ADV_10FULL;
1855                         if (cmd->advertising & ADVERTISED_100baseT_Half)
1856                                 bp->flags |= B44_FLAG_ADV_100HALF;
1857                         if (cmd->advertising & ADVERTISED_100baseT_Full)
1858                                 bp->flags |= B44_FLAG_ADV_100FULL;
1859                 }
1860         } else {
1861                 bp->flags |= B44_FLAG_FORCE_LINK;
1862                 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1863                 if (cmd->speed == SPEED_100)
1864                         bp->flags |= B44_FLAG_100_BASE_T;
1865                 if (cmd->duplex == DUPLEX_FULL)
1866                         bp->flags |= B44_FLAG_FULL_DUPLEX;
1867         }
1868
1869         if (netif_running(dev))
1870                 b44_setup_phy(bp);
1871
1872         spin_unlock_irq(&bp->lock);
1873
1874         return 0;
1875 }
1876
1877 static void b44_get_ringparam(struct net_device *dev,
1878                               struct ethtool_ringparam *ering)
1879 {
1880         struct b44 *bp = netdev_priv(dev);
1881
1882         ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1883         ering->rx_pending = bp->rx_pending;
1884         ering->tx_max_pending = B44_TX_RING_SIZE - 1;
1885         ering->tx_pending = bp->tx_pending;
1886 }
1887
1888 static int b44_set_ringparam(struct net_device *dev,
1889                              struct ethtool_ringparam *ering)
1890 {
1891         struct b44 *bp = netdev_priv(dev);
1892
1893         if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1894             (ering->rx_mini_pending != 0) ||
1895             (ering->rx_jumbo_pending != 0) ||
1896             (ering->tx_pending > B44_TX_RING_SIZE - 1))
1897                 return -EINVAL;
1898
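        /* New ring sizes only take effect across a full restart:
         * halt the chip, rebuild the rings, then re-init the hardware.
         */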
1899         spin_lock_irq(&bp->lock);
1900
1901         bp->rx_pending = ering->rx_pending;
1902         bp->tx_pending = ering->tx_pending;
1903
1904         b44_halt(bp);
1905         b44_init_rings(bp);
1906         b44_init_hw(bp, 1);
1907         netif_wake_queue(bp->dev);
1908         spin_unlock_irq(&bp->lock);
1909
1910         b44_enable_ints(bp);
1911
1912         return 0;
1913 }
1914
1915 static void b44_get_pauseparam(struct net_device *dev,
1916                                 struct ethtool_pauseparam *epause)
1917 {
1918         struct b44 *bp = netdev_priv(dev);
1919
1920         epause->autoneg =
1921                 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1922         epause->rx_pause =
1923                 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1924         epause->tx_pause =
1925                 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1926 }
1927
1928 static int b44_set_pauseparam(struct net_device *dev,
1929                                 struct ethtool_pauseparam *epause)
1930 {
1931         struct b44 *bp = netdev_priv(dev);
1932
1933         spin_lock_irq(&bp->lock);
1934         if (epause->autoneg)
1935                 bp->flags |= B44_FLAG_PAUSE_AUTO;
1936         else
1937                 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1938         if (epause->rx_pause)
1939                 bp->flags |= B44_FLAG_RX_PAUSE;
1940         else
1941                 bp->flags &= ~B44_FLAG_RX_PAUSE;
1942         if (epause->tx_pause)
1943                 bp->flags |= B44_FLAG_TX_PAUSE;
1944         else
1945                 bp->flags &= ~B44_FLAG_TX_PAUSE;
1946         if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1947                 b44_halt(bp);
1948                 b44_init_rings(bp);
1949                 b44_init_hw(bp, 1);
1950         } else {
1951                 __b44_set_flow_ctrl(bp, bp->flags);
1952         }
1953         spin_unlock_irq(&bp->lock);
1954
1955         b44_enable_ints(bp);
1956
1957         return 0;
1958 }
1959
1960 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1961 {
1962         switch (stringset) {
1963         case ETH_SS_STATS:
1964                 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1965                 break;
1966         }
1967 }
1968
1969 static int b44_get_stats_count(struct net_device *dev)
1970 {
1971         return ARRAY_SIZE(b44_gstrings);
1972 }
1973
1974 static void b44_get_ethtool_stats(struct net_device *dev,
1975                                   struct ethtool_stats *stats, u64 *data)
1976 {
1977         struct b44 *bp = netdev_priv(dev);
1978         u32 *val = &bp->hw_stats.tx_good_octets;
1979         u32 i;
1980
1981         spin_lock_irq(&bp->lock);
1982
1983         b44_stats_update(bp);
1984
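        /* struct b44_hw_stats is assumed to be a contiguous block of
         * u32 counters, in the same order as b44_gstrings and starting
         * at tx_good_octets, so it can be walked with a bare pointer.
         */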
1985         for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1986                 *data++ = *val++;
1987
1988         spin_unlock_irq(&bp->lock);
1989 }
1990
1991 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1992 {
1993         struct b44 *bp = netdev_priv(dev);
1994
1995         wol->supported = WAKE_MAGIC;
1996         if (bp->flags & B44_FLAG_WOL_ENABLE)
1997                 wol->wolopts = WAKE_MAGIC;
1998         else
1999                 wol->wolopts = 0;
2000         memset(&wol->sopass, 0, sizeof(wol->sopass));
2001 }
2002
2003 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2004 {
2005         struct b44 *bp = netdev_priv(dev);
2006
2007         spin_lock_irq(&bp->lock);
2008         if (wol->wolopts & WAKE_MAGIC)
2009                 bp->flags |= B44_FLAG_WOL_ENABLE;
2010         else
2011                 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2012         spin_unlock_irq(&bp->lock);
2013
2014         return 0;
2015 }
2016
2017 static const struct ethtool_ops b44_ethtool_ops = {
2018         .get_drvinfo            = b44_get_drvinfo,
2019         .get_settings           = b44_get_settings,
2020         .set_settings           = b44_set_settings,
2021         .nway_reset             = b44_nway_reset,
2022         .get_link               = ethtool_op_get_link,
2023         .get_wol                = b44_get_wol,
2024         .set_wol                = b44_set_wol,
2025         .get_ringparam          = b44_get_ringparam,
2026         .set_ringparam          = b44_set_ringparam,
2027         .get_pauseparam         = b44_get_pauseparam,
2028         .set_pauseparam         = b44_set_pauseparam,
2029         .get_msglevel           = b44_get_msglevel,
2030         .set_msglevel           = b44_set_msglevel,
2031         .get_strings            = b44_get_strings,
2032         .get_stats_count        = b44_get_stats_count,
2033         .get_ethtool_stats      = b44_get_ethtool_stats,
2034         .get_perm_addr          = ethtool_op_get_perm_addr,
2035 };
2036
2037 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2038 {
2039         struct mii_ioctl_data *data = if_mii(ifr);
2040         struct b44 *bp = netdev_priv(dev);
2041         int err = -EINVAL;
2042
2043         if (!netif_running(dev))
2044                 goto out;
2045
2046         spin_lock_irq(&bp->lock);
2047         err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2048         spin_unlock_irq(&bp->lock);
2049 out:
2050         return err;
2051 }
2052
2053 /* Read 128 bytes of EEPROM. */
2054 static int b44_read_eeprom(struct b44 *bp, u8 *data)
2055 {
2056         long i;
2057         u16 *ptr = (u16 *) data;
2058
2059         for (i = 0; i < 128; i += 2)
2060                 ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
2061
2062         return 0;
2063 }
2064
2065 static int __devinit b44_get_invariants(struct b44 *bp)
2066 {
2067         u8 eeprom[128];
2068         int err;
2069
2070         err = b44_read_eeprom(bp, &eeprom[0]);
2071         if (err)
2072                 goto out;
2073
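        /* The MAC address bytes sit byte-swapped within each 16-bit
         * SPROM word as read above, hence the pairwise-reversed
         * indices.
         */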
2074         bp->dev->dev_addr[0] = eeprom[79];
2075         bp->dev->dev_addr[1] = eeprom[78];
2076         bp->dev->dev_addr[2] = eeprom[81];
2077         bp->dev->dev_addr[3] = eeprom[80];
2078         bp->dev->dev_addr[4] = eeprom[83];
2079         bp->dev->dev_addr[5] = eeprom[82];
2080
2081         if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2082                 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
2083                 return -EINVAL;
2084         }
2085
2086         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2087
2088         bp->phy_addr = eeprom[90] & 0x1f;
2089
2090         /* With this, plus the rx_header prepended to the data by the
2091          * hardware, we'll land the ethernet header on a 2-byte boundary.
2092          */
2093         bp->rx_offset = 30;
2094
2095         bp->imask = IMASK_DEF;
2096
2097         bp->core_unit = ssb_core_unit(bp);
2098         bp->dma_offset = SB_PCI_DMA;
2099
2100         /* XXX - really required?
2101            bp->flags |= B44_FLAG_BUGGY_TXPTR;
2102          */
2103
2104         if (ssb_get_core_rev(bp) >= 7)
2105                 bp->flags |= B44_FLAG_B0_ANDLATER;
2106
2107 out:
2108         return err;
2109 }
2110
2111 static int __devinit b44_init_one(struct pci_dev *pdev,
2112                                   const struct pci_device_id *ent)
2113 {
2114         static int b44_version_printed = 0;
2115         unsigned long b44reg_base, b44reg_len;
2116         struct net_device *dev;
2117         struct b44 *bp;
2118         int err, i;
2119
2120         if (b44_version_printed++ == 0)
2121                 printk(KERN_INFO "%s", version);
2122
2123         err = pci_enable_device(pdev);
2124         if (err) {
2125                 dev_err(&pdev->dev, "Cannot enable PCI device, "
2126                        "aborting.\n");
2127                 return err;
2128         }
2129
2130         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2131                 dev_err(&pdev->dev,
2132                         "Cannot find proper PCI device "
2133                        "base address, aborting.\n");
2134                 err = -ENODEV;
2135                 goto err_out_disable_pdev;
2136         }
2137
2138         err = pci_request_regions(pdev, DRV_MODULE_NAME);
2139         if (err) {
2140                 dev_err(&pdev->dev,
2141                         "Cannot obtain PCI resources, aborting.\n");
2142                 goto err_out_disable_pdev;
2143         }
2144
2145         pci_set_master(pdev);
2146
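        /* The 4400 can only DMA within the low 1GB of host memory
         * (30-bit mask).
         */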
2147         err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
2148         if (err) {
2149                 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2150                 goto err_out_free_res;
2151         }
2152
2153         err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
2154         if (err) {
2155                 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2156                 goto err_out_free_res;
2157         }
2158
2159         b44reg_base = pci_resource_start(pdev, 0);
2160         b44reg_len = pci_resource_len(pdev, 0);
2161
2162         dev = alloc_etherdev(sizeof(*bp));
2163         if (!dev) {
2164                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
2165                 err = -ENOMEM;
2166                 goto err_out_free_res;
2167         }
2168
2169         SET_MODULE_OWNER(dev);
2170         SET_NETDEV_DEV(dev, &pdev->dev);
2171
2172         /* No interesting netdevice features in this card... */
2173         dev->features |= 0;
2174
2175         bp = netdev_priv(dev);
2176         bp->pdev = pdev;
2177         bp->dev = dev;
2178
2179         bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2180
2181         spin_lock_init(&bp->lock);
2182
2183         bp->regs = ioremap(b44reg_base, b44reg_len);
2184         if (!bp->regs) {
2185                 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
2186                 err = -ENOMEM;
2187                 goto err_out_free_dev;
2188         }
2189
2190         bp->rx_pending = B44_DEF_RX_RING_PENDING;
2191         bp->tx_pending = B44_DEF_TX_RING_PENDING;
2192
2193         dev->open = b44_open;
2194         dev->stop = b44_close;
2195         dev->hard_start_xmit = b44_start_xmit;
2196         dev->get_stats = b44_get_stats;
2197         dev->set_multicast_list = b44_set_rx_mode;
2198         dev->set_mac_address = b44_set_mac_addr;
2199         dev->do_ioctl = b44_ioctl;
2200         dev->tx_timeout = b44_tx_timeout;
2201         dev->poll = b44_poll;
2202         dev->weight = 64;
2203         dev->watchdog_timeo = B44_TX_TIMEOUT;
2204 #ifdef CONFIG_NET_POLL_CONTROLLER
2205         dev->poll_controller = b44_poll_controller;
2206 #endif
2207         dev->change_mtu = b44_change_mtu;
2208         dev->irq = pdev->irq;
2209         SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2210
2211         netif_carrier_off(dev);
2212
2213         err = b44_get_invariants(bp);
2214         if (err) {
2215                 dev_err(&pdev->dev,
2216                         "Problem fetching invariants of chip, aborting.\n");
2217                 goto err_out_iounmap;
2218         }
2219
2220         bp->mii_if.dev = dev;
2221         bp->mii_if.mdio_read = b44_mii_read;
2222         bp->mii_if.mdio_write = b44_mii_write;
2223         bp->mii_if.phy_id = bp->phy_addr;
2224         bp->mii_if.phy_id_mask = 0x1f;
2225         bp->mii_if.reg_num_mask = 0x1f;
2226
2227         /* By default, advertise all speed/duplex settings. */
2228         bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2229                       B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2230
2231         /* By default, auto-negotiate PAUSE. */
2232         bp->flags |= B44_FLAG_PAUSE_AUTO;
2233
2234         err = register_netdev(dev);
2235         if (err) {
2236                 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2237                 goto err_out_iounmap;
2238         }
2239
2240         pci_set_drvdata(pdev, dev);
2241
2242         pci_save_state(bp->pdev);
2243
2244         /* Chip reset provides power to the b44 MAC & PCI cores, which
2245          * is necessary for MAC register access.
2246          */
2247         b44_chip_reset(bp);
2248
2249         printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2250         for (i = 0; i < 6; i++)
2251                 printk("%2.2x%c", dev->dev_addr[i],
2252                        i == 5 ? '\n' : ':');
2253
2254         return 0;
2255
2256 err_out_iounmap:
2257         iounmap(bp->regs);
2258
2259 err_out_free_dev:
2260         free_netdev(dev);
2261
2262 err_out_free_res:
2263         pci_release_regions(pdev);
2264
2265 err_out_disable_pdev:
2266         pci_disable_device(pdev);
2267         pci_set_drvdata(pdev, NULL);
2268         return err;
2269 }
2270
2271 static void __devexit b44_remove_one(struct pci_dev *pdev)
2272 {
2273         struct net_device *dev = pci_get_drvdata(pdev);
2274         struct b44 *bp = netdev_priv(dev);
2275
2276         unregister_netdev(dev);
2277         iounmap(bp->regs);
2278         free_netdev(dev);
2279         pci_release_regions(pdev);
2280         pci_disable_device(pdev);
2281         pci_set_drvdata(pdev, NULL);
2282 }
2283
2284 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2285 {
2286         struct net_device *dev = pci_get_drvdata(pdev);
2287         struct b44 *bp = netdev_priv(dev);
2288
2289         if (!netif_running(dev))
2290                 return 0;
2291
2292         del_timer_sync(&bp->timer);
2293
2294         spin_lock_irq(&bp->lock);
2295
2296         b44_halt(bp);
2297         netif_carrier_off(bp->dev);
2298         netif_device_detach(bp->dev);
2299         b44_free_rings(bp);
2300
2301         spin_unlock_irq(&bp->lock);
2302
2303         free_irq(dev->irq, dev);
2304         if (bp->flags & B44_FLAG_WOL_ENABLE) {
2305                 b44_init_hw(bp, 0);
2306                 b44_setup_wol(bp);
2307         }
2308         pci_disable_device(pdev);
2309         return 0;
2310 }
2311
2312 static int b44_resume(struct pci_dev *pdev)
2313 {
2314         struct net_device *dev = pci_get_drvdata(pdev);
2315         struct b44 *bp = netdev_priv(dev);
        int rc;
2316
2317         pci_restore_state(pdev);
        rc = pci_enable_device(pdev);
        if (rc) {
                printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
                       dev->name);
                return rc;
        }
2319         pci_set_master(pdev);
2320
2321         if (!netif_running(dev))
2322                 return 0;
2323
2324         if (request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev))
2325                 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2326
2327         spin_lock_irq(&bp->lock);
2328
2329         b44_init_rings(bp);
2330         b44_init_hw(bp, 1);
2331         netif_device_attach(bp->dev);
2332         spin_unlock_irq(&bp->lock);
2333
2334         bp->timer.expires = jiffies + HZ;
2335         add_timer(&bp->timer);
2336
2337         b44_enable_ints(bp);
2338         netif_wake_queue(dev);
2339         return 0;
2340 }
2341
2342 static struct pci_driver b44_driver = {
2343         .name           = DRV_MODULE_NAME,
2344         .id_table       = b44_pci_tbl,
2345         .probe          = b44_init_one,
2346         .remove         = __devexit_p(b44_remove_one),
2347         .suspend        = b44_suspend,
2348         .resume         = b44_resume,
2349 };
2350
2351 static int __init b44_init(void)
2352 {
2353         unsigned int dma_desc_align_size = dma_get_cache_alignment();
2354
2355         /* Set up parameters for syncing RX/TX DMA descriptors */
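        /* dma_desc_align_mask rounds an offset down to a cache-line
         * boundary; dma_desc_sync_size covers at least one whole
         * descriptor and one whole cache line per sync.
         */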
2356         dma_desc_align_mask = ~(dma_desc_align_size - 1);
2357         dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2358
2359         return pci_register_driver(&b44_driver);
2360 }
2361
2362 static void __exit b44_cleanup(void)
2363 {
2364         pci_unregister_driver(&b44_driver);
2365 }
2366
2367 module_init(b44_init);
2368 module_exit(b44_cleanup);
2369