1 /* b44.c: Broadcom 4400 device driver.
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
5 * Copyright (C) 2006 Broadcom Corporation.
7 * Distribute under GPL.
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/types.h>
14 #include <linux/netdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/etherdevice.h>
19 #include <linux/pci.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/dma-mapping.h>
24 #include <asm/uaccess.h>
30 #define DRV_MODULE_NAME "b44"
31 #define PFX DRV_MODULE_NAME ": "
32 #define DRV_MODULE_VERSION "1.01"
33 #define DRV_MODULE_RELDATE "Jun 16, 2006"
35 #define B44_DEF_MSG_ENABLE \
45 /* length of time before we decide the hardware is borked,
46 * and dev->tx_timeout() should be called to fix the problem
48 #define B44_TX_TIMEOUT (5 * HZ)
50 /* hardware minimum and maximum for a single frame's data payload */
51 #define B44_MIN_MTU 60
52 #define B44_MAX_MTU 1500
54 #define B44_RX_RING_SIZE 512
55 #define B44_DEF_RX_RING_PENDING 200
56 #define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
58 #define B44_TX_RING_SIZE 512
59 #define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
60 #define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
62 #define B44_DMA_MASK 0x3fffffff
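/* The 4400's DMA engine can only address the low 1GB (30 bits); mappings
 * that land above this window have to be bounced or retried, see the
 * fallback paths in b44_alloc_rx_skb(), b44_start_xmit() and
 * b44_alloc_consistent() below.
 */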
64 #define TX_RING_GAP(BP) \
65 (B44_TX_RING_SIZE - (BP)->tx_pending)
66 #define TX_BUFFS_AVAIL(BP) \
67 (((BP)->tx_cons <= (BP)->tx_prod) ? \
68 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
69 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
70 #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
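/* TX ring accounting: tx_pending is the number of descriptors software
 * allows itself to use, so TX_RING_GAP() is the slice of the 512-entry ring
 * deliberately left unused.  TX_BUFFS_AVAIL() counts how many of the allowed
 * descriptors are still free given the producer/consumer indices; as an
 * illustrative example, with tx_pending = 511, tx_cons = 4 and tx_prod = 10
 * it yields 4 + 511 - 10 = 505 free slots.  NEXT_TX() relies on the ring
 * size being a power of two so it can wrap with a simple mask.
 */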
72 #define RX_PKT_BUF_SZ (1536 + bp->rx_offset + 64)
73 #define TX_PKT_BUF_SZ (B44_MAX_MTU + ETH_HLEN + 8)
75 /* minimum number of free TX descriptors required to wake up TX process */
76 #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
78 /* b44 internal pattern match filter info */
79 #define B44_PATTERN_BASE 0x400
80 #define B44_PATTERN_SIZE 0x80
81 #define B44_PMASK_BASE 0x600
82 #define B44_PMASK_SIZE 0x10
83 #define B44_MAX_PATTERNS 16
84 #define B44_ETHIPV6UDP_HLEN 62
85 #define B44_ETHIPV4UDP_HLEN 42
87 static char version[] __devinitdata =
88 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
90 MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
91 MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
92 MODULE_LICENSE("GPL");
93 MODULE_VERSION(DRV_MODULE_VERSION);
95 static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
96 module_param(b44_debug, int, 0);
97 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
99 static struct pci_device_id b44_pci_tbl[] = {
100 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
101 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
102 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
103 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
104 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
105 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
106 { } /* terminate list with empty entry */
109 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
111 static void b44_halt(struct b44 *);
112 static void b44_init_rings(struct b44 *);
113 static void b44_init_hw(struct b44 *, int);
115 static int dma_desc_align_mask;
116 static int dma_desc_sync_size;
118 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
119 #define _B44(x...) # x,
124 static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
126 unsigned long offset,
127 enum dma_data_direction dir)
129 dma_sync_single_range_for_device(&pdev->dev, dma_base,
130 offset & dma_desc_align_mask,
131 dma_desc_sync_size, dir);
134 static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
136 unsigned long offset,
137 enum dma_data_direction dir)
139 dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
140 offset & dma_desc_align_mask,
141 dma_desc_sync_size, dir);
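/* These helpers are only needed when a descriptor ring had to be allocated
 * with kzalloc() and streaming-mapped (the B44_FLAG_*_RING_HACK paths in
 * b44_alloc_consistent()); each descriptor update must then be synced
 * explicitly.  The sync offset and size are derived from the cache
 * alignment and descriptor size computed in b44_init().
 */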
144 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
146 return readl(bp->regs + reg);
149 static inline void bw32(const struct b44 *bp,
150 unsigned long reg, unsigned long val)
152 writel(val, bp->regs + reg);
155 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
156 u32 bit, unsigned long timeout, const int clear)
160 for (i = 0; i < timeout; i++) {
161 u32 val = br32(bp, reg);
163 if (clear && !(val & bit))
165 if (!clear && (val & bit))
170 printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
174 (clear ? "clear" : "set"));
180 /* Sonics SiliconBackplane support routines. ROFL, you should see all the
181 * buzz words used on this company's website :-)
183 * All of these routines must be invoked with bp->lock held and
184 * interrupts disabled.
187 #define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */
188 #define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */
190 static u32 ssb_get_core_rev(struct b44 *bp)
192 return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
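/* Temporarily repoint the PCI BAR0 window at the SSB PCI core
 * (BCM4400_PCI_CORE_ADDR), route the requested core's interrupt through
 * SBINTVEC, enable prefetch/burst on the backplane-to-PCI translation, then
 * restore the original window.  Like the other SSB routines, the caller
 * holds bp->lock with interrupts disabled.
 */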
195 static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
197 u32 bar_orig, pci_rev, val;
199 pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
200 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
201 pci_rev = ssb_get_core_rev(bp);
203 val = br32(bp, B44_SBINTVEC);
205 bw32(bp, B44_SBINTVEC, val);
207 val = br32(bp, SSB_PCI_TRANS_2);
208 val |= SSB_PCI_PREF | SSB_PCI_BURST;
209 bw32(bp, SSB_PCI_TRANS_2, val);
211 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
216 static void ssb_core_disable(struct b44 *bp)
218 if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
221 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
222 b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
223 b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
224 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
225 SBTMSLOW_REJECT | SBTMSLOW_RESET));
226 br32(bp, B44_SBTMSLOW);
228 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
229 br32(bp, B44_SBTMSLOW);
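/* Reset the ethernet core: disable it, re-enable the clock with the reset
 * and force-gated-clock bits set, clear any latched SERR/timeout state,
 * then drop the reset and force-clock bits so the core runs normally.
 * The dummy br32() reads flush posted writes between the steps.
 */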
233 static void ssb_core_reset(struct b44 *bp)
237 ssb_core_disable(bp);
238 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
239 br32(bp, B44_SBTMSLOW);
242 /* Clear SERR if set, this is a hw bug workaround. */
243 if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
244 bw32(bp, B44_SBTMSHIGH, 0);
246 val = br32(bp, B44_SBIMSTATE);
247 if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
248 bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
250 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
251 br32(bp, B44_SBTMSLOW);
254 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
255 br32(bp, B44_SBTMSLOW);
259 static int ssb_core_unit(struct b44 *bp)
262 u32 val = br32(bp, B44_SBADMATCH0);
265 type = val & SBADMATCH0_TYPE_MASK;
268 base = val & SBADMATCH0_BS0_MASK;
272 base = val & SBADMATCH0_BS1_MASK;
277 base = val & SBADMATCH0_BS2_MASK;
284 static int ssb_is_core_up(struct b44 *bp)
286 return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
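/* Write one 6-byte station address into the receive CAM: bytes 2-5 go into
 * CAM_DATA_LO, bytes 0-1 plus the valid bit into CAM_DATA_HI, then a write
 * command for the target index is issued and we wait for BUSY to clear.
 */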
290 static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
294 val = ((u32) data[2]) << 24;
295 val |= ((u32) data[3]) << 16;
296 val |= ((u32) data[4]) << 8;
297 val |= ((u32) data[5]) << 0;
298 bw32(bp, B44_CAM_DATA_LO, val);
299 val = (CAM_DATA_HI_VALID |
300 (((u32) data[0]) << 8) |
301 (((u32) data[1]) << 0));
302 bw32(bp, B44_CAM_DATA_HI, val);
303 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
304 (index << CAM_CTRL_INDEX_SHIFT)));
305 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
308 static inline void __b44_disable_ints(struct b44 *bp)
310 bw32(bp, B44_IMASK, 0);
313 static void b44_disable_ints(struct b44 *bp)
315 __b44_disable_ints(bp);
317 /* Flush posted writes. */
321 static void b44_enable_ints(struct b44 *bp)
323 bw32(bp, B44_IMASK, bp->imask);
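/* PHY access goes through the EMAC MDIO shim: the whole management frame
 * (start bits, read/write opcode, PHY address, register, turnaround, data)
 * is composed into B44_MDIO_DATA, and completion is signalled by the
 * EMAC_INT_MII bit in B44_EMAC_ISTAT, which is cleared first and then polled.
 */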
326 static int b44_readphy(struct b44 *bp, int reg, u32 *val)
330 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
331 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
332 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
333 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
334 (reg << MDIO_DATA_RA_SHIFT) |
335 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
336 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
337 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
342 static int b44_writephy(struct b44 *bp, int reg, u32 val)
344 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
345 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
346 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
347 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
348 (reg << MDIO_DATA_RA_SHIFT) |
349 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
350 (val & MDIO_DATA_DATA)));
351 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
354 /* miilib interface */
355 /* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
356 * due to code existing before miilib use was added to this driver.
357 * Someone should remove this artificial driver limitation in
358 * b44_{read,write}phy. bp->phy_addr itself is fine (and needed).
360 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
363 struct b44 *bp = netdev_priv(dev);
364 int rc = b44_readphy(bp, location, &val);
370 static void b44_mii_write(struct net_device *dev, int phy_id, int location,
373 struct b44 *bp = netdev_priv(dev);
374 b44_writephy(bp, location, val);
377 static int b44_phy_reset(struct b44 *bp)
382 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
386 err = b44_readphy(bp, MII_BMCR, &val);
388 if (val & BMCR_RESET) {
389 printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
398 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
402 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
403 bp->flags |= pause_flags;
405 val = br32(bp, B44_RXCONFIG);
406 if (pause_flags & B44_FLAG_RX_PAUSE)
407 val |= RXCONFIG_FLOW;
409 val &= ~RXCONFIG_FLOW;
410 bw32(bp, B44_RXCONFIG, val);
412 val = br32(bp, B44_MAC_FLOW);
413 if (pause_flags & B44_FLAG_TX_PAUSE)
414 val |= (MAC_FLOW_PAUSE_ENAB |
415 (0xc0 & MAC_FLOW_RX_HI_WATER));
417 val &= ~MAC_FLOW_PAUSE_ENAB;
418 bw32(bp, B44_MAC_FLOW, val);
421 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
425 /* The driver supports only rx pause by default because
426 the b44 mac tx pause mechanism generates excessive pause frames.
428 Use ethtool to turn on b44 tx pause if necessary.
430 if ((local & ADVERTISE_PAUSE_CAP) &&
431 (local & ADVERTISE_PAUSE_ASYM)){
432 if ((remote & LPA_PAUSE_ASYM) &&
433 !(remote & LPA_PAUSE_CAP))
434 pause_enab |= B44_FLAG_RX_PAUSE;
437 __b44_set_flow_ctrl(bp, pause_enab);
440 static int b44_setup_phy(struct b44 *bp)
445 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
447 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
448 val & MII_ALEDCTRL_ALLMSK)) != 0)
450 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
452 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
453 val | MII_TLEDCTRL_ENABLE)) != 0)
456 if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
457 u32 adv = ADVERTISE_CSMA;
459 if (bp->flags & B44_FLAG_ADV_10HALF)
460 adv |= ADVERTISE_10HALF;
461 if (bp->flags & B44_FLAG_ADV_10FULL)
462 adv |= ADVERTISE_10FULL;
463 if (bp->flags & B44_FLAG_ADV_100HALF)
464 adv |= ADVERTISE_100HALF;
465 if (bp->flags & B44_FLAG_ADV_100FULL)
466 adv |= ADVERTISE_100FULL;
468 if (bp->flags & B44_FLAG_PAUSE_AUTO)
469 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
471 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
473 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
474 BMCR_ANRESTART))) != 0)
479 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
481 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
482 if (bp->flags & B44_FLAG_100_BASE_T)
483 bmcr |= BMCR_SPEED100;
484 if (bp->flags & B44_FLAG_FULL_DUPLEX)
485 bmcr |= BMCR_FULLDPLX;
486 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
489 /* Since we will not be negotiating there is no safe way
490 * to determine if the link partner supports flow control
491 * or not. So just disable it completely in this case.
493 b44_set_flow_ctrl(bp, 0, 0);
500 static void b44_stats_update(struct b44 *bp)
505 val = &bp->hw_stats.tx_good_octets;
506 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
507 *val++ += br32(bp, reg);
513 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
514 *val++ += br32(bp, reg);
518 static void b44_link_report(struct b44 *bp)
520 if (!netif_carrier_ok(bp->dev)) {
521 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
523 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
525 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
526 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
528 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
531 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
532 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
536 static void b44_check_phy(struct b44 *bp)
540 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
541 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
543 if (aux & MII_AUXCTRL_SPEED)
544 bp->flags |= B44_FLAG_100_BASE_T;
546 bp->flags &= ~B44_FLAG_100_BASE_T;
547 if (aux & MII_AUXCTRL_DUPLEX)
548 bp->flags |= B44_FLAG_FULL_DUPLEX;
550 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
552 if (!netif_carrier_ok(bp->dev) &&
553 (bmsr & BMSR_LSTATUS)) {
554 u32 val = br32(bp, B44_TX_CTRL);
555 u32 local_adv, remote_adv;
557 if (bp->flags & B44_FLAG_FULL_DUPLEX)
558 val |= TX_CTRL_DUPLEX;
560 val &= ~TX_CTRL_DUPLEX;
561 bw32(bp, B44_TX_CTRL, val);
563 if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
564 !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
565 !b44_readphy(bp, MII_LPA, &remote_adv))
566 b44_set_flow_ctrl(bp, local_adv, remote_adv);
569 netif_carrier_on(bp->dev);
571 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
573 netif_carrier_off(bp->dev);
577 if (bmsr & BMSR_RFAULT)
578 printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
581 printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
586 static void b44_timer(unsigned long __opaque)
588 struct b44 *bp = (struct b44 *) __opaque;
590 spin_lock_irq(&bp->lock);
594 b44_stats_update(bp);
596 spin_unlock_irq(&bp->lock);
598 bp->timer.expires = jiffies + HZ;
599 add_timer(&bp->timer);
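/* Reclaim completed transmit descriptors: DMATX_STAT reports the current
 * descriptor offset, so everything between tx_cons and that point can be
 * unmapped and its skb freed.  The queue is rewoken once more than
 * B44_TX_WAKEUP_THRESH descriptors are free again.
 */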
602 static void b44_tx(struct b44 *bp)
606 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
607 cur /= sizeof(struct dma_desc);
609 /* XXX needs updating when NETIF_F_SG is supported */
610 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
611 struct ring_info *rp = &bp->tx_buffers[cons];
612 struct sk_buff *skb = rp->skb;
616 pci_unmap_single(bp->pdev,
617 pci_unmap_addr(rp, mapping),
621 dev_kfree_skb_irq(skb);
625 if (netif_queue_stopped(bp->dev) &&
626 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
627 netif_wake_queue(bp->dev);
629 bw32(bp, B44_GPTIMER, 0);
632 /* Works like this. This chip writes a "struct rx_header" 30 bytes
633 * before the DMA address you give it. So we allocate 30 more bytes
634 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
635 * point the chip at 30 bytes past where the rx_header will go.
637 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
640 struct ring_info *src_map, *map;
641 struct rx_header *rh;
649 src_map = &bp->rx_buffers[src_idx];
650 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
651 map = &bp->rx_buffers[dest_idx];
652 skb = dev_alloc_skb(RX_PKT_BUF_SZ);
656 mapping = pci_map_single(bp->pdev, skb->data,
660 /* Hardware bug work-around, the chip is unable to do PCI DMA
661 to/from anything above 1GB :-( */
662 if (dma_mapping_error(mapping) ||
663 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
665 if (!dma_mapping_error(mapping))
666 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
667 dev_kfree_skb_any(skb);
668 skb = __dev_alloc_skb(RX_PKT_BUF_SZ, GFP_DMA);
671 mapping = pci_map_single(bp->pdev, skb->data,
674 if (dma_mapping_error(mapping) ||
675 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
676 if (!dma_mapping_error(mapping))
677 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
678 dev_kfree_skb_any(skb);
684 skb_reserve(skb, bp->rx_offset);
686 rh = (struct rx_header *)
687 (skb->data - bp->rx_offset);
692 pci_unmap_addr_set(map, mapping, mapping);
697 ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
698 if (dest_idx == (B44_RX_RING_SIZE - 1))
699 ctrl |= DESC_CTRL_EOT;
701 dp = &bp->rx_ring[dest_idx];
702 dp->ctrl = cpu_to_le32(ctrl);
703 dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
705 if (bp->flags & B44_FLAG_RX_RING_HACK)
706 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
707 dest_idx * sizeof(dp),
710 return RX_PKT_BUF_SZ;
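/* When a received frame is dropped, its buffer is not reallocated; instead
 * the skb, its DMA mapping and the descriptor address are moved from the
 * source slot to the next producer slot, with the EOT bit fixed up for the
 * new position, so the hardware can reuse the buffer immediately.
 */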
713 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
715 struct dma_desc *src_desc, *dest_desc;
716 struct ring_info *src_map, *dest_map;
717 struct rx_header *rh;
721 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
722 dest_desc = &bp->rx_ring[dest_idx];
723 dest_map = &bp->rx_buffers[dest_idx];
724 src_desc = &bp->rx_ring[src_idx];
725 src_map = &bp->rx_buffers[src_idx];
727 dest_map->skb = src_map->skb;
728 rh = (struct rx_header *) src_map->skb->data;
731 pci_unmap_addr_set(dest_map, mapping,
732 pci_unmap_addr(src_map, mapping));
734 if (bp->flags & B44_FLAG_RX_RING_HACK)
735 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
736 src_idx * sizeof(src_desc),
739 ctrl = src_desc->ctrl;
740 if (dest_idx == (B44_RX_RING_SIZE - 1))
741 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
743 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
745 dest_desc->ctrl = ctrl;
746 dest_desc->addr = src_desc->addr;
750 if (bp->flags & B44_FLAG_RX_RING_HACK)
751 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
752 dest_idx * sizeof(dest_desc),
755 pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
760 static int b44_rx(struct b44 *bp, int budget)
766 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
767 prod /= sizeof(struct dma_desc);
770 while (cons != prod && budget > 0) {
771 struct ring_info *rp = &bp->rx_buffers[cons];
772 struct sk_buff *skb = rp->skb;
773 dma_addr_t map = pci_unmap_addr(rp, mapping);
774 struct rx_header *rh;
777 pci_dma_sync_single_for_cpu(bp->pdev, map,
780 rh = (struct rx_header *) skb->data;
781 len = le16_to_cpu(rh->len);
782 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
783 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
785 b44_recycle_rx(bp, cons, bp->rx_prod);
787 bp->stats.rx_dropped++;
797 len = le16_to_cpu(rh->len);
798 } while (len == 0 && i++ < 5);
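/* Copy-break: frames larger than RX_COPY_THRESHOLD hand the original skb up
 * the stack and a fresh ring buffer is allocated in its place; smaller
 * frames are copied into a small freshly allocated skb and the original
 * ring buffer is recycled in place.
 */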
806 if (len > RX_COPY_THRESHOLD) {
808 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
811 pci_unmap_single(bp->pdev, map,
812 skb_size, PCI_DMA_FROMDEVICE);
813 /* Leave out rx_header */
814 skb_put(skb, len+bp->rx_offset);
815 skb_pull(skb, bp->rx_offset);
817 struct sk_buff *copy_skb;
819 b44_recycle_rx(bp, cons, bp->rx_prod);
820 copy_skb = dev_alloc_skb(len + 2);
821 if (copy_skb == NULL)
822 goto drop_it_no_recycle;
824 copy_skb->dev = bp->dev;
825 skb_reserve(copy_skb, 2);
826 skb_put(copy_skb, len);
827 /* DMA sync done above, copy just the actual packet */
828 memcpy(copy_skb->data, skb->data+bp->rx_offset, len);
832 skb->ip_summed = CHECKSUM_NONE;
833 skb->protocol = eth_type_trans(skb, bp->dev);
834 netif_receive_skb(skb);
835 bp->dev->last_rx = jiffies;
839 bp->rx_prod = (bp->rx_prod + 1) &
840 (B44_RX_RING_SIZE - 1);
841 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
845 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
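/* Old-style netdev->poll() callback: transmit completions and error recovery
 * run under bp->lock, received packets are processed against the
 * *budget/quota pair, and the function returns 0 once all pending work has
 * been handled (completing the poll) or 1 so it gets polled again.
 */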
850 static int b44_poll(struct net_device *netdev, int *budget)
852 struct b44 *bp = netdev_priv(netdev);
855 spin_lock_irq(&bp->lock);
857 if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
858 /* spin_lock(&bp->tx_lock); */
860 /* spin_unlock(&bp->tx_lock); */
862 spin_unlock_irq(&bp->lock);
865 if (bp->istat & ISTAT_RX) {
866 int orig_budget = *budget;
869 if (orig_budget > netdev->quota)
870 orig_budget = netdev->quota;
872 work_done = b44_rx(bp, orig_budget);
874 *budget -= work_done;
875 netdev->quota -= work_done;
877 if (work_done >= orig_budget)
881 if (bp->istat & ISTAT_ERRORS) {
882 spin_lock_irq(&bp->lock);
886 netif_wake_queue(bp->dev);
887 spin_unlock_irq(&bp->lock);
892 netif_rx_complete(netdev);
896 return (done ? 0 : 1);
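/* Interrupt handler: ISTAT is read and qualified with the current IMASK
 * (the mask only gates delivery, it does not clear the status bits), pending
 * work is deferred to b44_poll() with chip interrupts disabled, and the
 * handled status bits are acknowledged by writing them back to B44_ISTAT.
 */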
899 static irqreturn_t b44_interrupt(int irq, void *dev_id)
901 struct net_device *dev = dev_id;
902 struct b44 *bp = netdev_priv(dev);
906 spin_lock(&bp->lock);
908 istat = br32(bp, B44_ISTAT);
909 imask = br32(bp, B44_IMASK);
911 /* The interrupt mask register controls which interrupt bits
912 * will actually raise an interrupt to the CPU when set by hw/firmware,
913 * but doesn't mask off the bits.
919 if (unlikely(!netif_running(dev))) {
920 printk(KERN_INFO "%s: late interrupt.\n", dev->name);
924 if (netif_rx_schedule_prep(dev)) {
925 /* NOTE: These writes are posted by the readback of
926 * the ISTAT register below.
929 __b44_disable_ints(bp);
930 __netif_rx_schedule(dev);
932 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
937 bw32(bp, B44_ISTAT, istat);
940 spin_unlock(&bp->lock);
941 return IRQ_RETVAL(handled);
944 static void b44_tx_timeout(struct net_device *dev)
946 struct b44 *bp = netdev_priv(dev);
948 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
951 spin_lock_irq(&bp->lock);
957 spin_unlock_irq(&bp->lock);
961 netif_wake_queue(dev);
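/* Transmit path: the skb data is DMA-mapped and, if the mapping lands above
 * the 1GB DMA window, copied into a bounce skb allocated from the DMA zone.
 * Each packet uses a single descriptor (SOF | EOF | IOC, plus EOT on the
 * last ring entry); DMATX_PTR is then advanced, written twice on chips with
 * the buggy-TXPTR erratum and read back on chips with the reordering erratum.
 */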
964 static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
966 struct b44 *bp = netdev_priv(dev);
967 struct sk_buff *bounce_skb;
968 int rc = NETDEV_TX_OK;
970 u32 len, entry, ctrl;
973 spin_lock_irq(&bp->lock);
975 /* This is a hard error, log it. */
976 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
977 netif_stop_queue(dev);
978 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
983 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
984 if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
985 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
986 if (!dma_mapping_error(mapping))
987 pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
989 bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
994 mapping = pci_map_single(bp->pdev, bounce_skb->data,
995 len, PCI_DMA_TODEVICE);
996 if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
997 if (!dma_mapping_error(mapping))
998 pci_unmap_single(bp->pdev, mapping,
999 len, PCI_DMA_TODEVICE);
1000 dev_kfree_skb_any(bounce_skb);
1004 memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
1005 dev_kfree_skb_any(skb);
1009 entry = bp->tx_prod;
1010 bp->tx_buffers[entry].skb = skb;
1011 pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);
1013 ctrl = (len & DESC_CTRL_LEN);
1014 ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
1015 if (entry == (B44_TX_RING_SIZE - 1))
1016 ctrl |= DESC_CTRL_EOT;
1018 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1019 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1021 if (bp->flags & B44_FLAG_TX_RING_HACK)
1022 b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
1023 entry * sizeof(bp->tx_ring[0]),
1026 entry = NEXT_TX(entry);
1028 bp->tx_prod = entry;
1032 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1033 if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1034 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1035 if (bp->flags & B44_FLAG_REORDER_BUG)
1036 br32(bp, B44_DMATX_PTR);
1038 if (TX_BUFFS_AVAIL(bp) < 1)
1039 netif_stop_queue(dev);
1041 dev->trans_start = jiffies;
1044 spin_unlock_irq(&bp->lock);
1049 rc = NETDEV_TX_BUSY;
1053 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1055 struct b44 *bp = netdev_priv(dev);
1057 if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1060 if (!netif_running(dev)) {
1061 /* We'll just catch it later when the
1068 spin_lock_irq(&bp->lock);
1073 spin_unlock_irq(&bp->lock);
1075 b44_enable_ints(bp);
1080 /* Free up pending packets in all rx/tx rings.
1082 * The chip has been shut down and the driver detached from
1083 * the networking, so no interrupts or new tx packets will
1084 * end up in the driver. bp->lock is not held and we are not
1085 * in an interrupt context and thus may sleep.
1087 static void b44_free_rings(struct b44 *bp)
1089 struct ring_info *rp;
1092 for (i = 0; i < B44_RX_RING_SIZE; i++) {
1093 rp = &bp->rx_buffers[i];
1095 if (rp->skb == NULL)
1097 pci_unmap_single(bp->pdev,
1098 pci_unmap_addr(rp, mapping),
1100 PCI_DMA_FROMDEVICE);
1101 dev_kfree_skb_any(rp->skb);
1105 /* XXX needs changes once NETIF_F_SG is set... */
1106 for (i = 0; i < B44_TX_RING_SIZE; i++) {
1107 rp = &bp->tx_buffers[i];
1109 if (rp->skb == NULL)
1111 pci_unmap_single(bp->pdev,
1112 pci_unmap_addr(rp, mapping),
1115 dev_kfree_skb_any(rp->skb);
1120 /* Initialize tx/rx rings for packet processing.
1122 * The chip has been shut down and the driver detached from
1123 * the networking, so no interrupts or new tx packets will
1124 * end up in the driver.
1126 static void b44_init_rings(struct b44 *bp)
1132 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1133 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1135 if (bp->flags & B44_FLAG_RX_RING_HACK)
1136 dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
1138 PCI_DMA_BIDIRECTIONAL);
1140 if (bp->flags & B44_FLAG_TX_RING_HACK)
1141 dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
1145 for (i = 0; i < bp->rx_pending; i++) {
1146 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1152 * Must not be invoked with interrupt sources disabled and
1153 * the hardware shut down.
1155 static void b44_free_consistent(struct b44 *bp)
1157 kfree(bp->rx_buffers);
1158 bp->rx_buffers = NULL;
1159 kfree(bp->tx_buffers);
1160 bp->tx_buffers = NULL;
1162 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1163 dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
1168 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1169 bp->rx_ring, bp->rx_ring_dma);
1171 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1174 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1175 dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
1180 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1181 bp->tx_ring, bp->tx_ring_dma);
1183 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1188 * Must not be invoked with interrupt sources disabled and
1189 * the hardware shut down. Can sleep.
1191 static int b44_alloc_consistent(struct b44 *bp)
1195 size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1196 bp->rx_buffers = kzalloc(size, GFP_KERNEL);
1197 if (!bp->rx_buffers)
1200 size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1201 bp->tx_buffers = kzalloc(size, GFP_KERNEL);
1202 if (!bp->tx_buffers)
1205 size = DMA_TABLE_BYTES;
1206 bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
1208 /* Allocation may have failed due to pci_alloc_consistent
1209 insisting on use of GFP_DMA, which is more restrictive
1210 than necessary... */
1211 struct dma_desc *rx_ring;
1212 dma_addr_t rx_ring_dma;
1214 rx_ring = kzalloc(size, GFP_KERNEL);
1218 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
1222 if (dma_mapping_error(rx_ring_dma) ||
1223 rx_ring_dma + size > B44_DMA_MASK) {
1228 bp->rx_ring = rx_ring;
1229 bp->rx_ring_dma = rx_ring_dma;
1230 bp->flags |= B44_FLAG_RX_RING_HACK;
1233 bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1235 /* Allocation may have failed due to pci_alloc_consistent
1236 insisting on use of GFP_DMA, which is more restrictive
1237 than necessary... */
1238 struct dma_desc *tx_ring;
1239 dma_addr_t tx_ring_dma;
1241 tx_ring = kzalloc(size, GFP_KERNEL);
1245 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
1249 if (dma_mapping_error(tx_ring_dma) ||
1250 tx_ring_dma + size > B44_DMA_MASK) {
1255 bp->tx_ring = tx_ring;
1256 bp->tx_ring_dma = tx_ring_dma;
1257 bp->flags |= B44_FLAG_TX_RING_HACK;
1263 b44_free_consistent(bp);
1267 /* bp->lock is held. */
1268 static void b44_clear_stats(struct b44 *bp)
1272 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1273 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1275 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1279 /* bp->lock is held. */
1280 static void b44_chip_reset(struct b44 *bp)
1282 if (ssb_is_core_up(bp)) {
1283 bw32(bp, B44_RCV_LAZY, 0);
1284 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1285 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
1286 bw32(bp, B44_DMATX_CTRL, 0);
1287 bp->tx_prod = bp->tx_cons = 0;
1288 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1289 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1292 bw32(bp, B44_DMARX_CTRL, 0);
1293 bp->rx_prod = bp->rx_cons = 0;
1295 ssb_pci_setup(bp, (bp->core_unit == 0 ?
1302 b44_clear_stats(bp);
1304 /* Make PHY accessible. */
1305 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1306 (0x0d & MDIO_CTRL_MAXF_MASK)));
1307 br32(bp, B44_MDIO_CTRL);
1309 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1310 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1311 br32(bp, B44_ENET_CTRL);
1312 bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1314 u32 val = br32(bp, B44_DEVCTRL);
1316 if (val & DEVCTRL_EPR) {
1317 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1318 br32(bp, B44_DEVCTRL);
1321 bp->flags |= B44_FLAG_INTERNAL_PHY;
1325 /* bp->lock is held. */
1326 static void b44_halt(struct b44 *bp)
1328 b44_disable_ints(bp);
1332 /* bp->lock is held. */
1333 static void __b44_set_mac_addr(struct b44 *bp)
1335 bw32(bp, B44_CAM_CTRL, 0);
1336 if (!(bp->dev->flags & IFF_PROMISC)) {
1339 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1340 val = br32(bp, B44_CAM_CTRL);
1341 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1345 static int b44_set_mac_addr(struct net_device *dev, void *p)
1347 struct b44 *bp = netdev_priv(dev);
1348 struct sockaddr *addr = p;
1350 if (netif_running(dev))
1353 if (!is_valid_ether_addr(addr->sa_data))
1356 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1358 spin_lock_irq(&bp->lock);
1359 __b44_set_mac_addr(bp);
1360 spin_unlock_irq(&bp->lock);
1365 /* Called at device open time to get the chip ready for
1366 * packet processing. Invoked with bp->lock held.
1368 static void __b44_set_rx_mode(struct net_device *);
1369 static void b44_init_hw(struct b44 *bp, int full_reset)
1379 /* Enable CRC32, set proper LED modes and power on PHY */
1380 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1381 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1383 /* This sets the MAC address too. */
1384 __b44_set_rx_mode(bp->dev);
1386 /* MTU + eth header + possible VLAN tag + struct rx_header */
1387 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1388 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1390 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1392 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1393 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1394 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1395 (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
1396 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1398 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1399 bp->rx_prod = bp->rx_pending;
1401 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1403 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1404 (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
1407 val = br32(bp, B44_ENET_CTRL);
1408 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1411 static int b44_open(struct net_device *dev)
1413 struct b44 *bp = netdev_priv(dev);
1416 err = b44_alloc_consistent(bp);
1425 err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1426 if (unlikely(err < 0)) {
1429 b44_free_consistent(bp);
1433 init_timer(&bp->timer);
1434 bp->timer.expires = jiffies + HZ;
1435 bp->timer.data = (unsigned long) bp;
1436 bp->timer.function = b44_timer;
1437 add_timer(&bp->timer);
1439 b44_enable_ints(bp);
1440 netif_start_queue(dev);
1446 /*static*/ void b44_dump_state(struct b44 *bp)
1448 u32 val32, val32_2, val32_3, val32_4, val32_5;
1451 pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
1452 printk("DEBUG: PCI status [%04x] \n", val16);
1457 #ifdef CONFIG_NET_POLL_CONTROLLER
1459 * Polling receive - used by netconsole and other diagnostic tools
1460 * to allow network i/o with interrupts disabled.
1462 static void b44_poll_controller(struct net_device *dev)
1464 disable_irq(dev->irq);
1465 b44_interrupt(dev->irq, dev);
1466 enable_irq(dev->irq);
1470 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1473 u32 *pattern = (u32 *) pp;
1475 for (i = 0; i < bytes; i += sizeof(u32)) {
1476 bw32(bp, B44_FILT_ADDR, table_offset + i);
1477 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
1481 static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1484 int k, j, len = offset;
1485 int ethaddr_bytes = ETH_ALEN;
1487 memset(ppattern + offset, 0xff, magicsync);
1488 for (j = 0; j < magicsync; j++)
1489 set_bit(len++, (unsigned long *) pmask);
1491 for (j = 0; j < B44_MAX_PATTERNS; j++) {
1492 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1493 ethaddr_bytes = ETH_ALEN;
1495 ethaddr_bytes = B44_PATTERN_SIZE - len;
1496 if (ethaddr_bytes <= 0)
1498 for (k = 0; k < ethaddr_bytes; k++) {
1499 ppattern[offset + magicsync +
1500 (j * ETH_ALEN) + k] = macaddr[k];
1502 set_bit(len, (unsigned long *) pmask);
1508 /* Setup magic packet patterns in the b44 WOL
1509 * pattern matching filter.
1511 static void b44_setup_pseudo_magicp(struct b44 *bp)
1515 int plen0, plen1, plen2;
1517 u8 pwol_mask[B44_PMASK_SIZE];
1519 pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1520 if (!pwol_pattern) {
1521 printk(KERN_ERR PFX "Memory not available for WOL\n");
1525 /* IPv4 magic packet pattern - pattern 0. */
1526 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1527 memset(pwol_mask, 0, B44_PMASK_SIZE);
1528 plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1529 B44_ETHIPV4UDP_HLEN);
1531 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1532 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1534 /* Raw ethernet II magic packet pattern - pattern 1 */
1535 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1536 memset(pwol_mask, 0, B44_PMASK_SIZE);
1537 plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1540 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1541 B44_PATTERN_BASE + B44_PATTERN_SIZE);
1542 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1543 B44_PMASK_BASE + B44_PMASK_SIZE);
1545 /* IPv6 magic packet pattern - pattern 2 */
1546 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1547 memset(pwol_mask, 0, B44_PMASK_SIZE);
1548 plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1549 B44_ETHIPV6UDP_HLEN);
1551 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1552 B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1553 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1554 B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1556 kfree(pwol_pattern);
1558 /* set these patterns' lengths: one less than each real length */
1559 val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1560 bw32(bp, B44_WKUP_LEN, val);
1562 /* enable wakeup pattern matching */
1563 val = br32(bp, B44_DEVCTRL);
1564 bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
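/* Arm wake-on-LAN: B0 and later chips use the MAC's native magic-packet
 * mode (DEVCTRL_MPM) with the station address programmed into ADDR_LO/HI,
 * while older chips fall back to the pseudo-magic pattern filters set up
 * above.  Finally the core's PME enable bit and the PCI power-management
 * registers are configured so the wake event can propagate.
 */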
1568 static void b44_setup_wol(struct b44 *bp)
1573 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1575 if (bp->flags & B44_FLAG_B0_ANDLATER) {
1577 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1579 val = bp->dev->dev_addr[2] << 24 |
1580 bp->dev->dev_addr[3] << 16 |
1581 bp->dev->dev_addr[4] << 8 |
1582 bp->dev->dev_addr[5];
1583 bw32(bp, B44_ADDR_LO, val);
1585 val = bp->dev->dev_addr[0] << 8 |
1586 bp->dev->dev_addr[1];
1587 bw32(bp, B44_ADDR_HI, val);
1589 val = br32(bp, B44_DEVCTRL);
1590 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1593 b44_setup_pseudo_magicp(bp);
1596 val = br32(bp, B44_SBTMSLOW);
1597 bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
1599 pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
1600 pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
1604 static int b44_close(struct net_device *dev)
1606 struct b44 *bp = netdev_priv(dev);
1608 netif_stop_queue(dev);
1610 netif_poll_disable(dev);
1612 del_timer_sync(&bp->timer);
1614 spin_lock_irq(&bp->lock);
1621 netif_carrier_off(dev);
1623 spin_unlock_irq(&bp->lock);
1625 free_irq(dev->irq, dev);
1627 netif_poll_enable(dev);
1629 if (bp->flags & B44_FLAG_WOL_ENABLE) {
1634 b44_free_consistent(bp);
1639 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1641 struct b44 *bp = netdev_priv(dev);
1642 struct net_device_stats *nstat = &bp->stats;
1643 struct b44_hw_stats *hwstat = &bp->hw_stats;
1645 /* Convert HW stats into netdevice stats. */
1646 nstat->rx_packets = hwstat->rx_pkts;
1647 nstat->tx_packets = hwstat->tx_pkts;
1648 nstat->rx_bytes = hwstat->rx_octets;
1649 nstat->tx_bytes = hwstat->tx_octets;
1650 nstat->tx_errors = (hwstat->tx_jabber_pkts +
1651 hwstat->tx_oversize_pkts +
1652 hwstat->tx_underruns +
1653 hwstat->tx_excessive_cols +
1654 hwstat->tx_late_cols);
1655 nstat->multicast = hwstat->tx_multicast_pkts;
1656 nstat->collisions = hwstat->tx_total_cols;
1658 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1659 hwstat->rx_undersize);
1660 nstat->rx_over_errors = hwstat->rx_missed_pkts;
1661 nstat->rx_frame_errors = hwstat->rx_align_errs;
1662 nstat->rx_crc_errors = hwstat->rx_crc_errs;
1663 nstat->rx_errors = (hwstat->rx_jabber_pkts +
1664 hwstat->rx_oversize_pkts +
1665 hwstat->rx_missed_pkts +
1666 hwstat->rx_crc_align_errs +
1667 hwstat->rx_undersize +
1668 hwstat->rx_crc_errs +
1669 hwstat->rx_align_errs +
1670 hwstat->rx_symbol_errs);
1672 nstat->tx_aborted_errors = hwstat->tx_underruns;
1674 /* Carrier lost counter seems to be broken for some devices */
1675 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1681 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1683 struct dev_mc_list *mclist;
1686 num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1687 mclist = dev->mc_list;
1688 for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1689 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1694 static void __b44_set_rx_mode(struct net_device *dev)
1696 struct b44 *bp = netdev_priv(dev);
1699 val = br32(bp, B44_RXCONFIG);
1700 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1701 if (dev->flags & IFF_PROMISC) {
1702 val |= RXCONFIG_PROMISC;
1703 bw32(bp, B44_RXCONFIG, val);
1705 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1708 __b44_set_mac_addr(bp);
1710 if ((dev->flags & IFF_ALLMULTI) ||
1711 (dev->mc_count > B44_MCAST_TABLE_SIZE))
1712 val |= RXCONFIG_ALLMULTI;
1714 i = __b44_load_mcast(bp, dev);
1717 __b44_cam_write(bp, zero, i);
1719 bw32(bp, B44_RXCONFIG, val);
1720 val = br32(bp, B44_CAM_CTRL);
1721 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
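/* CAM entry 0 always holds the station address (__b44_set_mac_addr), the
 * multicast list fills the following entries, and leftover entries are
 * cleared with a zero address before the CAM is re-enabled.
 * b44_set_rx_mode() below is the locked wrapper used as the netdev
 * set_multicast_list hook.
 */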
1725 static void b44_set_rx_mode(struct net_device *dev)
1727 struct b44 *bp = netdev_priv(dev);
1729 spin_lock_irq(&bp->lock);
1730 __b44_set_rx_mode(dev);
1731 spin_unlock_irq(&bp->lock);
1734 static u32 b44_get_msglevel(struct net_device *dev)
1736 struct b44 *bp = netdev_priv(dev);
1737 return bp->msg_enable;
1740 static void b44_set_msglevel(struct net_device *dev, u32 value)
1742 struct b44 *bp = netdev_priv(dev);
1743 bp->msg_enable = value;
1746 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1748 struct b44 *bp = netdev_priv(dev);
1749 struct pci_dev *pci_dev = bp->pdev;
1751 strcpy (info->driver, DRV_MODULE_NAME);
1752 strcpy (info->version, DRV_MODULE_VERSION);
1753 strcpy (info->bus_info, pci_name(pci_dev));
1756 static int b44_nway_reset(struct net_device *dev)
1758 struct b44 *bp = netdev_priv(dev);
1762 spin_lock_irq(&bp->lock);
1763 b44_readphy(bp, MII_BMCR, &bmcr);
1764 b44_readphy(bp, MII_BMCR, &bmcr);
1766 if (bmcr & BMCR_ANENABLE) {
1767 b44_writephy(bp, MII_BMCR,
1768 bmcr | BMCR_ANRESTART);
1771 spin_unlock_irq(&bp->lock);
1776 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1778 struct b44 *bp = netdev_priv(dev);
1780 cmd->supported = (SUPPORTED_Autoneg);
1781 cmd->supported |= (SUPPORTED_100baseT_Half |
1782 SUPPORTED_100baseT_Full |
1783 SUPPORTED_10baseT_Half |
1784 SUPPORTED_10baseT_Full |
1787 cmd->advertising = 0;
1788 if (bp->flags & B44_FLAG_ADV_10HALF)
1789 cmd->advertising |= ADVERTISED_10baseT_Half;
1790 if (bp->flags & B44_FLAG_ADV_10FULL)
1791 cmd->advertising |= ADVERTISED_10baseT_Full;
1792 if (bp->flags & B44_FLAG_ADV_100HALF)
1793 cmd->advertising |= ADVERTISED_100baseT_Half;
1794 if (bp->flags & B44_FLAG_ADV_100FULL)
1795 cmd->advertising |= ADVERTISED_100baseT_Full;
1796 cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1797 cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1798 SPEED_100 : SPEED_10;
1799 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1800 DUPLEX_FULL : DUPLEX_HALF;
1802 cmd->phy_address = bp->phy_addr;
1803 cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1804 XCVR_INTERNAL : XCVR_EXTERNAL;
1805 cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1806 AUTONEG_DISABLE : AUTONEG_ENABLE;
1807 if (cmd->autoneg == AUTONEG_ENABLE)
1808 cmd->advertising |= ADVERTISED_Autoneg;
1809 if (!netif_running(dev)){
1818 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1820 struct b44 *bp = netdev_priv(dev);
1822 /* We do not support gigabit. */
1823 if (cmd->autoneg == AUTONEG_ENABLE) {
1824 if (cmd->advertising &
1825 (ADVERTISED_1000baseT_Half |
1826 ADVERTISED_1000baseT_Full))
1828 } else if ((cmd->speed != SPEED_100 &&
1829 cmd->speed != SPEED_10) ||
1830 (cmd->duplex != DUPLEX_HALF &&
1831 cmd->duplex != DUPLEX_FULL)) {
1835 spin_lock_irq(&bp->lock);
1837 if (cmd->autoneg == AUTONEG_ENABLE) {
1838 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1839 B44_FLAG_100_BASE_T |
1840 B44_FLAG_FULL_DUPLEX |
1841 B44_FLAG_ADV_10HALF |
1842 B44_FLAG_ADV_10FULL |
1843 B44_FLAG_ADV_100HALF |
1844 B44_FLAG_ADV_100FULL);
1845 if (cmd->advertising == 0) {
1846 bp->flags |= (B44_FLAG_ADV_10HALF |
1847 B44_FLAG_ADV_10FULL |
1848 B44_FLAG_ADV_100HALF |
1849 B44_FLAG_ADV_100FULL);
1851 if (cmd->advertising & ADVERTISED_10baseT_Half)
1852 bp->flags |= B44_FLAG_ADV_10HALF;
1853 if (cmd->advertising & ADVERTISED_10baseT_Full)
1854 bp->flags |= B44_FLAG_ADV_10FULL;
1855 if (cmd->advertising & ADVERTISED_100baseT_Half)
1856 bp->flags |= B44_FLAG_ADV_100HALF;
1857 if (cmd->advertising & ADVERTISED_100baseT_Full)
1858 bp->flags |= B44_FLAG_ADV_100FULL;
1861 bp->flags |= B44_FLAG_FORCE_LINK;
1862 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1863 if (cmd->speed == SPEED_100)
1864 bp->flags |= B44_FLAG_100_BASE_T;
1865 if (cmd->duplex == DUPLEX_FULL)
1866 bp->flags |= B44_FLAG_FULL_DUPLEX;
1869 if (netif_running(dev))
1872 spin_unlock_irq(&bp->lock);
1877 static void b44_get_ringparam(struct net_device *dev,
1878 struct ethtool_ringparam *ering)
1880 struct b44 *bp = netdev_priv(dev);
1882 ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1883 ering->rx_pending = bp->rx_pending;
1885 /* XXX ethtool lacks a tx_max_pending, oops... */
1888 static int b44_set_ringparam(struct net_device *dev,
1889 struct ethtool_ringparam *ering)
1891 struct b44 *bp = netdev_priv(dev);
1893 if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1894 (ering->rx_mini_pending != 0) ||
1895 (ering->rx_jumbo_pending != 0) ||
1896 (ering->tx_pending > B44_TX_RING_SIZE - 1))
1899 spin_lock_irq(&bp->lock);
1901 bp->rx_pending = ering->rx_pending;
1902 bp->tx_pending = ering->tx_pending;
1907 netif_wake_queue(bp->dev);
1908 spin_unlock_irq(&bp->lock);
1910 b44_enable_ints(bp);
1915 static void b44_get_pauseparam(struct net_device *dev,
1916 struct ethtool_pauseparam *epause)
1918 struct b44 *bp = netdev_priv(dev);
1921 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1923 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1925 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1928 static int b44_set_pauseparam(struct net_device *dev,
1929 struct ethtool_pauseparam *epause)
1931 struct b44 *bp = netdev_priv(dev);
1933 spin_lock_irq(&bp->lock);
1934 if (epause->autoneg)
1935 bp->flags |= B44_FLAG_PAUSE_AUTO;
1937 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1938 if (epause->rx_pause)
1939 bp->flags |= B44_FLAG_RX_PAUSE;
1941 bp->flags &= ~B44_FLAG_RX_PAUSE;
1942 if (epause->tx_pause)
1943 bp->flags |= B44_FLAG_TX_PAUSE;
1945 bp->flags &= ~B44_FLAG_TX_PAUSE;
1946 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1951 __b44_set_flow_ctrl(bp, bp->flags);
1953 spin_unlock_irq(&bp->lock);
1955 b44_enable_ints(bp);
1960 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1964 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1969 static int b44_get_stats_count(struct net_device *dev)
1971 return ARRAY_SIZE(b44_gstrings);
1974 static void b44_get_ethtool_stats(struct net_device *dev,
1975 struct ethtool_stats *stats, u64 *data)
1977 struct b44 *bp = netdev_priv(dev);
1978 u32 *val = &bp->hw_stats.tx_good_octets;
1981 spin_lock_irq(&bp->lock);
1983 b44_stats_update(bp);
1985 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1988 spin_unlock_irq(&bp->lock);
1991 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1993 struct b44 *bp = netdev_priv(dev);
1995 wol->supported = WAKE_MAGIC;
1996 if (bp->flags & B44_FLAG_WOL_ENABLE)
1997 wol->wolopts = WAKE_MAGIC;
2000 memset(&wol->sopass, 0, sizeof(wol->sopass));
2003 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2005 struct b44 *bp = netdev_priv(dev);
2007 spin_lock_irq(&bp->lock);
2008 if (wol->wolopts & WAKE_MAGIC)
2009 bp->flags |= B44_FLAG_WOL_ENABLE;
2011 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2012 spin_unlock_irq(&bp->lock);
2017 static const struct ethtool_ops b44_ethtool_ops = {
2018 .get_drvinfo = b44_get_drvinfo,
2019 .get_settings = b44_get_settings,
2020 .set_settings = b44_set_settings,
2021 .nway_reset = b44_nway_reset,
2022 .get_link = ethtool_op_get_link,
2023 .get_wol = b44_get_wol,
2024 .set_wol = b44_set_wol,
2025 .get_ringparam = b44_get_ringparam,
2026 .set_ringparam = b44_set_ringparam,
2027 .get_pauseparam = b44_get_pauseparam,
2028 .set_pauseparam = b44_set_pauseparam,
2029 .get_msglevel = b44_get_msglevel,
2030 .set_msglevel = b44_set_msglevel,
2031 .get_strings = b44_get_strings,
2032 .get_stats_count = b44_get_stats_count,
2033 .get_ethtool_stats = b44_get_ethtool_stats,
2034 .get_perm_addr = ethtool_op_get_perm_addr,
2037 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2039 struct mii_ioctl_data *data = if_mii(ifr);
2040 struct b44 *bp = netdev_priv(dev);
2043 if (!netif_running(dev))
2046 spin_lock_irq(&bp->lock);
2047 err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2048 spin_unlock_irq(&bp->lock);
2053 /* Read 128 bytes of EEPROM. */
2054 static int b44_read_eeprom(struct b44 *bp, u8 *data)
2057 u16 *ptr = (u16 *) data;
2059 for (i = 0; i < 128; i += 2)
2060 ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
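/* The EEPROM is read as 16-bit words, which is why b44_get_invariants()
 * below picks the station address up in swapped byte pairs (79/78, 81/80,
 * 83/82) and rejects an all-zero or multicast result.  Byte 90 holds the
 * PHY address on the MII bus.
 */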
2065 static int __devinit b44_get_invariants(struct b44 *bp)
2070 err = b44_read_eeprom(bp, &eeprom[0]);
2074 bp->dev->dev_addr[0] = eeprom[79];
2075 bp->dev->dev_addr[1] = eeprom[78];
2076 bp->dev->dev_addr[2] = eeprom[81];
2077 bp->dev->dev_addr[3] = eeprom[80];
2078 bp->dev->dev_addr[4] = eeprom[83];
2079 bp->dev->dev_addr[5] = eeprom[82];
2081 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
2082 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
2086 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2088 bp->phy_addr = eeprom[90] & 0x1f;
2090 /* With this, plus the rx_header prepended to the data by the
2091 * hardware, we'll land the ethernet header on a 2-byte boundary.
2095 bp->imask = IMASK_DEF;
2097 bp->core_unit = ssb_core_unit(bp);
2098 bp->dma_offset = SB_PCI_DMA;
2100 /* XXX - really required?
2101 bp->flags |= B44_FLAG_BUGGY_TXPTR;
2104 if (ssb_get_core_rev(bp) >= 7)
2105 bp->flags |= B44_FLAG_B0_ANDLATER;
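/* PCI probe: enable the device, claim BAR0, restrict both the streaming and
 * coherent DMA masks to the 30-bit window, ioremap the register space, wire
 * up the net_device methods and ethtool ops, read the invariants out of the
 * EEPROM and finally register the interface with the carrier initially off.
 */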
2111 static int __devinit b44_init_one(struct pci_dev *pdev,
2112 const struct pci_device_id *ent)
2114 static int b44_version_printed = 0;
2115 unsigned long b44reg_base, b44reg_len;
2116 struct net_device *dev;
2120 if (b44_version_printed++ == 0)
2121 printk(KERN_INFO "%s", version);
2123 err = pci_enable_device(pdev);
2125 dev_err(&pdev->dev, "Cannot enable PCI device, "
2130 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2132 "Cannot find proper PCI device "
2133 "base address, aborting.\n");
2135 goto err_out_disable_pdev;
2138 err = pci_request_regions(pdev, DRV_MODULE_NAME);
2141 "Cannot obtain PCI resources, aborting.\n");
2142 goto err_out_disable_pdev;
2145 pci_set_master(pdev);
2147 err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
2149 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2150 goto err_out_free_res;
2153 err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
2155 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2156 goto err_out_free_res;
2159 b44reg_base = pci_resource_start(pdev, 0);
2160 b44reg_len = pci_resource_len(pdev, 0);
2162 dev = alloc_etherdev(sizeof(*bp));
2164 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
2166 goto err_out_free_res;
2169 SET_MODULE_OWNER(dev);
2170 SET_NETDEV_DEV(dev, &pdev->dev);
2172 /* No interesting netdevice features in this card... */
2175 bp = netdev_priv(dev);
2179 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2181 spin_lock_init(&bp->lock);
2183 bp->regs = ioremap(b44reg_base, b44reg_len);
2184 if (bp->regs == NULL) {
2185 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
2187 goto err_out_free_dev;
2190 bp->rx_pending = B44_DEF_RX_RING_PENDING;
2191 bp->tx_pending = B44_DEF_TX_RING_PENDING;
2193 dev->open = b44_open;
2194 dev->stop = b44_close;
2195 dev->hard_start_xmit = b44_start_xmit;
2196 dev->get_stats = b44_get_stats;
2197 dev->set_multicast_list = b44_set_rx_mode;
2198 dev->set_mac_address = b44_set_mac_addr;
2199 dev->do_ioctl = b44_ioctl;
2200 dev->tx_timeout = b44_tx_timeout;
2201 dev->poll = b44_poll;
2203 dev->watchdog_timeo = B44_TX_TIMEOUT;
2204 #ifdef CONFIG_NET_POLL_CONTROLLER
2205 dev->poll_controller = b44_poll_controller;
2207 dev->change_mtu = b44_change_mtu;
2208 dev->irq = pdev->irq;
2209 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2211 netif_carrier_off(dev);
2213 err = b44_get_invariants(bp);
2216 "Problem fetching invariants of chip, aborting.\n");
2217 goto err_out_iounmap;
2220 bp->mii_if.dev = dev;
2221 bp->mii_if.mdio_read = b44_mii_read;
2222 bp->mii_if.mdio_write = b44_mii_write;
2223 bp->mii_if.phy_id = bp->phy_addr;
2224 bp->mii_if.phy_id_mask = 0x1f;
2225 bp->mii_if.reg_num_mask = 0x1f;
2227 /* By default, advertise all speed/duplex settings. */
2228 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2229 B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2231 /* By default, auto-negotiate PAUSE. */
2232 bp->flags |= B44_FLAG_PAUSE_AUTO;
2234 err = register_netdev(dev);
2236 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2237 goto err_out_iounmap;
2240 pci_set_drvdata(pdev, dev);
2242 pci_save_state(bp->pdev);
2244 /* Chip reset provides power to the b44 MAC & PCI cores, which
2245 * is necessary for MAC register access.
2249 printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2250 for (i = 0; i < 6; i++)
2251 printk("%2.2x%c", dev->dev_addr[i],
2252 i == 5 ? '\n' : ':');
2263 pci_release_regions(pdev);
2265 err_out_disable_pdev:
2266 pci_disable_device(pdev);
2267 pci_set_drvdata(pdev, NULL);
2271 static void __devexit b44_remove_one(struct pci_dev *pdev)
2273 struct net_device *dev = pci_get_drvdata(pdev);
2274 struct b44 *bp = netdev_priv(dev);
2276 unregister_netdev(dev);
2279 pci_release_regions(pdev);
2280 pci_disable_device(pdev);
2281 pci_set_drvdata(pdev, NULL);
2284 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2286 struct net_device *dev = pci_get_drvdata(pdev);
2287 struct b44 *bp = netdev_priv(dev);
2289 if (!netif_running(dev))
2292 del_timer_sync(&bp->timer);
2294 spin_lock_irq(&bp->lock);
2297 netif_carrier_off(bp->dev);
2298 netif_device_detach(bp->dev);
2301 spin_unlock_irq(&bp->lock);
2303 free_irq(dev->irq, dev);
2304 if (bp->flags & B44_FLAG_WOL_ENABLE) {
2308 pci_disable_device(pdev);
2312 static int b44_resume(struct pci_dev *pdev)
2314 struct net_device *dev = pci_get_drvdata(pdev);
2315 struct b44 *bp = netdev_priv(dev);
2317 pci_restore_state(pdev);
2318 pci_enable_device(pdev);
2319 pci_set_master(pdev);
2321 if (!netif_running(dev))
2324 if (request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev))
2325 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2327 spin_lock_irq(&bp->lock);
2331 netif_device_attach(bp->dev);
2332 spin_unlock_irq(&bp->lock);
2334 bp->timer.expires = jiffies + HZ;
2335 add_timer(&bp->timer);
2337 b44_enable_ints(bp);
2338 netif_wake_queue(dev);
2342 static struct pci_driver b44_driver = {
2343 .name = DRV_MODULE_NAME,
2344 .id_table = b44_pci_tbl,
2345 .probe = b44_init_one,
2346 .remove = __devexit_p(b44_remove_one),
2347 .suspend = b44_suspend,
2348 .resume = b44_resume,
2351 static int __init b44_init(void)
2353 unsigned int dma_desc_align_size = dma_get_cache_alignment();
2355 /* Set up parameters for syncing RX/TX DMA descriptors */
2356 dma_desc_align_mask = ~(dma_desc_align_size - 1);
2357 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2359 return pci_register_driver(&b44_driver);
2362 static void __exit b44_cleanup(void)
2364 pci_unregister_driver(&b44_driver);
2367 module_init(b44_init);
2368 module_exit(b44_cleanup);