1 /* b44.c: Broadcom 4400 device driver.
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
5 * Copyright (C) 2006 Broadcom Corporation.
7 * Distribute under GPL.
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/types.h>
14 #include <linux/netdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/etherdevice.h>
19 #include <linux/pci.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/dma-mapping.h>
24 #include <asm/uaccess.h>
30 #define DRV_MODULE_NAME "b44"
31 #define PFX DRV_MODULE_NAME ": "
32 #define DRV_MODULE_VERSION "1.01"
33 #define DRV_MODULE_RELDATE "Jun 16, 2006"
35 #define B44_DEF_MSG_ENABLE \
45 /* length of time before we decide the hardware is borked,
46 * and dev->tx_timeout() should be called to fix the problem
48 #define B44_TX_TIMEOUT (5 * HZ)
50 /* hardware minimum and maximum for a single frame's data payload */
51 #define B44_MIN_MTU 60
52 #define B44_MAX_MTU 1500
54 #define B44_RX_RING_SIZE 512
55 #define B44_DEF_RX_RING_PENDING 200
56 #define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
58 #define B44_TX_RING_SIZE 512
59 #define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
60 #define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
62 #define B44_DMA_MASK 0x3fffffff
64 #define TX_RING_GAP(BP) \
65 (B44_TX_RING_SIZE - (BP)->tx_pending)
66 #define TX_BUFFS_AVAIL(BP) \
67 (((BP)->tx_cons <= (BP)->tx_prod) ? \
68 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
69 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
70 #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
72 #define RX_PKT_BUF_SZ (1536 + bp->rx_offset + 64)
73 #define TX_PKT_BUF_SZ (B44_MAX_MTU + ETH_HLEN + 8)
75 /* minimum number of free TX descriptors required to wake up TX process */
76 #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
78 /* b44 internal pattern match filter info */
79 #define B44_PATTERN_BASE 0x400
80 #define B44_PATTERN_SIZE 0x80
81 #define B44_PMASK_BASE 0x600
82 #define B44_PMASK_SIZE 0x10
83 #define B44_MAX_PATTERNS 16
84 #define B44_ETHIPV6UDP_HLEN 62
85 #define B44_ETHIPV4UDP_HLEN 42
87 static char version[] __devinitdata =
88 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
90 MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
91 MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
92 MODULE_LICENSE("GPL");
93 MODULE_VERSION(DRV_MODULE_VERSION);
95 static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
96 module_param(b44_debug, int, 0);
97 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
99 static struct pci_device_id b44_pci_tbl[] = {
100 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
101 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
102 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
103 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
104 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
105 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
106 { } /* terminate list with empty entry */
109 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
111 static void b44_halt(struct b44 *);
112 static void b44_init_rings(struct b44 *);
113 static void b44_init_hw(struct b44 *, int);
115 static int dma_desc_align_mask;
116 static int dma_desc_sync_size;
118 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
119 #define _B44(x...) # x,
/* Flush a single CPU-written ring descriptor out to the device, for
 * descriptor rings that live in streaming-mapped memory (the *_RING_HACK
 * paths).  The byte offset into the ring is rounded down with
 * dma_desc_align_mask and dma_desc_sync_size bytes are synced.
 * NOTE(review): interior lines (the dma_base parameter declaration and
 * the function braces) are missing from this sampled view of the file.
 */
124 static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
126 unsigned long offset,
127 enum dma_data_direction dir)
129 dma_sync_single_range_for_device(&pdev->dev, dma_base,
130 offset & dma_desc_align_mask,
131 dma_desc_sync_size, dir);
/* Counterpart to b44_sync_dma_desc_for_device(): make a device-written
 * ring descriptor visible to the CPU before the driver reads it.  Same
 * alignment masking and sync size as the _for_device variant.
 * NOTE(review): interior lines (the dma_base parameter declaration and
 * the function braces) are missing from this sampled view of the file.
 */
134 static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
136 unsigned long offset,
137 enum dma_data_direction dir)
139 dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
140 offset & dma_desc_align_mask,
141 dma_desc_sync_size, dir);
/* MMIO read helper: return the 32-bit device register at byte offset
 * 'reg' from the mapped register base bp->regs.
 */
144 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
146 return readl(bp->regs + reg);
/* MMIO write helper: store 'val' into the 32-bit device register at byte
 * offset 'reg'.  Note writel() may be posted; callers that need ordering
 * follow up with a br32() read-back (see e.g. ssb_core_disable()).
 */
149 static inline void bw32(const struct b44 *bp,
150 unsigned long reg, unsigned long val)
152 writel(val, bp->regs + reg);
155 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
156 u32 bit, unsigned long timeout, const int clear)
160 for (i = 0; i < timeout; i++) {
161 u32 val = br32(bp, reg);
163 if (clear && !(val & bit))
165 if (!clear && (val & bit))
170 printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
174 (clear ? "clear" : "set"));
180 /* Sonics SiliconBackplane support routines. ROFL, you should see all the
181 * buzz words used on this company's website :-)
183 * All of these routines must be invoked with bp->lock held and
184 * interrupts disabled.
187 #define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */
188 #define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */
190 static u32 ssb_get_core_rev(struct b44 *bp)
192 return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
195 static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
197 u32 bar_orig, pci_rev, val;
199 pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
200 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
201 pci_rev = ssb_get_core_rev(bp);
203 val = br32(bp, B44_SBINTVEC);
205 bw32(bp, B44_SBINTVEC, val);
207 val = br32(bp, SSB_PCI_TRANS_2);
208 val |= SSB_PCI_PREF | SSB_PCI_BURST;
209 bw32(bp, SSB_PCI_TRANS_2, val);
211 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
/* Take the Sonics backplane core out of service.  Per the comment block
 * above these routines, must be called with bp->lock held and interrupts
 * disabled.
 *
 * Sequence: return early if the core is already held in reset; otherwise
 * assert REJECT (clock still running) so the core stops accepting
 * backplane transactions, wait for REJECT to latch and BUSY to clear,
 * then assert RESET.  The discarded br32() read-backs of SBTMSLOW flush
 * the posted writes before the next step.
 * NOTE(review): some interior lines (braces, an early-return body, a
 * delay between steps) are missing from this sampled view.
 */
216 static void ssb_core_disable(struct b44 *bp)
218 if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
221 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
222 b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
223 b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
224 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
225 SBTMSLOW_REJECT | SBTMSLOW_RESET));
226 br32(bp, B44_SBTMSLOW);
228 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
229 br32(bp, B44_SBTMSLOW);
/* Full reset of the backplane core: disable it first, bring it back up
 * with RESET + CLOCK + FGC asserted, clear any latched error state
 * (SERR, inband-error/timeout bits in SBIMSTATE), then release RESET in
 * two steps, leaving only the clock enabled.  Each bw32() is followed by
 * a discarded br32() to flush the posted write.  Caller holds bp->lock
 * with interrupts disabled (see the comment above these routines).
 * NOTE(review): the declaration of 'val', the braces, and the udelay()s
 * between steps are missing from this sampled view.
 */
233 static void ssb_core_reset(struct b44 *bp)
237 ssb_core_disable(bp);
238 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
239 br32(bp, B44_SBTMSLOW);
242 /* Clear SERR if set, this is a hw bug workaround. */
243 if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
244 bw32(bp, B44_SBTMSHIGH, 0);
246 val = br32(bp, B44_SBIMSTATE);
247 if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
248 bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
250 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
251 br32(bp, B44_SBTMSLOW);
254 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
255 br32(bp, B44_SBTMSLOW);
259 static int ssb_core_unit(struct b44 *bp)
262 u32 val = br32(bp, B44_SBADMATCH0);
265 type = val & SBADMATCH0_TYPE_MASK;
268 base = val & SBADMATCH0_BS0_MASK;
272 base = val & SBADMATCH0_BS1_MASK;
277 base = val & SBADMATCH0_BS2_MASK;
284 static int ssb_is_core_up(struct b44 *bp)
286 return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
/* Program one 6-byte MAC address into CAM (address-filter) slot 'index'.
 * Bytes data[2..5] go into the low data word (MSB first), the VALID flag
 * plus data[0..1] into the high word; the WRITE command is then issued
 * and we poll (up to 100 iterations) for the CAM busy bit to clear.
 * The __-prefix suggests the caller is expected to hold bp->lock --
 * confirm against callers such as __b44_set_mac_addr().
 * NOTE(review): the declaration of 'val' and the braces are missing from
 * this sampled view.
 */
290 static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
294 val = ((u32) data[2]) << 24;
295 val |= ((u32) data[3]) << 16;
296 val |= ((u32) data[4]) << 8;
297 val |= ((u32) data[5]) << 0;
298 bw32(bp, B44_CAM_DATA_LO, val);
299 val = (CAM_DATA_HI_VALID |
300 (((u32) data[0]) << 8) |
301 (((u32) data[1]) << 0));
302 bw32(bp, B44_CAM_DATA_HI, val);
303 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
304 (index << CAM_CTRL_INDEX_SHIFT)));
305 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
/* Mask every device interrupt source by zeroing IMASK.  Does not flush
 * the posted write -- b44_disable_ints() is the flushing wrapper.
 */
308 static inline void __b44_disable_ints(struct b44 *bp)
310 bw32(bp, B44_IMASK, 0);
/* Mask all interrupts and flush the posted IMASK write with a read-back.
 * NOTE(review): the flushing register read itself is one of the lines
 * missing from this sampled view.
 */
313 static void b44_disable_ints(struct b44 *bp)
315 __b44_disable_ints(bp);
317 /* Flush posted writes. */
/* Re-enable the interrupt sources the driver currently wants by writing
 * the cached mask bp->imask back into the IMASK register.
 */
321 static void b44_enable_ints(struct b44 *bp)
323 bw32(bp, B44_IMASK, bp->imask);
/* Read PHY register 'reg' over the MDIO management interface.
 * Acknowledge any stale MII-done interrupt first, build a clause-22
 * read frame addressed to bp->phy_addr, wait (up to 100 polls) for the
 * MII-done bit, then extract the 16-bit data field into *val.
 * Returns the b44_wait_bit() status (0 on success); *val is written
 * unconditionally.  bp->phy_addr is used unconditionally -- see the
 * miilib FIXME further down in the file.
 * NOTE(review): the 'err' declaration, braces and final return are
 * missing from this sampled view.
 */
326 static int b44_readphy(struct b44 *bp, int reg, u32 *val)
330 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
331 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
332 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
333 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
334 (reg << MDIO_DATA_RA_SHIFT) |
335 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
336 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
337 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
/* Write the low 16 bits of 'val' to PHY register 'reg' over MDIO.
 * Mirror image of b44_readphy(): ack the stale MII-done interrupt,
 * issue a clause-22 write frame to bp->phy_addr with the data in the
 * low bits, and return the completion-wait status (0 on success).
 */
342 static int b44_writephy(struct b44 *bp, int reg, u32 val)
344 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
345 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
346 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
347 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
348 (reg << MDIO_DATA_RA_SHIFT) |
349 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
350 (val & MDIO_DATA_DATA)));
351 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
354 /* miilib interface */
355 /* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
356 * due to code existing before miilib use was added to this driver.
357 * Someone should remove this artificial driver limitation in
358 * b44_{read,write}phy. bp->phy_addr itself is fine (and needed).
/* miilib read adapter.  phy_id is ignored: b44_readphy() always talks to
 * bp->phy_addr (see the FIXME comment above this function).
 * NOTE(review): the 'val' declaration and the tail of the function
 * (error check / return of the read value) are missing from this
 * sampled view.
 */
360 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
363 struct b44 *bp = netdev_priv(dev);
364 int rc = b44_readphy(bp, location, &val);
/* miilib write adapter.  phy_id is ignored for the same reason as in
 * b44_mii_read(); the return value of b44_writephy() is discarded
 * because the miilib write hook is void.
 * NOTE(review): the trailing parameter ('val') of the signature and the
 * braces are missing from this sampled view.
 */
370 static void b44_mii_write(struct net_device *dev, int phy_id, int location,
373 struct b44 *bp = netdev_priv(dev);
374 b44_writephy(bp, location, val);
377 static int b44_phy_reset(struct b44 *bp)
382 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
386 err = b44_readphy(bp, MII_BMCR, &val);
388 if (val & BMCR_RESET) {
389 printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
/* Apply the given pause (flow-control) flags to the MAC.  Records the
 * flags in bp->flags, toggles RX flow control via RXCONFIG_FLOW, and
 * TX pause via MAC_FLOW_PAUSE_ENAB; 0xc0 is the RX-FIFO high-water
 * mark value programmed when TX pause is enabled.  The __-prefix
 * suggests the caller is expected to hold bp->lock -- confirm against
 * callers.
 * NOTE(review): the 'val' declaration, the else keywords, and braces
 * are missing from this sampled view.
 */
398 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
402 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
403 bp->flags |= pause_flags;
405 val = br32(bp, B44_RXCONFIG);
406 if (pause_flags & B44_FLAG_RX_PAUSE)
407 val |= RXCONFIG_FLOW;
409 val &= ~RXCONFIG_FLOW;
410 bw32(bp, B44_RXCONFIG, val);
412 val = br32(bp, B44_MAC_FLOW);
413 if (pause_flags & B44_FLAG_TX_PAUSE)
414 val |= (MAC_FLOW_PAUSE_ENAB |
415 (0xc0 & MAC_FLOW_RX_HI_WATER));
417 val &= ~MAC_FLOW_PAUSE_ENAB;
418 bw32(bp, B44_MAC_FLOW, val);
/* Resolve the autonegotiated pause capabilities (local advertisement vs.
 * link-partner ability) into driver pause flags, then apply them.  Per
 * the in-code comment, only RX pause is enabled by default (the
 * asymmetric-pause case); TX pause must be turned on via ethtool.
 * NOTE(review): the 'pause_enab' declaration/initialization and closing
 * braces are missing from this sampled view.
 */
421 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
425 /* The driver supports only rx pause by default because
426 the b44 mac tx pause mechanism generates excessive
428 Use ethtool to turn on b44 tx pause if necessary.
430 if ((local & ADVERTISE_PAUSE_CAP) &&
431 (local & ADVERTISE_PAUSE_ASYM)){
432 if ((remote & LPA_PAUSE_ASYM) &&
433 !(remote & LPA_PAUSE_CAP))
434 pause_enab |= B44_FLAG_RX_PAUSE;
437 __b44_set_flow_ctrl(bp, pause_enab);
440 static int b44_setup_phy(struct b44 *bp)
445 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
447 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
448 val & MII_ALEDCTRL_ALLMSK)) != 0)
450 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
452 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
453 val | MII_TLEDCTRL_ENABLE)) != 0)
456 if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
457 u32 adv = ADVERTISE_CSMA;
459 if (bp->flags & B44_FLAG_ADV_10HALF)
460 adv |= ADVERTISE_10HALF;
461 if (bp->flags & B44_FLAG_ADV_10FULL)
462 adv |= ADVERTISE_10FULL;
463 if (bp->flags & B44_FLAG_ADV_100HALF)
464 adv |= ADVERTISE_100HALF;
465 if (bp->flags & B44_FLAG_ADV_100FULL)
466 adv |= ADVERTISE_100FULL;
468 if (bp->flags & B44_FLAG_PAUSE_AUTO)
469 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
471 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
473 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
474 BMCR_ANRESTART))) != 0)
479 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
481 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
482 if (bp->flags & B44_FLAG_100_BASE_T)
483 bmcr |= BMCR_SPEED100;
484 if (bp->flags & B44_FLAG_FULL_DUPLEX)
485 bmcr |= BMCR_FULLDPLX;
486 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
489 /* Since we will not be negotiating there is no safe way
490 * to determine if the link partner supports flow control
491 * or not. So just disable it completely in this case.
493 b44_set_flow_ctrl(bp, 0, 0);
500 static void b44_stats_update(struct b44 *bp)
505 val = &bp->hw_stats.tx_good_octets;
506 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
507 *val++ += br32(bp, reg);
513 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
514 *val++ += br32(bp, reg);
518 static void b44_link_report(struct b44 *bp)
520 if (!netif_carrier_ok(bp->dev)) {
521 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
523 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
525 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
526 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
528 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
531 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
532 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
536 static void b44_check_phy(struct b44 *bp)
540 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
541 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
543 if (aux & MII_AUXCTRL_SPEED)
544 bp->flags |= B44_FLAG_100_BASE_T;
546 bp->flags &= ~B44_FLAG_100_BASE_T;
547 if (aux & MII_AUXCTRL_DUPLEX)
548 bp->flags |= B44_FLAG_FULL_DUPLEX;
550 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
552 if (!netif_carrier_ok(bp->dev) &&
553 (bmsr & BMSR_LSTATUS)) {
554 u32 val = br32(bp, B44_TX_CTRL);
555 u32 local_adv, remote_adv;
557 if (bp->flags & B44_FLAG_FULL_DUPLEX)
558 val |= TX_CTRL_DUPLEX;
560 val &= ~TX_CTRL_DUPLEX;
561 bw32(bp, B44_TX_CTRL, val);
563 if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
564 !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
565 !b44_readphy(bp, MII_LPA, &remote_adv))
566 b44_set_flow_ctrl(bp, local_adv, remote_adv);
569 netif_carrier_on(bp->dev);
571 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
573 netif_carrier_off(bp->dev);
577 if (bmsr & BMSR_RFAULT)
578 printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
581 printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
/* Periodic (once per HZ, i.e. one second) timer callback armed in
 * b44_open(): updates the MIB statistics under bp->lock, then re-arms
 * itself for jiffies + HZ.
 * NOTE(review): the link-state check call present between the lock and
 * the stats update in the full source is missing from this sampled view.
 */
586 static void b44_timer(unsigned long __opaque)
588 struct b44 *bp = (struct b44 *) __opaque;
590 spin_lock_irq(&bp->lock);
594 b44_stats_update(bp);
596 spin_unlock_irq(&bp->lock);
598 bp->timer.expires = jiffies + HZ;
599 add_timer(&bp->timer);
/* Reclaim completed TX descriptors.  Reads the hardware consumer index
 * from DMATX_STAT, walks tx_cons forward to it, unmapping each buffer
 * and freeing its skb with the irq-safe dev_kfree_skb_irq() (this runs
 * from the interrupt/poll path), then wakes the queue once more than
 * B44_TX_WAKEUP_THRESH descriptors are free again.  Finally clears the
 * GP timer register.
 * NOTE(review): declarations of 'cur'/'cons', the skb_size/len argument
 * of pci_unmap_single(), the tx_cons store, and braces are missing from
 * this sampled view.
 */
602 static void b44_tx(struct b44 *bp)
606 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
607 cur /= sizeof(struct dma_desc);
609 /* XXX needs updating when NETIF_F_SG is supported */
610 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
611 struct ring_info *rp = &bp->tx_buffers[cons];
612 struct sk_buff *skb = rp->skb;
616 pci_unmap_single(bp->pdev,
617 pci_unmap_addr(rp, mapping),
621 dev_kfree_skb_irq(skb);
625 if (netif_queue_stopped(bp->dev) &&
626 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
627 netif_wake_queue(bp->dev);
629 bw32(bp, B44_GPTIMER, 0);
632 /* Works like this. This chip writes a 'struct rx_header" 30 bytes
633 * before the DMA address you give it. So we allocate 30 more bytes
634 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
635 * point the chip at 30 bytes past where the rx_header will go.
637 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
640 struct ring_info *src_map, *map;
641 struct rx_header *rh;
649 src_map = &bp->rx_buffers[src_idx];
650 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
651 map = &bp->rx_buffers[dest_idx];
652 skb = dev_alloc_skb(RX_PKT_BUF_SZ);
656 mapping = pci_map_single(bp->pdev, skb->data,
660 /* Hardware bug work-around, the chip is unable to do PCI DMA
661 to/from anything above 1GB :-( */
662 if (dma_mapping_error(mapping) ||
663 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
665 if (!dma_mapping_error(mapping))
666 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
667 dev_kfree_skb_any(skb);
668 skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
671 mapping = pci_map_single(bp->pdev, skb->data,
674 if (dma_mapping_error(mapping) ||
675 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
676 if (!dma_mapping_error(mapping))
677 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
678 dev_kfree_skb_any(skb);
684 skb_reserve(skb, bp->rx_offset);
686 rh = (struct rx_header *)
687 (skb->data - bp->rx_offset);
692 pci_unmap_addr_set(map, mapping, mapping);
697 ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
698 if (dest_idx == (B44_RX_RING_SIZE - 1))
699 ctrl |= DESC_CTRL_EOT;
701 dp = &bp->rx_ring[dest_idx];
702 dp->ctrl = cpu_to_le32(ctrl);
703 dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
705 if (bp->flags & B44_FLAG_RX_RING_HACK)
706 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
707 dest_idx * sizeof(dp),
710 return RX_PKT_BUF_SZ;
713 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
715 struct dma_desc *src_desc, *dest_desc;
716 struct ring_info *src_map, *dest_map;
717 struct rx_header *rh;
721 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
722 dest_desc = &bp->rx_ring[dest_idx];
723 dest_map = &bp->rx_buffers[dest_idx];
724 src_desc = &bp->rx_ring[src_idx];
725 src_map = &bp->rx_buffers[src_idx];
727 dest_map->skb = src_map->skb;
728 rh = (struct rx_header *) src_map->skb->data;
731 pci_unmap_addr_set(dest_map, mapping,
732 pci_unmap_addr(src_map, mapping));
734 if (bp->flags & B44_FLAG_RX_RING_HACK)
735 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
736 src_idx * sizeof(src_desc),
739 ctrl = src_desc->ctrl;
740 if (dest_idx == (B44_RX_RING_SIZE - 1))
741 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
743 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
745 dest_desc->ctrl = ctrl;
746 dest_desc->addr = src_desc->addr;
750 if (bp->flags & B44_FLAG_RX_RING_HACK)
751 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
752 dest_idx * sizeof(dest_desc),
755 pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
760 static int b44_rx(struct b44 *bp, int budget)
766 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
767 prod /= sizeof(struct dma_desc);
770 while (cons != prod && budget > 0) {
771 struct ring_info *rp = &bp->rx_buffers[cons];
772 struct sk_buff *skb = rp->skb;
773 dma_addr_t map = pci_unmap_addr(rp, mapping);
774 struct rx_header *rh;
777 pci_dma_sync_single_for_cpu(bp->pdev, map,
780 rh = (struct rx_header *) skb->data;
781 len = cpu_to_le16(rh->len);
782 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
783 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
785 b44_recycle_rx(bp, cons, bp->rx_prod);
787 bp->stats.rx_dropped++;
797 len = cpu_to_le16(rh->len);
798 } while (len == 0 && i++ < 5);
806 if (len > RX_COPY_THRESHOLD) {
808 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
811 pci_unmap_single(bp->pdev, map,
812 skb_size, PCI_DMA_FROMDEVICE);
813 /* Leave out rx_header */
814 skb_put(skb, len+bp->rx_offset);
815 skb_pull(skb,bp->rx_offset);
817 struct sk_buff *copy_skb;
819 b44_recycle_rx(bp, cons, bp->rx_prod);
820 copy_skb = dev_alloc_skb(len + 2);
821 if (copy_skb == NULL)
822 goto drop_it_no_recycle;
824 copy_skb->dev = bp->dev;
825 skb_reserve(copy_skb, 2);
826 skb_put(copy_skb, len);
827 /* DMA sync done above, copy just the actual packet */
828 memcpy(copy_skb->data, skb->data+bp->rx_offset, len);
832 skb->ip_summed = CHECKSUM_NONE;
833 skb->protocol = eth_type_trans(skb, bp->dev);
834 netif_receive_skb(skb);
835 bp->dev->last_rx = jiffies;
839 bp->rx_prod = (bp->rx_prod + 1) &
840 (B44_RX_RING_SIZE - 1);
841 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
845 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
850 static int b44_poll(struct net_device *netdev, int *budget)
852 struct b44 *bp = netdev_priv(netdev);
855 spin_lock_irq(&bp->lock);
857 if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
858 /* spin_lock(&bp->tx_lock); */
860 /* spin_unlock(&bp->tx_lock); */
862 spin_unlock_irq(&bp->lock);
865 if (bp->istat & ISTAT_RX) {
866 int orig_budget = *budget;
869 if (orig_budget > netdev->quota)
870 orig_budget = netdev->quota;
872 work_done = b44_rx(bp, orig_budget);
874 *budget -= work_done;
875 netdev->quota -= work_done;
877 if (work_done >= orig_budget)
881 if (bp->istat & ISTAT_ERRORS) {
884 spin_lock_irqsave(&bp->lock, flags);
888 netif_wake_queue(bp->dev);
889 spin_unlock_irqrestore(&bp->lock, flags);
894 netif_rx_complete(netdev);
898 return (done ? 0 : 1);
901 static irqreturn_t b44_interrupt(int irq, void *dev_id)
903 struct net_device *dev = dev_id;
904 struct b44 *bp = netdev_priv(dev);
908 spin_lock(&bp->lock);
910 istat = br32(bp, B44_ISTAT);
911 imask = br32(bp, B44_IMASK);
913 /* The interrupt mask register controls which interrupt bits
914 * will actually raise an interrupt to the CPU when set by hw/firmware,
915 * but doesn't mask off the bits.
921 if (unlikely(!netif_running(dev))) {
922 printk(KERN_INFO "%s: late interrupt.\n", dev->name);
926 if (netif_rx_schedule_prep(dev)) {
927 /* NOTE: These writes are posted by the readback of
928 * the ISTAT register below.
931 __b44_disable_ints(bp);
932 __netif_rx_schedule(dev);
934 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
939 bw32(bp, B44_ISTAT, istat);
942 spin_unlock(&bp->lock);
943 return IRQ_RETVAL(handled);
946 static void b44_tx_timeout(struct net_device *dev)
948 struct b44 *bp = netdev_priv(dev);
950 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
953 spin_lock_irq(&bp->lock);
959 spin_unlock_irq(&bp->lock);
963 netif_wake_queue(dev);
966 static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
968 struct b44 *bp = netdev_priv(dev);
969 struct sk_buff *bounce_skb;
970 int rc = NETDEV_TX_OK;
972 u32 len, entry, ctrl;
975 spin_lock_irq(&bp->lock);
977 /* This is a hard error, log it. */
978 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
979 netif_stop_queue(dev);
980 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
985 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
986 if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
987 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
988 if (!dma_mapping_error(mapping))
989 pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
991 bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
996 mapping = pci_map_single(bp->pdev, bounce_skb->data,
997 len, PCI_DMA_TODEVICE);
998 if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
999 if (!dma_mapping_error(mapping))
1000 pci_unmap_single(bp->pdev, mapping,
1001 len, PCI_DMA_TODEVICE);
1002 dev_kfree_skb_any(bounce_skb);
1006 memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
1007 dev_kfree_skb_any(skb);
1011 entry = bp->tx_prod;
1012 bp->tx_buffers[entry].skb = skb;
1013 pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);
1015 ctrl = (len & DESC_CTRL_LEN);
1016 ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
1017 if (entry == (B44_TX_RING_SIZE - 1))
1018 ctrl |= DESC_CTRL_EOT;
1020 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1021 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1023 if (bp->flags & B44_FLAG_TX_RING_HACK)
1024 b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
1025 entry * sizeof(bp->tx_ring[0]),
1028 entry = NEXT_TX(entry);
1030 bp->tx_prod = entry;
1034 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1035 if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1036 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1037 if (bp->flags & B44_FLAG_REORDER_BUG)
1038 br32(bp, B44_DMATX_PTR);
1040 if (TX_BUFFS_AVAIL(bp) < 1)
1041 netif_stop_queue(dev);
1043 dev->trans_start = jiffies;
1046 spin_unlock_irq(&bp->lock);
1051 rc = NETDEV_TX_BUSY;
1055 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1057 struct b44 *bp = netdev_priv(dev);
1059 if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1062 if (!netif_running(dev)) {
1063 /* We'll just catch it later when the
1070 spin_lock_irq(&bp->lock);
1075 spin_unlock_irq(&bp->lock);
1077 b44_enable_ints(bp);
1082 /* Free up pending packets in all rx/tx rings.
1084 * The chip has been shut down and the driver detached from
1085 * the networking, so no interrupts or new tx packets will
1086 * end up in the driver. bp->lock is not held and we are not
1087 * in an interrupt context and thus may sleep.
/* Release every buffer still attached to the RX and TX rings.  Per the
 * comment block above: the chip is already shut down and the driver is
 * detached from the network stack, no lock is held, and sleeping is
 * allowed.  Each occupied slot is DMA-unmapped and its skb freed; empty
 * slots (rp->skb == NULL) are skipped.
 * NOTE(review): the 'i' declaration, the 'continue' statements after the
 * NULL checks, the unmap length arguments, and closing braces are
 * missing from this sampled view.
 */
1089 static void b44_free_rings(struct b44 *bp)
1091 struct ring_info *rp;
1094 for (i = 0; i < B44_RX_RING_SIZE; i++) {
1095 rp = &bp->rx_buffers[i];
1097 if (rp->skb == NULL)
1099 pci_unmap_single(bp->pdev,
1100 pci_unmap_addr(rp, mapping),
1102 PCI_DMA_FROMDEVICE);
1103 dev_kfree_skb_any(rp->skb);
1107 /* XXX needs changes once NETIF_F_SG is set... */
1108 for (i = 0; i < B44_TX_RING_SIZE; i++) {
1109 rp = &bp->tx_buffers[i];
1111 if (rp->skb == NULL)
1113 pci_unmap_single(bp->pdev,
1114 pci_unmap_addr(rp, mapping),
1117 dev_kfree_skb_any(rp->skb);
1122 /* Initialize tx/rx rings for packet processing.
1124 * The chip has been shut down and the driver detached from
1125 * the networking, so no interrupts or new tx packets will
1126 * end up in the driver.
1128 static void b44_init_rings(struct b44 *bp)
1134 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1135 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1137 if (bp->flags & B44_FLAG_RX_RING_HACK)
1138 dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
1140 PCI_DMA_BIDIRECTIONAL);
1142 if (bp->flags & B44_FLAG_TX_RING_HACK)
1143 dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
1147 for (i = 0; i < bp->rx_pending; i++) {
1148 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1154 * Must not be invoked with interrupt sources disabled and
1155 * the hardware shutdown down.
1157 static void b44_free_consistent(struct b44 *bp)
1159 kfree(bp->rx_buffers);
1160 bp->rx_buffers = NULL;
1161 kfree(bp->tx_buffers);
1162 bp->tx_buffers = NULL;
1164 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1165 dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
1170 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1171 bp->rx_ring, bp->rx_ring_dma);
1173 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1176 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1177 dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
1182 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1183 bp->tx_ring, bp->tx_ring_dma);
1185 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1190 * Must not be invoked with interrupt sources disabled and
1191 * the hardware shutdown down. Can sleep.
1193 static int b44_alloc_consistent(struct b44 *bp)
1197 size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1198 bp->rx_buffers = kzalloc(size, GFP_KERNEL);
1199 if (!bp->rx_buffers)
1202 size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1203 bp->tx_buffers = kzalloc(size, GFP_KERNEL);
1204 if (!bp->tx_buffers)
1207 size = DMA_TABLE_BYTES;
1208 bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
1210 /* Allocation may have failed due to pci_alloc_consistent
1211 insisting on use of GFP_DMA, which is more restrictive
1212 than necessary... */
1213 struct dma_desc *rx_ring;
1214 dma_addr_t rx_ring_dma;
1216 rx_ring = kzalloc(size, GFP_KERNEL);
1220 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
1224 if (dma_mapping_error(rx_ring_dma) ||
1225 rx_ring_dma + size > B44_DMA_MASK) {
1230 bp->rx_ring = rx_ring;
1231 bp->rx_ring_dma = rx_ring_dma;
1232 bp->flags |= B44_FLAG_RX_RING_HACK;
1235 bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1237 /* Allocation may have failed due to pci_alloc_consistent
1238 insisting on use of GFP_DMA, which is more restrictive
1239 than necessary... */
1240 struct dma_desc *tx_ring;
1241 dma_addr_t tx_ring_dma;
1243 tx_ring = kzalloc(size, GFP_KERNEL);
1247 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
1251 if (dma_mapping_error(tx_ring_dma) ||
1252 tx_ring_dma + size > B44_DMA_MASK) {
1257 bp->tx_ring = tx_ring;
1258 bp->tx_ring_dma = tx_ring_dma;
1259 bp->flags |= B44_FLAG_TX_RING_HACK;
1265 b44_free_consistent(bp);
1269 /* bp->lock is held. */
1270 static void b44_clear_stats(struct b44 *bp)
1274 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1275 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1277 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1281 /* bp->lock is held. */
1282 static void b44_chip_reset(struct b44 *bp)
1284 if (ssb_is_core_up(bp)) {
1285 bw32(bp, B44_RCV_LAZY, 0);
1286 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1287 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
1288 bw32(bp, B44_DMATX_CTRL, 0);
1289 bp->tx_prod = bp->tx_cons = 0;
1290 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1291 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1294 bw32(bp, B44_DMARX_CTRL, 0);
1295 bp->rx_prod = bp->rx_cons = 0;
1297 ssb_pci_setup(bp, (bp->core_unit == 0 ?
1304 b44_clear_stats(bp);
1306 /* Make PHY accessible. */
1307 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1308 (0x0d & MDIO_CTRL_MAXF_MASK)));
1309 br32(bp, B44_MDIO_CTRL);
1311 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1312 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1313 br32(bp, B44_ENET_CTRL);
1314 bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1316 u32 val = br32(bp, B44_DEVCTRL);
1318 if (val & DEVCTRL_EPR) {
1319 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1320 br32(bp, B44_DEVCTRL);
1323 bp->flags |= B44_FLAG_INTERNAL_PHY;
1327 /* bp->lock is held. */
1328 static void b44_halt(struct b44 *bp)
1330 b44_disable_ints(bp);
1334 /* bp->lock is held. */
1335 static void __b44_set_mac_addr(struct b44 *bp)
1337 bw32(bp, B44_CAM_CTRL, 0);
1338 if (!(bp->dev->flags & IFF_PROMISC)) {
1341 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1342 val = br32(bp, B44_CAM_CTRL);
1343 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1347 static int b44_set_mac_addr(struct net_device *dev, void *p)
1349 struct b44 *bp = netdev_priv(dev);
1350 struct sockaddr *addr = p;
1352 if (netif_running(dev))
1355 if (!is_valid_ether_addr(addr->sa_data))
1358 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1360 spin_lock_irq(&bp->lock);
1361 __b44_set_mac_addr(bp);
1362 spin_unlock_irq(&bp->lock);
1367 /* Called at device open time to get the chip ready for
1368 * packet processing. Invoked with bp->lock held.
1370 static void __b44_set_rx_mode(struct net_device *);
1371 static void b44_init_hw(struct b44 *bp, int full_reset)
1381 /* Enable CRC32, set proper LED modes and power on PHY */
1382 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1383 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1385 /* This sets the MAC address too. */
1386 __b44_set_rx_mode(bp->dev);
1388 /* MTU + eth header + possible VLAN tag + struct rx_header */
1389 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1390 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1392 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1394 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1395 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1396 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1397 (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
1398 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1400 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1401 bp->rx_prod = bp->rx_pending;
1403 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1405 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1406 (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
1409 val = br32(bp, B44_ENET_CTRL);
1410 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1413 static int b44_open(struct net_device *dev)
1415 struct b44 *bp = netdev_priv(dev);
1418 err = b44_alloc_consistent(bp);
1427 err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1428 if (unlikely(err < 0)) {
1431 b44_free_consistent(bp);
1435 init_timer(&bp->timer);
1436 bp->timer.expires = jiffies + HZ;
1437 bp->timer.data = (unsigned long) bp;
1438 bp->timer.function = b44_timer;
1439 add_timer(&bp->timer);
1441 b44_enable_ints(bp);
1442 netif_start_queue(dev);
1448 /*static*/ void b44_dump_state(struct b44 *bp)
1450 u32 val32, val32_2, val32_3, val32_4, val32_5;
1453 pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
1454 printk("DEBUG: PCI status [%04x] \n", val16);
1459 #ifdef CONFIG_NET_POLL_CONTROLLER
1461 * Polling receive - used by netconsole and other diagnostic tools
1462 * to allow network i/o with interrupts disabled.
/* Masks the device IRQ, runs the interrupt handler synchronously,
 * then unmasks — the standard netpoll pattern. */
1464 static void b44_poll_controller(struct net_device *dev)
1466 disable_irq(dev->irq);
1467 b44_interrupt(dev->irq, dev);
1468 enable_irq(dev->irq);
/* bwfilter_table - copy @bytes of @pp into the chip's WOL filter RAM
 * at @table_offset, one 32-bit word per FILT_ADDR/FILT_DATA write.
 * Assumes @pp is at least 4-byte aligned and sized to a multiple of 4
 * (callers pass B44_PATTERN_SIZE / B44_PMASK_SIZE buffers) — confirm.
 */
1472 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1475 u32 *pattern = (u32 *) pp;
1477 for (i = 0; i < bytes; i += sizeof(u32)) {
1478 bw32(bp, B44_FILT_ADDR, table_offset + i);
1479 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
/* b44_magic_pattern - build a pseudo magic-packet pattern and bitmask
 * starting at @offset in @ppattern: a sync field of 0xff bytes followed
 * by repeated copies of @macaddr, setting the corresponding bits in
 * @pmask. Returns a length value (return statement elided here).
 */
1483 static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1486 int k, j, len = offset;
1487 int ethaddr_bytes = ETH_ALEN;
1489 memset(ppattern + offset, 0xff, magicsync);
/* NOTE(review): casting the u8 mask buffer to unsigned long * for
 * set_bit() gives different byte layouts on big- vs little-endian
 * and may require alignment — verify on non-x86 targets. */
1490 for (j = 0; j < magicsync; j++)
1491 set_bit(len++, (unsigned long *) pmask);
1493 for (j = 0; j < B44_MAX_PATTERNS; j++) {
/* Truncate the final MAC copy if it would overrun the pattern. */
1494 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1495 ethaddr_bytes = ETH_ALEN;
1497 ethaddr_bytes = B44_PATTERN_SIZE - len;
1498 if (ethaddr_bytes <=0)
1500 for (k = 0; k< ethaddr_bytes; k++) {
1501 ppattern[offset + magicsync +
1502 (j * ETH_ALEN) + k] = macaddr[k];
1504 set_bit(len, (unsigned long *) pmask);
1510 /* Setup magic packet patterns in the b44 WOL
1511 * pattern matching filter.
/* Programs three patterns (IPv4/UDP, raw Ethernet II, IPv6/UDP
 * encapsulated magic packets) plus their masks into filter RAM,
 * then enables pattern-match wakeup via DEVCTRL_PFE. */
1513 static void b44_setup_pseudo_magicp(struct b44 *bp)
1517 int plen0, plen1, plen2;
1519 u8 pwol_mask[B44_PMASK_SIZE];
1521 pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1522 if (!pwol_pattern) {
1523 printk(KERN_ERR PFX "Memory not available for WOL\n");
1527 /* Ipv4 magic packet pattern - pattern 0.*/
1528 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1529 memset(pwol_mask, 0, B44_PMASK_SIZE);
1530 plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1531 B44_ETHIPV4UDP_HLEN);
1533 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1534 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1536 /* Raw ethernet II magic packet pattern - pattern 1 */
1537 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1538 memset(pwol_mask, 0, B44_PMASK_SIZE);
/* Offset argument for pattern 1 is on the elided line 1540. */
1539 plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1542 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1543 B44_PATTERN_BASE + B44_PATTERN_SIZE);
1544 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1545 B44_PMASK_BASE + B44_PMASK_SIZE);
1547 /* Ipv6 magic packet pattern - pattern 2 */
1548 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1549 memset(pwol_mask, 0, B44_PMASK_SIZE);
1550 plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1551 B44_ETHIPV6UDP_HLEN);
1553 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1554 B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1555 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1556 B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1558 kfree(pwol_pattern);
1560 /* set these pattern's lengths: one less than each real length */
1561 val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1562 bw32(bp, B44_WKUP_LEN, val);
1564 /* enable wakeup pattern matching */
1565 val = br32(bp, B44_DEVCTRL);
1566 bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
/* b44_setup_wol - arm Wake-on-LAN before suspend. B0-and-later chips
 * use the simple magic-packet-match mode (MPM) keyed on the station
 * address; older chips fall back to the pattern filter. Finally puts
 * the SB core and PCI function into their power-enable states.
 */
1570 static void b44_setup_wol(struct b44 *bp)
1575 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1577 if (bp->flags & B44_FLAG_B0_ANDLATER) {
1579 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
/* Station address: bytes 2-5 in ADDR_LO, bytes 0-1 in ADDR_HI
 * (big-endian packing of the MAC into the two registers). */
1581 val = bp->dev->dev_addr[2] << 24 |
1582 bp->dev->dev_addr[3] << 16 |
1583 bp->dev->dev_addr[4] << 8 |
1584 bp->dev->dev_addr[5];
1585 bw32(bp, B44_ADDR_LO, val);
1587 val = bp->dev->dev_addr[0] << 8 |
1588 bp->dev->dev_addr[1];
1589 bw32(bp, B44_ADDR_HI, val);
1591 val = br32(bp, B44_DEVCTRL);
1592 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
/* Pre-B0 parts: program the pseudo-magic pattern filter instead. */
1595 b44_setup_pseudo_magicp(bp);
1598 val = br32(bp, B44_SBTMSLOW);
1599 bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
/* Set the power-enable bit in the SSB PM control/status register. */
1601 pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
1602 pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
/* b44_close - net_device stop hook: quiesce TX/poll, kill the timer,
 * halt the chip under the lock, release the IRQ, arm WOL if enabled,
 * and free the DMA rings. Elided lines held the halt/reset calls.
 */
1606 static int b44_close(struct net_device *dev)
1608 struct b44 *bp = netdev_priv(dev);
1610 netif_stop_queue(dev);
1612 netif_poll_disable(dev);
1614 del_timer_sync(&bp->timer);
1616 spin_lock_irq(&bp->lock);
1623 netif_carrier_off(dev);
1625 spin_unlock_irq(&bp->lock);
1627 free_irq(dev->irq, dev);
1629 netif_poll_enable(dev);
1631 if (bp->flags & B44_FLAG_WOL_ENABLE) {
1636 b44_free_consistent(bp);
/* b44_get_stats - translate the cached hardware MIB counters
 * (bp->hw_stats, refreshed elsewhere) into struct net_device_stats.
 */
1641 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1643 struct b44 *bp = netdev_priv(dev);
1644 struct net_device_stats *nstat = &bp->stats;
1645 struct b44_hw_stats *hwstat = &bp->hw_stats;
1647 /* Convert HW stats into netdevice stats. */
1648 nstat->rx_packets = hwstat->rx_pkts;
1649 nstat->tx_packets = hwstat->tx_pkts;
1650 nstat->rx_bytes = hwstat->rx_octets;
1651 nstat->tx_bytes = hwstat->tx_octets;
1652 nstat->tx_errors = (hwstat->tx_jabber_pkts +
1653 hwstat->tx_oversize_pkts +
1654 hwstat->tx_underruns +
1655 hwstat->tx_excessive_cols +
1656 hwstat->tx_late_cols);
/* NOTE(review): multicast count taken from the *TX* multicast MIB
 * counter; rx_multicast_pkts may be intended — verify. */
1657 nstat->multicast = hwstat->tx_multicast_pkts;
1658 nstat->collisions = hwstat->tx_total_cols;
1660 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1661 hwstat->rx_undersize);
1662 nstat->rx_over_errors = hwstat->rx_missed_pkts;
1663 nstat->rx_frame_errors = hwstat->rx_align_errs;
1664 nstat->rx_crc_errors = hwstat->rx_crc_errs;
1665 nstat->rx_errors = (hwstat->rx_jabber_pkts +
1666 hwstat->rx_oversize_pkts +
1667 hwstat->rx_missed_pkts +
1668 hwstat->rx_crc_align_errs +
1669 hwstat->rx_undersize +
1670 hwstat->rx_crc_errs +
1671 hwstat->rx_align_errs +
1672 hwstat->rx_symbol_errs);
1674 nstat->tx_aborted_errors = hwstat->tx_underruns;
1676 /* Carrier lost counter seems to be broken for some devices */
1677 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
/* __b44_load_mcast - load up to B44_MCAST_TABLE_SIZE multicast
 * addresses from dev->mc_list into CAM slots starting at index 1
 * (slot 0 holds the station address). Return value line is elided;
 * presumably returns the next free CAM index — confirm with caller.
 */
1683 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1685 struct dev_mc_list *mclist;
1688 num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1689 mclist = dev->mc_list;
1690 for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1691 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
/* __b44_set_rx_mode - program RXCONFIG and the CAM for the current
 * promiscuous / allmulti / multicast-list state. Lock-free variant:
 * callers (b44_set_rx_mode, b44_init_hw) are expected to hold
 * bp->lock — confirm.
 */
1696 static void __b44_set_rx_mode(struct net_device *dev)
1698 struct b44 *bp = netdev_priv(dev);
1701 val = br32(bp, B44_RXCONFIG);
1702 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1703 if (dev->flags & IFF_PROMISC) {
1704 val |= RXCONFIG_PROMISC;
1705 bw32(bp, B44_RXCONFIG, val);
1707 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
/* Non-promiscuous path: refresh station address in the CAM. */
1710 __b44_set_mac_addr(bp);
1712 if ((dev->flags & IFF_ALLMULTI) ||
1713 (dev->mc_count > B44_MCAST_TABLE_SIZE))
1714 val |= RXCONFIG_ALLMULTI;
/* Otherwise load the multicast list; the zero entry written at the
 * returned index presumably terminates the CAM table — verify. */
1716 i = __b44_load_mcast(bp, dev);
1719 __b44_cam_write(bp, zero, i);
1721 bw32(bp, B44_RXCONFIG, val);
1722 val = br32(bp, B44_CAM_CTRL);
1723 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
/* b44_set_rx_mode - net_device set_multicast_list hook; takes
 * bp->lock and delegates to the lock-free worker above.
 */
1727 static void b44_set_rx_mode(struct net_device *dev)
1729 struct b44 *bp = netdev_priv(dev);
1731 spin_lock_irq(&bp->lock);
1732 __b44_set_rx_mode(dev);
1733 spin_unlock_irq(&bp->lock);
/* ethtool get_msglevel: report the driver's message-enable bitmask. */
1736 static u32 b44_get_msglevel(struct net_device *dev)
1738 struct b44 *bp = netdev_priv(dev);
1739 return bp->msg_enable;
/* ethtool set_msglevel: store the new message-enable bitmask. */
1742 static void b44_set_msglevel(struct net_device *dev, u32 value)
1744 struct b44 *bp = netdev_priv(dev);
1745 bp->msg_enable = value;
/* ethtool get_drvinfo: fill in driver name, version and PCI bus id.
 * strcpy into the fixed-size ethtool_drvinfo fields is safe only
 * because the source strings are short compile-time constants.
 */
1748 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1750 struct b44 *bp = netdev_priv(dev);
1751 struct pci_dev *pci_dev = bp->pdev;
1753 strcpy (info->driver, DRV_MODULE_NAME);
1754 strcpy (info->version, DRV_MODULE_VERSION);
1755 strcpy (info->bus_info, pci_name(pci_dev));
/* ethtool nway_reset: restart autonegotiation if the PHY has it
 * enabled. Return-value setup lines are elided from this excerpt.
 */
1758 static int b44_nway_reset(struct net_device *dev)
1760 struct b44 *bp = netdev_priv(dev);
1764 spin_lock_irq(&bp->lock);
/* BMCR is read twice — presumably to flush latched status on this
 * PHY; confirm before simplifying. */
1765 b44_readphy(bp, MII_BMCR, &bmcr);
1766 b44_readphy(bp, MII_BMCR, &bmcr);
1768 if (bmcr & BMCR_ANENABLE) {
1769 b44_writephy(bp, MII_BMCR,
1770 bmcr | BMCR_ANRESTART);
1773 spin_unlock_irq(&bp->lock);
/* ethtool get_settings: report supported/advertised modes, current
 * speed/duplex and autoneg state, all derived from bp->flags.
 */
1778 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1780 struct b44 *bp = netdev_priv(dev);
1782 cmd->supported = (SUPPORTED_Autoneg);
1783 cmd->supported |= (SUPPORTED_100baseT_Half |
1784 SUPPORTED_100baseT_Full |
1785 SUPPORTED_10baseT_Half |
1786 SUPPORTED_10baseT_Full |
1789 cmd->advertising = 0;
1790 if (bp->flags & B44_FLAG_ADV_10HALF)
1791 cmd->advertising |= ADVERTISED_10baseT_Half;
1792 if (bp->flags & B44_FLAG_ADV_10FULL)
1793 cmd->advertising |= ADVERTISED_10baseT_Full;
1794 if (bp->flags & B44_FLAG_ADV_100HALF)
1795 cmd->advertising |= ADVERTISED_100baseT_Half;
1796 if (bp->flags & B44_FLAG_ADV_100FULL)
1797 cmd->advertising |= ADVERTISED_100baseT_Full;
1798 cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1799 cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1800 SPEED_100 : SPEED_10;
1801 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1802 DUPLEX_FULL : DUPLEX_HALF;
1804 cmd->phy_address = bp->phy_addr;
1805 cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1806 XCVR_INTERNAL : XCVR_EXTERNAL;
1807 cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1808 AUTONEG_DISABLE : AUTONEG_ENABLE;
1809 if (cmd->autoneg == AUTONEG_ENABLE)
1810 cmd->advertising |= ADVERTISED_Autoneg;
/* If the interface is down, elided lines report placeholder values. */
1811 if (!netif_running(dev)){
/* ethtool set_settings: validate the request (no gigabit modes, only
 * 10/100 half/full when forcing), then encode it into bp->flags and
 * (elided) reapply link settings if the interface is running.
 */
1820 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1822 struct b44 *bp = netdev_priv(dev);
1824 /* We do not support gigabit. */
1825 if (cmd->autoneg == AUTONEG_ENABLE) {
1826 if (cmd->advertising &
1827 (ADVERTISED_1000baseT_Half |
1828 ADVERTISED_1000baseT_Full))
1830 } else if ((cmd->speed != SPEED_100 &&
1831 cmd->speed != SPEED_10) ||
1832 (cmd->duplex != DUPLEX_HALF &&
1833 cmd->duplex != DUPLEX_FULL)) {
1837 spin_lock_irq(&bp->lock);
1839 if (cmd->autoneg == AUTONEG_ENABLE) {
/* Autoneg: clear forced-link state and rebuild the advertisement
 * flags; an empty request means "advertise everything". */
1840 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1841 B44_FLAG_100_BASE_T |
1842 B44_FLAG_FULL_DUPLEX |
1843 B44_FLAG_ADV_10HALF |
1844 B44_FLAG_ADV_10FULL |
1845 B44_FLAG_ADV_100HALF |
1846 B44_FLAG_ADV_100FULL);
1847 if (cmd->advertising == 0) {
1848 bp->flags |= (B44_FLAG_ADV_10HALF |
1849 B44_FLAG_ADV_10FULL |
1850 B44_FLAG_ADV_100HALF |
1851 B44_FLAG_ADV_100FULL);
1853 if (cmd->advertising & ADVERTISED_10baseT_Half)
1854 bp->flags |= B44_FLAG_ADV_10HALF;
1855 if (cmd->advertising & ADVERTISED_10baseT_Full)
1856 bp->flags |= B44_FLAG_ADV_10FULL;
1857 if (cmd->advertising & ADVERTISED_100baseT_Half)
1858 bp->flags |= B44_FLAG_ADV_100HALF;
1859 if (cmd->advertising & ADVERTISED_100baseT_Full)
1860 bp->flags |= B44_FLAG_ADV_100FULL;
/* Forced mode: record the requested speed/duplex directly. */
1863 bp->flags |= B44_FLAG_FORCE_LINK;
1864 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1865 if (cmd->speed == SPEED_100)
1866 bp->flags |= B44_FLAG_100_BASE_T;
1867 if (cmd->duplex == DUPLEX_FULL)
1868 bp->flags |= B44_FLAG_FULL_DUPLEX;
1871 if (netif_running(dev))
1874 spin_unlock_irq(&bp->lock);
/* ethtool get_ringparam: report RX ring limits and current pending
 * count; TX limits omitted (see the XXX below).
 */
1879 static void b44_get_ringparam(struct net_device *dev,
1880 struct ethtool_ringparam *ering)
1882 struct b44 *bp = netdev_priv(dev);
1884 ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1885 ering->rx_pending = bp->rx_pending;
1887 /* XXX ethtool lacks a tx_max_pending, oops... */
/* ethtool set_ringparam: validate the requested ring sizes, then
 * (elided lines) restart the interface with the new RX/TX pending
 * counts under the lock and re-enable interrupts.
 */
1890 static int b44_set_ringparam(struct net_device *dev,
1891 struct ethtool_ringparam *ering)
1893 struct b44 *bp = netdev_priv(dev);
/* Reject sizes at/above ring capacity and unsupported mini/jumbo
 * rings (this hardware has none). */
1895 if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1896 (ering->rx_mini_pending != 0) ||
1897 (ering->rx_jumbo_pending != 0) ||
1898 (ering->tx_pending > B44_TX_RING_SIZE - 1))
1901 spin_lock_irq(&bp->lock);
1903 bp->rx_pending = ering->rx_pending;
1904 bp->tx_pending = ering->tx_pending;
1909 netif_wake_queue(bp->dev);
1910 spin_unlock_irq(&bp->lock);
1912 b44_enable_ints(bp);
/* ethtool get_pauseparam: report pause autoneg and RX/TX pause
 * state from bp->flags (field assignments split across elided lines).
 */
1917 static void b44_get_pauseparam(struct net_device *dev,
1918 struct ethtool_pauseparam *epause)
1920 struct b44 *bp = netdev_priv(dev);
1923 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1925 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1927 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
/* ethtool set_pauseparam: record the requested pause configuration
 * in bp->flags and apply it — via autoneg restart (elided) when
 * PAUSE_AUTO, otherwise directly through __b44_set_flow_ctrl().
 */
1930 static int b44_set_pauseparam(struct net_device *dev,
1931 struct ethtool_pauseparam *epause)
1933 struct b44 *bp = netdev_priv(dev);
1935 spin_lock_irq(&bp->lock);
1936 if (epause->autoneg)
1937 bp->flags |= B44_FLAG_PAUSE_AUTO;
1939 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1940 if (epause->rx_pause)
1941 bp->flags |= B44_FLAG_RX_PAUSE;
1943 bp->flags &= ~B44_FLAG_RX_PAUSE;
1944 if (epause->tx_pause)
1945 bp->flags |= B44_FLAG_TX_PAUSE;
1947 bp->flags &= ~B44_FLAG_TX_PAUSE;
1948 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1953 __b44_set_flow_ctrl(bp, bp->flags);
1955 spin_unlock_irq(&bp->lock);
1957 b44_enable_ints(bp);
/* ethtool get_strings: copy the statistics name table verbatim
 * (stringset switch is elided; presumably ETH_SS_STATS only).
 */
1962 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1966 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
/* ethtool get_stats_count: number of entries in b44_gstrings. */
1971 static int b44_get_stats_count(struct net_device *dev)
1973 return ARRAY_SIZE(b44_gstrings);
/* ethtool get_ethtool_stats: refresh the MIB counters then copy them
 * out (copy loop body elided).
 * NOTE(review): treating &bp->hw_stats.tx_good_octets as a flat u32
 * array assumes the struct is exactly a packed sequence of u32
 * counters matching b44_gstrings order — verify against the header.
 */
1976 static void b44_get_ethtool_stats(struct net_device *dev,
1977 struct ethtool_stats *stats, u64 *data)
1979 struct b44 *bp = netdev_priv(dev);
1980 u32 *val = &bp->hw_stats.tx_good_octets;
1983 spin_lock_irq(&bp->lock);
1985 b44_stats_update(bp);
1987 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1990 spin_unlock_irq(&bp->lock);
/* ethtool get_wol: only magic-packet wake is supported; report it
 * active when B44_FLAG_WOL_ENABLE is set. No SecureOn password.
 */
1993 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1995 struct b44 *bp = netdev_priv(dev);
1997 wol->supported = WAKE_MAGIC;
1998 if (bp->flags & B44_FLAG_WOL_ENABLE)
1999 wol->wolopts = WAKE_MAGIC;
2002 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool set_wol: latch the magic-packet enable into bp->flags;
 * the hardware is actually armed later in b44_setup_wol() at suspend.
 */
2005 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2007 struct b44 *bp = netdev_priv(dev);
2009 spin_lock_irq(&bp->lock);
2010 if (wol->wolopts & WAKE_MAGIC)
2011 bp->flags |= B44_FLAG_WOL_ENABLE;
2013 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2014 spin_unlock_irq(&bp->lock);
/* ethtool operations table wiring the handlers defined above. */
2019 static const struct ethtool_ops b44_ethtool_ops = {
2020 .get_drvinfo = b44_get_drvinfo,
2021 .get_settings = b44_get_settings,
2022 .set_settings = b44_set_settings,
2023 .nway_reset = b44_nway_reset,
2024 .get_link = ethtool_op_get_link,
2025 .get_wol = b44_get_wol,
2026 .set_wol = b44_set_wol,
2027 .get_ringparam = b44_get_ringparam,
2028 .set_ringparam = b44_set_ringparam,
2029 .get_pauseparam = b44_get_pauseparam,
2030 .set_pauseparam = b44_set_pauseparam,
2031 .get_msglevel = b44_get_msglevel,
2032 .set_msglevel = b44_set_msglevel,
2033 .get_strings = b44_get_strings,
2034 .get_stats_count = b44_get_stats_count,
2035 .get_ethtool_stats = b44_get_ethtool_stats,
2036 .get_perm_addr = ethtool_op_get_perm_addr,
/* b44_ioctl - net_device ioctl hook: delegate MII register access to
 * generic_mii_ioctl under the lock; rejects calls while the interface
 * is down (error value on the elided line).
 */
2039 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2041 struct mii_ioctl_data *data = if_mii(ifr);
2042 struct b44 *bp = netdev_priv(dev);
2045 if (!netif_running(dev))
2048 spin_lock_irq(&bp->lock);
2049 err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2050 spin_unlock_irq(&bp->lock);
2055 /* Read 128-bytes of EEPROM. */
/* Reads 16-bit words from MMIO offset 4096 into @data.
 * NOTE(review): cpu_to_le16(readw(...)) — readw converts from
 * little-endian bus order to CPU order, and cpu_to_le16 converts
 * back, so data[] presumably holds the raw EEPROM byte order on all
 * architectures; confirm on a big-endian target.
 */
2056 static int b44_read_eeprom(struct b44 *bp, u8 *data)
2059 u16 *ptr = (u16 *) data;
2061 for (i = 0; i < 128; i += 2)
2062 ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
/* b44_get_invariants - one-time probe-side setup: read the EEPROM,
 * extract and validate the MAC address (stored byte-swapped within
 * each 16-bit word, hence the 79/78, 81/80, 83/82 ordering), record
 * the PHY address, core unit and DMA offset, and default irq mask.
 */
2067 static int __devinit b44_get_invariants(struct b44 *bp)
2072 err = b44_read_eeprom(bp, &eeprom[0]);
2076 bp->dev->dev_addr[0] = eeprom[79];
2077 bp->dev->dev_addr[1] = eeprom[78];
2078 bp->dev->dev_addr[2] = eeprom[81];
2079 bp->dev->dev_addr[3] = eeprom[80];
2080 bp->dev->dev_addr[4] = eeprom[83];
2081 bp->dev->dev_addr[5] = eeprom[82];
2083 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
2084 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
2088 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2090 bp->phy_addr = eeprom[90] & 0x1f;
2092 /* With this, plus the rx_header prepended to the data by the
2093 * hardware, we'll land the ethernet header on a 2-byte boundary.
2097 bp->imask = IMASK_DEF;
2099 bp->core_unit = ssb_core_unit(bp);
2100 bp->dma_offset = SB_PCI_DMA;
2102 /* XXX - really required?
2103 bp->flags |= B44_FLAG_BUGGY_TXPTR;
/* B0 and later silicon supports the simpler MPM wake mode. */
2106 if (ssb_get_core_rev(bp) >= 7)
2107 bp->flags |= B44_FLAG_B0_ANDLATER;
/* b44_init_one - PCI probe: enable the device, claim its MMIO BAR,
 * configure the 30-bit DMA mask, allocate and wire up the netdevice,
 * read chip invariants, register with the network stack, and print
 * the MAC address. Error paths unwind via the labels at the bottom
 * (several label lines are elided from this excerpt).
 */
2113 static int __devinit b44_init_one(struct pci_dev *pdev,
2114 const struct pci_device_id *ent)
2116 static int b44_version_printed = 0;
2117 unsigned long b44reg_base, b44reg_len;
2118 struct net_device *dev;
/* Print the driver banner only on the first probed device. */
2122 if (b44_version_printed++ == 0)
2123 printk(KERN_INFO "%s", version);
2125 err = pci_enable_device(pdev);
2127 dev_err(&pdev->dev, "Cannot enable PCI device, "
2132 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2134 "Cannot find proper PCI device "
2135 "base address, aborting.\n");
2137 goto err_out_disable_pdev;
2140 err = pci_request_regions(pdev, DRV_MODULE_NAME);
2143 "Cannot obtain PCI resources, aborting.\n");
2144 goto err_out_disable_pdev;
2147 pci_set_master(pdev);
/* The 4400 can only DMA within the low 1GB (30-bit mask). */
2149 err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
2151 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2152 goto err_out_free_res;
2155 err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
2157 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2158 goto err_out_free_res;
2161 b44reg_base = pci_resource_start(pdev, 0);
2162 b44reg_len = pci_resource_len(pdev, 0);
2164 dev = alloc_etherdev(sizeof(*bp));
2166 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
2168 goto err_out_free_res;
2171 SET_MODULE_OWNER(dev);
2172 SET_NETDEV_DEV(dev,&pdev->dev);
2174 /* No interesting netdevice features in this card... */
2177 bp = netdev_priv(dev);
2181 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2183 spin_lock_init(&bp->lock);
2185 bp->regs = ioremap(b44reg_base, b44reg_len);
2186 if (bp->regs == 0UL) {
2187 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
2189 goto err_out_free_dev;
2192 bp->rx_pending = B44_DEF_RX_RING_PENDING;
2193 bp->tx_pending = B44_DEF_TX_RING_PENDING;
/* Hook up the pre-net_device_ops-era method pointers. */
2195 dev->open = b44_open;
2196 dev->stop = b44_close;
2197 dev->hard_start_xmit = b44_start_xmit;
2198 dev->get_stats = b44_get_stats;
2199 dev->set_multicast_list = b44_set_rx_mode;
2200 dev->set_mac_address = b44_set_mac_addr;
2201 dev->do_ioctl = b44_ioctl;
2202 dev->tx_timeout = b44_tx_timeout;
2203 dev->poll = b44_poll;
2205 dev->watchdog_timeo = B44_TX_TIMEOUT;
2206 #ifdef CONFIG_NET_POLL_CONTROLLER
2207 dev->poll_controller = b44_poll_controller;
2209 dev->change_mtu = b44_change_mtu;
2210 dev->irq = pdev->irq;
2211 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2213 netif_carrier_off(dev);
2215 err = b44_get_invariants(bp);
2218 "Problem fetching invariants of chip, aborting.\n");
2219 goto err_out_iounmap;
/* MII library glue used by b44_ioctl's generic_mii_ioctl. */
2222 bp->mii_if.dev = dev;
2223 bp->mii_if.mdio_read = b44_mii_read;
2224 bp->mii_if.mdio_write = b44_mii_write;
2225 bp->mii_if.phy_id = bp->phy_addr;
2226 bp->mii_if.phy_id_mask = 0x1f;
2227 bp->mii_if.reg_num_mask = 0x1f;
2229 /* By default, advertise all speed/duplex settings. */
2230 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2231 B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2233 /* By default, auto-negotiate PAUSE. */
2234 bp->flags |= B44_FLAG_PAUSE_AUTO;
2236 err = register_netdev(dev);
2238 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2239 goto err_out_iounmap;
2242 pci_set_drvdata(pdev, dev);
2244 pci_save_state(bp->pdev);
2246 /* Chip reset provides power to the b44 MAC & PCI cores, which
2247 * is necessary for MAC register access.
2251 printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2252 for (i = 0; i < 6; i++)
2253 printk("%2.2x%c", dev->dev_addr[i],
2254 i == 5 ? '\n' : ':');
/* Error unwind labels (err_out_iounmap / err_out_free_dev elided). */
2265 pci_release_regions(pdev);
2267 err_out_disable_pdev:
2268 pci_disable_device(pdev);
2269 pci_set_drvdata(pdev, NULL);
/* b44_remove_one - PCI remove: unregister the netdevice and release
 * PCI resources (iounmap/free_netdev presumably on the elided lines).
 */
2273 static void __devexit b44_remove_one(struct pci_dev *pdev)
2275 struct net_device *dev = pci_get_drvdata(pdev);
2276 struct b44 *bp = netdev_priv(dev);
2278 unregister_netdev(dev);
2281 pci_release_regions(pdev);
2282 pci_disable_device(pdev);
2283 pci_set_drvdata(pdev, NULL);
/* b44_suspend - PM suspend: if running, stop the timer, halt the
 * chip and detach the netdevice under the lock, release the IRQ,
 * arm WOL when enabled, and power down the PCI device.
 */
2286 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2288 struct net_device *dev = pci_get_drvdata(pdev);
2289 struct b44 *bp = netdev_priv(dev);
/* Nothing to quiesce when the interface is down. */
2291 if (!netif_running(dev))
2294 del_timer_sync(&bp->timer);
2296 spin_lock_irq(&bp->lock);
2299 netif_carrier_off(bp->dev);
2300 netif_device_detach(bp->dev);
2303 spin_unlock_irq(&bp->lock);
2305 free_irq(dev->irq, dev);
2306 if (bp->flags & B44_FLAG_WOL_ENABLE) {
2310 pci_disable_device(pdev);
/* b44_resume - PM resume: restore PCI state, re-acquire the IRQ,
 * reinitialize the chip (elided) and restart the timer and queue.
 */
2314 static int b44_resume(struct pci_dev *pdev)
2316 struct net_device *dev = pci_get_drvdata(pdev);
2317 struct b44 *bp = netdev_priv(dev);
2319 pci_restore_state(pdev);
2320 pci_enable_device(pdev);
2321 pci_set_master(pdev);
2323 if (!netif_running(dev))
/* NOTE(review): a request_irq failure here is only logged and the
 * resume continues without an interrupt line — verify this is the
 * intended best-effort behavior. */
2326 if (request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev))
2327 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2329 spin_lock_irq(&bp->lock);
2333 netif_device_attach(bp->dev);
2334 spin_unlock_irq(&bp->lock);
2336 bp->timer.expires = jiffies + HZ;
2337 add_timer(&bp->timer);
2339 b44_enable_ints(bp);
2340 netif_wake_queue(dev);
/* PCI driver descriptor: probe/remove plus PM suspend/resume hooks. */
2344 static struct pci_driver b44_driver = {
2345 .name = DRV_MODULE_NAME,
2346 .id_table = b44_pci_tbl,
2347 .probe = b44_init_one,
2348 .remove = __devexit_p(b44_remove_one),
2349 .suspend = b44_suspend,
2350 .resume = b44_resume,
/* Module init: compute DMA-descriptor sync parameters from the CPU
 * cache line size, then register the PCI driver.
 */
2353 static int __init b44_init(void)
2355 unsigned int dma_desc_align_size = dma_get_cache_alignment();
2357 /* Setup paramaters for syncing RX/TX DMA descriptors */
/* Mask for rounding descriptor addresses down to a cache line;
 * sync size is the larger of a cache line and one descriptor. */
2358 dma_desc_align_mask = ~(dma_desc_align_size - 1);
2359 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2361 return pci_register_driver(&b44_driver);
/* Module exit: unregister the PCI driver (undoes b44_init). */
2364 static void __exit b44_cleanup(void)
2366 pci_unregister_driver(&b44_driver);
2369 module_init(b44_init);
2370 module_exit(b44_cleanup);