1 /* b44.c: Broadcom 4400 device driver.
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
5 * Copyright (C) 2006 Broadcom Corporation.
7 * Distribute under GPL.
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/types.h>
14 #include <linux/netdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/if_vlan.h>
19 #include <linux/etherdevice.h>
20 #include <linux/pci.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/dma-mapping.h>
25 #include <asm/uaccess.h>
31 #define DRV_MODULE_NAME "b44"
32 #define PFX DRV_MODULE_NAME ": "
33 #define DRV_MODULE_VERSION "1.01"
34 #define DRV_MODULE_RELDATE "Jun 16, 2006"
36 #define B44_DEF_MSG_ENABLE \
46 /* length of time before we decide the hardware is borked,
47 * and dev->tx_timeout() should be called to fix the problem
49 #define B44_TX_TIMEOUT (5 * HZ)
51 /* hardware minimum and maximum for a single frame's data payload */
52 #define B44_MIN_MTU 60
53 #define B44_MAX_MTU 1500
55 #define B44_RX_RING_SIZE 512
56 #define B44_DEF_RX_RING_PENDING 200
57 #define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
59 #define B44_TX_RING_SIZE 512
60 #define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
61 #define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
64 #define TX_RING_GAP(BP) \
65 (B44_TX_RING_SIZE - (BP)->tx_pending)
66 #define TX_BUFFS_AVAIL(BP) \
67 (((BP)->tx_cons <= (BP)->tx_prod) ? \
68 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
69 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
70 #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
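/* Illustrative worked example (not driver code): with the default tx_pending
 * of 511 on the 512-entry ring, TX_RING_GAP() is 1.  If tx_cons = 5 and
 * tx_prod = 510, the first branch of TX_BUFFS_AVAIL() applies and yields
 * 5 + 511 - 510 = 6 free descriptors; NEXT_TX(511) wraps back to 0 via the
 * power-of-two mask.
 */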
72 #define RX_PKT_OFFSET 30
73 #define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET + 64)
75 /* minimum number of free TX descriptors required to wake up TX process */
76 #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
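/* With B44_TX_RING_SIZE = 512 this threshold is 128: b44_tx() restarts a
 * stopped queue only once more than 128 descriptors are free again.
 */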
78 /* b44 internal pattern match filter info */
79 #define B44_PATTERN_BASE 0x400
80 #define B44_PATTERN_SIZE 0x80
81 #define B44_PMASK_BASE 0x600
82 #define B44_PMASK_SIZE 0x10
83 #define B44_MAX_PATTERNS 16
84 #define B44_ETHIPV6UDP_HLEN 62
85 #define B44_ETHIPV4UDP_HLEN 42
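/* These are the sizes of an Ethernet+IPv4+UDP header (14 + 20 + 8 = 42) and
 * an Ethernet+IPv6+UDP header (14 + 40 + 8 = 62); they are used below as the
 * offsets at which the wake-up pattern payload starts.
 */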
87 static char version[] __devinitdata =
88 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
90 MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
91 MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
92 MODULE_LICENSE("GPL");
93 MODULE_VERSION(DRV_MODULE_VERSION);
95 static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
96 module_param(b44_debug, int, 0);
97 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
99 static struct pci_device_id b44_pci_tbl[] = {
100 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
101 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
102 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
103 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
104 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
105 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
106 { } /* terminate list with empty entry */
109 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
111 static void b44_halt(struct b44 *);
112 static void b44_init_rings(struct b44 *);
114 #define B44_FULL_RESET 1
115 #define B44_FULL_RESET_SKIP_PHY 2
116 #define B44_PARTIAL_RESET 3
118 static void b44_init_hw(struct b44 *, int);
120 static int dma_desc_align_mask;
121 static int dma_desc_sync_size;
123 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
124 #define _B44(x...) # x,
129 static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
131 unsigned long offset,
132 enum dma_data_direction dir)
134 dma_sync_single_range_for_device(&pdev->dev, dma_base,
135 offset & dma_desc_align_mask,
136 dma_desc_sync_size, dir);
139 static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
141 unsigned long offset,
142 enum dma_data_direction dir)
144 dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
145 offset & dma_desc_align_mask,
146 dma_desc_sync_size, dir);
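/* These two helpers are only needed for the "ring hack" case (see
 * b44_alloc_consistent()): when a descriptor ring cannot be allocated with
 * pci_alloc_consistent() inside the 1GB DMA limit it is kmalloc()ed and
 * streaming-mapped instead, so every descriptor update must be synced by
 * hand.  dma_desc_align_mask and dma_desc_sync_size, set up in b44_init(),
 * round each sync down to an aligned boundary covering a whole descriptor.
 */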
149 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
151 return readl(bp->regs + reg);
154 static inline void bw32(const struct b44 *bp,
155 unsigned long reg, unsigned long val)
157 writel(val, bp->regs + reg);
160 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
161 u32 bit, unsigned long timeout, const int clear)
165 for (i = 0; i < timeout; i++) {
166 u32 val = br32(bp, reg);
168 if (clear && !(val & bit))
170 if (!clear && (val & bit))
175 printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
179 (clear ? "clear" : "set"));
185 /* Sonics SiliconBackplane support routines. ROFL, you should see all the
186 * buzz words used on this company's website :-)
188 * All of these routines must be invoked with bp->lock held and
189 * interrupts disabled.
192 #define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */
193 #define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */
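/* Every DMA address handed to the chip has SB_PCI_DMA added to it (via
 * bp->dma_offset) so that it falls inside the backplane-to-PCI client
 * window.  That window is only 1GB, which is why the driver restricts
 * buffers to DMA_30BIT_MASK and falls back to bounce buffers elsewhere.
 */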
195 static u32 ssb_get_core_rev(struct b44 *bp)
197 return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
200 static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
202 u32 bar_orig, pci_rev, val;
204 pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
205 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
206 pci_rev = ssb_get_core_rev(bp);
208 val = br32(bp, B44_SBINTVEC);
210 bw32(bp, B44_SBINTVEC, val);
212 val = br32(bp, SSB_PCI_TRANS_2);
213 val |= SSB_PCI_PREF | SSB_PCI_BURST;
214 bw32(bp, SSB_PCI_TRANS_2, val);
216 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
221 static void ssb_core_disable(struct b44 *bp)
223 if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
226 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
227 b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
228 b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
229 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
230 SBTMSLOW_REJECT | SBTMSLOW_RESET));
231 br32(bp, B44_SBTMSLOW);
233 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
234 br32(bp, B44_SBTMSLOW);
238 static void ssb_core_reset(struct b44 *bp)
242 ssb_core_disable(bp);
243 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
244 br32(bp, B44_SBTMSLOW);
247 /* Clear SERR if set, this is a hw bug workaround. */
248 if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
249 bw32(bp, B44_SBTMSHIGH, 0);
251 val = br32(bp, B44_SBIMSTATE);
252 if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
253 bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
255 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
256 br32(bp, B44_SBTMSLOW);
259 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
260 br32(bp, B44_SBTMSLOW);
264 static int ssb_core_unit(struct b44 *bp)
267 u32 val = br32(bp, B44_SBADMATCH0);
270 type = val & SBADMATCH0_TYPE_MASK;
273 base = val & SBADMATCH0_BS0_MASK;
277 base = val & SBADMATCH0_BS1_MASK;
282 base = val & SBADMATCH0_BS2_MASK;
289 static int ssb_is_core_up(struct b44 *bp)
291 return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
295 static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
299 val = ((u32) data[2]) << 24;
300 val |= ((u32) data[3]) << 16;
301 val |= ((u32) data[4]) << 8;
302 val |= ((u32) data[5]) << 0;
303 bw32(bp, B44_CAM_DATA_LO, val);
304 val = (CAM_DATA_HI_VALID |
305 (((u32) data[0]) << 8) |
306 (((u32) data[1]) << 0));
307 bw32(bp, B44_CAM_DATA_HI, val);
308 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
309 (index << CAM_CTRL_INDEX_SHIFT)));
310 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
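/* Example of the split performed above: for a station address
 * aa:bb:cc:dd:ee:ff, CAM_DATA_LO receives 0xccddeeff and CAM_DATA_HI
 * receives CAM_DATA_HI_VALID | 0xaabb before the write is kicked off.
 */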
313 static inline void __b44_disable_ints(struct b44 *bp)
315 bw32(bp, B44_IMASK, 0);
318 static void b44_disable_ints(struct b44 *bp)
320 __b44_disable_ints(bp);
322 /* Flush posted writes. */
326 static void b44_enable_ints(struct b44 *bp)
328 bw32(bp, B44_IMASK, bp->imask);
331 static int b44_readphy(struct b44 *bp, int reg, u32 *val)
335 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
336 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
337 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
338 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
339 (reg << MDIO_DATA_RA_SHIFT) |
340 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
341 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
342 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
347 static int b44_writephy(struct b44 *bp, int reg, u32 val)
349 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
350 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
351 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
352 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
353 (reg << MDIO_DATA_RA_SHIFT) |
354 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
355 (val & MDIO_DATA_DATA)));
356 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
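/* Both PHY accessors above assemble what is effectively a clause-22 MII
 * management frame in B44_MDIO_DATA (start bits, opcode, PHY address,
 * register address, turnaround, 16-bit data) and then poll EMAC_INT_MII
 * for completion.
 */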
359 /* miilib interface */
360 /* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
361 * due to code existing before miilib use was added to this driver.
362 * Someone should remove this artificial driver limitation in
363 * b44_{read,write}phy. bp->phy_addr itself is fine (and needed).
365 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
368 struct b44 *bp = netdev_priv(dev);
369 int rc = b44_readphy(bp, location, &val);
375 static void b44_mii_write(struct net_device *dev, int phy_id, int location,
378 struct b44 *bp = netdev_priv(dev);
379 b44_writephy(bp, location, val);
382 static int b44_phy_reset(struct b44 *bp)
387 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
391 err = b44_readphy(bp, MII_BMCR, &val);
393 if (val & BMCR_RESET) {
394 printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
403 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
407 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
408 bp->flags |= pause_flags;
410 val = br32(bp, B44_RXCONFIG);
411 if (pause_flags & B44_FLAG_RX_PAUSE)
412 val |= RXCONFIG_FLOW;
414 val &= ~RXCONFIG_FLOW;
415 bw32(bp, B44_RXCONFIG, val);
417 val = br32(bp, B44_MAC_FLOW);
418 if (pause_flags & B44_FLAG_TX_PAUSE)
419 val |= (MAC_FLOW_PAUSE_ENAB |
420 (0xc0 & MAC_FLOW_RX_HI_WATER));
422 val &= ~MAC_FLOW_PAUSE_ENAB;
423 bw32(bp, B44_MAC_FLOW, val);
426 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
430 /* The driver supports only rx pause by default because
431 the b44 mac tx pause mechanism generates excessive pause frames.
433 Use ethtool to turn on b44 tx pause if necessary.
435 if ((local & ADVERTISE_PAUSE_CAP) &&
436 (local & ADVERTISE_PAUSE_ASYM)) {
437 if ((remote & LPA_PAUSE_ASYM) &&
438 !(remote & LPA_PAUSE_CAP))
439 pause_enab |= B44_FLAG_RX_PAUSE;
442 __b44_set_flow_ctrl(bp, pause_enab);
445 static int b44_setup_phy(struct b44 *bp)
450 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
452 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
453 val & MII_ALEDCTRL_ALLMSK)) != 0)
455 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
457 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
458 val | MII_TLEDCTRL_ENABLE)) != 0)
461 if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
462 u32 adv = ADVERTISE_CSMA;
464 if (bp->flags & B44_FLAG_ADV_10HALF)
465 adv |= ADVERTISE_10HALF;
466 if (bp->flags & B44_FLAG_ADV_10FULL)
467 adv |= ADVERTISE_10FULL;
468 if (bp->flags & B44_FLAG_ADV_100HALF)
469 adv |= ADVERTISE_100HALF;
470 if (bp->flags & B44_FLAG_ADV_100FULL)
471 adv |= ADVERTISE_100FULL;
473 if (bp->flags & B44_FLAG_PAUSE_AUTO)
474 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
476 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
478 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
479 BMCR_ANRESTART))) != 0)
484 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
486 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
487 if (bp->flags & B44_FLAG_100_BASE_T)
488 bmcr |= BMCR_SPEED100;
489 if (bp->flags & B44_FLAG_FULL_DUPLEX)
490 bmcr |= BMCR_FULLDPLX;
491 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
494 /* Since we will not be negotiating there is no safe way
495 * to determine if the link partner supports flow control
496 * or not. So just disable it completely in this case.
498 b44_set_flow_ctrl(bp, 0, 0);
505 static void b44_stats_update(struct b44 *bp)
510 val = &bp->hw_stats.tx_good_octets;
511 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
512 *val++ += br32(bp, reg);
518 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
519 *val++ += br32(bp, reg);
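/* Note: the pointer walk above relies on the fields of bp->hw_stats, from
 * tx_good_octets onward, being declared in exactly the same order as the
 * MIB counter registers; b44_get_ethtool_stats() makes the same assumption.
 */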
523 static void b44_link_report(struct b44 *bp)
525 if (!netif_carrier_ok(bp->dev)) {
526 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
528 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
530 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
531 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
533 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
536 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
537 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
541 static void b44_check_phy(struct b44 *bp)
545 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
546 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
548 if (aux & MII_AUXCTRL_SPEED)
549 bp->flags |= B44_FLAG_100_BASE_T;
551 bp->flags &= ~B44_FLAG_100_BASE_T;
552 if (aux & MII_AUXCTRL_DUPLEX)
553 bp->flags |= B44_FLAG_FULL_DUPLEX;
555 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
557 if (!netif_carrier_ok(bp->dev) &&
558 (bmsr & BMSR_LSTATUS)) {
559 u32 val = br32(bp, B44_TX_CTRL);
560 u32 local_adv, remote_adv;
562 if (bp->flags & B44_FLAG_FULL_DUPLEX)
563 val |= TX_CTRL_DUPLEX;
565 val &= ~TX_CTRL_DUPLEX;
566 bw32(bp, B44_TX_CTRL, val);
568 if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
569 !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
570 !b44_readphy(bp, MII_LPA, &remote_adv))
571 b44_set_flow_ctrl(bp, local_adv, remote_adv);
574 netif_carrier_on(bp->dev);
576 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
578 netif_carrier_off(bp->dev);
582 if (bmsr & BMSR_RFAULT)
583 printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
586 printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
591 static void b44_timer(unsigned long __opaque)
593 struct b44 *bp = (struct b44 *) __opaque;
595 spin_lock_irq(&bp->lock);
599 b44_stats_update(bp);
601 spin_unlock_irq(&bp->lock);
603 mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
606 static void b44_tx(struct b44 *bp)
610 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
611 cur /= sizeof(struct dma_desc);
613 /* XXX needs updating when NETIF_F_SG is supported */
614 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
615 struct ring_info *rp = &bp->tx_buffers[cons];
616 struct sk_buff *skb = rp->skb;
620 pci_unmap_single(bp->pdev,
621 pci_unmap_addr(rp, mapping),
625 dev_kfree_skb_irq(skb);
629 if (netif_queue_stopped(bp->dev) &&
630 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
631 netif_wake_queue(bp->dev);
633 bw32(bp, B44_GPTIMER, 0);
636 /* Works like this. This chip writes a 'struct rx_header' 30 bytes
637 * before the DMA address you give it. So we allocate 30 more bytes
638 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
639 * point the chip at 30 bytes past where the rx_header will go.
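/* Rough layout of one receive buffer after b44_alloc_rx_skb() (illustrative):
 *
 *   mapping                          mapping + RX_PKT_OFFSET
 *   |<- struct rx_header + padding ->|<-- received frame data ... -->|
 *
 * dp->addr points at mapping + RX_PKT_OFFSET (plus bp->dma_offset for the
 * PCI window), the chip writes the rx_header just before that address, and
 * skb_reserve(skb, RX_PKT_OFFSET) makes skb->data line up with the frame.
 */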
641 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
644 struct ring_info *src_map, *map;
645 struct rx_header *rh;
653 src_map = &bp->rx_buffers[src_idx];
654 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
655 map = &bp->rx_buffers[dest_idx];
656 skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
660 mapping = pci_map_single(bp->pdev, skb->data,
664 /* Hardware bug work-around, the chip is unable to do PCI DMA
665 to/from anything above 1GB :-( */
666 if (dma_mapping_error(mapping) ||
667 mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
669 if (!dma_mapping_error(mapping))
670 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
671 dev_kfree_skb_any(skb);
672 skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
675 mapping = pci_map_single(bp->pdev, skb->data,
678 if (dma_mapping_error(mapping) ||
679 mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
680 if (!dma_mapping_error(mapping))
681 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
682 dev_kfree_skb_any(skb);
687 rh = (struct rx_header *) skb->data;
688 skb_reserve(skb, RX_PKT_OFFSET);
694 pci_unmap_addr_set(map, mapping, mapping);
699 ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET));
700 if (dest_idx == (B44_RX_RING_SIZE - 1))
701 ctrl |= DESC_CTRL_EOT;
703 dp = &bp->rx_ring[dest_idx];
704 dp->ctrl = cpu_to_le32(ctrl);
705 dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset);
707 if (bp->flags & B44_FLAG_RX_RING_HACK)
708 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
709 dest_idx * sizeof(*dp),
712 return RX_PKT_BUF_SZ;
715 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
717 struct dma_desc *src_desc, *dest_desc;
718 struct ring_info *src_map, *dest_map;
719 struct rx_header *rh;
723 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
724 dest_desc = &bp->rx_ring[dest_idx];
725 dest_map = &bp->rx_buffers[dest_idx];
726 src_desc = &bp->rx_ring[src_idx];
727 src_map = &bp->rx_buffers[src_idx];
729 dest_map->skb = src_map->skb;
730 rh = (struct rx_header *) src_map->skb->data;
733 pci_unmap_addr_set(dest_map, mapping,
734 pci_unmap_addr(src_map, mapping));
736 if (bp->flags & B44_FLAG_RX_RING_HACK)
737 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
738 src_idx * sizeof(*src_desc),
741 ctrl = src_desc->ctrl;
742 if (dest_idx == (B44_RX_RING_SIZE - 1))
743 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
745 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
747 dest_desc->ctrl = ctrl;
748 dest_desc->addr = src_desc->addr;
752 if (bp->flags & B44_FLAG_RX_RING_HACK)
753 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
754 dest_idx * sizeof(*dest_desc),
757 pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr),
762 static int b44_rx(struct b44 *bp, int budget)
768 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
769 prod /= sizeof(struct dma_desc);
772 while (cons != prod && budget > 0) {
773 struct ring_info *rp = &bp->rx_buffers[cons];
774 struct sk_buff *skb = rp->skb;
775 dma_addr_t map = pci_unmap_addr(rp, mapping);
776 struct rx_header *rh;
779 pci_dma_sync_single_for_cpu(bp->pdev, map,
782 rh = (struct rx_header *) skb->data;
783 len = le16_to_cpu(rh->len);
784 if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
785 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
787 b44_recycle_rx(bp, cons, bp->rx_prod);
789 bp->stats.rx_dropped++;
799 len = le16_to_cpu(rh->len);
800 } while (len == 0 && i++ < 5);
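/* The retry loop above re-reads rh->len a few times, presumably because the
 * chip may not have finished writing the rx_header when the descriptor first
 * shows up as consumed; a length of zero is treated as "not ready yet".
 */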
808 if (len > RX_COPY_THRESHOLD) {
810 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
813 pci_unmap_single(bp->pdev, map,
814 skb_size, PCI_DMA_FROMDEVICE);
815 /* Leave out rx_header */
816 skb_put(skb, len + RX_PKT_OFFSET);
817 skb_pull(skb, RX_PKT_OFFSET);
819 struct sk_buff *copy_skb;
821 b44_recycle_rx(bp, cons, bp->rx_prod);
822 copy_skb = dev_alloc_skb(len + 2);
823 if (copy_skb == NULL)
824 goto drop_it_no_recycle;
826 skb_reserve(copy_skb, 2);
827 skb_put(copy_skb, len);
828 /* DMA sync done above, copy just the actual packet */
829 skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
830 copy_skb->data, len);
833 skb->ip_summed = CHECKSUM_NONE;
834 skb->protocol = eth_type_trans(skb, bp->dev);
835 netif_receive_skb(skb);
836 bp->dev->last_rx = jiffies;
840 bp->rx_prod = (bp->rx_prod + 1) &
841 (B44_RX_RING_SIZE - 1);
842 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
846 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
851 static int b44_poll(struct napi_struct *napi, int budget)
853 struct b44 *bp = container_of(napi, struct b44, napi);
854 struct net_device *netdev = bp->dev;
857 spin_lock_irq(&bp->lock);
859 if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
860 /* spin_lock(&bp->tx_lock); */
862 /* spin_unlock(&bp->tx_lock); */
864 spin_unlock_irq(&bp->lock);
867 if (bp->istat & ISTAT_RX)
868 work_done += b44_rx(bp, budget);
870 if (bp->istat & ISTAT_ERRORS) {
873 spin_lock_irqsave(&bp->lock, flags);
876 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
877 netif_wake_queue(bp->dev);
878 spin_unlock_irqrestore(&bp->lock, flags);
882 if (work_done < budget) {
883 netif_rx_complete(netdev, napi);
890 static irqreturn_t b44_interrupt(int irq, void *dev_id)
892 struct net_device *dev = dev_id;
893 struct b44 *bp = netdev_priv(dev);
897 spin_lock(&bp->lock);
899 istat = br32(bp, B44_ISTAT);
900 imask = br32(bp, B44_IMASK);
902 /* The interrupt mask register controls which interrupt bits
903 * will actually raise an interrupt to the CPU when set by hw/firmware,
904 * but it does not mask off the bits reported in the ISTAT status register.
910 if (unlikely(!netif_running(dev))) {
911 printk(KERN_INFO "%s: late interrupt.\n", dev->name);
915 if (netif_rx_schedule_prep(dev, &bp->napi)) {
916 /* NOTE: These writes are posted by the readback of
917 * the ISTAT register below.
920 __b44_disable_ints(bp);
921 __netif_rx_schedule(dev, &bp->napi);
923 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
928 bw32(bp, B44_ISTAT, istat);
931 spin_unlock(&bp->lock);
932 return IRQ_RETVAL(handled);
935 static void b44_tx_timeout(struct net_device *dev)
937 struct b44 *bp = netdev_priv(dev);
939 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
942 spin_lock_irq(&bp->lock);
946 b44_init_hw(bp, B44_FULL_RESET);
948 spin_unlock_irq(&bp->lock);
952 netif_wake_queue(dev);
955 static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
957 struct b44 *bp = netdev_priv(dev);
958 int rc = NETDEV_TX_OK;
960 u32 len, entry, ctrl;
963 spin_lock_irq(&bp->lock);
965 /* This is a hard error, log it. */
966 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
967 netif_stop_queue(dev);
968 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
973 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
974 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
975 struct sk_buff *bounce_skb;
977 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
978 if (!dma_mapping_error(mapping))
979 pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
981 bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
985 mapping = pci_map_single(bp->pdev, bounce_skb->data,
986 len, PCI_DMA_TODEVICE);
987 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
988 if (!dma_mapping_error(mapping))
989 pci_unmap_single(bp->pdev, mapping,
990 len, PCI_DMA_TODEVICE);
991 dev_kfree_skb_any(bounce_skb);
995 skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
996 dev_kfree_skb_any(skb);
1000 entry = bp->tx_prod;
1001 bp->tx_buffers[entry].skb = skb;
1002 pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);
1004 ctrl = (len & DESC_CTRL_LEN);
1005 ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
1006 if (entry == (B44_TX_RING_SIZE - 1))
1007 ctrl |= DESC_CTRL_EOT;
1009 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1010 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);
1012 if (bp->flags & B44_FLAG_TX_RING_HACK)
1013 b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
1014 entry * sizeof(bp->tx_ring[0]),
1017 entry = NEXT_TX(entry);
1019 bp->tx_prod = entry;
1023 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1024 if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1025 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1026 if (bp->flags & B44_FLAG_REORDER_BUG)
1027 br32(bp, B44_DMATX_PTR);
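/* Workarounds above: chips flagged B44_FLAG_BUGGY_TXPTR need the tx pointer
 * written twice, and chips flagged B44_FLAG_REORDER_BUG need a readback,
 * presumably to flush the posted MMIO write before anything reorders
 * around it.
 */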
1029 if (TX_BUFFS_AVAIL(bp) < 1)
1030 netif_stop_queue(dev);
1032 dev->trans_start = jiffies;
1035 spin_unlock_irq(&bp->lock);
1040 rc = NETDEV_TX_BUSY;
1044 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1046 struct b44 *bp = netdev_priv(dev);
1048 if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1051 if (!netif_running(dev)) {
1052 /* We'll just catch it later when the
1059 spin_lock_irq(&bp->lock);
1063 b44_init_hw(bp, B44_FULL_RESET);
1064 spin_unlock_irq(&bp->lock);
1066 b44_enable_ints(bp);
1071 /* Free up pending packets in all rx/tx rings.
1073 * The chip has been shut down and the driver detached from
1074 * the networking, so no interrupts or new tx packets will
1075 * end up in the driver. bp->lock is not held and we are not
1076 * in an interrupt context and thus may sleep.
1078 static void b44_free_rings(struct b44 *bp)
1080 struct ring_info *rp;
1083 for (i = 0; i < B44_RX_RING_SIZE; i++) {
1084 rp = &bp->rx_buffers[i];
1086 if (rp->skb == NULL)
1088 pci_unmap_single(bp->pdev,
1089 pci_unmap_addr(rp, mapping),
1091 PCI_DMA_FROMDEVICE);
1092 dev_kfree_skb_any(rp->skb);
1096 /* XXX needs changes once NETIF_F_SG is set... */
1097 for (i = 0; i < B44_TX_RING_SIZE; i++) {
1098 rp = &bp->tx_buffers[i];
1100 if (rp->skb == NULL)
1102 pci_unmap_single(bp->pdev,
1103 pci_unmap_addr(rp, mapping),
1106 dev_kfree_skb_any(rp->skb);
1111 /* Initialize tx/rx rings for packet processing.
1113 * The chip has been shut down and the driver detached from
1114 * the networking, so no interrupts or new tx packets will
1115 * end up in the driver.
1117 static void b44_init_rings(struct b44 *bp)
1123 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1124 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1126 if (bp->flags & B44_FLAG_RX_RING_HACK)
1127 dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
1129 PCI_DMA_BIDIRECTIONAL);
1131 if (bp->flags & B44_FLAG_TX_RING_HACK)
1132 dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
1136 for (i = 0; i < bp->rx_pending; i++) {
1137 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1143 * Must not be invoked with interrupt sources disabled and
1144 * the hardware shut down.
1146 static void b44_free_consistent(struct b44 *bp)
1148 kfree(bp->rx_buffers);
1149 bp->rx_buffers = NULL;
1150 kfree(bp->tx_buffers);
1151 bp->tx_buffers = NULL;
1153 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1154 dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
1159 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1160 bp->rx_ring, bp->rx_ring_dma);
1162 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1165 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1166 dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
1171 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1172 bp->tx_ring, bp->tx_ring_dma);
1174 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1179 * Must not be invoked with interrupt sources disabled and
1180 * the hardware shut down. Can sleep.
1182 static int b44_alloc_consistent(struct b44 *bp)
1186 size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1187 bp->rx_buffers = kzalloc(size, GFP_KERNEL);
1188 if (!bp->rx_buffers)
1191 size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1192 bp->tx_buffers = kzalloc(size, GFP_KERNEL);
1193 if (!bp->tx_buffers)
1196 size = DMA_TABLE_BYTES;
1197 bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
1199 /* Allocation may have failed due to pci_alloc_consistent
1200 insisting on use of GFP_DMA, which is more restrictive
1201 than necessary... */
1202 struct dma_desc *rx_ring;
1203 dma_addr_t rx_ring_dma;
1205 rx_ring = kzalloc(size, GFP_KERNEL);
1209 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
1213 if (dma_mapping_error(rx_ring_dma) ||
1214 rx_ring_dma + size > DMA_30BIT_MASK) {
1219 bp->rx_ring = rx_ring;
1220 bp->rx_ring_dma = rx_ring_dma;
1221 bp->flags |= B44_FLAG_RX_RING_HACK;
1224 bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1226 /* Allocation may have failed due to pci_alloc_consistent
1227 insisting on use of GFP_DMA, which is more restrictive
1228 than necessary... */
1229 struct dma_desc *tx_ring;
1230 dma_addr_t tx_ring_dma;
1232 tx_ring = kzalloc(size, GFP_KERNEL);
1236 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
1240 if (dma_mapping_error(tx_ring_dma) ||
1241 tx_ring_dma + size > DMA_30BIT_MASK) {
1246 bp->tx_ring = tx_ring;
1247 bp->tx_ring_dma = tx_ring_dma;
1248 bp->flags |= B44_FLAG_TX_RING_HACK;
1254 b44_free_consistent(bp);
1258 /* bp->lock is held. */
1259 static void b44_clear_stats(struct b44 *bp)
1263 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1264 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1266 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1270 /* bp->lock is held. */
1271 static void b44_chip_reset(struct b44 *bp)
1273 if (ssb_is_core_up(bp)) {
1274 bw32(bp, B44_RCV_LAZY, 0);
1275 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1276 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
1277 bw32(bp, B44_DMATX_CTRL, 0);
1278 bp->tx_prod = bp->tx_cons = 0;
1279 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1280 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1283 bw32(bp, B44_DMARX_CTRL, 0);
1284 bp->rx_prod = bp->rx_cons = 0;
1286 ssb_pci_setup(bp, (bp->core_unit == 0 ?
1293 b44_clear_stats(bp);
1295 /* Make PHY accessible. */
1296 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1297 (0x0d & MDIO_CTRL_MAXF_MASK)));
1298 br32(bp, B44_MDIO_CTRL);
1300 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1301 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1302 br32(bp, B44_ENET_CTRL);
1303 bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1305 u32 val = br32(bp, B44_DEVCTRL);
1307 if (val & DEVCTRL_EPR) {
1308 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1309 br32(bp, B44_DEVCTRL);
1312 bp->flags |= B44_FLAG_INTERNAL_PHY;
1316 /* bp->lock is held. */
1317 static void b44_halt(struct b44 *bp)
1319 b44_disable_ints(bp);
1323 /* bp->lock is held. */
1324 static void __b44_set_mac_addr(struct b44 *bp)
1326 bw32(bp, B44_CAM_CTRL, 0);
1327 if (!(bp->dev->flags & IFF_PROMISC)) {
1330 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1331 val = br32(bp, B44_CAM_CTRL);
1332 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1336 static int b44_set_mac_addr(struct net_device *dev, void *p)
1338 struct b44 *bp = netdev_priv(dev);
1339 struct sockaddr *addr = p;
1341 if (netif_running(dev))
1344 if (!is_valid_ether_addr(addr->sa_data))
1347 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1349 spin_lock_irq(&bp->lock);
1350 __b44_set_mac_addr(bp);
1351 spin_unlock_irq(&bp->lock);
1356 /* Called at device open time to get the chip ready for
1357 * packet processing. Invoked with bp->lock held.
1359 static void __b44_set_rx_mode(struct net_device *);
1360 static void b44_init_hw(struct b44 *bp, int reset_kind)
1365 if (reset_kind == B44_FULL_RESET) {
1370 /* Enable CRC32, set proper LED modes and power on PHY */
1371 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1372 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1374 /* This sets the MAC address too. */
1375 __b44_set_rx_mode(bp->dev);
1377 /* MTU + eth header + possible VLAN tag + struct rx_header */
1378 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1379 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1381 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1382 if (reset_kind == B44_PARTIAL_RESET) {
1383 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1384 (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1386 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1387 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1388 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1389 (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1390 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1392 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1393 bp->rx_prod = bp->rx_pending;
1395 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1398 val = br32(bp, B44_ENET_CTRL);
1399 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1402 static int b44_open(struct net_device *dev)
1404 struct b44 *bp = netdev_priv(dev);
1407 err = b44_alloc_consistent(bp);
1411 napi_enable(&bp->napi);
1414 b44_init_hw(bp, B44_FULL_RESET);
1418 err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1419 if (unlikely(err < 0)) {
1420 napi_disable(&bp->napi);
1423 b44_free_consistent(bp);
1427 init_timer(&bp->timer);
1428 bp->timer.expires = jiffies + HZ;
1429 bp->timer.data = (unsigned long) bp;
1430 bp->timer.function = b44_timer;
1431 add_timer(&bp->timer);
1433 b44_enable_ints(bp);
1434 netif_start_queue(dev);
1440 /*static*/ void b44_dump_state(struct b44 *bp)
1442 u32 val32, val32_2, val32_3, val32_4, val32_5;
1445 pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
1446 printk("DEBUG: PCI status [%04x] \n", val16);
1451 #ifdef CONFIG_NET_POLL_CONTROLLER
1453 * Polling receive - used by netconsole and other diagnostic tools
1454 * to allow network i/o with interrupts disabled.
1456 static void b44_poll_controller(struct net_device *dev)
1458 disable_irq(dev->irq);
1459 b44_interrupt(dev->irq, dev);
1460 enable_irq(dev->irq);
1464 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1467 u32 *pattern = (u32 *) pp;
1469 for (i = 0; i < bytes; i += sizeof(u32)) {
1470 bw32(bp, B44_FILT_ADDR, table_offset + i);
1471 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
1475 static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1478 int k, j, len = offset;
1479 int ethaddr_bytes = ETH_ALEN;
1481 memset(ppattern + offset, 0xff, magicsync);
1482 for (j = 0; j < magicsync; j++)
1483 set_bit(len++, (unsigned long *) pmask);
1485 for (j = 0; j < B44_MAX_PATTERNS; j++) {
1486 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1487 ethaddr_bytes = ETH_ALEN;
1489 ethaddr_bytes = B44_PATTERN_SIZE - len;
1490 if (ethaddr_bytes <= 0)
1492 for (k = 0; k < ethaddr_bytes; k++) {
1493 ppattern[offset + magicsync +
1494 (j * ETH_ALEN) + k] = macaddr[k];
1496 set_bit(len, (unsigned long *) pmask);
1502 /* Setup magic packet patterns in the b44 WOL
1503 * pattern matching filter.
1505 static void b44_setup_pseudo_magicp(struct b44 *bp)
1509 int plen0, plen1, plen2;
1511 u8 pwol_mask[B44_PMASK_SIZE];
1513 pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1514 if (!pwol_pattern) {
1515 printk(KERN_ERR PFX "Memory not available for WOL\n");
1519 /* Ipv4 magic packet pattern - pattern 0.*/
1520 memset(pwol_mask, 0, B44_PMASK_SIZE);
1521 plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1522 B44_ETHIPV4UDP_HLEN);
1524 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1525 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1527 /* Raw ethernet II magic packet pattern - pattern 1 */
1528 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1529 memset(pwol_mask, 0, B44_PMASK_SIZE);
1530 plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1533 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1534 B44_PATTERN_BASE + B44_PATTERN_SIZE);
1535 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1536 B44_PMASK_BASE + B44_PMASK_SIZE);
1538 /* Ipv6 magic packet pattern - pattern 2 */
1539 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1540 memset(pwol_mask, 0, B44_PMASK_SIZE);
1541 plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1542 B44_ETHIPV6UDP_HLEN);
1544 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1545 B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1546 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1547 B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1549 kfree(pwol_pattern);
1551 /* set these patterns' lengths: one less than each real length */
1552 val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1553 bw32(bp, B44_WKUP_LEN, val);
1555 /* enable wakeup pattern matching */
1556 val = br32(bp, B44_DEVCTRL);
1557 bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
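/* Background for the code above: a wake-on-LAN "magic packet" payload is six
 * 0xff sync bytes followed by the station MAC address repeated sixteen times.
 * b44_magic_pattern() builds that byte pattern plus its bit mask starting at
 * the given payload offset, and b44_setup_pseudo_magicp() programs it three
 * times, once per encapsulation (raw Ethernet II, IPv4/UDP at offset 42,
 * IPv6/UDP at offset 62), into the chip's pattern-match filter.
 */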
1561 static void b44_setup_wol(struct b44 *bp)
1566 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1568 if (bp->flags & B44_FLAG_B0_ANDLATER) {
1570 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1572 val = bp->dev->dev_addr[2] << 24 |
1573 bp->dev->dev_addr[3] << 16 |
1574 bp->dev->dev_addr[4] << 8 |
1575 bp->dev->dev_addr[5];
1576 bw32(bp, B44_ADDR_LO, val);
1578 val = bp->dev->dev_addr[0] << 8 |
1579 bp->dev->dev_addr[1];
1580 bw32(bp, B44_ADDR_HI, val);
1582 val = br32(bp, B44_DEVCTRL);
1583 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1586 b44_setup_pseudo_magicp(bp);
1589 val = br32(bp, B44_SBTMSLOW);
1590 bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
1592 pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
1593 pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
1597 static int b44_close(struct net_device *dev)
1599 struct b44 *bp = netdev_priv(dev);
1601 netif_stop_queue(dev);
1603 napi_disable(&bp->napi);
1605 del_timer_sync(&bp->timer);
1607 spin_lock_irq(&bp->lock);
1614 netif_carrier_off(dev);
1616 spin_unlock_irq(&bp->lock);
1618 free_irq(dev->irq, dev);
1620 if (bp->flags & B44_FLAG_WOL_ENABLE) {
1621 b44_init_hw(bp, B44_PARTIAL_RESET);
1625 b44_free_consistent(bp);
1630 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1632 struct b44 *bp = netdev_priv(dev);
1633 struct net_device_stats *nstat = &bp->stats;
1634 struct b44_hw_stats *hwstat = &bp->hw_stats;
1636 /* Convert HW stats into netdevice stats. */
1637 nstat->rx_packets = hwstat->rx_pkts;
1638 nstat->tx_packets = hwstat->tx_pkts;
1639 nstat->rx_bytes = hwstat->rx_octets;
1640 nstat->tx_bytes = hwstat->tx_octets;
1641 nstat->tx_errors = (hwstat->tx_jabber_pkts +
1642 hwstat->tx_oversize_pkts +
1643 hwstat->tx_underruns +
1644 hwstat->tx_excessive_cols +
1645 hwstat->tx_late_cols);
1646 nstat->multicast = hwstat->rx_multicast_pkts;
1647 nstat->collisions = hwstat->tx_total_cols;
1649 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1650 hwstat->rx_undersize);
1651 nstat->rx_over_errors = hwstat->rx_missed_pkts;
1652 nstat->rx_frame_errors = hwstat->rx_align_errs;
1653 nstat->rx_crc_errors = hwstat->rx_crc_errs;
1654 nstat->rx_errors = (hwstat->rx_jabber_pkts +
1655 hwstat->rx_oversize_pkts +
1656 hwstat->rx_missed_pkts +
1657 hwstat->rx_crc_align_errs +
1658 hwstat->rx_undersize +
1659 hwstat->rx_crc_errs +
1660 hwstat->rx_align_errs +
1661 hwstat->rx_symbol_errs);
1663 nstat->tx_aborted_errors = hwstat->tx_underruns;
1665 /* Carrier lost counter seems to be broken for some devices */
1666 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1672 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1674 struct dev_mc_list *mclist;
1677 num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1678 mclist = dev->mc_list;
1679 for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1680 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1685 static void __b44_set_rx_mode(struct net_device *dev)
1687 struct b44 *bp = netdev_priv(dev);
1690 val = br32(bp, B44_RXCONFIG);
1691 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1692 if (dev->flags & IFF_PROMISC) {
1693 val |= RXCONFIG_PROMISC;
1694 bw32(bp, B44_RXCONFIG, val);
1696 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1699 __b44_set_mac_addr(bp);
1701 if ((dev->flags & IFF_ALLMULTI) ||
1702 (dev->mc_count > B44_MCAST_TABLE_SIZE))
1703 val |= RXCONFIG_ALLMULTI;
1705 i = __b44_load_mcast(bp, dev);
1708 __b44_cam_write(bp, zero, i);
1710 bw32(bp, B44_RXCONFIG, val);
1711 val = br32(bp, B44_CAM_CTRL);
1712 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1716 static void b44_set_rx_mode(struct net_device *dev)
1718 struct b44 *bp = netdev_priv(dev);
1720 spin_lock_irq(&bp->lock);
1721 __b44_set_rx_mode(dev);
1722 spin_unlock_irq(&bp->lock);
1725 static u32 b44_get_msglevel(struct net_device *dev)
1727 struct b44 *bp = netdev_priv(dev);
1728 return bp->msg_enable;
1731 static void b44_set_msglevel(struct net_device *dev, u32 value)
1733 struct b44 *bp = netdev_priv(dev);
1734 bp->msg_enable = value;
1737 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1739 struct b44 *bp = netdev_priv(dev);
1740 struct pci_dev *pci_dev = bp->pdev;
1742 strcpy (info->driver, DRV_MODULE_NAME);
1743 strcpy (info->version, DRV_MODULE_VERSION);
1744 strcpy (info->bus_info, pci_name(pci_dev));
1747 static int b44_nway_reset(struct net_device *dev)
1749 struct b44 *bp = netdev_priv(dev);
1753 spin_lock_irq(&bp->lock);
1754 b44_readphy(bp, MII_BMCR, &bmcr);
1755 b44_readphy(bp, MII_BMCR, &bmcr);
1757 if (bmcr & BMCR_ANENABLE) {
1758 b44_writephy(bp, MII_BMCR,
1759 bmcr | BMCR_ANRESTART);
1762 spin_unlock_irq(&bp->lock);
1767 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1769 struct b44 *bp = netdev_priv(dev);
1771 cmd->supported = (SUPPORTED_Autoneg);
1772 cmd->supported |= (SUPPORTED_100baseT_Half |
1773 SUPPORTED_100baseT_Full |
1774 SUPPORTED_10baseT_Half |
1775 SUPPORTED_10baseT_Full |
1778 cmd->advertising = 0;
1779 if (bp->flags & B44_FLAG_ADV_10HALF)
1780 cmd->advertising |= ADVERTISED_10baseT_Half;
1781 if (bp->flags & B44_FLAG_ADV_10FULL)
1782 cmd->advertising |= ADVERTISED_10baseT_Full;
1783 if (bp->flags & B44_FLAG_ADV_100HALF)
1784 cmd->advertising |= ADVERTISED_100baseT_Half;
1785 if (bp->flags & B44_FLAG_ADV_100FULL)
1786 cmd->advertising |= ADVERTISED_100baseT_Full;
1787 cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1788 cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1789 SPEED_100 : SPEED_10;
1790 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1791 DUPLEX_FULL : DUPLEX_HALF;
1793 cmd->phy_address = bp->phy_addr;
1794 cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1795 XCVR_INTERNAL : XCVR_EXTERNAL;
1796 cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1797 AUTONEG_DISABLE : AUTONEG_ENABLE;
1798 if (cmd->autoneg == AUTONEG_ENABLE)
1799 cmd->advertising |= ADVERTISED_Autoneg;
1800 if (!netif_running(dev)) {
1809 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1811 struct b44 *bp = netdev_priv(dev);
1813 /* We do not support gigabit. */
1814 if (cmd->autoneg == AUTONEG_ENABLE) {
1815 if (cmd->advertising &
1816 (ADVERTISED_1000baseT_Half |
1817 ADVERTISED_1000baseT_Full))
1819 } else if ((cmd->speed != SPEED_100 &&
1820 cmd->speed != SPEED_10) ||
1821 (cmd->duplex != DUPLEX_HALF &&
1822 cmd->duplex != DUPLEX_FULL)) {
1826 spin_lock_irq(&bp->lock);
1828 if (cmd->autoneg == AUTONEG_ENABLE) {
1829 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1830 B44_FLAG_100_BASE_T |
1831 B44_FLAG_FULL_DUPLEX |
1832 B44_FLAG_ADV_10HALF |
1833 B44_FLAG_ADV_10FULL |
1834 B44_FLAG_ADV_100HALF |
1835 B44_FLAG_ADV_100FULL);
1836 if (cmd->advertising == 0) {
1837 bp->flags |= (B44_FLAG_ADV_10HALF |
1838 B44_FLAG_ADV_10FULL |
1839 B44_FLAG_ADV_100HALF |
1840 B44_FLAG_ADV_100FULL);
1842 if (cmd->advertising & ADVERTISED_10baseT_Half)
1843 bp->flags |= B44_FLAG_ADV_10HALF;
1844 if (cmd->advertising & ADVERTISED_10baseT_Full)
1845 bp->flags |= B44_FLAG_ADV_10FULL;
1846 if (cmd->advertising & ADVERTISED_100baseT_Half)
1847 bp->flags |= B44_FLAG_ADV_100HALF;
1848 if (cmd->advertising & ADVERTISED_100baseT_Full)
1849 bp->flags |= B44_FLAG_ADV_100FULL;
1852 bp->flags |= B44_FLAG_FORCE_LINK;
1853 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1854 if (cmd->speed == SPEED_100)
1855 bp->flags |= B44_FLAG_100_BASE_T;
1856 if (cmd->duplex == DUPLEX_FULL)
1857 bp->flags |= B44_FLAG_FULL_DUPLEX;
1860 if (netif_running(dev))
1863 spin_unlock_irq(&bp->lock);
1868 static void b44_get_ringparam(struct net_device *dev,
1869 struct ethtool_ringparam *ering)
1871 struct b44 *bp = netdev_priv(dev);
1873 ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1874 ering->rx_pending = bp->rx_pending;
1876 /* XXX ethtool lacks a tx_max_pending, oops... */
1879 static int b44_set_ringparam(struct net_device *dev,
1880 struct ethtool_ringparam *ering)
1882 struct b44 *bp = netdev_priv(dev);
1884 if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1885 (ering->rx_mini_pending != 0) ||
1886 (ering->rx_jumbo_pending != 0) ||
1887 (ering->tx_pending > B44_TX_RING_SIZE - 1))
1890 spin_lock_irq(&bp->lock);
1892 bp->rx_pending = ering->rx_pending;
1893 bp->tx_pending = ering->tx_pending;
1897 b44_init_hw(bp, B44_FULL_RESET);
1898 netif_wake_queue(bp->dev);
1899 spin_unlock_irq(&bp->lock);
1901 b44_enable_ints(bp);
1906 static void b44_get_pauseparam(struct net_device *dev,
1907 struct ethtool_pauseparam *epause)
1909 struct b44 *bp = netdev_priv(dev);
1912 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1914 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1916 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1919 static int b44_set_pauseparam(struct net_device *dev,
1920 struct ethtool_pauseparam *epause)
1922 struct b44 *bp = netdev_priv(dev);
1924 spin_lock_irq(&bp->lock);
1925 if (epause->autoneg)
1926 bp->flags |= B44_FLAG_PAUSE_AUTO;
1928 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1929 if (epause->rx_pause)
1930 bp->flags |= B44_FLAG_RX_PAUSE;
1932 bp->flags &= ~B44_FLAG_RX_PAUSE;
1933 if (epause->tx_pause)
1934 bp->flags |= B44_FLAG_TX_PAUSE;
1936 bp->flags &= ~B44_FLAG_TX_PAUSE;
1937 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1940 b44_init_hw(bp, B44_FULL_RESET);
1942 __b44_set_flow_ctrl(bp, bp->flags);
1944 spin_unlock_irq(&bp->lock);
1946 b44_enable_ints(bp);
1951 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1955 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1960 static int b44_get_stats_count(struct net_device *dev)
1962 return ARRAY_SIZE(b44_gstrings);
1965 static void b44_get_ethtool_stats(struct net_device *dev,
1966 struct ethtool_stats *stats, u64 *data)
1968 struct b44 *bp = netdev_priv(dev);
1969 u32 *val = &bp->hw_stats.tx_good_octets;
1972 spin_lock_irq(&bp->lock);
1974 b44_stats_update(bp);
1976 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1979 spin_unlock_irq(&bp->lock);
1982 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1984 struct b44 *bp = netdev_priv(dev);
1986 wol->supported = WAKE_MAGIC;
1987 if (bp->flags & B44_FLAG_WOL_ENABLE)
1988 wol->wolopts = WAKE_MAGIC;
1991 memset(&wol->sopass, 0, sizeof(wol->sopass));
1994 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1996 struct b44 *bp = netdev_priv(dev);
1998 spin_lock_irq(&bp->lock);
1999 if (wol->wolopts & WAKE_MAGIC)
2000 bp->flags |= B44_FLAG_WOL_ENABLE;
2002 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2003 spin_unlock_irq(&bp->lock);
2008 static const struct ethtool_ops b44_ethtool_ops = {
2009 .get_drvinfo = b44_get_drvinfo,
2010 .get_settings = b44_get_settings,
2011 .set_settings = b44_set_settings,
2012 .nway_reset = b44_nway_reset,
2013 .get_link = ethtool_op_get_link,
2014 .get_wol = b44_get_wol,
2015 .set_wol = b44_set_wol,
2016 .get_ringparam = b44_get_ringparam,
2017 .set_ringparam = b44_set_ringparam,
2018 .get_pauseparam = b44_get_pauseparam,
2019 .set_pauseparam = b44_set_pauseparam,
2020 .get_msglevel = b44_get_msglevel,
2021 .set_msglevel = b44_set_msglevel,
2022 .get_strings = b44_get_strings,
2023 .get_stats_count = b44_get_stats_count,
2024 .get_ethtool_stats = b44_get_ethtool_stats,
2027 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2029 struct mii_ioctl_data *data = if_mii(ifr);
2030 struct b44 *bp = netdev_priv(dev);
2033 if (!netif_running(dev))
2036 spin_lock_irq(&bp->lock);
2037 err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2038 spin_unlock_irq(&bp->lock);
2043 /* Read 128-bytes of EEPROM. */
2044 static int b44_read_eeprom(struct b44 *bp, u8 *data)
2047 __le16 *ptr = (__le16 *) data;
2049 for (i = 0; i < 128; i += 2)
2050 ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
2055 static int __devinit b44_get_invariants(struct b44 *bp)
2060 err = b44_read_eeprom(bp, &eeprom[0]);
2064 bp->dev->dev_addr[0] = eeprom[79];
2065 bp->dev->dev_addr[1] = eeprom[78];
2066 bp->dev->dev_addr[2] = eeprom[81];
2067 bp->dev->dev_addr[3] = eeprom[80];
2068 bp->dev->dev_addr[4] = eeprom[83];
2069 bp->dev->dev_addr[5] = eeprom[82];
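/* The swapped indices above (79/78, 81/80, 83/82) are apparently a
 * consequence of b44_read_eeprom() storing each 16-bit EEPROM word
 * little-endian, which leaves the two bytes of each word reversed relative
 * to the on-wire MAC address order.
 */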
2071 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2072 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
2076 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2078 bp->phy_addr = eeprom[90] & 0x1f;
2080 bp->imask = IMASK_DEF;
2082 bp->core_unit = ssb_core_unit(bp);
2083 bp->dma_offset = SB_PCI_DMA;
2085 /* XXX - really required?
2086 bp->flags |= B44_FLAG_BUGGY_TXPTR;
2089 if (ssb_get_core_rev(bp) >= 7)
2090 bp->flags |= B44_FLAG_B0_ANDLATER;
2096 static int __devinit b44_init_one(struct pci_dev *pdev,
2097 const struct pci_device_id *ent)
2099 static int b44_version_printed = 0;
2100 unsigned long b44reg_base, b44reg_len;
2101 struct net_device *dev;
2105 if (b44_version_printed++ == 0)
2106 printk(KERN_INFO "%s", version);
2108 err = pci_enable_device(pdev);
2110 dev_err(&pdev->dev, "Cannot enable PCI device, "
2115 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2117 "Cannot find proper PCI device "
2118 "base address, aborting.\n");
2120 goto err_out_disable_pdev;
2123 err = pci_request_regions(pdev, DRV_MODULE_NAME);
2126 "Cannot obtain PCI resources, aborting.\n");
2127 goto err_out_disable_pdev;
2130 pci_set_master(pdev);
2132 err = pci_set_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2134 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2135 goto err_out_free_res;
2138 err = pci_set_consistent_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2140 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2141 goto err_out_free_res;
2144 b44reg_base = pci_resource_start(pdev, 0);
2145 b44reg_len = pci_resource_len(pdev, 0);
2147 dev = alloc_etherdev(sizeof(*bp));
2149 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
2151 goto err_out_free_res;
2154 SET_NETDEV_DEV(dev, &pdev->dev);
2156 /* No interesting netdevice features in this card... */
2159 bp = netdev_priv(dev);
2163 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2165 spin_lock_init(&bp->lock);
2167 bp->regs = ioremap(b44reg_base, b44reg_len);
2168 if (!bp->regs) {
2169 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
2171 goto err_out_free_dev;
2174 bp->rx_pending = B44_DEF_RX_RING_PENDING;
2175 bp->tx_pending = B44_DEF_TX_RING_PENDING;
2177 dev->open = b44_open;
2178 dev->stop = b44_close;
2179 dev->hard_start_xmit = b44_start_xmit;
2180 dev->get_stats = b44_get_stats;
2181 dev->set_multicast_list = b44_set_rx_mode;
2182 dev->set_mac_address = b44_set_mac_addr;
2183 dev->do_ioctl = b44_ioctl;
2184 dev->tx_timeout = b44_tx_timeout;
2185 netif_napi_add(dev, &bp->napi, b44_poll, 64);
2186 dev->watchdog_timeo = B44_TX_TIMEOUT;
2187 #ifdef CONFIG_NET_POLL_CONTROLLER
2188 dev->poll_controller = b44_poll_controller;
2190 dev->change_mtu = b44_change_mtu;
2191 dev->irq = pdev->irq;
2192 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2194 netif_carrier_off(dev);
2196 err = b44_get_invariants(bp);
2199 "Problem fetching invariants of chip, aborting.\n");
2200 goto err_out_iounmap;
2203 bp->mii_if.dev = dev;
2204 bp->mii_if.mdio_read = b44_mii_read;
2205 bp->mii_if.mdio_write = b44_mii_write;
2206 bp->mii_if.phy_id = bp->phy_addr;
2207 bp->mii_if.phy_id_mask = 0x1f;
2208 bp->mii_if.reg_num_mask = 0x1f;
2210 /* By default, advertise all speed/duplex settings. */
2211 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2212 B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2214 /* By default, auto-negotiate PAUSE. */
2215 bp->flags |= B44_FLAG_PAUSE_AUTO;
2217 err = register_netdev(dev);
2219 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2220 goto err_out_iounmap;
2223 pci_set_drvdata(pdev, dev);
2225 pci_save_state(bp->pdev);
2227 /* Chip reset provides power to the b44 MAC & PCI cores, which
2228 * is necessary for MAC register access.
2232 printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2233 for (i = 0; i < 6; i++)
2234 printk("%2.2x%c", dev->dev_addr[i],
2235 i == 5 ? '\n' : ':');
2246 pci_release_regions(pdev);
2248 err_out_disable_pdev:
2249 pci_disable_device(pdev);
2250 pci_set_drvdata(pdev, NULL);
2254 static void __devexit b44_remove_one(struct pci_dev *pdev)
2256 struct net_device *dev = pci_get_drvdata(pdev);
2257 struct b44 *bp = netdev_priv(dev);
2259 unregister_netdev(dev);
2262 pci_release_regions(pdev);
2263 pci_disable_device(pdev);
2264 pci_set_drvdata(pdev, NULL);
2267 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2269 struct net_device *dev = pci_get_drvdata(pdev);
2270 struct b44 *bp = netdev_priv(dev);
2272 if (!netif_running(dev))
2275 del_timer_sync(&bp->timer);
2277 spin_lock_irq(&bp->lock);
2280 netif_carrier_off(bp->dev);
2281 netif_device_detach(bp->dev);
2284 spin_unlock_irq(&bp->lock);
2286 free_irq(dev->irq, dev);
2287 if (bp->flags & B44_FLAG_WOL_ENABLE) {
2288 b44_init_hw(bp, B44_PARTIAL_RESET);
2291 pci_disable_device(pdev);
2295 static int b44_resume(struct pci_dev *pdev)
2297 struct net_device *dev = pci_get_drvdata(pdev);
2298 struct b44 *bp = netdev_priv(dev);
2301 pci_restore_state(pdev);
2302 rc = pci_enable_device(pdev);
2304 printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
2309 pci_set_master(pdev);
2311 if (!netif_running(dev))
2314 rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2316 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2317 pci_disable_device(pdev);
2321 spin_lock_irq(&bp->lock);
2324 b44_init_hw(bp, B44_FULL_RESET);
2325 netif_device_attach(bp->dev);
2326 spin_unlock_irq(&bp->lock);
2328 b44_enable_ints(bp);
2329 netif_wake_queue(dev);
2331 mod_timer(&bp->timer, jiffies + 1);
2336 static struct pci_driver b44_driver = {
2337 .name = DRV_MODULE_NAME,
2338 .id_table = b44_pci_tbl,
2339 .probe = b44_init_one,
2340 .remove = __devexit_p(b44_remove_one),
2341 .suspend = b44_suspend,
2342 .resume = b44_resume,
2345 static int __init b44_init(void)
2347 unsigned int dma_desc_align_size = dma_get_cache_alignment();
2349 /* Set up parameters for syncing RX/TX DMA descriptors */
2350 dma_desc_align_mask = ~(dma_desc_align_size - 1);
2351 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
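/* Example (assuming a 32-byte cache line and an 8-byte struct dma_desc):
 * dma_desc_align_mask becomes ~31 and dma_desc_sync_size becomes 32, so each
 * b44_sync_dma_desc_* call rounds the descriptor offset down to a cache-line
 * boundary and syncs one full line.
 */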
2353 return pci_register_driver(&b44_driver);
2356 static void __exit b44_cleanup(void)
2358 pci_unregister_driver(&b44_driver);
2361 module_init(b44_init);
2362 module_exit(b44_cleanup);