1 /* b44.c: Broadcom 4400 device driver.
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
5 * Copyright (C) 2006 Broadcom Corporation.
7 * Distribute under GPL.
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/types.h>
14 #include <linux/netdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/etherdevice.h>
19 #include <linux/pci.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/dma-mapping.h>
24 #include <asm/uaccess.h>
30 #define DRV_MODULE_NAME "b44"
31 #define PFX DRV_MODULE_NAME ": "
32 #define DRV_MODULE_VERSION "1.00"
33 #define DRV_MODULE_RELDATE "Apr 7, 2006"
35 #define B44_DEF_MSG_ENABLE \
45 /* length of time before we decide the hardware is borked,
46 * and dev->tx_timeout() should be called to fix the problem
48 #define B44_TX_TIMEOUT (5 * HZ)
50 /* hardware minimum and maximum for a single frame's data payload */
51 #define B44_MIN_MTU 60
52 #define B44_MAX_MTU 1500
54 #define B44_RX_RING_SIZE 512
55 #define B44_DEF_RX_RING_PENDING 200
56 #define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
58 #define B44_TX_RING_SIZE 512
59 #define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
60 #define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
62 #define B44_DMA_MASK 0x3fffffff
64 #define TX_RING_GAP(BP) \
65 (B44_TX_RING_SIZE - (BP)->tx_pending)
66 #define TX_BUFFS_AVAIL(BP) \
67 (((BP)->tx_cons <= (BP)->tx_prod) ? \
68 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
69 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
70 #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
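/* Worked example of the ring accounting above (values from the defines in
 * this file): with the default tx_pending of B44_DEF_TX_RING_PENDING (511),
 * TX_RING_GAP() is 1 descriptor.  If tx_cons == 5 and tx_prod == 10,
 * TX_BUFFS_AVAIL() is 5 + 511 - 10 = 506 free slots, and NEXT_TX(511)
 * wraps back to 0 because the ring size is a power of two.
 */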
72 #define RX_PKT_BUF_SZ (1536 + bp->rx_offset + 64)
73 #define TX_PKT_BUF_SZ (B44_MAX_MTU + ETH_HLEN + 8)
75 /* minimum number of free TX descriptors required to wake up TX process */
76 #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
78 static char version[] __devinitdata =
79 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
81 MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
82 MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
83 MODULE_LICENSE("GPL");
84 MODULE_VERSION(DRV_MODULE_VERSION);
86 static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
87 module_param(b44_debug, int, 0);
88 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
90 static struct pci_device_id b44_pci_tbl[] = {
91 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
92 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
93 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
94 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
95 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
96 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
97 { } /* terminate list with empty entry */
100 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
102 static void b44_halt(struct b44 *);
103 static void b44_init_rings(struct b44 *);
104 static void b44_init_hw(struct b44 *);
106 static int dma_desc_align_mask;
107 static int dma_desc_sync_size;
109 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
110 #define _B44(x...) # x,
115 static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
117 unsigned long offset,
118 enum dma_data_direction dir)
120 dma_sync_single_range_for_device(&pdev->dev, dma_base,
121 offset & dma_desc_align_mask,
122 dma_desc_sync_size, dir);
125 static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
127 unsigned long offset,
128 enum dma_data_direction dir)
130 dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
131 offset & dma_desc_align_mask,
132 dma_desc_sync_size, dir);
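/* The two sync helpers above are only used when a descriptor ring had to be
 * streaming-mapped with dma_map_single() (the *_RING_HACK case set up in
 * b44_alloc_consistent() below).  The offset is rounded down to the
 * cache-line boundary computed in b44_init(), and dma_desc_sync_size is
 * sized to cover at least one struct dma_desc, so a single descriptor can
 * be handed back and forth between CPU and device.
 */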
135 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
137 return readl(bp->regs + reg);
140 static inline void bw32(const struct b44 *bp,
141 unsigned long reg, unsigned long val)
143 writel(val, bp->regs + reg);
146 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
147 u32 bit, unsigned long timeout, const int clear)
151 for (i = 0; i < timeout; i++) {
152 u32 val = br32(bp, reg);
154 if (clear && !(val & bit))
156 if (!clear && (val & bit))
161 printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
165 (clear ? "clear" : "set"));
171 /* Sonics SiliconBackplane support routines. ROFL, you should see all the
172 * buzz words used on this company's website :-)
174 * All of these routines must be invoked with bp->lock held and
175 * interrupts disabled.
178 #define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */
179 #define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */
181 static u32 ssb_get_core_rev(struct b44 *bp)
183 return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
186 static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
188 u32 bar_orig, pci_rev, val;
190 pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
191 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
192 pci_rev = ssb_get_core_rev(bp);
194 val = br32(bp, B44_SBINTVEC);
196 bw32(bp, B44_SBINTVEC, val);
198 val = br32(bp, SSB_PCI_TRANS_2);
199 val |= SSB_PCI_PREF | SSB_PCI_BURST;
200 bw32(bp, SSB_PCI_TRANS_2, val);
202 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
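/* Note: the sequence above temporarily points the BAR0 window at the PCI
 * core (BCM4400_PCI_CORE_ADDR), updates B44_SBINTVEC for the cores passed
 * in, enables prefetch and burst on PCI transactions, and then restores the
 * original window so that register accesses reach the ethernet core again.
 */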
207 static void ssb_core_disable(struct b44 *bp)
209 if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
212 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
213 b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
214 b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
215 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
216 SBTMSLOW_REJECT | SBTMSLOW_RESET));
217 br32(bp, B44_SBTMSLOW);
219 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
220 br32(bp, B44_SBTMSLOW);
224 static void ssb_core_reset(struct b44 *bp)
228 ssb_core_disable(bp);
229 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
230 br32(bp, B44_SBTMSLOW);
233 /* Clear SERR if set, this is a hw bug workaround. */
234 if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
235 bw32(bp, B44_SBTMSHIGH, 0);
237 val = br32(bp, B44_SBIMSTATE);
238 if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
239 bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
241 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
242 br32(bp, B44_SBTMSLOW);
245 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
246 br32(bp, B44_SBTMSLOW);
250 static int ssb_core_unit(struct b44 *bp)
253 u32 val = br32(bp, B44_SBADMATCH0);
256 type = val & SBADMATCH0_TYPE_MASK;
259 base = val & SBADMATCH0_BS0_MASK;
263 base = val & SBADMATCH0_BS1_MASK;
268 base = val & SBADMATCH0_BS2_MASK;
275 static int ssb_is_core_up(struct b44 *bp)
277 return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
281 static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
285 val = ((u32) data[2]) << 24;
286 val |= ((u32) data[3]) << 16;
287 val |= ((u32) data[4]) << 8;
288 val |= ((u32) data[5]) << 0;
289 bw32(bp, B44_CAM_DATA_LO, val);
290 val = (CAM_DATA_HI_VALID |
291 (((u32) data[0]) << 8) |
292 (((u32) data[1]) << 0));
293 bw32(bp, B44_CAM_DATA_HI, val);
294 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
295 (index << CAM_CTRL_INDEX_SHIFT)));
296 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
299 static inline void __b44_disable_ints(struct b44 *bp)
301 bw32(bp, B44_IMASK, 0);
304 static void b44_disable_ints(struct b44 *bp)
306 __b44_disable_ints(bp);
308 /* Flush posted writes. */
312 static void b44_enable_ints(struct b44 *bp)
314 bw32(bp, B44_IMASK, bp->imask);
317 static int b44_readphy(struct b44 *bp, int reg, u32 *val)
321 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
322 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
323 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
324 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
325 (reg << MDIO_DATA_RA_SHIFT) |
326 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
327 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
328 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
333 static int b44_writephy(struct b44 *bp, int reg, u32 val)
335 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
336 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
337 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
338 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
339 (reg << MDIO_DATA_RA_SHIFT) |
340 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
341 (val & MDIO_DATA_DATA)));
342 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
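/* Both PHY accessors above work the same way: compose an MDIO frame in
 * B44_MDIO_DATA (start bits, a read or write opcode, bp->phy_addr, the
 * register number and a turnaround field), then poll EMAC_INT_MII in
 * B44_EMAC_ISTAT until the EMAC reports that the transaction has completed.
 */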
345 /* miilib interface */
346 /* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
347 * due to code existing before miilib use was added to this driver.
348 * Someone should remove this artificial driver limitation in
349 * b44_{read,write}phy. bp->phy_addr itself is fine (and needed).
351 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
354 struct b44 *bp = netdev_priv(dev);
355 int rc = b44_readphy(bp, location, &val);
361 static void b44_mii_write(struct net_device *dev, int phy_id, int location,
364 struct b44 *bp = netdev_priv(dev);
365 b44_writephy(bp, location, val);
368 static int b44_phy_reset(struct b44 *bp)
373 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
377 err = b44_readphy(bp, MII_BMCR, &val);
379 if (val & BMCR_RESET) {
380 printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
389 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
393 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
394 bp->flags |= pause_flags;
396 val = br32(bp, B44_RXCONFIG);
397 if (pause_flags & B44_FLAG_RX_PAUSE)
398 val |= RXCONFIG_FLOW;
400 val &= ~RXCONFIG_FLOW;
401 bw32(bp, B44_RXCONFIG, val);
403 val = br32(bp, B44_MAC_FLOW);
404 if (pause_flags & B44_FLAG_TX_PAUSE)
405 val |= (MAC_FLOW_PAUSE_ENAB |
406 (0xc0 & MAC_FLOW_RX_HI_WATER));
408 val &= ~MAC_FLOW_PAUSE_ENAB;
409 bw32(bp, B44_MAC_FLOW, val);
412 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
416 /* The driver supports only rx pause by default because
417 the b44 mac tx pause mechanism generates excessive
419 Use ethtool to turn on b44 tx pause if necessary.
421 if ((local & ADVERTISE_PAUSE_CAP) &&
422 (local & ADVERTISE_PAUSE_ASYM)){
423 if ((remote & LPA_PAUSE_ASYM) &&
424 !(remote & LPA_PAUSE_CAP))
425 pause_enab |= B44_FLAG_RX_PAUSE;
428 __b44_set_flow_ctrl(bp, pause_enab);
431 static int b44_setup_phy(struct b44 *bp)
436 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
438 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
439 val & MII_ALEDCTRL_ALLMSK)) != 0)
441 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
443 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
444 val | MII_TLEDCTRL_ENABLE)) != 0)
447 if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
448 u32 adv = ADVERTISE_CSMA;
450 if (bp->flags & B44_FLAG_ADV_10HALF)
451 adv |= ADVERTISE_10HALF;
452 if (bp->flags & B44_FLAG_ADV_10FULL)
453 adv |= ADVERTISE_10FULL;
454 if (bp->flags & B44_FLAG_ADV_100HALF)
455 adv |= ADVERTISE_100HALF;
456 if (bp->flags & B44_FLAG_ADV_100FULL)
457 adv |= ADVERTISE_100FULL;
459 if (bp->flags & B44_FLAG_PAUSE_AUTO)
460 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
462 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
464 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
465 BMCR_ANRESTART))) != 0)
470 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
472 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
473 if (bp->flags & B44_FLAG_100_BASE_T)
474 bmcr |= BMCR_SPEED100;
475 if (bp->flags & B44_FLAG_FULL_DUPLEX)
476 bmcr |= BMCR_FULLDPLX;
477 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
480 /* Since we will not be negotiating there is no safe way
481 * to determine if the link partner supports flow control
482 * or not. So just disable it completely in this case.
484 b44_set_flow_ctrl(bp, 0, 0);
491 static void b44_stats_update(struct b44 *bp)
496 val = &bp->hw_stats.tx_good_octets;
497 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
498 *val++ += br32(bp, reg);
504 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
505 *val++ += br32(bp, reg);
509 static void b44_link_report(struct b44 *bp)
511 if (!netif_carrier_ok(bp->dev)) {
512 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
514 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
516 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
517 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
519 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
522 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
523 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
527 static void b44_check_phy(struct b44 *bp)
531 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
532 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
534 if (aux & MII_AUXCTRL_SPEED)
535 bp->flags |= B44_FLAG_100_BASE_T;
537 bp->flags &= ~B44_FLAG_100_BASE_T;
538 if (aux & MII_AUXCTRL_DUPLEX)
539 bp->flags |= B44_FLAG_FULL_DUPLEX;
541 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
543 if (!netif_carrier_ok(bp->dev) &&
544 (bmsr & BMSR_LSTATUS)) {
545 u32 val = br32(bp, B44_TX_CTRL);
546 u32 local_adv, remote_adv;
548 if (bp->flags & B44_FLAG_FULL_DUPLEX)
549 val |= TX_CTRL_DUPLEX;
551 val &= ~TX_CTRL_DUPLEX;
552 bw32(bp, B44_TX_CTRL, val);
554 if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
555 !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
556 !b44_readphy(bp, MII_LPA, &remote_adv))
557 b44_set_flow_ctrl(bp, local_adv, remote_adv);
560 netif_carrier_on(bp->dev);
562 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
564 netif_carrier_off(bp->dev);
568 if (bmsr & BMSR_RFAULT)
569 printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
572 printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
577 static void b44_timer(unsigned long __opaque)
579 struct b44 *bp = (struct b44 *) __opaque;
581 spin_lock_irq(&bp->lock);
585 b44_stats_update(bp);
587 spin_unlock_irq(&bp->lock);
589 bp->timer.expires = jiffies + HZ;
590 add_timer(&bp->timer);
593 static void b44_tx(struct b44 *bp)
597 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
598 cur /= sizeof(struct dma_desc);
600 /* XXX needs updating when NETIF_F_SG is supported */
601 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
602 struct ring_info *rp = &bp->tx_buffers[cons];
603 struct sk_buff *skb = rp->skb;
607 pci_unmap_single(bp->pdev,
608 pci_unmap_addr(rp, mapping),
612 dev_kfree_skb_irq(skb);
616 if (netif_queue_stopped(bp->dev) &&
617 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
618 netif_wake_queue(bp->dev);
620 bw32(bp, B44_GPTIMER, 0);
623 /* Works like this. This chip writes a "struct rx_header" 30 bytes
624 * before the DMA address you give it. So we allocate 30 more bytes
625 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
626 * point the chip at 30 bytes past where the rx_header will go.
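/* Resulting layout of one RX buffer (illustrative):
 *
 *   mapping + 0           : struct rx_header, written back by the chip
 *   mapping + rx_offset   : start of the received frame (skb->data after
 *                           the skb_reserve() below)
 *
 * The descriptor's addr field is set to mapping + rx_offset (plus
 * bp->dma_offset for the backplane window), and the chip deposits the
 * rx_header the fixed distance before that address.
 */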
628 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
631 struct ring_info *src_map, *map;
632 struct rx_header *rh;
640 src_map = &bp->rx_buffers[src_idx];
641 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
642 map = &bp->rx_buffers[dest_idx];
643 skb = dev_alloc_skb(RX_PKT_BUF_SZ);
647 mapping = pci_map_single(bp->pdev, skb->data,
651 /* Hardware bug work-around: the chip is unable to do PCI DMA
652 to/from anything above 1GB :-( */
653 if (dma_mapping_error(mapping) ||
654 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
656 if (!dma_mapping_error(mapping))
657 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
658 dev_kfree_skb_any(skb);
659 skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
662 mapping = pci_map_single(bp->pdev, skb->data,
665 if (dma_mapping_error(mapping) ||
666 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
667 if (!dma_mapping_error(mapping))
668 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
669 dev_kfree_skb_any(skb);
675 skb_reserve(skb, bp->rx_offset);
677 rh = (struct rx_header *)
678 (skb->data - bp->rx_offset);
683 pci_unmap_addr_set(map, mapping, mapping);
688 ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
689 if (dest_idx == (B44_RX_RING_SIZE - 1))
690 ctrl |= DESC_CTRL_EOT;
692 dp = &bp->rx_ring[dest_idx];
693 dp->ctrl = cpu_to_le32(ctrl);
694 dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
696 if (bp->flags & B44_FLAG_RX_RING_HACK)
697 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
698 dest_idx * sizeof(dp),
701 return RX_PKT_BUF_SZ;
704 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
706 struct dma_desc *src_desc, *dest_desc;
707 struct ring_info *src_map, *dest_map;
708 struct rx_header *rh;
712 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
713 dest_desc = &bp->rx_ring[dest_idx];
714 dest_map = &bp->rx_buffers[dest_idx];
715 src_desc = &bp->rx_ring[src_idx];
716 src_map = &bp->rx_buffers[src_idx];
718 dest_map->skb = src_map->skb;
719 rh = (struct rx_header *) src_map->skb->data;
722 pci_unmap_addr_set(dest_map, mapping,
723 pci_unmap_addr(src_map, mapping));
725 if (bp->flags & B44_FLAG_RX_RING_HACK)
726 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
727 src_idx * sizeof(src_desc),
730 ctrl = src_desc->ctrl;
731 if (dest_idx == (B44_RX_RING_SIZE - 1))
732 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
734 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
736 dest_desc->ctrl = ctrl;
737 dest_desc->addr = src_desc->addr;
741 if (bp->flags & B44_FLAG_RX_RING_HACK)
742 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
743 dest_idx * sizeof(dest_desc),
746 pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
751 static int b44_rx(struct b44 *bp, int budget)
757 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
758 prod /= sizeof(struct dma_desc);
761 while (cons != prod && budget > 0) {
762 struct ring_info *rp = &bp->rx_buffers[cons];
763 struct sk_buff *skb = rp->skb;
764 dma_addr_t map = pci_unmap_addr(rp, mapping);
765 struct rx_header *rh;
768 pci_dma_sync_single_for_cpu(bp->pdev, map,
771 rh = (struct rx_header *) skb->data;
772 len = le16_to_cpu(rh->len);
773 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
774 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
776 b44_recycle_rx(bp, cons, bp->rx_prod);
778 bp->stats.rx_dropped++;
788 len = le16_to_cpu(rh->len);
789 } while (len == 0 && i++ < 5);
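/* Frames longer than RX_COPY_THRESHOLD are passed straight up the stack and
 * a fresh buffer is allocated for this ring slot; shorter frames are copied
 * into a small new skb (reserving 2 bytes so the IP header ends up aligned)
 * and the original buffer is recycled in place by b44_recycle_rx().
 */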
797 if (len > RX_COPY_THRESHOLD) {
799 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
802 pci_unmap_single(bp->pdev, map,
803 skb_size, PCI_DMA_FROMDEVICE);
804 /* Leave out rx_header */
805 skb_put(skb, len+bp->rx_offset);
806 skb_pull(skb,bp->rx_offset);
808 struct sk_buff *copy_skb;
810 b44_recycle_rx(bp, cons, bp->rx_prod);
811 copy_skb = dev_alloc_skb(len + 2);
812 if (copy_skb == NULL)
813 goto drop_it_no_recycle;
815 copy_skb->dev = bp->dev;
816 skb_reserve(copy_skb, 2);
817 skb_put(copy_skb, len);
818 /* DMA sync done above, copy just the actual packet */
819 memcpy(copy_skb->data, skb->data+bp->rx_offset, len);
823 skb->ip_summed = CHECKSUM_NONE;
824 skb->protocol = eth_type_trans(skb, bp->dev);
825 netif_receive_skb(skb);
826 bp->dev->last_rx = jiffies;
830 bp->rx_prod = (bp->rx_prod + 1) &
831 (B44_RX_RING_SIZE - 1);
832 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
836 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
841 static int b44_poll(struct net_device *netdev, int *budget)
843 struct b44 *bp = netdev_priv(netdev);
846 spin_lock_irq(&bp->lock);
848 if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
849 /* spin_lock(&bp->tx_lock); */
851 /* spin_unlock(&bp->tx_lock); */
853 spin_unlock_irq(&bp->lock);
856 if (bp->istat & ISTAT_RX) {
857 int orig_budget = *budget;
860 if (orig_budget > netdev->quota)
861 orig_budget = netdev->quota;
863 work_done = b44_rx(bp, orig_budget);
865 *budget -= work_done;
866 netdev->quota -= work_done;
868 if (work_done >= orig_budget)
872 if (bp->istat & ISTAT_ERRORS) {
873 spin_lock_irq(&bp->lock);
877 netif_wake_queue(bp->dev);
878 spin_unlock_irq(&bp->lock);
883 netif_rx_complete(netdev);
887 return (done ? 0 : 1);
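/* Note on the polling contract above: under this NAPI interface the poll
 * routine consumes at most min(*budget, dev->quota) received packets per
 * call, decrements both counters by the work actually done, and returns 0
 * only once all pending work is complete and netif_rx_complete() has been
 * called; returning 1 asks the core to poll again.
 */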
890 static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
892 struct net_device *dev = dev_id;
893 struct b44 *bp = netdev_priv(dev);
897 spin_lock(&bp->lock);
899 istat = br32(bp, B44_ISTAT);
900 imask = br32(bp, B44_IMASK);
902 /* ??? What is the purpose of the interrupt mask
903 * ??? register if we have to mask it out by hand anyway?
909 if (unlikely(!netif_running(dev))) {
910 printk(KERN_INFO "%s: late interrupt.\n", dev->name);
914 if (netif_rx_schedule_prep(dev)) {
915 /* NOTE: These writes are posted by the readback of
916 * the ISTAT register below.
919 __b44_disable_ints(bp);
920 __netif_rx_schedule(dev);
922 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
927 bw32(bp, B44_ISTAT, istat);
930 spin_unlock(&bp->lock);
931 return IRQ_RETVAL(handled);
934 static void b44_tx_timeout(struct net_device *dev)
936 struct b44 *bp = netdev_priv(dev);
938 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
941 spin_lock_irq(&bp->lock);
947 spin_unlock_irq(&bp->lock);
951 netif_wake_queue(dev);
954 static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
956 struct b44 *bp = netdev_priv(dev);
957 struct sk_buff *bounce_skb;
958 int rc = NETDEV_TX_OK;
960 u32 len, entry, ctrl;
963 spin_lock_irq(&bp->lock);
965 /* This is a hard error, log it. */
966 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
967 netif_stop_queue(dev);
968 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
973 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
974 if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
975 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
976 if (!dma_mapping_error(mapping))
977 pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
979 bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
984 mapping = pci_map_single(bp->pdev, bounce_skb->data,
985 len, PCI_DMA_TODEVICE);
986 if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
987 if (!dma_mapping_error(mapping))
988 pci_unmap_single(bp->pdev, mapping,
989 len, PCI_DMA_TODEVICE);
990 dev_kfree_skb_any(bounce_skb);
994 memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
995 dev_kfree_skb_any(skb);
1000 bp->tx_buffers[entry].skb = skb;
1001 pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);
1003 ctrl = (len & DESC_CTRL_LEN);
1004 ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
1005 if (entry == (B44_TX_RING_SIZE - 1))
1006 ctrl |= DESC_CTRL_EOT;
1008 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1009 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1011 if (bp->flags & B44_FLAG_TX_RING_HACK)
1012 b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
1013 entry * sizeof(bp->tx_ring[0]),
1016 entry = NEXT_TX(entry);
1018 bp->tx_prod = entry;
1022 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
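/* Some chip revisions need the producer-index mailbox written twice
 * (B44_FLAG_BUGGY_TXPTR), and others need a readback to flush the posted
 * write past PCI write reordering (B44_FLAG_REORDER_BUG).
 */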
1023 if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1024 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1025 if (bp->flags & B44_FLAG_REORDER_BUG)
1026 br32(bp, B44_DMATX_PTR);
1028 if (TX_BUFFS_AVAIL(bp) < 1)
1029 netif_stop_queue(dev);
1031 dev->trans_start = jiffies;
1034 spin_unlock_irq(&bp->lock);
1039 rc = NETDEV_TX_BUSY;
1043 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1045 struct b44 *bp = netdev_priv(dev);
1047 if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1050 if (!netif_running(dev)) {
1051 /* We'll just catch it later when the
1058 spin_lock_irq(&bp->lock);
1063 spin_unlock_irq(&bp->lock);
1065 b44_enable_ints(bp);
1070 /* Free up pending packets in all rx/tx rings.
1072 * The chip has been shut down and the driver detached from
1073 * the networking layer, so no interrupts or new tx packets will
1074 * end up in the driver. bp->lock is not held and we are not
1075 * in an interrupt context and thus may sleep.
1077 static void b44_free_rings(struct b44 *bp)
1079 struct ring_info *rp;
1082 for (i = 0; i < B44_RX_RING_SIZE; i++) {
1083 rp = &bp->rx_buffers[i];
1085 if (rp->skb == NULL)
1087 pci_unmap_single(bp->pdev,
1088 pci_unmap_addr(rp, mapping),
1090 PCI_DMA_FROMDEVICE);
1091 dev_kfree_skb_any(rp->skb);
1095 /* XXX needs changes once NETIF_F_SG is set... */
1096 for (i = 0; i < B44_TX_RING_SIZE; i++) {
1097 rp = &bp->tx_buffers[i];
1099 if (rp->skb == NULL)
1101 pci_unmap_single(bp->pdev,
1102 pci_unmap_addr(rp, mapping),
1105 dev_kfree_skb_any(rp->skb);
1110 /* Initialize tx/rx rings for packet processing.
1112 * The chip has been shut down and the driver detached from
1113 * the networking layer, so no interrupts or new tx packets will
1114 * end up in the driver.
1116 static void b44_init_rings(struct b44 *bp)
1122 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1123 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1125 if (bp->flags & B44_FLAG_RX_RING_HACK)
1126 dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
1128 PCI_DMA_BIDIRECTIONAL);
1130 if (bp->flags & B44_FLAG_TX_RING_HACK)
1131 dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
1135 for (i = 0; i < bp->rx_pending; i++) {
1136 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1142 * Must not be invoked with interrupt sources disabled and
1143 * the hardware shut down.
1145 static void b44_free_consistent(struct b44 *bp)
1147 kfree(bp->rx_buffers);
1148 bp->rx_buffers = NULL;
1149 kfree(bp->tx_buffers);
1150 bp->tx_buffers = NULL;
1152 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1153 dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
1158 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1159 bp->rx_ring, bp->rx_ring_dma);
1161 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1164 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1165 dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
1170 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1171 bp->tx_ring, bp->tx_ring_dma);
1173 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1178 * Must not be invoked with interrupt sources disabled and
1179 * the hardware shut down. Can sleep.
1181 static int b44_alloc_consistent(struct b44 *bp)
1185 size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1186 bp->rx_buffers = kzalloc(size, GFP_KERNEL);
1187 if (!bp->rx_buffers)
1190 size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1191 bp->tx_buffers = kzalloc(size, GFP_KERNEL);
1192 if (!bp->tx_buffers)
1195 size = DMA_TABLE_BYTES;
1196 bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
1198 /* Allocation may have failed due to pci_alloc_consistent
1199 insisting on use of GFP_DMA, which is more restrictive
1200 than necessary... */
1201 struct dma_desc *rx_ring;
1202 dma_addr_t rx_ring_dma;
1204 rx_ring = kzalloc(size, GFP_KERNEL);
1208 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
1212 if (dma_mapping_error(rx_ring_dma) ||
1213 rx_ring_dma + size > B44_DMA_MASK) {
1218 bp->rx_ring = rx_ring;
1219 bp->rx_ring_dma = rx_ring_dma;
1220 bp->flags |= B44_FLAG_RX_RING_HACK;
1223 bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1225 /* Allocation may have failed due to pci_alloc_consistent
1226 insisting on use of GFP_DMA, which is more restrictive
1227 than necessary... */
1228 struct dma_desc *tx_ring;
1229 dma_addr_t tx_ring_dma;
1231 tx_ring = kzalloc(size, GFP_KERNEL);
1235 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
1239 if (dma_mapping_error(tx_ring_dma) ||
1240 tx_ring_dma + size > B44_DMA_MASK) {
1245 bp->tx_ring = tx_ring;
1246 bp->tx_ring_dma = tx_ring_dma;
1247 bp->flags |= B44_FLAG_TX_RING_HACK;
1253 b44_free_consistent(bp);
1257 /* bp->lock is held. */
1258 static void b44_clear_stats(struct b44 *bp)
1262 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1263 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1265 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1269 /* bp->lock is held. */
1270 static void b44_chip_reset(struct b44 *bp)
1272 if (ssb_is_core_up(bp)) {
1273 bw32(bp, B44_RCV_LAZY, 0);
1274 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1275 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
1276 bw32(bp, B44_DMATX_CTRL, 0);
1277 bp->tx_prod = bp->tx_cons = 0;
1278 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1279 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1282 bw32(bp, B44_DMARX_CTRL, 0);
1283 bp->rx_prod = bp->rx_cons = 0;
1285 ssb_pci_setup(bp, (bp->core_unit == 0 ?
1292 b44_clear_stats(bp);
1294 /* Make PHY accessible. */
1295 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1296 (0x0d & MDIO_CTRL_MAXF_MASK)));
1297 br32(bp, B44_MDIO_CTRL);
1299 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1300 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1301 br32(bp, B44_ENET_CTRL);
1302 bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1304 u32 val = br32(bp, B44_DEVCTRL);
1306 if (val & DEVCTRL_EPR) {
1307 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1308 br32(bp, B44_DEVCTRL);
1311 bp->flags |= B44_FLAG_INTERNAL_PHY;
1315 /* bp->lock is held. */
1316 static void b44_halt(struct b44 *bp)
1318 b44_disable_ints(bp);
1322 /* bp->lock is held. */
1323 static void __b44_set_mac_addr(struct b44 *bp)
1325 bw32(bp, B44_CAM_CTRL, 0);
1326 if (!(bp->dev->flags & IFF_PROMISC)) {
1329 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1330 val = br32(bp, B44_CAM_CTRL);
1331 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1335 static int b44_set_mac_addr(struct net_device *dev, void *p)
1337 struct b44 *bp = netdev_priv(dev);
1338 struct sockaddr *addr = p;
1340 if (netif_running(dev))
1343 if (!is_valid_ether_addr(addr->sa_data))
1346 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1348 spin_lock_irq(&bp->lock);
1349 __b44_set_mac_addr(bp);
1350 spin_unlock_irq(&bp->lock);
1355 /* Called at device open time to get the chip ready for
1356 * packet processing. Invoked with bp->lock held.
1358 static void __b44_set_rx_mode(struct net_device *);
1359 static void b44_init_hw(struct b44 *bp)
1367 /* Enable CRC32, set proper LED modes and power on PHY */
1368 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1369 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1371 /* This sets the MAC address too. */
1372 __b44_set_rx_mode(bp->dev);
1374 /* MTU + eth header + possible VLAN tag + struct rx_header */
1375 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1376 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1378 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1379 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1380 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1381 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1382 (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
1383 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1385 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1386 bp->rx_prod = bp->rx_pending;
1388 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1390 val = br32(bp, B44_ENET_CTRL);
1391 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1394 static int b44_open(struct net_device *dev)
1396 struct b44 *bp = netdev_priv(dev);
1399 err = b44_alloc_consistent(bp);
1408 err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
1409 if (unlikely(err < 0)) {
1412 b44_free_consistent(bp);
1416 init_timer(&bp->timer);
1417 bp->timer.expires = jiffies + HZ;
1418 bp->timer.data = (unsigned long) bp;
1419 bp->timer.function = b44_timer;
1420 add_timer(&bp->timer);
1422 b44_enable_ints(bp);
1423 netif_start_queue(dev);
1429 /*static*/ void b44_dump_state(struct b44 *bp)
1431 u32 val32, val32_2, val32_3, val32_4, val32_5;
1434 pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
1435 printk("DEBUG: PCI status [%04x] \n", val16);
1440 #ifdef CONFIG_NET_POLL_CONTROLLER
1442 * Polling receive - used by netconsole and other diagnostic tools
1443 * to allow network i/o with interrupts disabled.
1445 static void b44_poll_controller(struct net_device *dev)
1447 disable_irq(dev->irq);
1448 b44_interrupt(dev->irq, dev, NULL);
1449 enable_irq(dev->irq);
1453 static int b44_close(struct net_device *dev)
1455 struct b44 *bp = netdev_priv(dev);
1457 netif_stop_queue(dev);
1459 netif_poll_disable(dev);
1461 del_timer_sync(&bp->timer);
1463 spin_lock_irq(&bp->lock);
1470 netif_carrier_off(dev);
1472 spin_unlock_irq(&bp->lock);
1474 free_irq(dev->irq, dev);
1476 netif_poll_enable(dev);
1478 b44_free_consistent(bp);
1483 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1485 struct b44 *bp = netdev_priv(dev);
1486 struct net_device_stats *nstat = &bp->stats;
1487 struct b44_hw_stats *hwstat = &bp->hw_stats;
1489 /* Convert HW stats into netdevice stats. */
1490 nstat->rx_packets = hwstat->rx_pkts;
1491 nstat->tx_packets = hwstat->tx_pkts;
1492 nstat->rx_bytes = hwstat->rx_octets;
1493 nstat->tx_bytes = hwstat->tx_octets;
1494 nstat->tx_errors = (hwstat->tx_jabber_pkts +
1495 hwstat->tx_oversize_pkts +
1496 hwstat->tx_underruns +
1497 hwstat->tx_excessive_cols +
1498 hwstat->tx_late_cols);
1499 nstat->multicast = hwstat->rx_multicast_pkts;
1500 nstat->collisions = hwstat->tx_total_cols;
1502 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1503 hwstat->rx_undersize);
1504 nstat->rx_over_errors = hwstat->rx_missed_pkts;
1505 nstat->rx_frame_errors = hwstat->rx_align_errs;
1506 nstat->rx_crc_errors = hwstat->rx_crc_errs;
1507 nstat->rx_errors = (hwstat->rx_jabber_pkts +
1508 hwstat->rx_oversize_pkts +
1509 hwstat->rx_missed_pkts +
1510 hwstat->rx_crc_align_errs +
1511 hwstat->rx_undersize +
1512 hwstat->rx_crc_errs +
1513 hwstat->rx_align_errs +
1514 hwstat->rx_symbol_errs);
1516 nstat->tx_aborted_errors = hwstat->tx_underruns;
1518 /* Carrier lost counter seems to be broken for some devices */
1519 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1525 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1527 struct dev_mc_list *mclist;
1530 num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1531 mclist = dev->mc_list;
1532 for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1533 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1538 static void __b44_set_rx_mode(struct net_device *dev)
1540 struct b44 *bp = netdev_priv(dev);
1543 val = br32(bp, B44_RXCONFIG);
1544 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1545 if (dev->flags & IFF_PROMISC) {
1546 val |= RXCONFIG_PROMISC;
1547 bw32(bp, B44_RXCONFIG, val);
1549 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1552 __b44_set_mac_addr(bp);
1554 if (dev->flags & IFF_ALLMULTI)
1555 val |= RXCONFIG_ALLMULTI;
1557 i = __b44_load_mcast(bp, dev);
1559 for (; i < 64; i++) {
1560 __b44_cam_write(bp, zero, i);
1562 bw32(bp, B44_RXCONFIG, val);
1563 val = br32(bp, B44_CAM_CTRL);
1564 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1568 static void b44_set_rx_mode(struct net_device *dev)
1570 struct b44 *bp = netdev_priv(dev);
1572 spin_lock_irq(&bp->lock);
1573 __b44_set_rx_mode(dev);
1574 spin_unlock_irq(&bp->lock);
1577 static u32 b44_get_msglevel(struct net_device *dev)
1579 struct b44 *bp = netdev_priv(dev);
1580 return bp->msg_enable;
1583 static void b44_set_msglevel(struct net_device *dev, u32 value)
1585 struct b44 *bp = netdev_priv(dev);
1586 bp->msg_enable = value;
1589 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1591 struct b44 *bp = netdev_priv(dev);
1592 struct pci_dev *pci_dev = bp->pdev;
1594 strcpy (info->driver, DRV_MODULE_NAME);
1595 strcpy (info->version, DRV_MODULE_VERSION);
1596 strcpy (info->bus_info, pci_name(pci_dev));
1599 static int b44_nway_reset(struct net_device *dev)
1601 struct b44 *bp = netdev_priv(dev);
1605 spin_lock_irq(&bp->lock);
1606 b44_readphy(bp, MII_BMCR, &bmcr);
1607 b44_readphy(bp, MII_BMCR, &bmcr);
1609 if (bmcr & BMCR_ANENABLE) {
1610 b44_writephy(bp, MII_BMCR,
1611 bmcr | BMCR_ANRESTART);
1614 spin_unlock_irq(&bp->lock);
1619 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1621 struct b44 *bp = netdev_priv(dev);
1623 if (!netif_running(dev))
1625 cmd->supported = (SUPPORTED_Autoneg);
1626 cmd->supported |= (SUPPORTED_100baseT_Half |
1627 SUPPORTED_100baseT_Full |
1628 SUPPORTED_10baseT_Half |
1629 SUPPORTED_10baseT_Full |
1632 cmd->advertising = 0;
1633 if (bp->flags & B44_FLAG_ADV_10HALF)
1634 cmd->advertising |= ADVERTISED_10baseT_Half;
1635 if (bp->flags & B44_FLAG_ADV_10FULL)
1636 cmd->advertising |= ADVERTISED_10baseT_Full;
1637 if (bp->flags & B44_FLAG_ADV_100HALF)
1638 cmd->advertising |= ADVERTISED_100baseT_Half;
1639 if (bp->flags & B44_FLAG_ADV_100FULL)
1640 cmd->advertising |= ADVERTISED_100baseT_Full;
1641 cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1642 cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1643 SPEED_100 : SPEED_10;
1644 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1645 DUPLEX_FULL : DUPLEX_HALF;
1647 cmd->phy_address = bp->phy_addr;
1648 cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1649 XCVR_INTERNAL : XCVR_EXTERNAL;
1650 cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1651 AUTONEG_DISABLE : AUTONEG_ENABLE;
1657 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1659 struct b44 *bp = netdev_priv(dev);
1661 if (!netif_running(dev))
1664 /* We do not support gigabit. */
1665 if (cmd->autoneg == AUTONEG_ENABLE) {
1666 if (cmd->advertising &
1667 (ADVERTISED_1000baseT_Half |
1668 ADVERTISED_1000baseT_Full))
1670 } else if ((cmd->speed != SPEED_100 &&
1671 cmd->speed != SPEED_10) ||
1672 (cmd->duplex != DUPLEX_HALF &&
1673 cmd->duplex != DUPLEX_FULL)) {
1677 spin_lock_irq(&bp->lock);
1679 if (cmd->autoneg == AUTONEG_ENABLE) {
1680 bp->flags &= ~B44_FLAG_FORCE_LINK;
1681 bp->flags &= ~(B44_FLAG_ADV_10HALF |
1682 B44_FLAG_ADV_10FULL |
1683 B44_FLAG_ADV_100HALF |
1684 B44_FLAG_ADV_100FULL);
1685 if (cmd->advertising & ADVERTISED_10baseT_Half)
1686 bp->flags |= B44_FLAG_ADV_10HALF;
1687 if (cmd->advertising & ADVERTISED_10baseT_Full)
1688 bp->flags |= B44_FLAG_ADV_10FULL;
1689 if (cmd->advertising & ADVERTISED_100baseT_Half)
1690 bp->flags |= B44_FLAG_ADV_100HALF;
1691 if (cmd->advertising & ADVERTISED_100baseT_Full)
1692 bp->flags |= B44_FLAG_ADV_100FULL;
1694 bp->flags |= B44_FLAG_FORCE_LINK;
1695 if (cmd->speed == SPEED_100)
1696 bp->flags |= B44_FLAG_100_BASE_T;
1697 if (cmd->duplex == DUPLEX_FULL)
1698 bp->flags |= B44_FLAG_FULL_DUPLEX;
1703 spin_unlock_irq(&bp->lock);
1708 static void b44_get_ringparam(struct net_device *dev,
1709 struct ethtool_ringparam *ering)
1711 struct b44 *bp = netdev_priv(dev);
1713 ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1714 ering->rx_pending = bp->rx_pending;
1716 /* XXX ethtool lacks a tx_max_pending, oops... */
1719 static int b44_set_ringparam(struct net_device *dev,
1720 struct ethtool_ringparam *ering)
1722 struct b44 *bp = netdev_priv(dev);
1724 if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1725 (ering->rx_mini_pending != 0) ||
1726 (ering->rx_jumbo_pending != 0) ||
1727 (ering->tx_pending > B44_TX_RING_SIZE - 1))
1730 spin_lock_irq(&bp->lock);
1732 bp->rx_pending = ering->rx_pending;
1733 bp->tx_pending = ering->tx_pending;
1738 netif_wake_queue(bp->dev);
1739 spin_unlock_irq(&bp->lock);
1741 b44_enable_ints(bp);
1746 static void b44_get_pauseparam(struct net_device *dev,
1747 struct ethtool_pauseparam *epause)
1749 struct b44 *bp = netdev_priv(dev);
1752 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1754 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1756 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1759 static int b44_set_pauseparam(struct net_device *dev,
1760 struct ethtool_pauseparam *epause)
1762 struct b44 *bp = netdev_priv(dev);
1764 spin_lock_irq(&bp->lock);
1765 if (epause->autoneg)
1766 bp->flags |= B44_FLAG_PAUSE_AUTO;
1768 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1769 if (epause->rx_pause)
1770 bp->flags |= B44_FLAG_RX_PAUSE;
1772 bp->flags &= ~B44_FLAG_RX_PAUSE;
1773 if (epause->tx_pause)
1774 bp->flags |= B44_FLAG_TX_PAUSE;
1776 bp->flags &= ~B44_FLAG_TX_PAUSE;
1777 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1782 __b44_set_flow_ctrl(bp, bp->flags);
1784 spin_unlock_irq(&bp->lock);
1786 b44_enable_ints(bp);
1791 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1795 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1800 static int b44_get_stats_count(struct net_device *dev)
1802 return ARRAY_SIZE(b44_gstrings);
1805 static void b44_get_ethtool_stats(struct net_device *dev,
1806 struct ethtool_stats *stats, u64 *data)
1808 struct b44 *bp = netdev_priv(dev);
1809 u32 *val = &bp->hw_stats.tx_good_octets;
1812 spin_lock_irq(&bp->lock);
1814 b44_stats_update(bp);
1816 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1819 spin_unlock_irq(&bp->lock);
1822 static struct ethtool_ops b44_ethtool_ops = {
1823 .get_drvinfo = b44_get_drvinfo,
1824 .get_settings = b44_get_settings,
1825 .set_settings = b44_set_settings,
1826 .nway_reset = b44_nway_reset,
1827 .get_link = ethtool_op_get_link,
1828 .get_ringparam = b44_get_ringparam,
1829 .set_ringparam = b44_set_ringparam,
1830 .get_pauseparam = b44_get_pauseparam,
1831 .set_pauseparam = b44_set_pauseparam,
1832 .get_msglevel = b44_get_msglevel,
1833 .set_msglevel = b44_set_msglevel,
1834 .get_strings = b44_get_strings,
1835 .get_stats_count = b44_get_stats_count,
1836 .get_ethtool_stats = b44_get_ethtool_stats,
1837 .get_perm_addr = ethtool_op_get_perm_addr,
1840 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1842 struct mii_ioctl_data *data = if_mii(ifr);
1843 struct b44 *bp = netdev_priv(dev);
1846 if (!netif_running(dev))
1849 spin_lock_irq(&bp->lock);
1850 err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
1851 spin_unlock_irq(&bp->lock);
1856 /* Read 128-bytes of EEPROM. */
1857 static int b44_read_eeprom(struct b44 *bp, u8 *data)
1860 u16 *ptr = (u16 *) data;
1862 for (i = 0; i < 128; i += 2)
1863 ptr[i / 2] = readw(bp->regs + 4096 + i);
1868 static int __devinit b44_get_invariants(struct b44 *bp)
1873 err = b44_read_eeprom(bp, &eeprom[0]);
1877 bp->dev->dev_addr[0] = eeprom[79];
1878 bp->dev->dev_addr[1] = eeprom[78];
1879 bp->dev->dev_addr[2] = eeprom[81];
1880 bp->dev->dev_addr[3] = eeprom[80];
1881 bp->dev->dev_addr[4] = eeprom[83];
1882 bp->dev->dev_addr[5] = eeprom[82];
1884 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
1885 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
1889 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
1891 bp->phy_addr = eeprom[90] & 0x1f;
1893 /* With this, plus the rx_header prepended to the data by the
1894 * hardware, we'll land the ethernet header on a 2-byte boundary.
1898 bp->imask = IMASK_DEF;
1900 bp->core_unit = ssb_core_unit(bp);
1901 bp->dma_offset = SB_PCI_DMA;
1903 /* XXX - really required?
1904 bp->flags |= B44_FLAG_BUGGY_TXPTR;
1910 static int __devinit b44_init_one(struct pci_dev *pdev,
1911 const struct pci_device_id *ent)
1913 static int b44_version_printed = 0;
1914 unsigned long b44reg_base, b44reg_len;
1915 struct net_device *dev;
1919 if (b44_version_printed++ == 0)
1920 printk(KERN_INFO "%s", version);
1922 err = pci_enable_device(pdev);
1924 printk(KERN_ERR PFX "Cannot enable PCI device, "
1929 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1930 printk(KERN_ERR PFX "Cannot find proper PCI device "
1931 "base address, aborting.\n");
1933 goto err_out_disable_pdev;
1936 err = pci_request_regions(pdev, DRV_MODULE_NAME);
1938 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
1940 goto err_out_disable_pdev;
1943 pci_set_master(pdev);
1945 err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
1947 printk(KERN_ERR PFX "No usable DMA configuration, "
1949 goto err_out_free_res;
1952 err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
1954 printk(KERN_ERR PFX "No usable DMA configuration, "
1956 goto err_out_free_res;
1959 b44reg_base = pci_resource_start(pdev, 0);
1960 b44reg_len = pci_resource_len(pdev, 0);
1962 dev = alloc_etherdev(sizeof(*bp));
1964 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
1966 goto err_out_free_res;
1969 SET_MODULE_OWNER(dev);
1970 SET_NETDEV_DEV(dev,&pdev->dev);
1972 /* No interesting netdevice features in this card... */
1975 bp = netdev_priv(dev);
1979 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
1981 spin_lock_init(&bp->lock);
1983 bp->regs = ioremap(b44reg_base, b44reg_len);
1984 if (bp->regs == 0UL) {
1985 printk(KERN_ERR PFX "Cannot map device registers, "
1988 goto err_out_free_dev;
1991 bp->rx_pending = B44_DEF_RX_RING_PENDING;
1992 bp->tx_pending = B44_DEF_TX_RING_PENDING;
1994 dev->open = b44_open;
1995 dev->stop = b44_close;
1996 dev->hard_start_xmit = b44_start_xmit;
1997 dev->get_stats = b44_get_stats;
1998 dev->set_multicast_list = b44_set_rx_mode;
1999 dev->set_mac_address = b44_set_mac_addr;
2000 dev->do_ioctl = b44_ioctl;
2001 dev->tx_timeout = b44_tx_timeout;
2002 dev->poll = b44_poll;
2004 dev->watchdog_timeo = B44_TX_TIMEOUT;
2005 #ifdef CONFIG_NET_POLL_CONTROLLER
2006 dev->poll_controller = b44_poll_controller;
2008 dev->change_mtu = b44_change_mtu;
2009 dev->irq = pdev->irq;
2010 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2012 netif_carrier_off(dev);
2014 err = b44_get_invariants(bp);
2016 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
2018 goto err_out_iounmap;
2021 bp->mii_if.dev = dev;
2022 bp->mii_if.mdio_read = b44_mii_read;
2023 bp->mii_if.mdio_write = b44_mii_write;
2024 bp->mii_if.phy_id = bp->phy_addr;
2025 bp->mii_if.phy_id_mask = 0x1f;
2026 bp->mii_if.reg_num_mask = 0x1f;
2028 /* By default, advertise all speed/duplex settings. */
2029 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2030 B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2032 /* By default, auto-negotiate PAUSE. */
2033 bp->flags |= B44_FLAG_PAUSE_AUTO;
2035 err = register_netdev(dev);
2037 printk(KERN_ERR PFX "Cannot register net device, "
2039 goto err_out_iounmap;
2042 pci_set_drvdata(pdev, dev);
2044 pci_save_state(bp->pdev);
2046 /* Chip reset provides power to the b44 MAC & PCI cores, which
2047 * is necessary for MAC register access.
2051 printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2052 for (i = 0; i < 6; i++)
2053 printk("%2.2x%c", dev->dev_addr[i],
2054 i == 5 ? '\n' : ':');
2065 pci_release_regions(pdev);
2067 err_out_disable_pdev:
2068 pci_disable_device(pdev);
2069 pci_set_drvdata(pdev, NULL);
2073 static void __devexit b44_remove_one(struct pci_dev *pdev)
2075 struct net_device *dev = pci_get_drvdata(pdev);
2076 struct b44 *bp = netdev_priv(dev);
2078 unregister_netdev(dev);
2081 pci_release_regions(pdev);
2082 pci_disable_device(pdev);
2083 pci_set_drvdata(pdev, NULL);
2086 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2088 struct net_device *dev = pci_get_drvdata(pdev);
2089 struct b44 *bp = netdev_priv(dev);
2091 if (!netif_running(dev))
2094 del_timer_sync(&bp->timer);
2096 spin_lock_irq(&bp->lock);
2099 netif_carrier_off(bp->dev);
2100 netif_device_detach(bp->dev);
2103 spin_unlock_irq(&bp->lock);
2105 free_irq(dev->irq, dev);
2106 pci_disable_device(pdev);
2110 static int b44_resume(struct pci_dev *pdev)
2112 struct net_device *dev = pci_get_drvdata(pdev);
2113 struct b44 *bp = netdev_priv(dev);
2115 pci_restore_state(pdev);
2116 pci_enable_device(pdev);
2117 pci_set_master(pdev);
2119 if (!netif_running(dev))
2122 if (request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev))
2123 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2125 spin_lock_irq(&bp->lock);
2129 netif_device_attach(bp->dev);
2130 spin_unlock_irq(&bp->lock);
2132 bp->timer.expires = jiffies + HZ;
2133 add_timer(&bp->timer);
2135 b44_enable_ints(bp);
2136 netif_wake_queue(dev);
2140 static struct pci_driver b44_driver = {
2141 .name = DRV_MODULE_NAME,
2142 .id_table = b44_pci_tbl,
2143 .probe = b44_init_one,
2144 .remove = __devexit_p(b44_remove_one),
2145 .suspend = b44_suspend,
2146 .resume = b44_resume,
2149 static int __init b44_init(void)
2151 unsigned int dma_desc_align_size = dma_get_cache_alignment();
2153 /* Set up parameters for syncing RX/TX DMA descriptors */
2154 dma_desc_align_mask = ~(dma_desc_align_size - 1);
2155 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
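/* For example, with a 32-byte cache line dma_desc_align_size is 32,
 * dma_desc_align_mask becomes ~31 (0xffffffe0), and dma_desc_sync_size is
 * the larger of 32 and sizeof(struct dma_desc), so every partial-ring sync
 * covers at least one whole descriptor.
 */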
2157 return pci_module_init(&b44_driver);
2160 static void __exit b44_cleanup(void)
2162 pci_unregister_driver(&b44_driver);
2165 module_init(b44_init);
2166 module_exit(b44_cleanup);