1 /* b44.c: Broadcom 4400 device driver.
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
5 * Copyright (C) 2006 Broadcom Corporation.
7 * Distribute under GPL.
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/types.h>
14 #include <linux/netdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/etherdevice.h>
19 #include <linux/pci.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/dma-mapping.h>
24 #include <asm/uaccess.h>
30 #define DRV_MODULE_NAME "b44"
31 #define PFX DRV_MODULE_NAME ": "
32 #define DRV_MODULE_VERSION "1.00"
33 #define DRV_MODULE_RELDATE "Apr 7, 2006"
35 #define B44_DEF_MSG_ENABLE \
45 /* length of time before we decide the hardware is borked,
46 * and dev->tx_timeout() should be called to fix the problem
48 #define B44_TX_TIMEOUT (5 * HZ)
50 /* hardware minimum and maximum for a single frame's data payload */
51 #define B44_MIN_MTU 60
52 #define B44_MAX_MTU 1500
54 #define B44_RX_RING_SIZE 512
55 #define B44_DEF_RX_RING_PENDING 200
56 #define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
58 #define B44_TX_RING_SIZE 512
59 #define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
60 #define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
62 #define B44_DMA_MASK 0x3fffffff
64 #define TX_RING_GAP(BP) \
65 (B44_TX_RING_SIZE - (BP)->tx_pending)
66 #define TX_BUFFS_AVAIL(BP) \
67 (((BP)->tx_cons <= (BP)->tx_prod) ? \
68 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
69 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
70 #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
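/* Worked example of the ring accounting above (illustration only): with
 * B44_TX_RING_SIZE = 512 and the default tx_pending of 511, TX_RING_GAP()
 * is 1.  If tx_cons = 4 and tx_prod = 10, the first branch of
 * TX_BUFFS_AVAIL() yields 4 + 511 - 10 = 505 free slots (6 in flight);
 * after the producer wraps, say tx_cons = 500 and tx_prod = 2, the second
 * branch yields 500 - 2 - 1 = 497 free slots (14 in flight).  NEXT_TX()
 * just advances an index modulo the power-of-two ring size.
 */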
72 #define RX_PKT_BUF_SZ (1536 + bp->rx_offset + 64)
73 #define TX_PKT_BUF_SZ (B44_MAX_MTU + ETH_HLEN + 8)
75 /* minimum number of free TX descriptors required to wake up TX process */
76 #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
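/* With the 512-entry ring this works out to 128 free descriptors; b44_tx()
 * below re-wakes a stopped queue once TX_BUFFS_AVAIL() rises above it.
 */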
78 static char version[] __devinitdata =
79 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
81 MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
82 MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
83 MODULE_LICENSE("GPL");
84 MODULE_VERSION(DRV_MODULE_VERSION);
86 static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
87 module_param(b44_debug, int, 0);
88 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
90 static struct pci_device_id b44_pci_tbl[] = {
91 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
92 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
93 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
94 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
95 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
96 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
97 { } /* terminate list with empty entry */
100 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
102 static void b44_halt(struct b44 *);
103 static void b44_init_rings(struct b44 *);
104 static void b44_init_hw(struct b44 *);
106 static int dma_desc_align_mask;
107 static int dma_desc_sync_size;
109 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
110 #define _B44(x...) # x,
115 static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
117 unsigned long offset,
118 enum dma_data_direction dir)
120 dma_sync_single_range_for_device(&pdev->dev, dma_base,
121 offset & dma_desc_align_mask,
122 dma_desc_sync_size, dir);
125 static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
127 unsigned long offset,
128 enum dma_data_direction dir)
130 dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
131 offset & dma_desc_align_mask,
132 dma_desc_sync_size, dir);
135 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
137 return readl(bp->regs + reg);
140 static inline void bw32(const struct b44 *bp,
141 unsigned long reg, unsigned long val)
143 writel(val, bp->regs + reg);
146 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
147 u32 bit, unsigned long timeout, const int clear)
151 for (i = 0; i < timeout; i++) {
152 u32 val = br32(bp, reg);
154 if (clear && !(val & bit))
156 if (!clear && (val & bit))
161 printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
165 (clear ? "clear" : "set"));
171 /* Sonics SiliconBackplane support routines. ROFL, you should see all the
172 * buzz words used on this company's website :-)
174 * All of these routines must be invoked with bp->lock held and
175 * interrupts disabled.
178 #define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */
179 #define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */
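/* A minimal sketch of the expected caller context (illustration only, not
 * compiled): the open/close/timeout paths below take bp->lock with hard
 * interrupts disabled before the reset path reaches these SSB helpers,
 * roughly like this:
 */
#if 0
spin_lock_irq(&bp->lock);
b44_halt(bp);			/* reset path that uses the ssb_* helpers */
b44_init_rings(bp);
b44_init_hw(bp);
spin_unlock_irq(&bp->lock);
#endif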
181 static u32 ssb_get_core_rev(struct b44 *bp)
183 return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
186 static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
188 u32 bar_orig, pci_rev, val;
190 pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
191 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
192 pci_rev = ssb_get_core_rev(bp);
194 val = br32(bp, B44_SBINTVEC);
196 bw32(bp, B44_SBINTVEC, val);
198 val = br32(bp, SSB_PCI_TRANS_2);
199 val |= SSB_PCI_PREF | SSB_PCI_BURST;
200 bw32(bp, SSB_PCI_TRANS_2, val);
202 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
207 static void ssb_core_disable(struct b44 *bp)
209 if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
212 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
213 b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
214 b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
215 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
216 SBTMSLOW_REJECT | SBTMSLOW_RESET));
217 br32(bp, B44_SBTMSLOW);
219 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
220 br32(bp, B44_SBTMSLOW);
224 static void ssb_core_reset(struct b44 *bp)
228 ssb_core_disable(bp);
229 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
230 br32(bp, B44_SBTMSLOW);
233 /* Clear SERR if set, this is a hw bug workaround. */
234 if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
235 bw32(bp, B44_SBTMSHIGH, 0);
237 val = br32(bp, B44_SBIMSTATE);
238 if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
239 bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
241 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
242 br32(bp, B44_SBTMSLOW);
245 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
246 br32(bp, B44_SBTMSLOW);
250 static int ssb_core_unit(struct b44 *bp)
253 u32 val = br32(bp, B44_SBADMATCH0);
256 type = val & SBADMATCH0_TYPE_MASK;
259 base = val & SBADMATCH0_BS0_MASK;
263 base = val & SBADMATCH0_BS1_MASK;
268 base = val & SBADMATCH0_BS2_MASK;
275 static int ssb_is_core_up(struct b44 *bp)
277 return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
281 static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
285 val = ((u32) data[2]) << 24;
286 val |= ((u32) data[3]) << 16;
287 val |= ((u32) data[4]) << 8;
288 val |= ((u32) data[5]) << 0;
289 bw32(bp, B44_CAM_DATA_LO, val);
290 val = (CAM_DATA_HI_VALID |
291 (((u32) data[0]) << 8) |
292 (((u32) data[1]) << 0));
293 bw32(bp, B44_CAM_DATA_HI, val);
294 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
295 (index << CAM_CTRL_INDEX_SHIFT)));
296 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
299 static inline void __b44_disable_ints(struct b44 *bp)
301 bw32(bp, B44_IMASK, 0);
304 static void b44_disable_ints(struct b44 *bp)
306 __b44_disable_ints(bp);
308 /* Flush posted writes. */
312 static void b44_enable_ints(struct b44 *bp)
314 bw32(bp, B44_IMASK, bp->imask);
317 static int b44_readphy(struct b44 *bp, int reg, u32 *val)
321 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
322 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
323 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
324 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
325 (reg << MDIO_DATA_RA_SHIFT) |
326 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
327 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
328 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
333 static int b44_writephy(struct b44 *bp, int reg, u32 val)
335 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
336 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
337 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
338 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
339 (reg << MDIO_DATA_RA_SHIFT) |
340 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
341 (val & MDIO_DATA_DATA)));
342 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
345 /* miilib interface */
346 /* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
347 * due to code existing before miilib use was added to this driver.
348 * Someone should remove this artificial driver limitation in
349 * b44_{read,write}phy. bp->phy_addr itself is fine (and needed).
351 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
354 struct b44 *bp = netdev_priv(dev);
355 int rc = b44_readphy(bp, location, &val);
361 static void b44_mii_write(struct net_device *dev, int phy_id, int location,
364 struct b44 *bp = netdev_priv(dev);
365 b44_writephy(bp, location, val);
368 static int b44_phy_reset(struct b44 *bp)
373 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
377 err = b44_readphy(bp, MII_BMCR, &val);
379 if (val & BMCR_RESET) {
380 printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
389 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
393 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
394 bp->flags |= pause_flags;
396 val = br32(bp, B44_RXCONFIG);
397 if (pause_flags & B44_FLAG_RX_PAUSE)
398 val |= RXCONFIG_FLOW;
400 val &= ~RXCONFIG_FLOW;
401 bw32(bp, B44_RXCONFIG, val);
403 val = br32(bp, B44_MAC_FLOW);
404 if (pause_flags & B44_FLAG_TX_PAUSE)
405 val |= (MAC_FLOW_PAUSE_ENAB |
406 (0xc0 & MAC_FLOW_RX_HI_WATER));
408 val &= ~MAC_FLOW_PAUSE_ENAB;
409 bw32(bp, B44_MAC_FLOW, val);
412 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
416 /* The driver supports only rx pause by default because
417 the b44 mac tx pause mechanism generates excessive
419 Use ethtool to turn on b44 tx pause if necessary.
421 if ((local & ADVERTISE_PAUSE_CAP) &&
422 (local & ADVERTISE_PAUSE_ASYM)){
423 if ((remote & LPA_PAUSE_ASYM) &&
424 !(remote & LPA_PAUSE_CAP))
425 pause_enab |= B44_FLAG_RX_PAUSE;
428 __b44_set_flow_ctrl(bp, pause_enab);
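/* Illustration of the default policy above: if we advertise
 * ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM and the partner reports
 * LPA_PAUSE_ASYM without LPA_PAUSE_CAP, only B44_FLAG_RX_PAUSE is handed to
 * __b44_set_flow_ctrl(); every other advertisement combination leaves both
 * directions off here, and TX pause has to be enabled explicitly via
 * ethtool (b44_set_pauseparam).
 */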
431 static int b44_setup_phy(struct b44 *bp)
436 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
438 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
439 val & MII_ALEDCTRL_ALLMSK)) != 0)
441 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
443 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
444 val | MII_TLEDCTRL_ENABLE)) != 0)
447 if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
448 u32 adv = ADVERTISE_CSMA;
450 if (bp->flags & B44_FLAG_ADV_10HALF)
451 adv |= ADVERTISE_10HALF;
452 if (bp->flags & B44_FLAG_ADV_10FULL)
453 adv |= ADVERTISE_10FULL;
454 if (bp->flags & B44_FLAG_ADV_100HALF)
455 adv |= ADVERTISE_100HALF;
456 if (bp->flags & B44_FLAG_ADV_100FULL)
457 adv |= ADVERTISE_100FULL;
459 if (bp->flags & B44_FLAG_PAUSE_AUTO)
460 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
462 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
464 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
465 BMCR_ANRESTART))) != 0)
470 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
472 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
473 if (bp->flags & B44_FLAG_100_BASE_T)
474 bmcr |= BMCR_SPEED100;
475 if (bp->flags & B44_FLAG_FULL_DUPLEX)
476 bmcr |= BMCR_FULLDPLX;
477 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
480 /* Since we will not be negotiating, there is no safe way
481 * to determine if the link partner supports flow control
482 * or not. So just disable it completely in this case.
484 b44_set_flow_ctrl(bp, 0, 0);
491 static void b44_stats_update(struct b44 *bp)
496 val = &bp->hw_stats.tx_good_octets;
497 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
498 *val++ += br32(bp, reg);
504 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
505 *val++ += br32(bp, reg);
509 static void b44_link_report(struct b44 *bp)
511 if (!netif_carrier_ok(bp->dev)) {
512 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
514 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
516 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
517 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
519 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
522 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
523 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
527 static void b44_check_phy(struct b44 *bp)
531 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
532 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
534 if (aux & MII_AUXCTRL_SPEED)
535 bp->flags |= B44_FLAG_100_BASE_T;
537 bp->flags &= ~B44_FLAG_100_BASE_T;
538 if (aux & MII_AUXCTRL_DUPLEX)
539 bp->flags |= B44_FLAG_FULL_DUPLEX;
541 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
543 if (!netif_carrier_ok(bp->dev) &&
544 (bmsr & BMSR_LSTATUS)) {
545 u32 val = br32(bp, B44_TX_CTRL);
546 u32 local_adv, remote_adv;
548 if (bp->flags & B44_FLAG_FULL_DUPLEX)
549 val |= TX_CTRL_DUPLEX;
551 val &= ~TX_CTRL_DUPLEX;
552 bw32(bp, B44_TX_CTRL, val);
554 if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
555 !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
556 !b44_readphy(bp, MII_LPA, &remote_adv))
557 b44_set_flow_ctrl(bp, local_adv, remote_adv);
560 netif_carrier_on(bp->dev);
562 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
564 netif_carrier_off(bp->dev);
568 if (bmsr & BMSR_RFAULT)
569 printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
572 printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
577 static void b44_timer(unsigned long __opaque)
579 struct b44 *bp = (struct b44 *) __opaque;
581 spin_lock_irq(&bp->lock);
585 b44_stats_update(bp);
587 spin_unlock_irq(&bp->lock);
589 bp->timer.expires = jiffies + HZ;
590 add_timer(&bp->timer);
593 static void b44_tx(struct b44 *bp)
597 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
598 cur /= sizeof(struct dma_desc);
600 /* XXX needs updating when NETIF_F_SG is supported */
601 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
602 struct ring_info *rp = &bp->tx_buffers[cons];
603 struct sk_buff *skb = rp->skb;
607 pci_unmap_single(bp->pdev,
608 pci_unmap_addr(rp, mapping),
612 dev_kfree_skb_irq(skb);
616 if (netif_queue_stopped(bp->dev) &&
617 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
618 netif_wake_queue(bp->dev);
620 bw32(bp, B44_GPTIMER, 0);
623 /* Works like this. This chip writes a 'struct rx_header' 30 bytes
624 * before the DMA address you give it. So we allocate 30 more bytes
625 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
626 * point the chip at 30 bytes past where the rx_header will go.
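/* Rough picture of one receive buffer, assuming bp->rx_offset is the 30
 * bytes mentioned above (struct rx_header plus a little padding):
 *
 *   DMA mapping start           descriptor addr = mapping + 30
 *   |                           |   (plus the backplane dma_offset)
 *   [ rx_header | pad ........ ][ received frame .................... ]
 *   |<------- 30 bytes ------->|
 *
 * b44_alloc_rx_skb() below maps the whole buffer, skb_reserve()s the first
 * 30 bytes, and points the chip just past them, so the header the chip
 * writes lands in the reserved space in front of skb->data.
 */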
628 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
631 struct ring_info *src_map, *map;
632 struct rx_header *rh;
640 src_map = &bp->rx_buffers[src_idx];
641 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
642 map = &bp->rx_buffers[dest_idx];
643 skb = dev_alloc_skb(RX_PKT_BUF_SZ);
647 mapping = pci_map_single(bp->pdev, skb->data,
651 /* Hardware bug workaround: the chip is unable to do PCI DMA
652 to/from anything above 1GB :-( */
653 if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
655 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
656 dev_kfree_skb_any(skb);
657 skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
660 mapping = pci_map_single(bp->pdev, skb->data,
663 if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
664 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
665 dev_kfree_skb_any(skb);
671 skb_reserve(skb, bp->rx_offset);
673 rh = (struct rx_header *)
674 (skb->data - bp->rx_offset);
679 pci_unmap_addr_set(map, mapping, mapping);
684 ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
685 if (dest_idx == (B44_RX_RING_SIZE - 1))
686 ctrl |= DESC_CTRL_EOT;
688 dp = &bp->rx_ring[dest_idx];
689 dp->ctrl = cpu_to_le32(ctrl);
690 dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
692 if (bp->flags & B44_FLAG_RX_RING_HACK)
693 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
694 dest_idx * sizeof(dp),
697 return RX_PKT_BUF_SZ;
700 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
702 struct dma_desc *src_desc, *dest_desc;
703 struct ring_info *src_map, *dest_map;
704 struct rx_header *rh;
708 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
709 dest_desc = &bp->rx_ring[dest_idx];
710 dest_map = &bp->rx_buffers[dest_idx];
711 src_desc = &bp->rx_ring[src_idx];
712 src_map = &bp->rx_buffers[src_idx];
714 dest_map->skb = src_map->skb;
715 rh = (struct rx_header *) src_map->skb->data;
718 pci_unmap_addr_set(dest_map, mapping,
719 pci_unmap_addr(src_map, mapping));
721 if (bp->flags & B44_FLAG_RX_RING_HACK)
722 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
723 src_idx * sizeof(src_desc),
726 ctrl = src_desc->ctrl;
727 if (dest_idx == (B44_RX_RING_SIZE - 1))
728 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
730 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
732 dest_desc->ctrl = ctrl;
733 dest_desc->addr = src_desc->addr;
737 if (bp->flags & B44_FLAG_RX_RING_HACK)
738 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
739 dest_idx * sizeof(dest_desc),
742 pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
747 static int b44_rx(struct b44 *bp, int budget)
753 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
754 prod /= sizeof(struct dma_desc);
757 while (cons != prod && budget > 0) {
758 struct ring_info *rp = &bp->rx_buffers[cons];
759 struct sk_buff *skb = rp->skb;
760 dma_addr_t map = pci_unmap_addr(rp, mapping);
761 struct rx_header *rh;
764 pci_dma_sync_single_for_cpu(bp->pdev, map,
767 rh = (struct rx_header *) skb->data;
768 len = le16_to_cpu(rh->len);
769 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
770 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
772 b44_recycle_rx(bp, cons, bp->rx_prod);
774 bp->stats.rx_dropped++;
784 len = le16_to_cpu(rh->len);
785 } while (len == 0 && i++ < 5);
793 if (len > RX_COPY_THRESHOLD) {
795 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
798 pci_unmap_single(bp->pdev, map,
799 skb_size, PCI_DMA_FROMDEVICE);
800 /* Leave out rx_header */
801 skb_put(skb, len+bp->rx_offset);
802 skb_pull(skb,bp->rx_offset);
804 struct sk_buff *copy_skb;
806 b44_recycle_rx(bp, cons, bp->rx_prod);
807 copy_skb = dev_alloc_skb(len + 2);
808 if (copy_skb == NULL)
809 goto drop_it_no_recycle;
811 copy_skb->dev = bp->dev;
812 skb_reserve(copy_skb, 2);
813 skb_put(copy_skb, len);
814 /* DMA sync done above, copy just the actual packet */
815 memcpy(copy_skb->data, skb->data+bp->rx_offset, len);
819 skb->ip_summed = CHECKSUM_NONE;
820 skb->protocol = eth_type_trans(skb, bp->dev);
821 netif_receive_skb(skb);
822 bp->dev->last_rx = jiffies;
826 bp->rx_prod = (bp->rx_prod + 1) &
827 (B44_RX_RING_SIZE - 1);
828 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
832 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
837 static int b44_poll(struct net_device *netdev, int *budget)
839 struct b44 *bp = netdev_priv(netdev);
842 spin_lock_irq(&bp->lock);
844 if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
845 /* spin_lock(&bp->tx_lock); */
847 /* spin_unlock(&bp->tx_lock); */
849 spin_unlock_irq(&bp->lock);
852 if (bp->istat & ISTAT_RX) {
853 int orig_budget = *budget;
856 if (orig_budget > netdev->quota)
857 orig_budget = netdev->quota;
859 work_done = b44_rx(bp, orig_budget);
861 *budget -= work_done;
862 netdev->quota -= work_done;
864 if (work_done >= orig_budget)
868 if (bp->istat & ISTAT_ERRORS) {
869 spin_lock_irq(&bp->lock);
873 netif_wake_queue(bp->dev);
874 spin_unlock_irq(&bp->lock);
879 netif_rx_complete(netdev);
883 return (done ? 0 : 1);
886 static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
888 struct net_device *dev = dev_id;
889 struct b44 *bp = netdev_priv(dev);
893 spin_lock(&bp->lock);
895 istat = br32(bp, B44_ISTAT);
896 imask = br32(bp, B44_IMASK);
898 /* ??? What is the purpose of the interrupt mask
899 * ??? register if we have to mask it out by hand anyway?
905 if (unlikely(!netif_running(dev))) {
906 printk(KERN_INFO "%s: late interrupt.\n", dev->name);
910 if (netif_rx_schedule_prep(dev)) {
911 /* NOTE: These posted writes are flushed by the readback of
912 * the ISTAT register below.
915 __b44_disable_ints(bp);
916 __netif_rx_schedule(dev);
918 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
923 bw32(bp, B44_ISTAT, istat);
926 spin_unlock(&bp->lock);
927 return IRQ_RETVAL(handled);
930 static void b44_tx_timeout(struct net_device *dev)
932 struct b44 *bp = netdev_priv(dev);
934 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
937 spin_lock_irq(&bp->lock);
943 spin_unlock_irq(&bp->lock);
947 netif_wake_queue(dev);
950 static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
952 struct b44 *bp = netdev_priv(dev);
953 struct sk_buff *bounce_skb;
954 int rc = NETDEV_TX_OK;
956 u32 len, entry, ctrl;
959 spin_lock_irq(&bp->lock);
961 /* This is a hard error, log it. */
962 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
963 netif_stop_queue(dev);
964 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
969 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
970 if (mapping + len > B44_DMA_MASK) {
971 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
972 pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
974 bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
979 mapping = pci_map_single(bp->pdev, bounce_skb->data,
980 len, PCI_DMA_TODEVICE);
981 if (mapping + len > B44_DMA_MASK) {
982 pci_unmap_single(bp->pdev, mapping,
983 len, PCI_DMA_TODEVICE);
984 dev_kfree_skb_any(bounce_skb);
988 memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
989 dev_kfree_skb_any(skb);
994 bp->tx_buffers[entry].skb = skb;
995 pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);
997 ctrl = (len & DESC_CTRL_LEN);
998 ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
999 if (entry == (B44_TX_RING_SIZE - 1))
1000 ctrl |= DESC_CTRL_EOT;
1002 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1003 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1005 if (bp->flags & B44_FLAG_TX_RING_HACK)
1006 b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
1007 entry * sizeof(bp->tx_ring[0]),
1010 entry = NEXT_TX(entry);
1012 bp->tx_prod = entry;
1016 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1017 if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1018 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1019 if (bp->flags & B44_FLAG_REORDER_BUG)
1020 br32(bp, B44_DMATX_PTR);
1022 if (TX_BUFFS_AVAIL(bp) < 1)
1023 netif_stop_queue(dev);
1025 dev->trans_start = jiffies;
1028 spin_unlock_irq(&bp->lock);
1033 rc = NETDEV_TX_BUSY;
1037 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1039 struct b44 *bp = netdev_priv(dev);
1041 if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1044 if (!netif_running(dev)) {
1045 /* We'll just catch it later when the
1052 spin_lock_irq(&bp->lock);
1057 spin_unlock_irq(&bp->lock);
1059 b44_enable_ints(bp);
1064 /* Free up pending packets in all rx/tx rings.
1066 * The chip has been shut down and the driver detached from
1067 * the networking, so no interrupts or new tx packets will
1068 * end up in the driver. bp->lock is not held and we are not
1069 * in an interrupt context and thus may sleep.
1071 static void b44_free_rings(struct b44 *bp)
1073 struct ring_info *rp;
1076 for (i = 0; i < B44_RX_RING_SIZE; i++) {
1077 rp = &bp->rx_buffers[i];
1079 if (rp->skb == NULL)
1081 pci_unmap_single(bp->pdev,
1082 pci_unmap_addr(rp, mapping),
1084 PCI_DMA_FROMDEVICE);
1085 dev_kfree_skb_any(rp->skb);
1089 /* XXX needs changes once NETIF_F_SG is set... */
1090 for (i = 0; i < B44_TX_RING_SIZE; i++) {
1091 rp = &bp->tx_buffers[i];
1093 if (rp->skb == NULL)
1095 pci_unmap_single(bp->pdev,
1096 pci_unmap_addr(rp, mapping),
1099 dev_kfree_skb_any(rp->skb);
1104 /* Initialize tx/rx rings for packet processing.
1106 * The chip has been shut down and the driver detached from
1107 * the networking, so no interrupts or new tx packets will
1108 * end up in the driver.
1110 static void b44_init_rings(struct b44 *bp)
1116 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1117 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1119 if (bp->flags & B44_FLAG_RX_RING_HACK)
1120 dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
1122 PCI_DMA_BIDIRECTIONAL);
1124 if (bp->flags & B44_FLAG_TX_RING_HACK)
1125 dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
1129 for (i = 0; i < bp->rx_pending; i++) {
1130 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1136 * Must not be invoked with interrupt sources disabled and
1137 * the hardware shut down.
1139 static void b44_free_consistent(struct b44 *bp)
1141 kfree(bp->rx_buffers);
1142 bp->rx_buffers = NULL;
1143 kfree(bp->tx_buffers);
1144 bp->tx_buffers = NULL;
1146 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1147 dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
1152 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1153 bp->rx_ring, bp->rx_ring_dma);
1155 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1158 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1159 dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
1164 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1165 bp->tx_ring, bp->tx_ring_dma);
1167 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1172 * Must not be invoked with interrupt sources disabled and
1173 * the hardware shut down. Can sleep.
1175 static int b44_alloc_consistent(struct b44 *bp)
1179 size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1180 bp->rx_buffers = kzalloc(size, GFP_KERNEL);
1181 if (!bp->rx_buffers)
1184 size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1185 bp->tx_buffers = kzalloc(size, GFP_KERNEL);
1186 if (!bp->tx_buffers)
1189 size = DMA_TABLE_BYTES;
1190 bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
1192 /* Allocation may have failed due to pci_alloc_consistent
1193 insisting on use of GFP_DMA, which is more restrictive
1194 than necessary... */
1195 struct dma_desc *rx_ring;
1196 dma_addr_t rx_ring_dma;
1198 rx_ring = kzalloc(size, GFP_KERNEL);
1202 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
1206 if (rx_ring_dma + size > B44_DMA_MASK) {
1211 bp->rx_ring = rx_ring;
1212 bp->rx_ring_dma = rx_ring_dma;
1213 bp->flags |= B44_FLAG_RX_RING_HACK;
1216 bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1218 /* Allocation may have failed due to pci_alloc_consistent
1219 insisting on use of GFP_DMA, which is more restrictive
1220 than necessary... */
1221 struct dma_desc *tx_ring;
1222 dma_addr_t tx_ring_dma;
1224 tx_ring = kzalloc(size, GFP_KERNEL);
1228 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
1232 if (tx_ring_dma + size > B44_DMA_MASK) {
1237 bp->tx_ring = tx_ring;
1238 bp->tx_ring_dma = tx_ring_dma;
1239 bp->flags |= B44_FLAG_TX_RING_HACK;
1245 b44_free_consistent(bp);
1249 /* bp->lock is held. */
1250 static void b44_clear_stats(struct b44 *bp)
1254 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1255 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1257 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1261 /* bp->lock is held. */
1262 static void b44_chip_reset(struct b44 *bp)
1264 if (ssb_is_core_up(bp)) {
1265 bw32(bp, B44_RCV_LAZY, 0);
1266 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1267 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
1268 bw32(bp, B44_DMATX_CTRL, 0);
1269 bp->tx_prod = bp->tx_cons = 0;
1270 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1271 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1274 bw32(bp, B44_DMARX_CTRL, 0);
1275 bp->rx_prod = bp->rx_cons = 0;
1277 ssb_pci_setup(bp, (bp->core_unit == 0 ?
1284 b44_clear_stats(bp);
1286 /* Make PHY accessible. */
1287 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1288 (0x0d & MDIO_CTRL_MAXF_MASK)));
1289 br32(bp, B44_MDIO_CTRL);
1291 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1292 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1293 br32(bp, B44_ENET_CTRL);
1294 bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1296 u32 val = br32(bp, B44_DEVCTRL);
1298 if (val & DEVCTRL_EPR) {
1299 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1300 br32(bp, B44_DEVCTRL);
1303 bp->flags |= B44_FLAG_INTERNAL_PHY;
1307 /* bp->lock is held. */
1308 static void b44_halt(struct b44 *bp)
1310 b44_disable_ints(bp);
1314 /* bp->lock is held. */
1315 static void __b44_set_mac_addr(struct b44 *bp)
1317 bw32(bp, B44_CAM_CTRL, 0);
1318 if (!(bp->dev->flags & IFF_PROMISC)) {
1321 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1322 val = br32(bp, B44_CAM_CTRL);
1323 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1327 static int b44_set_mac_addr(struct net_device *dev, void *p)
1329 struct b44 *bp = netdev_priv(dev);
1330 struct sockaddr *addr = p;
1332 if (netif_running(dev))
1335 if (!is_valid_ether_addr(addr->sa_data))
1338 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1340 spin_lock_irq(&bp->lock);
1341 __b44_set_mac_addr(bp);
1342 spin_unlock_irq(&bp->lock);
1347 /* Called at device open time to get the chip ready for
1348 * packet processing. Invoked with bp->lock held.
1350 static void __b44_set_rx_mode(struct net_device *);
1351 static void b44_init_hw(struct b44 *bp)
1359 /* Enable CRC32, set proper LED modes and power on PHY */
1360 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1361 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1363 /* This sets the MAC address too. */
1364 __b44_set_rx_mode(bp->dev);
1366 /* MTU + eth header + possible VLAN tag + struct rx_header */
1367 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1368 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
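/* Worked example: with the default 1500-byte MTU this programs
 * 1500 + 14 (ETH_HLEN) + 8 + RX_HEADER_LEN into both length registers,
 * i.e. 1550 if RX_HEADER_LEN is the 28-byte header assumed here.
 */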
1370 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1371 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1372 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1373 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1374 (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
1375 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1377 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1378 bp->rx_prod = bp->rx_pending;
1380 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1382 val = br32(bp, B44_ENET_CTRL);
1383 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1386 static int b44_open(struct net_device *dev)
1388 struct b44 *bp = netdev_priv(dev);
1391 err = b44_alloc_consistent(bp);
1400 err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
1401 if (unlikely(err < 0)) {
1404 b44_free_consistent(bp);
1408 init_timer(&bp->timer);
1409 bp->timer.expires = jiffies + HZ;
1410 bp->timer.data = (unsigned long) bp;
1411 bp->timer.function = b44_timer;
1412 add_timer(&bp->timer);
1414 b44_enable_ints(bp);
1415 netif_start_queue(dev);
1421 /*static*/ void b44_dump_state(struct b44 *bp)
1423 u32 val32, val32_2, val32_3, val32_4, val32_5;
1426 pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
1427 printk("DEBUG: PCI status [%04x] \n", val16);
1432 #ifdef CONFIG_NET_POLL_CONTROLLER
1434 * Polling receive - used by netconsole and other diagnostic tools
1435 * to allow network i/o with interrupts disabled.
1437 static void b44_poll_controller(struct net_device *dev)
1439 disable_irq(dev->irq);
1440 b44_interrupt(dev->irq, dev, NULL);
1441 enable_irq(dev->irq);
1445 static int b44_close(struct net_device *dev)
1447 struct b44 *bp = netdev_priv(dev);
1449 netif_stop_queue(dev);
1451 netif_poll_disable(dev);
1453 del_timer_sync(&bp->timer);
1455 spin_lock_irq(&bp->lock);
1462 netif_carrier_off(dev);
1464 spin_unlock_irq(&bp->lock);
1466 free_irq(dev->irq, dev);
1468 netif_poll_enable(dev);
1470 b44_free_consistent(bp);
1475 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1477 struct b44 *bp = netdev_priv(dev);
1478 struct net_device_stats *nstat = &bp->stats;
1479 struct b44_hw_stats *hwstat = &bp->hw_stats;
1481 /* Convert HW stats into netdevice stats. */
1482 nstat->rx_packets = hwstat->rx_pkts;
1483 nstat->tx_packets = hwstat->tx_pkts;
1484 nstat->rx_bytes = hwstat->rx_octets;
1485 nstat->tx_bytes = hwstat->tx_octets;
1486 nstat->tx_errors = (hwstat->tx_jabber_pkts +
1487 hwstat->tx_oversize_pkts +
1488 hwstat->tx_underruns +
1489 hwstat->tx_excessive_cols +
1490 hwstat->tx_late_cols);
1491 nstat->multicast = hwstat->tx_multicast_pkts;
1492 nstat->collisions = hwstat->tx_total_cols;
1494 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1495 hwstat->rx_undersize);
1496 nstat->rx_over_errors = hwstat->rx_missed_pkts;
1497 nstat->rx_frame_errors = hwstat->rx_align_errs;
1498 nstat->rx_crc_errors = hwstat->rx_crc_errs;
1499 nstat->rx_errors = (hwstat->rx_jabber_pkts +
1500 hwstat->rx_oversize_pkts +
1501 hwstat->rx_missed_pkts +
1502 hwstat->rx_crc_align_errs +
1503 hwstat->rx_undersize +
1504 hwstat->rx_crc_errs +
1505 hwstat->rx_align_errs +
1506 hwstat->rx_symbol_errs);
1508 nstat->tx_aborted_errors = hwstat->tx_underruns;
1510 /* Carrier lost counter seems to be broken for some devices */
1511 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1517 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1519 struct dev_mc_list *mclist;
1522 num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1523 mclist = dev->mc_list;
1524 for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1525 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1530 static void __b44_set_rx_mode(struct net_device *dev)
1532 struct b44 *bp = netdev_priv(dev);
1535 val = br32(bp, B44_RXCONFIG);
1536 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1537 if (dev->flags & IFF_PROMISC) {
1538 val |= RXCONFIG_PROMISC;
1539 bw32(bp, B44_RXCONFIG, val);
1541 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1544 __b44_set_mac_addr(bp);
1546 if (dev->flags & IFF_ALLMULTI)
1547 val |= RXCONFIG_ALLMULTI;
1549 i = __b44_load_mcast(bp, dev);
1551 for (; i < 64; i++) {
1552 __b44_cam_write(bp, zero, i);
1554 bw32(bp, B44_RXCONFIG, val);
1555 val = br32(bp, B44_CAM_CTRL);
1556 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1560 static void b44_set_rx_mode(struct net_device *dev)
1562 struct b44 *bp = netdev_priv(dev);
1564 spin_lock_irq(&bp->lock);
1565 __b44_set_rx_mode(dev);
1566 spin_unlock_irq(&bp->lock);
1569 static u32 b44_get_msglevel(struct net_device *dev)
1571 struct b44 *bp = netdev_priv(dev);
1572 return bp->msg_enable;
1575 static void b44_set_msglevel(struct net_device *dev, u32 value)
1577 struct b44 *bp = netdev_priv(dev);
1578 bp->msg_enable = value;
1581 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1583 struct b44 *bp = netdev_priv(dev);
1584 struct pci_dev *pci_dev = bp->pdev;
1586 strcpy (info->driver, DRV_MODULE_NAME);
1587 strcpy (info->version, DRV_MODULE_VERSION);
1588 strcpy (info->bus_info, pci_name(pci_dev));
1591 static int b44_nway_reset(struct net_device *dev)
1593 struct b44 *bp = netdev_priv(dev);
1597 spin_lock_irq(&bp->lock);
1598 b44_readphy(bp, MII_BMCR, &bmcr);
1599 b44_readphy(bp, MII_BMCR, &bmcr);
1601 if (bmcr & BMCR_ANENABLE) {
1602 b44_writephy(bp, MII_BMCR,
1603 bmcr | BMCR_ANRESTART);
1606 spin_unlock_irq(&bp->lock);
1611 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1613 struct b44 *bp = netdev_priv(dev);
1615 if (!netif_running(dev))
1617 cmd->supported = (SUPPORTED_Autoneg);
1618 cmd->supported |= (SUPPORTED_100baseT_Half |
1619 SUPPORTED_100baseT_Full |
1620 SUPPORTED_10baseT_Half |
1621 SUPPORTED_10baseT_Full |
1624 cmd->advertising = 0;
1625 if (bp->flags & B44_FLAG_ADV_10HALF)
1626 cmd->advertising |= ADVERTISED_10baseT_Half;
1627 if (bp->flags & B44_FLAG_ADV_10FULL)
1628 cmd->advertising |= ADVERTISED_10baseT_Full;
1629 if (bp->flags & B44_FLAG_ADV_100HALF)
1630 cmd->advertising |= ADVERTISED_100baseT_Half;
1631 if (bp->flags & B44_FLAG_ADV_100FULL)
1632 cmd->advertising |= ADVERTISED_100baseT_Full;
1633 cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1634 cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1635 SPEED_100 : SPEED_10;
1636 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1637 DUPLEX_FULL : DUPLEX_HALF;
1639 cmd->phy_address = bp->phy_addr;
1640 cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1641 XCVR_INTERNAL : XCVR_EXTERNAL;
1642 cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1643 AUTONEG_DISABLE : AUTONEG_ENABLE;
1649 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1651 struct b44 *bp = netdev_priv(dev);
1653 if (!netif_running(dev))
1656 /* We do not support gigabit. */
1657 if (cmd->autoneg == AUTONEG_ENABLE) {
1658 if (cmd->advertising &
1659 (ADVERTISED_1000baseT_Half |
1660 ADVERTISED_1000baseT_Full))
1662 } else if ((cmd->speed != SPEED_100 &&
1663 cmd->speed != SPEED_10) ||
1664 (cmd->duplex != DUPLEX_HALF &&
1665 cmd->duplex != DUPLEX_FULL)) {
1669 spin_lock_irq(&bp->lock);
1671 if (cmd->autoneg == AUTONEG_ENABLE) {
1672 bp->flags &= ~B44_FLAG_FORCE_LINK;
1673 bp->flags &= ~(B44_FLAG_ADV_10HALF |
1674 B44_FLAG_ADV_10FULL |
1675 B44_FLAG_ADV_100HALF |
1676 B44_FLAG_ADV_100FULL);
1677 if (cmd->advertising & ADVERTISE_10HALF)
1678 bp->flags |= B44_FLAG_ADV_10HALF;
1679 if (cmd->advertising & ADVERTISE_10FULL)
1680 bp->flags |= B44_FLAG_ADV_10FULL;
1681 if (cmd->advertising & ADVERTISE_100HALF)
1682 bp->flags |= B44_FLAG_ADV_100HALF;
1683 if (cmd->advertising & ADVERTISE_100FULL)
1684 bp->flags |= B44_FLAG_ADV_100FULL;
1686 bp->flags |= B44_FLAG_FORCE_LINK;
1687 if (cmd->speed == SPEED_100)
1688 bp->flags |= B44_FLAG_100_BASE_T;
1689 if (cmd->duplex == DUPLEX_FULL)
1690 bp->flags |= B44_FLAG_FULL_DUPLEX;
1695 spin_unlock_irq(&bp->lock);
1700 static void b44_get_ringparam(struct net_device *dev,
1701 struct ethtool_ringparam *ering)
1703 struct b44 *bp = netdev_priv(dev);
1705 ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1706 ering->rx_pending = bp->rx_pending;
1708 /* XXX ethtool lacks a tx_max_pending, oops... */
1711 static int b44_set_ringparam(struct net_device *dev,
1712 struct ethtool_ringparam *ering)
1714 struct b44 *bp = netdev_priv(dev);
1716 if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1717 (ering->rx_mini_pending != 0) ||
1718 (ering->rx_jumbo_pending != 0) ||
1719 (ering->tx_pending > B44_TX_RING_SIZE - 1))
1722 spin_lock_irq(&bp->lock);
1724 bp->rx_pending = ering->rx_pending;
1725 bp->tx_pending = ering->tx_pending;
1730 netif_wake_queue(bp->dev);
1731 spin_unlock_irq(&bp->lock);
1733 b44_enable_ints(bp);
1738 static void b44_get_pauseparam(struct net_device *dev,
1739 struct ethtool_pauseparam *epause)
1741 struct b44 *bp = netdev_priv(dev);
1744 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1746 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1748 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1751 static int b44_set_pauseparam(struct net_device *dev,
1752 struct ethtool_pauseparam *epause)
1754 struct b44 *bp = netdev_priv(dev);
1756 spin_lock_irq(&bp->lock);
1757 if (epause->autoneg)
1758 bp->flags |= B44_FLAG_PAUSE_AUTO;
1760 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1761 if (epause->rx_pause)
1762 bp->flags |= B44_FLAG_RX_PAUSE;
1764 bp->flags &= ~B44_FLAG_RX_PAUSE;
1765 if (epause->tx_pause)
1766 bp->flags |= B44_FLAG_TX_PAUSE;
1768 bp->flags &= ~B44_FLAG_TX_PAUSE;
1769 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1774 __b44_set_flow_ctrl(bp, bp->flags);
1776 spin_unlock_irq(&bp->lock);
1778 b44_enable_ints(bp);
1783 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1787 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1792 static int b44_get_stats_count(struct net_device *dev)
1794 return ARRAY_SIZE(b44_gstrings);
1797 static void b44_get_ethtool_stats(struct net_device *dev,
1798 struct ethtool_stats *stats, u64 *data)
1800 struct b44 *bp = netdev_priv(dev);
1801 u32 *val = &bp->hw_stats.tx_good_octets;
1804 spin_lock_irq(&bp->lock);
1806 b44_stats_update(bp);
1808 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1811 spin_unlock_irq(&bp->lock);
1814 static struct ethtool_ops b44_ethtool_ops = {
1815 .get_drvinfo = b44_get_drvinfo,
1816 .get_settings = b44_get_settings,
1817 .set_settings = b44_set_settings,
1818 .nway_reset = b44_nway_reset,
1819 .get_link = ethtool_op_get_link,
1820 .get_ringparam = b44_get_ringparam,
1821 .set_ringparam = b44_set_ringparam,
1822 .get_pauseparam = b44_get_pauseparam,
1823 .set_pauseparam = b44_set_pauseparam,
1824 .get_msglevel = b44_get_msglevel,
1825 .set_msglevel = b44_set_msglevel,
1826 .get_strings = b44_get_strings,
1827 .get_stats_count = b44_get_stats_count,
1828 .get_ethtool_stats = b44_get_ethtool_stats,
1829 .get_perm_addr = ethtool_op_get_perm_addr,
1832 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1834 struct mii_ioctl_data *data = if_mii(ifr);
1835 struct b44 *bp = netdev_priv(dev);
1838 if (!netif_running(dev))
1841 spin_lock_irq(&bp->lock);
1842 err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
1843 spin_unlock_irq(&bp->lock);
1848 /* Read 128 bytes of EEPROM. */
1849 static int b44_read_eeprom(struct b44 *bp, u8 *data)
1852 u16 *ptr = (u16 *) data;
1854 for (i = 0; i < 128; i += 2)
1855 ptr[i / 2] = readw(bp->regs + 4096 + i);
1860 static int __devinit b44_get_invariants(struct b44 *bp)
1865 err = b44_read_eeprom(bp, &eeprom[0]);
1869 bp->dev->dev_addr[0] = eeprom[79];
1870 bp->dev->dev_addr[1] = eeprom[78];
1871 bp->dev->dev_addr[2] = eeprom[81];
1872 bp->dev->dev_addr[3] = eeprom[80];
1873 bp->dev->dev_addr[4] = eeprom[83];
1874 bp->dev->dev_addr[5] = eeprom[82];
1876 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
1877 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
1881 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
1883 bp->phy_addr = eeprom[90] & 0x1f;
1885 /* With this, plus the rx_header prepended to the data by the
1886 * hardware, we'll land the ethernet header on a 2-byte boundary.
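/* Illustrative arithmetic, assuming a 28-byte rx_header plus 2 bytes of
 * padding for the 30-byte offset used elsewhere in this driver: the frame
 * then starts 2 bytes past a 4-byte boundary, so the 14-byte Ethernet
 * header ends on one and the IP header comes out word-aligned.
 */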
1890 bp->imask = IMASK_DEF;
1892 bp->core_unit = ssb_core_unit(bp);
1893 bp->dma_offset = SB_PCI_DMA;
1895 /* XXX - really required?
1896 bp->flags |= B44_FLAG_BUGGY_TXPTR;
1902 static int __devinit b44_init_one(struct pci_dev *pdev,
1903 const struct pci_device_id *ent)
1905 static int b44_version_printed = 0;
1906 unsigned long b44reg_base, b44reg_len;
1907 struct net_device *dev;
1911 if (b44_version_printed++ == 0)
1912 printk(KERN_INFO "%s", version);
1914 err = pci_enable_device(pdev);
1916 printk(KERN_ERR PFX "Cannot enable PCI device, "
1921 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1922 printk(KERN_ERR PFX "Cannot find proper PCI device "
1923 "base address, aborting.\n");
1925 goto err_out_disable_pdev;
1928 err = pci_request_regions(pdev, DRV_MODULE_NAME);
1930 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
1932 goto err_out_disable_pdev;
1935 pci_set_master(pdev);
1937 err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
1939 printk(KERN_ERR PFX "No usable DMA configuration, "
1941 goto err_out_free_res;
1944 err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
1946 printk(KERN_ERR PFX "No usable DMA configuration, "
1948 goto err_out_free_res;
1951 b44reg_base = pci_resource_start(pdev, 0);
1952 b44reg_len = pci_resource_len(pdev, 0);
1954 dev = alloc_etherdev(sizeof(*bp));
1956 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
1958 goto err_out_free_res;
1961 SET_MODULE_OWNER(dev);
1962 SET_NETDEV_DEV(dev,&pdev->dev);
1964 /* No interesting netdevice features in this card... */
1967 bp = netdev_priv(dev);
1971 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
1973 spin_lock_init(&bp->lock);
1975 bp->regs = ioremap(b44reg_base, b44reg_len);
1976 if (bp->regs == 0UL) {
1977 printk(KERN_ERR PFX "Cannot map device registers, "
1980 goto err_out_free_dev;
1983 bp->rx_pending = B44_DEF_RX_RING_PENDING;
1984 bp->tx_pending = B44_DEF_TX_RING_PENDING;
1986 dev->open = b44_open;
1987 dev->stop = b44_close;
1988 dev->hard_start_xmit = b44_start_xmit;
1989 dev->get_stats = b44_get_stats;
1990 dev->set_multicast_list = b44_set_rx_mode;
1991 dev->set_mac_address = b44_set_mac_addr;
1992 dev->do_ioctl = b44_ioctl;
1993 dev->tx_timeout = b44_tx_timeout;
1994 dev->poll = b44_poll;
1996 dev->watchdog_timeo = B44_TX_TIMEOUT;
1997 #ifdef CONFIG_NET_POLL_CONTROLLER
1998 dev->poll_controller = b44_poll_controller;
2000 dev->change_mtu = b44_change_mtu;
2001 dev->irq = pdev->irq;
2002 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2004 netif_carrier_off(dev);
2006 err = b44_get_invariants(bp);
2008 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
2010 goto err_out_iounmap;
2013 bp->mii_if.dev = dev;
2014 bp->mii_if.mdio_read = b44_mii_read;
2015 bp->mii_if.mdio_write = b44_mii_write;
2016 bp->mii_if.phy_id = bp->phy_addr;
2017 bp->mii_if.phy_id_mask = 0x1f;
2018 bp->mii_if.reg_num_mask = 0x1f;
2020 /* By default, advertise all speed/duplex settings. */
2021 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2022 B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2024 /* By default, auto-negotiate PAUSE. */
2025 bp->flags |= B44_FLAG_PAUSE_AUTO;
2027 err = register_netdev(dev);
2029 printk(KERN_ERR PFX "Cannot register net device, "
2031 goto err_out_iounmap;
2034 pci_set_drvdata(pdev, dev);
2036 pci_save_state(bp->pdev);
2038 /* Chip reset provides power to the b44 MAC & PCI cores, which
2039 * is necessary for MAC register access.
2043 printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2044 for (i = 0; i < 6; i++)
2045 printk("%2.2x%c", dev->dev_addr[i],
2046 i == 5 ? '\n' : ':');
2057 pci_release_regions(pdev);
2059 err_out_disable_pdev:
2060 pci_disable_device(pdev);
2061 pci_set_drvdata(pdev, NULL);
2065 static void __devexit b44_remove_one(struct pci_dev *pdev)
2067 struct net_device *dev = pci_get_drvdata(pdev);
2068 struct b44 *bp = netdev_priv(dev);
2070 unregister_netdev(dev);
2073 pci_release_regions(pdev);
2074 pci_disable_device(pdev);
2075 pci_set_drvdata(pdev, NULL);
2078 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2080 struct net_device *dev = pci_get_drvdata(pdev);
2081 struct b44 *bp = netdev_priv(dev);
2083 if (!netif_running(dev))
2086 del_timer_sync(&bp->timer);
2088 spin_lock_irq(&bp->lock);
2091 netif_carrier_off(bp->dev);
2092 netif_device_detach(bp->dev);
2095 spin_unlock_irq(&bp->lock);
2097 free_irq(dev->irq, dev);
2098 pci_disable_device(pdev);
2102 static int b44_resume(struct pci_dev *pdev)
2104 struct net_device *dev = pci_get_drvdata(pdev);
2105 struct b44 *bp = netdev_priv(dev);
2107 pci_restore_state(pdev);
2108 pci_enable_device(pdev);
2109 pci_set_master(pdev);
2111 if (!netif_running(dev))
2114 if (request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev))
2115 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2117 spin_lock_irq(&bp->lock);
2121 netif_device_attach(bp->dev);
2122 spin_unlock_irq(&bp->lock);
2124 bp->timer.expires = jiffies + HZ;
2125 add_timer(&bp->timer);
2127 b44_enable_ints(bp);
2128 netif_wake_queue(dev);
2132 static struct pci_driver b44_driver = {
2133 .name = DRV_MODULE_NAME,
2134 .id_table = b44_pci_tbl,
2135 .probe = b44_init_one,
2136 .remove = __devexit_p(b44_remove_one),
2137 .suspend = b44_suspend,
2138 .resume = b44_resume,
2141 static int __init b44_init(void)
2143 unsigned int dma_desc_align_size = dma_get_cache_alignment();
2145 /* Set up parameters for syncing RX/TX DMA descriptors */
2146 dma_desc_align_mask = ~(dma_desc_align_size - 1);
2147 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
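/* Worked example (illustration only): if dma_get_cache_alignment() returns
 * 32, dma_desc_align_mask becomes ~31, so b44_sync_dma_desc_for_cpu/device
 * round a descriptor's ring offset down to a cache-line boundary, and
 * dma_desc_sync_size becomes max(32, sizeof(struct dma_desc)) = 32,
 * assuming the two-word (8-byte) descriptor layout.
 */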
2149 return pci_module_init(&b44_driver);
2152 static void __exit b44_cleanup(void)
2154 pci_unregister_driver(&b44_driver);
2157 module_init(b44_init);
2158 module_exit(b44_cleanup);