/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
 * Copyright (C) 2006 Broadcom Corporation.
 * Copyright (C) 2007 Michael Buesch <mb@bu3sch.de>
 *
 * Distribute under GPL.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>

#include <asm/uaccess.h>

#include "b44.h"
#define DRV_MODULE_NAME		"b44"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"2.0"

#define B44_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU			60
#define B44_MAX_MTU			1500
#define B44_RX_RING_SIZE		512
#define B44_DEF_RX_RING_PENDING		200
#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE		512
#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_TX_RING_SIZE)

#define TX_RING_GAP(BP)	\
	(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)						\
	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))
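
/* Worked example of the accounting above: with B44_TX_RING_SIZE = 512 and
 * tx_pending = 511, TX_RING_GAP() reserves one unused slot so that
 * tx_prod == tx_cons always means "empty", never "full". If tx_cons = 10
 * and tx_prod = 14, four descriptors are in flight and
 * TX_BUFFS_AVAIL() = 10 + 511 - 14 = 507. NEXT_TX() relies on the ring
 * size being a power of two so the index wraps with a cheap mask.
 */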
#define RX_PKT_OFFSET		30
#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET + 64)
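
/* Each receive buffer must hold the 30-byte rx_header the chip prepends
 * (RX_PKT_OFFSET) plus a maximum-size 1536-byte frame (1500-byte MTU plus
 * the Ethernet header and FCS); the remaining 64 bytes are slack,
 * presumably to absorb alignment of the DMA mapping.
 */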
/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE	0x400
#define B44_PATTERN_SIZE	0x80
#define B44_PMASK_BASE		0x600
#define B44_PMASK_SIZE		0x10
#define B44_MAX_PATTERNS	16
#define B44_ETHIPV6UDP_HLEN	62
#define B44_ETHIPV4UDP_HLEN	42
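
/* The two header lengths above are where a UDP payload starts:
 * 14 (Ethernet) + 20 (IPv4) + 8 (UDP) = 42 bytes, and
 * 14 (Ethernet) + 40 (IPv6) + 8 (UDP) = 62 bytes. They are used as
 * offsets when building the Wake-on-LAN magic-packet patterns below.
 */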
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 44xx/47xx 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
#ifdef CONFIG_B44_PCI
static const struct pci_device_id b44_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
	{ 0 } /* terminate list with empty entry */
};
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static struct pci_driver b44_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
};
#endif /* CONFIG_B44_PCI */

static const struct ssb_device_id b44_ssb_tbl[] = {
	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
	SSB_DEVTABLE_END
};
MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET		1
#define B44_FULL_RESET_SKIP_PHY	2
#define B44_PARTIAL_RESET	3
#define B44_CHIP_RESET_FULL	4
#define B44_CHIP_RESET_PARTIAL	5

static void b44_init_hw(struct b44 *, int);

static int dma_desc_align_mask;
static int dma_desc_sync_size;
static int instance;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)	# x,
B44_STAT_REG_DECLARE
#undef _B44
};
static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(sdev->dev, dma_base,
					 offset & dma_desc_align_mask,
					 dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(sdev->dev, dma_base,
				      offset & dma_desc_align_mask,
				      dma_desc_sync_size, dir);
}
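
/* Descriptors are small and cache lines are usually larger, so several
 * descriptors can share one line. The helpers above round the offset down
 * to a cache-line boundary (offset & dma_desc_align_mask) and sync
 * dma_desc_sync_size bytes, which b44_init() sets to at least one full
 * cache line, so syncing one descriptor never misses its neighbours'
 * line. E.g. with 32-byte lines, offset 0x48 is synced from 0x40.
 */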
static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return ssb_read32(bp->sdev, reg);
}

static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	ssb_write32(bp->sdev, reg, val);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		udelay(10);
	}
	if (i == timeout) {
		printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
		       "%lx to %s.\n",
		       bp->dev->name,
		       (unsigned int) bit, reg,
		       (clear ? "clear" : "set"));
		return -ENODEV;
	}
	return 0;
}
static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
			    (index << CAM_CTRL_INDEX_SHIFT)));

	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);

	val = br32(bp, B44_CAM_DATA_LO);

	data[2] = (val >> 24) & 0xFF;
	data[3] = (val >> 16) & 0xFF;
	data[4] = (val >> 8) & 0xFF;
	data[5] = (val >> 0) & 0xFF;

	val = br32(bp, B44_CAM_DATA_HI);

	data[0] = (val >> 8) & 0xFF;
	data[1] = (val >> 0) & 0xFF;
}

static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) << 8;
	val |= ((u32) data[5]) << 0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}
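
/* The CAM stores a MAC address split across two registers: CAM_DATA_HI
 * holds a valid bit plus the two most significant bytes (data[0], data[1]),
 * and CAM_DATA_LO holds the remaining four bytes, most significant first.
 * So 00:11:22:33:44:55 is written as HI = VALID|0x0011, LO = 0x22334455.
 */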
static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_ISTAT);
}

static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}
static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}

static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;

	return __b44_readphy(bp, bp->phy_addr, reg, val);
}

static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;

	return __b44_writephy(bp, bp->phy_addr, reg, val);
}
/* miilib interface */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = netdev_priv(dev);
	int rc = __b44_readphy(bp, phy_id, location, &val);
	if (rc)
		return -EIO;
	return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
			 int val)
{
	struct b44 *bp = netdev_priv(dev);
	__b44_writephy(bp, phy_id, location, val);
}
static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;
	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;
	udelay(100);
	err = b44_readphy(bp, MII_BMCR, &val);
	if (err == 0) {
		if (val & BMCR_RESET) {
			printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
			       bp->dev->name);
			err = -ENODEV;
		}
	}

	return err;
}
static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}
static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = 0;

	/* The driver supports only rx pause by default, because
	   the b44 mac tx pause mechanism generates excessive
	   pause frames.
	   Use ethtool to turn on b44 tx pause if necessary.
	 */
	if ((local & ADVERTISE_PAUSE_CAP) &&
	    (local & ADVERTISE_PAUSE_ASYM)) {
		if ((remote & LPA_PAUSE_ASYM) &&
		    !(remote & LPA_PAUSE_CAP))
			pause_enab |= B44_FLAG_RX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}
#ifdef SSB_DRIVER_MIPS
extern char *nvram_get(char *name);
static void b44_wap54g10_workaround(struct b44 *bp)
{
	const char *str;
	u32 val;
	int err;

	/*
	 * workaround for bad hardware design in Linksys WAP54G v1.0
	 * see https://dev.openwrt.org/ticket/146
	 * check and reset bit "isolate"
	 */
	str = nvram_get("boardnum");
	if (!str)
		return;
	if (simple_strtoul(str, NULL, 0) == 2) {
		err = __b44_readphy(bp, 0, MII_BMCR, &val);
		if (err)
			goto error;
		if (!(val & BMCR_ISOLATE))
			return;
		val &= ~BMCR_ISOLATE;
		err = __b44_writephy(bp, 0, MII_BMCR, val);
		if (err)
			goto error;
	}
	return;
error:
	printk(KERN_WARNING PFX "PHY: cannot reset MII transceiver isolate bit.\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif
static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	b44_wap54g10_workaround(bp);

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;
	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not. So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}
static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u32 *val;

	val = &bp->hw_stats.tx_good_octets;
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	/* Pad */
	reg += 8*4UL;

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}
}
static void b44_link_report(struct b44 *bp)
{
	if (!netif_carrier_ok(bp->dev)) {
		printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
	} else {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       bp->dev->name,
		       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
		       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

		printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
		       "%s for RX.\n",
		       bp->dev->name,
		       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
		       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
	}
}
static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
		bp->flags |= B44_FLAG_100_BASE_T;
		bp->flags |= B44_FLAG_FULL_DUPLEX;
		if (!netif_carrier_ok(bp->dev)) {
			u32 val = br32(bp, B44_TX_CTRL);
			val |= TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		}
		return;
	}

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
			       bp->dev->name);
		if (bmsr & BMSR_JCD)
			printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
			       bp->dev->name);
	}
}
static void b44_timer(unsigned long __opaque)
{
	struct b44 *bp = (struct b44 *) __opaque;

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}
static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;

	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		dma_unmap_single(bp->sdev->dev,
				 rp->mapping,
				 skb->len,
				 DMA_TO_DEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}
/* Works like this. This chip writes a "struct rx_header" 30 bytes
 * before the DMA address you give it. So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
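
/* Resulting buffer layout (offsets relative to the mapped DMA address):
 *
 *   0                             30 (RX_PKT_OFFSET)
 *   +------------------------------+-------------------------------+
 *   | struct rx_header (from chip) | packet data (chip DMA target) |
 *   +------------------------------+-------------------------------+
 *
 * The descriptor's address field points at offset 30, and the chip writes
 * the header backwards from there into the first 30 bytes.
 */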
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = dma_map_single(bp->sdev->dev, skb->data,
				 RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);

	/* Hardware bug work-around, the chip is unable to do PCI DMA
	   to/from anything above 1GB :-( */
	if (dma_mapping_error(mapping) ||
	    mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
		/* Sigh... */
		if (!dma_mapping_error(mapping))
			dma_unmap_single(bp->sdev->dev, mapping,
					 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = dma_map_single(bp->sdev->dev, skb->data,
					 RX_PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		if (dma_mapping_error(mapping) ||
		    mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
			if (!dma_mapping_error(mapping))
				dma_unmap_single(bp->sdev->dev, mapping, RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
	}

	rh = (struct rx_header *) skb->data;
	skb_reserve(skb, RX_PKT_OFFSET);

	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	map->mapping = mapping;

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET));
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(dp),
					     DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}
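
/* Note on the retry path above: the second allocation asks for GFP_DMA,
 * which on x86 comes from the low 16MB DMA zone, so the remapped buffer
 * should fall safely below the chip's 1GB (30-bit) DMA limit.
 */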
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	__le32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	dest_map->mapping = src_map->mapping;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
					  src_idx * sizeof(src_desc),
					  DMA_BIDIRECTIONAL);

	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(dest_desc),
					     DMA_BIDIRECTIONAL);

	dma_sync_single_for_device(bp->sdev->dev, le32_to_cpu(src_desc->addr),
				   RX_PKT_BUF_SZ,
				   DMA_FROM_DEVICE);
}
static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = rp->mapping;
		struct rx_header *rh;
		u16 len;

		dma_sync_single_for_cpu(bp->sdev->dev, map,
					RX_PKT_BUF_SZ,
					DMA_FROM_DEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			dma_unmap_single(bp->sdev->dev, map,
					 skb_size, DMA_FROM_DEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + RX_PKT_OFFSET);
			skb_pull(skb, RX_PKT_OFFSET);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
							 copy_skb->data, len);
			skb = copy_skb;
		}
		skb->ip_summed = CHECKSUM_NONE;
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		bp->dev->last_rx = jiffies;
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
		    (B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}
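
/* RX strategy in the loop above: large frames (len > RX_COPY_THRESHOLD)
 * keep the DMA buffer and hand it to the stack, replacing it in the ring
 * with a freshly allocated skb; small frames are copied into a new skb
 * (reserving 2 bytes so the IP header lands 4-byte aligned) and the
 * original buffer is recycled in place, which avoids remapping costs.
 */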
static int b44_poll(struct napi_struct *napi, int budget)
{
	struct b44 *bp = container_of(napi, struct b44, napi);
	struct net_device *netdev = bp->dev;
	int work_done;

	spin_lock_irq(&bp->lock);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	spin_unlock_irq(&bp->lock);

	work_done = 0;
	if (bp->istat & ISTAT_RX)
		work_done += b44_rx(bp, budget);

	if (bp->istat & ISTAT_ERRORS) {
		unsigned long flags;

		spin_lock_irqsave(&bp->lock, flags);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		work_done = 0;
	}

	if (work_done < budget) {
		netif_rx_complete(netdev, napi);
		b44_enable_ints(bp);
	}

	return work_done;
}
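
/* Standard NAPI contract: only when less work than the budget was done is
 * the poll completed and the interrupt mask restored via b44_enable_ints();
 * otherwise the core keeps calling b44_poll() with interrupts still masked.
 */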
static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* The interrupt mask register controls which interrupt bits
	 * will actually raise an interrupt to the CPU when set by hw/firmware,
	 * but doesn't mask off the bits.
	 */
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			printk(KERN_INFO "%s: late interrupt.\n", dev->name);
			goto irq_ack;
		}

		if (netif_rx_schedule_prep(dev, &bp->napi)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__netif_rx_schedule(dev, &bp->napi);
		} else {
			printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
			       dev->name);
		}

irq_ack:
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}
static void b44_tx_timeout(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}
static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int rc = NETDEV_TX_OK;
	dma_addr_t mapping;
	u32 len, entry, ctrl;

	len = skb->len;
	spin_lock_irq(&bp->lock);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		goto err_out;
	}

	mapping = dma_map_single(bp->sdev->dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
		struct sk_buff *bounce_skb;

		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		if (!dma_mapping_error(mapping))
			dma_unmap_single(bp->sdev->dev, mapping, len,
					 DMA_TO_DEVICE);

		bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb)
			goto err_out;

		mapping = dma_map_single(bp->sdev->dev, bounce_skb->data,
					 len, DMA_TO_DEVICE);
		if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
			if (!dma_mapping_error(mapping))
				dma_unmap_single(bp->sdev->dev, mapping,
						 len, DMA_TO_DEVICE);
			dev_kfree_skb_any(bounce_skb);
			goto err_out;
		}

		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	bp->tx_buffers[entry].mapping = mapping;

	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

out_unlock:
	spin_unlock_irq(&bp->lock);

	return rc;

err_out:
	rc = NETDEV_TX_BUSY;
	goto out_unlock;
}
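
/* Each packet above occupies exactly one descriptor, so SOF and EOF are
 * both set together with IOC (interrupt on completion); EOT marks the
 * physically last ring entry so the chip wraps. The doubled DMATX_PTR
 * write and the readback are chip-specific workarounds:
 * B44_FLAG_BUGGY_TXPTR covers parts that may miss a single mailbox write,
 * and B44_FLAG_REORDER_BUG flushes posted writes on buses that reorder them.
 */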
static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = netdev_priv(dev);

	if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	dev->mtu = new_mtu;
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver. bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		dma_unmap_single(bp->sdev->dev, rp->mapping, RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		dma_unmap_single(bp->sdev->dev, rp->mapping, rp->skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dev, bp->rx_ring_dma,
					   DMA_TABLE_BYTES,
					   DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dev, bp->tx_ring_dma,
					   DMA_TABLE_BYTES,
					   DMA_TO_DEVICE);

	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;
	if (bp->rx_ring) {
		if (bp->flags & B44_FLAG_RX_RING_HACK) {
			dma_unmap_single(bp->sdev->dev, bp->rx_ring_dma,
					 DMA_TABLE_BYTES,
					 DMA_BIDIRECTIONAL);
			kfree(bp->rx_ring);
		} else
			dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
					  bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
		bp->flags &= ~B44_FLAG_RX_RING_HACK;
	}
	if (bp->tx_ring) {
		if (bp->flags & B44_FLAG_TX_RING_HACK) {
			dma_unmap_single(bp->sdev->dev, bp->tx_ring_dma,
					 DMA_TABLE_BYTES,
					 DMA_TO_DEVICE);
			kfree(bp->tx_ring);
		} else
			dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
					  bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
		bp->flags &= ~B44_FLAG_TX_RING_HACK;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down. Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
{
	int size;

	size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kzalloc(size, gfp);
	if (!bp->rx_buffers)
		goto out_err;

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kzalloc(size, gfp);
	if (!bp->tx_buffers)
		goto out_err;

	size = DMA_TABLE_BYTES;
	bp->rx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->rx_ring_dma, gfp);
	if (!bp->rx_ring) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary... */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		rx_ring = kzalloc(size, gfp);
		if (!rx_ring)
			goto out_err;

		rx_ring_dma = dma_map_single(bp->sdev->dev, rx_ring,
					     DMA_TABLE_BYTES,
					     DMA_BIDIRECTIONAL);

		if (dma_mapping_error(rx_ring_dma) ||
			rx_ring_dma + size > DMA_30BIT_MASK) {
			kfree(rx_ring);
			goto out_err;
		}

		bp->rx_ring = rx_ring;
		bp->rx_ring_dma = rx_ring_dma;
		bp->flags |= B44_FLAG_RX_RING_HACK;
	}

	bp->tx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->tx_ring_dma, gfp);
	if (!bp->tx_ring) {
		/* Allocation may have failed due to dma_alloc_coherent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary... */
		struct dma_desc *tx_ring;
		dma_addr_t tx_ring_dma;

		tx_ring = kzalloc(size, gfp);
		if (!tx_ring)
			goto out_err;

		tx_ring_dma = dma_map_single(bp->sdev->dev, tx_ring,
					     DMA_TABLE_BYTES,
					     DMA_TO_DEVICE);

		if (dma_mapping_error(tx_ring_dma) ||
			tx_ring_dma + size > DMA_30BIT_MASK) {
			kfree(tx_ring);
			goto out_err;
		}

		bp->tx_ring = tx_ring;
		bp->tx_ring_dma = tx_ring_dma;
		bp->flags |= B44_FLAG_TX_RING_HACK;
	}

	return 0;

out_err:
	b44_free_consistent(bp);
	return -ENOMEM;
}
/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(bp, reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(bp, reg);
}
/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp, int reset_kind)
{
	struct ssb_device *sdev = bp->sdev;

	if (ssb_device_is_enabled(bp->sdev)) {
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	} else {
		ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
	}

	ssb_device_enable(bp->sdev, 0);
	b44_clear_stats(bp);

	/*
	 * Don't enable PHY if we are doing a partial reset;
	 * we are probably going to power down.
	 */
	if (reset_kind == B44_CHIP_RESET_PARTIAL)
		return;

	switch (sdev->bus->bustype) {
	case SSB_BUSTYPE_SSB:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (((ssb_clockspeed(sdev->bus) + (B44_MDC_RATIO / 2)) / B44_MDC_RATIO)
		     & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCI:
	case SSB_BUSTYPE_PCMCIA:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (0x0d & MDIO_CTRL_MAXF_MASK)));
		break;
	}

	br32(bp, B44_MDIO_CTRL);

	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}
/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	/* reset PHY */
	b44_phy_reset(bp);
	/* power down PHY */
	printk(KERN_INFO PFX "%s: powering down PHY\n", bp->dev->name);
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
	/* now reset the chip, but without enabling the MAC&PHY
	 * part of it. This has to be done _after_ we shut down the PHY */
	b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}
/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
	struct b44 *bp = netdev_priv(dev);
	struct sockaddr *addr = p;
	u32 val;

	if (netif_running(dev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&bp->lock);

	val = br32(bp, B44_RXCONFIG);
	if (!(val & RXCONFIG_CAM_ABSENT))
		__b44_set_mac_addr(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing. Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
	u32 val;

	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
	if (reset_kind == B44_FULL_RESET) {
		b44_phy_reset(bp);
		b44_setup_phy(bp);
	}

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too. */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
	if (reset_kind == B44_PARTIAL_RESET) {
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
	} else {
		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
		bp->rx_prod = bp->rx_pending;

		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	}

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}
static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp, GFP_KERNEL);
	if (err)
		goto out;

	napi_enable(&bp->napi);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	b44_check_phy(bp);

	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0)) {
		napi_disable(&bp->napi);
		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
		b44_free_rings(bp);
		b44_free_consistent(bp);
		goto out;
	}

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);
	netif_start_queue(dev);
out:
	return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
	u32 i;
	u32 *pattern = (u32 *) pp;

	for (i = 0; i < bytes; i += sizeof(u32)) {
		bw32(bp, B44_FILT_ADDR, table_offset + i);
		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
	}
}
static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
	int magicsync = 6;
	int k, j, len = offset;
	int ethaddr_bytes = ETH_ALEN;

	memset(ppattern + offset, 0xff, magicsync);
	for (j = 0; j < magicsync; j++)
		set_bit(len++, (unsigned long *) pmask);

	for (j = 0; j < B44_MAX_PATTERNS; j++) {
		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
			ethaddr_bytes = ETH_ALEN;
		else
			ethaddr_bytes = B44_PATTERN_SIZE - len;
		if (ethaddr_bytes <= 0)
			break;
		for (k = 0; k < ethaddr_bytes; k++) {
			ppattern[offset + magicsync +
				(j * ETH_ALEN) + k] = macaddr[k];
			len++;
			set_bit(len, (unsigned long *) pmask);
		}
	}
	return len - 1;
}
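
/* A Wake-on-LAN magic packet is 6 bytes of 0xFF followed by 16 copies of
 * the interface MAC address. The loop above writes the sync bytes at
 * 'offset' (the end of the protocol headers) and then as many MAC copies
 * as fit in the 128-byte pattern buffer, setting a mask bit for every
 * byte the hardware must actually compare.
 */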
/* Set up magic packet patterns in the b44 WOL
 * pattern matching filter.
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{
	u32 val;
	int plen0, plen1, plen2;
	u8 *pwol_pattern;
	u8 pwol_mask[B44_PMASK_SIZE];

	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
	if (!pwol_pattern) {
		printk(KERN_ERR PFX "Memory not available for WOL\n");
		return;
	}

	/* Ipv4 magic packet pattern - pattern 0.*/
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV4UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

	/* Raw ethernet II magic packet pattern - pattern 1 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  ETH_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE);

	/* Ipv6 magic packet pattern - pattern 2 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV6UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

	kfree(pwol_pattern);

	/* set these patterns' lengths: one less than each real length */
	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
	bw32(bp, B44_WKUP_LEN, val);

	/* enable wakeup pattern matching */
	val = br32(bp, B44_DEVCTRL);
	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
}
#ifdef CONFIG_B44_PCI
static void b44_setup_wol_pci(struct b44 *bp)
{
	u16 val;

	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
	}
}
#else
static inline void b44_setup_wol_pci(struct b44 *bp) { }
#endif /* CONFIG_B44_PCI */
static void b44_setup_wol(struct b44 *bp)
{
	u32 val;

	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

	if (bp->flags & B44_FLAG_B0_ANDLATER) {

		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

		val = bp->dev->dev_addr[2] << 24 |
			bp->dev->dev_addr[3] << 16 |
			bp->dev->dev_addr[4] << 8 |
			bp->dev->dev_addr[5];
		bw32(bp, B44_ADDR_LO, val);

		val = bp->dev->dev_addr[0] << 8 |
			bp->dev->dev_addr[1];
		bw32(bp, B44_ADDR_HI, val);

		val = br32(bp, B44_DEVCTRL);
		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);

	} else {
		b44_setup_pseudo_magicp(bp);
	}
	b44_setup_wol_pci(bp);
}
static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	napi_disable(&bp->napi);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_free_rings(bp);
	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	b44_free_consistent(bp);

	return 0;
}
static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->stats;
	struct b44_hw_stats *hwstat = &bp->hw_stats;

	/* Convert HW stats into netdevice stats. */
	nstat->rx_packets = hwstat->rx_pkts;
	nstat->tx_packets = hwstat->tx_pkts;
	nstat->rx_bytes   = hwstat->rx_octets;
	nstat->tx_bytes   = hwstat->tx_octets;
	nstat->tx_errors  = (hwstat->tx_jabber_pkts +
			     hwstat->tx_oversize_pkts +
			     hwstat->tx_underruns +
			     hwstat->tx_excessive_cols +
			     hwstat->tx_late_cols);
	nstat->multicast  = hwstat->tx_multicast_pkts;
	nstat->collisions = hwstat->tx_total_cols;

	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_undersize);
	nstat->rx_over_errors   = hwstat->rx_missed_pkts;
	nstat->rx_frame_errors  = hwstat->rx_align_errs;
	nstat->rx_crc_errors    = hwstat->rx_crc_errs;
	nstat->rx_errors        = (hwstat->rx_jabber_pkts +
				   hwstat->rx_oversize_pkts +
				   hwstat->rx_missed_pkts +
				   hwstat->rx_crc_align_errs +
				   hwstat->rx_undersize +
				   hwstat->rx_crc_errs +
				   hwstat->rx_align_errs +
				   hwstat->rx_symbol_errs);

	nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
	/* Carrier lost counter seems to be broken for some devices */
	nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif

	return nstat;
}
static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
	struct dev_mc_list *mclist;
	int i, num_ents;

	num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
	mclist = dev->mc_list;
	for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
		__b44_cam_write(bp, mclist->dmi_addr, i + 1);
	}
	return i + 1;
}
static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
	} else {
		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
		int i = 1;

		__b44_set_mac_addr(bp);

		if ((dev->flags & IFF_ALLMULTI) ||
		    (dev->mc_count > B44_MCAST_TABLE_SIZE))
			val |= RXCONFIG_ALLMULTI;
		else
			i = __b44_load_mcast(bp, dev);

		for (; i < 64; i++)
			__b44_cam_write(bp, zero, i);

		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}
static u32 b44_get_msglevel(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	return bp->msg_enable;
}

static void b44_set_msglevel(struct net_device *dev, u32 value)
{
	struct b44 *bp = netdev_priv(dev);
	bp->msg_enable = value;
}
static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct b44 *bp = netdev_priv(dev);
	struct ssb_bus *bus = bp->sdev->bus;

	strncpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strncpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	switch (bus->bustype) {
	case SSB_BUSTYPE_PCI:
		strncpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SSB:
		strncpy(info->bus_info, "SSB", sizeof(info->bus_info));
		break;
	}
}
static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r;

	spin_lock_irq(&bp->lock);
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);
	r = -EINVAL;
	if (bmcr & BMCR_ANENABLE) {
		b44_writephy(bp, MII_BMCR,
			     bmcr | BMCR_ANRESTART);
		r = 0;
	}
	spin_unlock_irq(&bp->lock);

	return r;
}
static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);
	cmd->supported |= (SUPPORTED_100baseT_Half |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Half |
			  SUPPORTED_10baseT_Full |
			  SUPPORTED_MII);

	cmd->advertising = 0;
	if (bp->flags & B44_FLAG_ADV_10HALF)
		cmd->advertising |= ADVERTISED_10baseT_Half;
	if (bp->flags & B44_FLAG_ADV_10FULL)
		cmd->advertising |= ADVERTISED_10baseT_Full;
	if (bp->flags & B44_FLAG_ADV_100HALF)
		cmd->advertising |= ADVERTISED_100baseT_Half;
	if (bp->flags & B44_FLAG_ADV_100FULL)
		cmd->advertising |= ADVERTISED_100baseT_Full;
	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
		SPEED_100 : SPEED_10;
	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = 0;
	cmd->phy_address = bp->phy_addr;
	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;
	if (cmd->autoneg == AUTONEG_ENABLE)
		cmd->advertising |= ADVERTISED_Autoneg;
	if (!netif_running(dev)) {
		cmd->speed = 0;
		cmd->duplex = 0xff;
	}
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	/* We do not support gigabit. */
	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (cmd->advertising &
		    (ADVERTISED_1000baseT_Half |
		     ADVERTISED_1000baseT_Full))
			return -EINVAL;
	} else if ((cmd->speed != SPEED_100 &&
		    cmd->speed != SPEED_10) ||
		   (cmd->duplex != DUPLEX_HALF &&
		    cmd->duplex != DUPLEX_FULL)) {
		return -EINVAL;
	}

	spin_lock_irq(&bp->lock);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		bp->flags &= ~(B44_FLAG_FORCE_LINK |
			       B44_FLAG_100_BASE_T |
			       B44_FLAG_FULL_DUPLEX |
			       B44_FLAG_ADV_10HALF |
			       B44_FLAG_ADV_10FULL |
			       B44_FLAG_ADV_100HALF |
			       B44_FLAG_ADV_100FULL);
		if (cmd->advertising == 0) {
			bp->flags |= (B44_FLAG_ADV_10HALF |
				      B44_FLAG_ADV_10FULL |
				      B44_FLAG_ADV_100HALF |
				      B44_FLAG_ADV_100FULL);
		} else {
			if (cmd->advertising & ADVERTISED_10baseT_Half)
				bp->flags |= B44_FLAG_ADV_10HALF;
			if (cmd->advertising & ADVERTISED_10baseT_Full)
				bp->flags |= B44_FLAG_ADV_10FULL;
			if (cmd->advertising & ADVERTISED_100baseT_Half)
				bp->flags |= B44_FLAG_ADV_100HALF;
			if (cmd->advertising & ADVERTISED_100baseT_Full)
				bp->flags |= B44_FLAG_ADV_100FULL;
		}
	} else {
		bp->flags |= B44_FLAG_FORCE_LINK;
		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
		if (cmd->speed == SPEED_100)
			bp->flags |= B44_FLAG_100_BASE_T;
		if (cmd->duplex == DUPLEX_FULL)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
	}

	if (netif_running(dev))
		b44_setup_phy(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}
static void b44_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
	ering->rx_pending = bp->rx_pending;

	/* XXX ethtool lacks a tx_max_pending, oops... */
}
static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}
static void b44_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	epause->autoneg =
		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
	epause->rx_pause =
		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
	epause->tx_pause =
		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
}
static int b44_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
	else
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_TX_PAUSE;
	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET);
	} else {
		__b44_set_flow_ctrl(bp, bp->flags);
	}
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}
static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch(stringset) {
	case ETH_SS_STATS:
		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
		break;
	}
}

static int b44_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(b44_gstrings);
	default:
		return -EOPNOTSUPP;
	}
}
static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	u32 *val = &bp->hw_stats.tx_good_octets;
	u32 i;

	spin_lock_irq(&bp->lock);

	b44_stats_update(bp);

	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
		*data++ = *val++;

	spin_unlock_irq(&bp->lock);
}
static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	wol->supported = WAKE_MAGIC;
	if (bp->flags & B44_FLAG_WOL_ENABLE)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		bp->flags |= B44_FLAG_WOL_ENABLE;
	else
		bp->flags &= ~B44_FLAG_WOL_ENABLE;
	spin_unlock_irq(&bp->lock);

	return 0;
}
static const struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.get_settings		= b44_get_settings,
	.set_settings		= b44_set_settings,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_sset_count		= b44_get_sset_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
};
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct b44 *bp = netdev_priv(dev);
	int err = -EINVAL;

	if (!netif_running(dev))
		goto out;

	spin_lock_irq(&bp->lock);
	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&bp->lock);
out:
	return err;
}
static int __devinit b44_get_invariants(struct b44 *bp)
{
	struct ssb_device *sdev = bp->sdev;
	int err = 0;
	u8 *addr;

	bp->dma_offset = ssb_dma_translation(sdev);

	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
	    instance > 1) {
		addr = sdev->bus->sprom.et1mac;
		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
	} else {
		addr = sdev->bus->sprom.et0mac;
		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
	}
	memcpy(bp->dev->dev_addr, addr, 6);

	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
		printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);

	bp->imask = IMASK_DEF;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	*/

	if (bp->sdev->id.revision >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;

	return err;
}
static int __devinit b44_init_one(struct ssb_device *sdev,
				  const struct ssb_device_id *ent)
{
	static int b44_version_printed = 0;
	struct net_device *dev;
	struct b44 *bp;
	int err;
	DECLARE_MAC_BUF(mac);

	instance++;

	if (b44_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		dev_err(sdev->dev, "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, sdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->sdev = sdev;
	bp->dev = dev;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->open = b44_open;
	dev->stop = b44_close;
	dev->hard_start_xmit = b44_start_xmit;
	dev->get_stats = b44_get_stats;
	dev->set_multicast_list = b44_set_rx_mode;
	dev->set_mac_address = b44_set_mac_addr;
	dev->do_ioctl = b44_ioctl;
	dev->tx_timeout = b44_tx_timeout;
	netif_napi_add(dev, &bp->napi, b44_poll, 64);
	dev->watchdog_timeo = B44_TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = b44_poll_controller;
#endif
	dev->change_mtu = b44_change_mtu;
	dev->irq = sdev->irq;
	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

	netif_carrier_off(dev);

	err = ssb_bus_powerup(sdev->bus, 0);
	if (err) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		goto err_out_free_dev;
	}
	err = ssb_dma_set_mask(sdev, DMA_30BIT_MASK);
	if (err) {
		dev_err(sdev->dev,
			"Required 30BIT DMA mask unsupported by the system.\n");
		goto err_out_powerdown;
	}
	err = b44_get_invariants(bp);
	if (err) {
		dev_err(sdev->dev,
			"Problem fetching invariants of chip, aborting.\n");
		goto err_out_powerdown;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mii_read;
	bp->mii_if.mdio_write = b44_mii_write;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		dev_err(sdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_powerdown;
	}

	ssb_set_drvdata(sdev, dev);

	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.
	 */
	b44_chip_reset(bp, B44_CHIP_RESET_FULL);

	printk(KERN_INFO "%s: Broadcom 44xx/47xx 10/100BaseT Ethernet %s\n",
	       dev->name, print_mac(mac, dev->dev_addr));

	return 0;

err_out_powerdown:
	ssb_bus_may_powerdown(sdev->bus);

err_out_free_dev:
	free_netdev(dev);

out:
	return err;
}
static void __devexit b44_remove_one(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);

	unregister_netdev(dev);
	ssb_bus_may_powerdown(sdev->bus);
	free_netdev(dev);
	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	ssb_set_drvdata(sdev, NULL);
}
static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	return 0;
}
static int b44_resume(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);
	int rc = 0;

	rc = ssb_bus_powerup(sdev->bus, 0);
	if (rc) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		return rc;
	}

	if (!netif_running(dev))
		return 0;

	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
		return rc;
	}

	spin_lock_irq(&bp->lock);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_device_attach(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}
static struct ssb_driver b44_ssb_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_ssb_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};
static inline int b44_pci_init(void)
{
	int err = 0;
#ifdef CONFIG_B44_PCI
	err = ssb_pcihost_register(&b44_pci_driver);
#endif
	return err;
}

static inline void b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
	ssb_pcihost_unregister(&b44_pci_driver);
#endif
}
static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();
	int err;

	/* Set up parameters for syncing RX/TX DMA descriptors */
	dma_desc_align_mask = ~(dma_desc_align_size - 1);
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
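
	/* Example: with 32-byte cache lines, dma_desc_align_mask becomes
	 * ~31 = 0xffffffe0 and dma_desc_sync_size is max(32, 8) = 32, so
	 * the partial-sync helpers above always operate on whole cache
	 * lines (a dma_desc is 8 bytes: two 32-bit words, ctrl and addr).
	 */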
	err = b44_pci_init();
	if (err)
		return err;
	err = ssb_driver_register(&b44_ssb_driver);
	if (err)
		b44_pci_exit();
	return err;
}
static void __exit b44_cleanup(void)
{
	ssb_driver_unregister(&b44_ssb_driver);
	b44_pci_exit();
}

module_init(b44_init);
module_exit(b44_cleanup);