/*
 * Intel IXP4xx Ethernet driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * Ethernet port config (0x00 is not present on IXP42X):
 *
 * logical port		0x00		0x10		0x20
 * NPE			0 (NPE-A)	1 (NPE-B)	2 (NPE-C)
 * physical PortId	2		0		1
 * TX queue		23		24		25
 * RX-free queue	26		27		28
 * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
 *
 * Queue entries:
 * bits 0 -> 1	- NPE ID (RX and TX-done)
 * bits 0 -> 2	- priority (TX, per 802.1D)
 * bits 3 -> 4	- port ID (user-set?)
 * bits 5 -> 31	- physical descriptor address
 */
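/*
 * A queue entry is thus a descriptor's physical address with the low five
 * bits borrowed for metadata; e.g. an entry of 0x01F00064 refers to the
 * descriptor at 0x01F00060 (entry & ~0x1F) produced by NPE 0 (entry & 3).
 * The RX and TX-done paths below rely on this when they mask entries with
 * ~0x1F before converting them back to descriptor indices.
 */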
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <mach/npe.h>
#include <mach/qmgr.h>
#define DEBUG_QUEUES		0
#define DEBUG_DESC		0
#define DEBUG_RX		0
#define DEBUG_TX		0
#define DEBUG_PKT_BYTES		0
#define DEBUG_MDIO		0
#define DEBUG_CLOSE		0
#define DRV_NAME		"ixp4xx_eth"

#define MAX_NPES		3

#define RX_DESCS		64 /* also length of all RX queues */
#define TX_DESCS		16 /* also length of all TX queues */
#define TXDONE_QUEUE_LEN	64 /* dwords */

#define POOL_ALLOC_SIZE		(sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define REGS_SIZE		0x1000
#define MAX_MRU			1536 /* 0x600 */
#define RX_BUFF_SIZE		ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)

#define NAPI_WEIGHT		16
#define MDIO_INTERVAL		(3 * HZ)
#define MAX_MDIO_RETRIES	100 /* microseconds, typically 30 cycles */
#define MAX_CLOSE_WAIT		1000 /* microseconds, typically 2-3 cycles */

#define NPE_ID(port_id)		((port_id) >> 4)
#define PHYSICAL_ID(port_id)	((NPE_ID(port_id) + 2) % 3)
#define TX_QUEUE(port_id)	(NPE_ID(port_id) + 23)
#define RXFREE_QUEUE(port_id)	(NPE_ID(port_id) + 26)
#define TXDONE_QUEUE		31
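/*
 * These macros encode the port table from the header comment: the NPE
 * number lives in bits 4-5 of the logical port ID, so e.g. the NPE-B port
 * (0x10) maps to NPE 1, physical PortId 0, TX queue 24 and RX-free queue 27.
 */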
/* TX Control Registers */
#define TX_CNTRL0_TX_EN		0x01
#define TX_CNTRL0_HALFDUPLEX	0x02
#define TX_CNTRL0_RETRY		0x04
#define TX_CNTRL0_PAD_EN	0x08
#define TX_CNTRL0_APPEND_FCS	0x10
#define TX_CNTRL0_2DEFER	0x20
#define TX_CNTRL0_RMII		0x40 /* reduced MII */
#define TX_CNTRL1_RETRIES	0x0F /* 4 bits */

/* RX Control Registers */
#define RX_CNTRL0_RX_EN		0x01
#define RX_CNTRL0_PADSTRIP_EN	0x02
#define RX_CNTRL0_SEND_FCS	0x04
#define RX_CNTRL0_PAUSE_EN	0x08
#define RX_CNTRL0_LOOP_EN	0x10
#define RX_CNTRL0_ADDR_FLTR_EN	0x20
#define RX_CNTRL0_RX_RUNT_EN	0x40
#define RX_CNTRL0_BCAST_DIS	0x80
#define RX_CNTRL1_DEFER_EN	0x01

/* Core Control Register */
#define CORE_RESET		0x01
#define CORE_RX_FIFO_FLUSH	0x02
#define CORE_TX_FIFO_FLUSH	0x04
#define CORE_SEND_JAM		0x08
#define CORE_MDC_EN		0x10 /* MDIO using NPE-B ETH-0 only */

#define DEFAULT_TX_CNTRL0	(TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY |	\
				 TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
				 TX_CNTRL0_2DEFER)
#define DEFAULT_RX_CNTRL0	RX_CNTRL0_RX_EN
#define DEFAULT_CORE_CNTRL	CORE_MDC_EN
/* NPE message codes */
#define NPE_GETSTATUS			0x00
#define NPE_EDB_SETPORTADDRESS		0x01
#define NPE_EDB_GETMACADDRESSDATABASE	0x02
#define NPE_EDB_SETMACADDRESSSDATABASE	0x03
#define NPE_GETSTATS			0x04
#define NPE_RESETSTATS			0x05
#define NPE_SETMAXFRAMELENGTHS		0x06
#define NPE_VLAN_SETRXTAGMODE		0x07
#define NPE_VLAN_SETDEFAULTRXVID	0x08
#define NPE_VLAN_SETPORTVLANTABLEENTRY	0x09
#define NPE_VLAN_SETPORTVLANTABLERANGE	0x0A
#define NPE_VLAN_SETRXQOSENTRY		0x0B
#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
#define NPE_STP_SETBLOCKINGSTATE	0x0D
#define NPE_FW_SETFIREWALLMODE		0x0E
#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
#define NPE_PC_SETAPMACTABLE		0x11
#define NPE_SETLOOPBACK_MODE		0x12
#define NPE_PC_SETBSSIDTABLE		0x13
#define NPE_ADDRESS_FILTER_CONFIG	0x14
#define NPE_APPENDFCSCONFIG		0x15
#define NPE_NOTIFY_MAC_RECOVERY_DONE	0x16
#define NPE_MAC_RECOVERY_START		0x17
#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
#define free_buffer_irq dev_kfree_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
#define free_buffer_irq kfree
#endif
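/*
 * buffer_t abstracts over the two cases: on big-endian ARM the NPE can DMA
 * to/from skb data directly, so RX/TX buffers are skbs; on little-endian
 * builds frame data must be word-swapped, so plain kmalloc() buffers are
 * used and copied to/from skbs with memcpy_swab32() (see eth_poll() and
 * eth_xmit() below).
 */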
struct eth_regs {
	u32 tx_control[2], __res1[2];		/* 000 */
	u32 rx_control[2], __res2[2];		/* 010 */
	u32 random_seed, __res3[3];		/* 020 */
	u32 partial_empty_threshold, __res4;	/* 030 */
	u32 partial_full_threshold, __res5;	/* 038 */
	u32 tx_start_bytes, __res6[3];		/* 040 */
	u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
	u32 tx_2part_deferral[2], __res8[2];	/* 060 */
	u32 slot_time, __res9[3];		/* 070 */
	u32 mdio_command[4];			/* 080 */
	u32 mdio_status[4];			/* 090 */
	u32 mcast_mask[6], __res10[2];		/* 0A0 */
	u32 mcast_addr[6], __res11[2];		/* 0C0 */
	u32 int_clock_threshold, __res12[3];	/* 0E0 */
	u32 hw_addr[6], __res13[61];		/* 0F0 */
	u32 core_control;			/* 1FC */
};
struct port {
	struct resource *mem_res;
	struct eth_regs __iomem *regs;
	struct npe *npe;
	struct net_device *netdev;
	struct napi_struct napi;
	struct phy_device *phydev;
	struct eth_plat_info *plat;
	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
	struct desc *desc_tab;	/* coherent */
	u32 desc_tab_phys;
	int id;			/* logical port ID */
	int speed, duplex;
};
/* NPE message structure */
struct msg {
#ifdef __ARMEB__
	u8 cmd, eth_id, byte2, byte3;
	u8 byte4, byte5, byte6, byte7;
#else
	u8 byte3, byte2, eth_id, cmd;
	u8 byte7, byte6, byte5, byte4;
#endif
};
/* Ethernet packet descriptor */
struct desc {
	u32 next;		/* pointer to next buffer, unused */

#ifdef __ARMEB__
	u16 buf_len;		/* buffer length */
	u16 pkt_len;		/* packet length */
	u32 data;		/* pointer to data buffer in RAM */
	u8 dest_id, src_id;
	u16 flags;
	u8 qos, padlen;
	u16 vlan_tci;
#else
	u16 pkt_len;		/* packet length */
	u16 buf_len;		/* buffer length */
	u32 data;		/* pointer to data buffer in RAM */
	u8 src_id, dest_id;
	u16 flags;
	u8 padlen, qos;
	u16 vlan_tci;
#endif

#ifdef __ARMEB__
	u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
	u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
	u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
#else
	u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
	u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
	u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
#endif
};
#define rx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 (n) * sizeof(struct desc))
#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])

#define tx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 ((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])
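/*
 * All descriptors live in one coherent DMA block: RX descriptors occupy
 * slots 0..RX_DESCS-1 of desc_tab, TX descriptors the following TX_DESCS
 * slots.  The macros above convert a slot number to its virtual or bus
 * address; queue_get_desc() performs the reverse mapping.
 */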
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
	int i;

	for (i = 0; i < cnt; i++)
		dest[i] = swab32(src[i]);
}
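/*
 * The NPEs handle frame data as big-endian 32-bit words, so on
 * little-endian builds every RX and TX buffer goes through
 * memcpy_swab32(); big-endian builds can DMA skb data directly.
 */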
static spinlock_t mdio_lock;
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
static struct mii_bus *mdio_bus;
static int ports_open;
static struct port *npe_port_tab[MAX_NPES];
static struct dma_pool *dma_pool;
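/*
 * MDIO is implemented on top of four byte-wide mdio_command registers:
 * [0]-[1] hold the 16-bit data word (writes only), [2] holds the low PHY
 * address bits and the register number, and [3] holds the remaining PHY
 * address bits, the read/write flag and the GO bit (0x80), which the MAC
 * clears once the transaction completes.  mdio_status[] mirrors this
 * layout for read results.
 */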
static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
			   int write, u16 cmd)
{
	int cycles = 0;

	if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
		printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
		return -1;
	}

	if (write) {
		__raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]);
		__raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]);
	}
	__raw_writel(((phy_id << 5) | location) & 0xFF,
		     &mdio_regs->mdio_command[2]);
	__raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
		     &mdio_regs->mdio_command[3]);

	while ((cycles < MAX_MDIO_RETRIES) &&
	       (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
		udelay(1);
		cycles++;
	}

	if (cycles == MAX_MDIO_RETRIES) {
		printk(KERN_ERR "%s #%i: MII transaction failed\n", bus->name,
		       phy_id);
		return -1;
	}

#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
	       phy_id, write ? "write" : "read", cycles);
#endif

	if (write)
		return 0;

	if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
#if DEBUG_MDIO
		printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
		       phy_id);
#endif
		return 0xFFFF; /* don't return error */
	}

	return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
		((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
}
static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
	       phy_id, location, ret);
#endif
	return ret;
}

static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
			     u16 val)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n",
	       bus->name, phy_id, location, val, ret);
#endif
	return ret;
}
static int ixp4xx_mdio_register(void)
{
	int err;

	if (!(mdio_bus = mdiobus_alloc()))
		return -ENOMEM;

	/* All MII PHY accesses use NPE-B Ethernet registers */
	spin_lock_init(&mdio_lock);
	mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);

	mdio_bus->name = "IXP4xx MII Bus";
	mdio_bus->read = &ixp4xx_mdio_read;
	mdio_bus->write = &ixp4xx_mdio_write;
	strcpy(mdio_bus->id, "0");

	if ((err = mdiobus_register(mdio_bus)))
		mdiobus_free(mdio_bus);
	return err;
}

static void ixp4xx_mdio_remove(void)
{
	mdiobus_unregister(mdio_bus);
	mdiobus_free(mdio_bus);
}
static void ixp4xx_adjust_link(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct phy_device *phydev = port->phydev;

	if (!phydev->link) {
		if (port->speed) {
			port->speed = 0;
			printk(KERN_INFO "%s: link down\n", dev->name);
		}
		return;
	}

	if (port->speed == phydev->speed && port->duplex == phydev->duplex)
		return;

	port->speed = phydev->speed;
	port->duplex = phydev->duplex;

	if (port->duplex)
		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);
	else
		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);

	printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
	       dev->name, port->speed, port->duplex ? "full" : "half");
}
static inline void debug_pkt(struct net_device *dev, const char *func,
			     u8 *data, int len)
{
#if DEBUG_PKT_BYTES
	int i;

	printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len);
	for (i = 0; i < len; i++) {
		if (i >= DEBUG_PKT_BYTES)
			break;
		printk("%s%02X",
		       ((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
		       data[i]);
	}
	printk("\n");
#endif
}


static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
	       " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->dest_id, desc->src_id, desc->flags,
	       desc->qos, desc->padlen, desc->vlan_tci,
	       desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
	       desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
	       desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
	       desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
#endif
}
static inline void debug_queue(unsigned int queue, int is_get, u32 phys)
{
#if DEBUG_QUEUES
	static struct {
		int queue;
		const char *name;
	} names[] = {
		{ TX_QUEUE(0x10), "TX#0 " },
		{ TX_QUEUE(0x20), "TX#1 " },
		{ TX_QUEUE(0x00), "TX#2 " },
		{ RXFREE_QUEUE(0x10), "RX-free#0 " },
		{ RXFREE_QUEUE(0x20), "RX-free#1 " },
		{ RXFREE_QUEUE(0x00), "RX-free#2 " },
		{ TXDONE_QUEUE, "TX-done " },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(names); i++)
		if (names[i].queue == queue)
			break;

	printk(KERN_DEBUG "Queue %i %s%s %X\n", queue,
	       i < ARRAY_SIZE(names) ? names[i].name : "",
	       is_get ? "->" : "<-", phys);
#endif
}
static inline u32 queue_get_entry(unsigned int queue)
{
	u32 phys = qmgr_get_entry(queue);
	debug_queue(queue, 1, phys);
	return phys;
}

static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	if (!(phys = queue_get_entry(queue)))
		return -1;

	phys &= ~0x1F; /* mask out non-address bits */
	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &tab[n_desc]);
	BUG_ON(tab[n_desc].next);
	return n_desc;
}

static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_queue(queue, 0, phys);
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F);
	qmgr_put_entry(queue, phys);
	BUG_ON(qmgr_stat_overflow(queue));
}
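/*
 * The overflow BUG_ON above should be unreachable: each hardware queue is
 * requested with a length covering every descriptor that can ever sit in
 * it, and a descriptor is in at most one queue at any time, so a put into
 * a full queue would indicate driver state corruption rather than a
 * recoverable condition.
 */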
static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}
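/*
 * In the little-endian case desc->data may point a few bytes into the
 * mapped region: eth_xmit() maps a word-aligned copy of the frame and adds
 * the original skb->data offset back in.  The unmap therefore rounds the
 * address down and the length up to recover the exact mapping.
 */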
static void eth_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = netdev_priv(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(port->plat->rxq);
	netif_rx_schedule(dev, &port->napi);
}
static int eth_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll netif_rx_complete\n",
			       dev->name);
#endif
			netif_rx_complete(dev, napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_empty(rxq) &&
			    netif_rx_reschedule(dev, napi)) {
#if DEBUG_RX
				printk(KERN_DEBUG "%s: eth_poll"
				       " netif_rx_reschedule succeeded\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll all done\n",
			       dev->name);
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);

#ifdef __ARMEB__
		if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
			phys = dma_map_single(&dev->dev, skb->data,
					      RX_BUFF_SIZE, DMA_FROM_DEVICE);
			if (dma_mapping_error(&dev->dev, phys)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
#else
		skb = netdev_alloc_skb(dev,
				       ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
#endif

		if (!skb) {
			dev->stats.rx_dropped++;
			/* put the desc back on the RX-free queue */
			desc->buf_len = MAX_MRU;
			desc->pkt_len = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single(&dev->dev, desc->data - NET_IP_ALIGN,
				RX_BUFF_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
#endif
		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "eth_poll", skb->data, skb->len);

		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on the RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys + NET_IP_ALIGN;
#endif
		desc->buf_len = MAX_MRU;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}

#if DEBUG_RX
	printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
#endif
	return received; /* not all work done */
}
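/*
 * Two RX refill strategies meet in eth_poll(): big-endian builds swap the
 * just-received skb for a freshly mapped one and hand the old buffer up the
 * stack, while little-endian builds keep the DMA buffer in place and
 * byte-swap its contents into a right-sized skb.  Either way the descriptor
 * is recycled onto the RX-free queue before the next iteration.
 */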
static void eth_txdone_irq(void *unused)
{
	u32 phys;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
#endif
	while ((phys = queue_get_entry(TXDONE_QUEUE)) != 0) {
		u32 npe_id, n_desc;
		struct port *port;
		struct desc *desc;
		int start;

		npe_id = phys & 3;
		BUG_ON(npe_id >= MAX_NPES);
		port = npe_port_tab[npe_id];
		BUG_ON(!port);
		phys &= ~0x1F; /* mask out non-address bits */
		n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
		BUG_ON(n_desc >= TX_DESCS);
		desc = tx_desc_ptr(port, n_desc);
		debug_desc(phys, desc);

		if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
			port->netdev->stats.tx_packets++;
			port->netdev->stats.tx_bytes += desc->pkt_len;

			dma_unmap_tx(port, desc);
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
			       port->netdev->name, port->tx_buff_tab[n_desc]);
#endif
			free_buffer_irq(port->tx_buff_tab[n_desc]);
			port->tx_buff_tab[n_desc] = NULL;
		}

		start = qmgr_stat_empty(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq, phys, desc);
		if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
			       port->netdev->name);
#endif
			netif_wake_queue(port->netdev);
		}
	}
}
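/*
 * eth_xmit() stops the netdev queue only after taking the last entry from
 * the TX-ready queue, so the empty -> non-empty transition observed via
 * "start" above is exactly the moment a stopped queue can make progress
 * again; waking on every completion would also be correct, just wasted
 * effort whenever the queue was never stopped.
 */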
static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
#endif

	if (unlikely(skb->len > MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "eth_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (int)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
	dev_kfree_skb(skb);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
#ifdef __ARMEB__
		dev_kfree_skb(skb);
#else
		kfree(mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	/* NPE firmware pads short frames with zeros internally */
	wmb();
	queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
	dev->trans_start = jiffies;

	if (qmgr_stat_empty(txreadyq)) {
#if DEBUG_TX
		printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		if (!qmgr_stat_empty(txreadyq)) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_xmit ready again\n",
			       dev->name);
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
#endif
	return NETDEV_TX_OK;
}
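/*
 * The re-check after netif_stop_queue() closes a race with
 * eth_txdone_irq(): a TX completion may refill the TX-ready queue between
 * the emptiness test and the stop, in which case the interrupt handler saw
 * a non-empty queue and will not issue a wake, so eth_xmit() must undo the
 * stop itself.
 */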
static void eth_set_mcast_list(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct dev_mc_list *mclist = dev->mc_list;
	u8 diffs[ETH_ALEN], *addr;
	int cnt = dev->mc_count, i;

	if ((dev->flags & IFF_PROMISC) || !mclist || !cnt) {
		__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
			     &port->regs->rx_control[0]);
		return;
	}

	memset(diffs, 0, ETH_ALEN);
	addr = mclist->dmi_addr; /* first MAC address */

	while (--cnt && (mclist = mclist->next))
		for (i = 0; i < ETH_ALEN; i++)
			diffs[i] |= addr[i] ^ mclist->dmi_addr[i];

	for (i = 0; i < ETH_ALEN; i++) {
		__raw_writel(addr[i], &port->regs->mcast_addr[i]);
		__raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
	}

	__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
		     &port->regs->rx_control[0]);
}
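/*
 * The filter accepts any address matching mcast_addr on the bits set in
 * mcast_mask, i.e. the bits shared by every address in the list.  For
 * example, with 01:00:5E:00:00:01 and 01:00:5E:00:00:02 subscribed, diffs
 * ends up 00:00:00:00:00:03, the last mask byte becomes 0xFC, and the
 * hardware passes 01:00:5E:00:00:00 through 01:00:5E:00:00:03 - a superset
 * that the network stack then filters further in software.
 */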
static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct port *port = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;
	return phy_mii_ioctl(port->phydev, if_mii(req), cmd);
}
static int request_queues(struct port *port)
{
	int err;

	err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0);
	if (err)
		return err;

	err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0);
	if (err)
		goto rel_rxfree;

	err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0);
	if (err)
		goto rel_rx;

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0);
	if (err)
		goto rel_tx;

	/* TX-done queue handles skbs sent out by the NPEs */
	if (!ports_open) {
		err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0);
		if (err)
			goto rel_txready;
	}
	return 0;

rel_txready:
	qmgr_release_queue(port->plat->txreadyq);
rel_tx:
	qmgr_release_queue(TX_QUEUE(port->id));
rel_rx:
	qmgr_release_queue(port->plat->rxq);
rel_rxfree:
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
	       port->netdev->name);
	return err;
}

static void release_queues(struct port *port)
{
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	qmgr_release_queue(port->plat->rxq);
	qmgr_release_queue(TX_QUEUE(port->id));
	qmgr_release_queue(port->plat->txreadyq);

	if (!ports_open)
		qmgr_release_queue(TXDONE_QUEUE);
}
static int init_queues(struct port *port)
{
	int i;

	if (!ports_open)
		if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
						 POOL_ALLOC_SIZE, 32, 0)))
			return -ENOMEM;

	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					      &port->desc_tab_phys)))
		return -ENOMEM;
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff; /* skb or kmalloc()ated memory */
		void *data;
#ifdef __ARMEB__
		if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = MAX_MRU;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_BUFF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		desc->data += NET_IP_ALIGN;
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}
static void destroy_queues(struct port *port)
{
	int i;

	if (port->desc_tab) {
		for (i = 0; i < RX_DESCS; i++) {
			struct desc *desc = rx_desc_ptr(port, i);
			buffer_t *buff = port->rx_buff_tab[i];
			if (buff) {
				dma_unmap_single(&port->netdev->dev,
						 desc->data - NET_IP_ALIGN,
						 RX_BUFF_SIZE,
						 DMA_FROM_DEVICE);
				free_buffer(buff);
			}
		}
		for (i = 0; i < TX_DESCS; i++) {
			struct desc *desc = tx_desc_ptr(port, i);
			buffer_t *buff = port->tx_buff_tab[i];
			if (buff) {
				dma_unmap_tx(port, desc);
				free_buffer(buff);
			}
		}
		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
		port->desc_tab = NULL;
	}

	if (!ports_open && dma_pool) {
		dma_pool_destroy(dma_pool);
		dma_pool = NULL;
	}
}
static int eth_open(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct npe *npe = port->npe;
	struct msg msg;
	int i, err;

	if (!npe_running(npe)) {
		err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
		if (err)
			return err;

		if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
			printk(KERN_ERR "%s: %s not responding\n", dev->name,
			       npe_name(npe));
			return -EIO;
		}
	}

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
	msg.eth_id = port->id;
	msg.byte5 = port->plat->rxq | 0x80;
	msg.byte7 = port->plat->rxq << 4;
	for (i = 0; i < 8; i++) {
		msg.byte3 = i;
		if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
			return -EIO;
	}

	msg.cmd = NPE_EDB_SETPORTADDRESS;
	msg.eth_id = PHYSICAL_ID(port->id);
	msg.byte2 = dev->dev_addr[0];
	msg.byte3 = dev->dev_addr[1];
	msg.byte4 = dev->dev_addr[2];
	msg.byte5 = dev->dev_addr[3];
	msg.byte6 = dev->dev_addr[4];
	msg.byte7 = dev->dev_addr[5];
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
		return -EIO;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_FW_SETFIREWALLMODE;
	msg.eth_id = port->id;
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
		return -EIO;

	if ((err = request_queues(port)) != 0)
		return err;

	if ((err = init_queues(port)) != 0) {
		destroy_queues(port);
		release_queues(port);
		return err;
	}

	port->speed = 0;	/* force "link up" message */
	phy_start(port->phydev);

	for (i = 0; i < ETH_ALEN; i++)
		__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
	__raw_writel(0x08, &port->regs->random_seed);
	__raw_writel(0x12, &port->regs->partial_empty_threshold);
	__raw_writel(0x30, &port->regs->partial_full_threshold);
	__raw_writel(0x08, &port->regs->tx_start_bytes);
	__raw_writel(0x15, &port->regs->tx_deferral);
	__raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
	__raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
	__raw_writel(0x80, &port->regs->slot_time);
	__raw_writel(0x01, &port->regs->int_clock_threshold);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(RXFREE_QUEUE(port->id),
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	__raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
	__raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
	__raw_writel(0, &port->regs->rx_control[1]);
	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);

	napi_enable(&port->napi);
	eth_set_mcast_list(dev);
	netif_start_queue(dev);

	qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
		     eth_rx_irq, dev);
	if (!ports_open)
		qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
			     eth_txdone_irq, NULL);
	qmgr_enable_irq(TXDONE_QUEUE);
	ports_open++;
	/* we may already have RX data, enables IRQ */
	netif_rx_schedule(dev, &port->napi);
	return 0;
}
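/*
 * Closing a port is the delicate part: the NPE still holds RX buffers, and
 * the only way to get them back is to make it consume them.  eth_close()
 * therefore puts the port into internal loopback and, whenever the TX queue
 * runs dry, injects a minimal dummy frame so the NPE keeps looping packets
 * until every RX descriptor has been returned and can be freed.
 */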
static int eth_close(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct msg msg;
	int buffs = RX_DESCS; /* allocated RX buffers */
	int i;

	ports_open--;
	qmgr_disable_irq(port->plat->rxq);
	napi_disable(&port->napi);
	netif_stop_queue(dev);

	while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
		buffs--;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_SETLOOPBACK_MODE;
	msg.eth_id = port->id;
	msg.byte3 = 1;
	if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);

	i = 0;
	do {			/* drain RX buffers */
		while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
			buffs--;
		if (!buffs)
			break;
		if (qmgr_stat_empty(TX_QUEUE(port->id))) {
			/* we have to inject some packet */
			struct desc *desc;
			u32 phys;
			int n = queue_get_desc(port->plat->txreadyq, port, 1);
			BUG_ON(n < 0);
			desc = tx_desc_ptr(port, n);
			phys = tx_desc_phys(port, n);
			desc->buf_len = desc->pkt_len = 1;
			wmb();
			queue_put_desc(TX_QUEUE(port->id), phys, desc);
		}
		udelay(1);
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
		       " left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
#endif

	buffs = TX_DESCS;
	while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
		udelay(1);
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
		       "left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif

	msg.byte3 = 0;
	if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to disable loopback\n",
		       dev->name);

	phy_stop(port->phydev);

	if (!ports_open)
		qmgr_disable_irq(TXDONE_QUEUE);
	destroy_queues(port);
	release_queues(port);
	return 0;
}
static int __devinit eth_init_one(struct platform_device *pdev)
{
	struct port *port;
	struct net_device *dev;
	struct eth_plat_info *plat = pdev->dev.platform_data;
	u32 regs_phys;
	char phy_id[BUS_ID_SIZE];
	int err;

	if (!(dev = alloc_etherdev(sizeof(struct port))))
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	port = netdev_priv(dev);
	port->netdev = dev;
	port->id = pdev->id;

	switch (port->id) {
	case IXP4XX_ETH_NPEA:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
		regs_phys = IXP4XX_EthA_BASE_PHYS;
		break;
	case IXP4XX_ETH_NPEB:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
		regs_phys = IXP4XX_EthB_BASE_PHYS;
		break;
	case IXP4XX_ETH_NPEC:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
		regs_phys = IXP4XX_EthC_BASE_PHYS;
		break;
	default:
		err = -ENOSYS;
		goto err_free;
	}

	dev->open = eth_open;
	dev->hard_start_xmit = eth_xmit;
	dev->stop = eth_close;
	dev->do_ioctl = eth_ioctl;
	dev->set_multicast_list = eth_set_mcast_list;
	dev->tx_queue_len = 100;

	netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);

	if (!(port->npe = npe_request(NPE_ID(port->id)))) {
		err = -EIO;
		goto err_free;
	}

	if (register_netdev(dev)) {
		err = -EIO;
		goto err_npe_rel;
	}

	port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
	if (!port->mem_res) {
		err = -EBUSY;
		goto err_unreg;
	}

	port->plat = plat;
	npe_port_tab[NPE_ID(port->id)] = port;
	memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN);

	platform_set_drvdata(pdev, dev);

	__raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
		     &port->regs->core_control);
	udelay(50);
	__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
	udelay(50);

	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, "0", plat->phy);
	port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
				   PHY_INTERFACE_MODE_MII);
	if (IS_ERR(port->phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		err = PTR_ERR(port->phydev);
		goto err_free_mem;
	}

	port->phydev->irq = PHY_POLL;

	printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
	       npe_name(port->npe));

	return 0;

err_free_mem:
	npe_port_tab[NPE_ID(port->id)] = NULL;
	platform_set_drvdata(pdev, NULL);
	release_resource(port->mem_res);
err_unreg:
	unregister_netdev(dev);
err_npe_rel:
	npe_release(port->npe);
err_free:
	free_netdev(dev);
	return err;
}
static int __devexit eth_remove_one(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct port *port = netdev_priv(dev);

	unregister_netdev(dev);
	phy_disconnect(port->phydev);
	npe_port_tab[NPE_ID(port->id)] = NULL;
	platform_set_drvdata(pdev, NULL);
	npe_release(port->npe);
	release_resource(port->mem_res);
	free_netdev(dev);
	return 0;
}
static struct platform_driver ixp4xx_eth_driver = {
	.driver.name	= DRV_NAME,
	.probe		= eth_init_one,
	.remove		= eth_remove_one,
};

static int __init eth_init_module(void)
{
	int err;

	if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
		return -ENOSYS;

	if ((err = ixp4xx_mdio_register()))
		return err;
	return platform_driver_register(&ixp4xx_eth_driver);
}

static void __exit eth_cleanup_module(void)
{
	platform_driver_unregister(&ixp4xx_eth_driver);
	ixp4xx_mdio_remove();
}

MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_eth");
module_init(eth_init_module);
module_exit(eth_cleanup_module);