1 /* 3c527.c: 3Com Etherlink/MC32 driver for Linux 2.4 and 2.6.
3 * (c) Copyright 1998 Red Hat Software Inc
5 * Further debugging by Carl Drougge.
6 * Initial SMP support by Felipe W Damasio <felipewd@terra.com.br>
7 * Heavily modified by Richard Procter <rnp@paradise.net.nz>
9 * Based on skeleton.c written 1993-94 by Donald Becker and ne2.c
10 * (for the MCA stuff) written by Wim Dumon.
12 * Thanks to 3Com for making this possible by providing me with the
15 * This software may be used and distributed according to the terms
16 * of the GNU General Public License, incorporated herein by reference.
20 #define DRV_NAME "3c527"
21 #define DRV_VERSION "0.7-SMP"
22 #define DRV_RELDATE "2003/09/21"
24 static const char *version =
25 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter <rnp@paradise.net.nz>\n";
28 * DOC: Traps for the unwary
30 * The diagram (Figure 1-1) and the POS summary disagree with the
31 * "Interrupt Level" section in the manual.
33 * The manual contradicts itself when describing the minimum number of
34 * buffers in the 'configure lists' command.
35 * My card accepts a buffer config of 4/4.
37 * Setting the SAV BP bit does not save bad packets, but
38 * only enables RX on-card stats collection.
40 * The documentation in places seems to miss things. In actual fact
41 * I've always eventually found everything is documented; it just
42 * requires careful study.
44 * DOC: Theory Of Operation
46 * The 3com 3c527 is a 32bit MCA bus mastering adapter with a large
47 * amount of on-board intelligence that housekeeps a somewhat dumber
48 * Intel NIC. For performance we want to keep the transmit queue deep
49 * as the card can transmit packets while fetching others from main
50 * memory by bus master DMA. Transmission and reception are driven by
51 * circular buffer queues.
53 * The mailboxes can be used for controlling how the card traverses
54 * its buffer rings, but are used only for initial setup in this
55 * implementation. The exec mailbox allows a variety of commands to
56 * be executed. Each command must complete before the next is
57 * executed. Primarily we use the exec mailbox for controlling the
58 * multicast lists. We have to do a certain amount of interesting
59 * hoop jumping as the multicast list changes can occur in interrupt
60 * state when the card has an exec command pending. We defer such
61 * events until the command completion interrupt.
63 * A copy break scheme (taken from 3c59x.c) is employed whereby
64 * received frames exceeding a configurable length are passed
65 * directly to the higher networking layers without incurring a copy,
66 * in what amounts to a time/space trade-off.
68 * The card also keeps a large amount of statistical information
69 * on-board. In a perfect world, these could be used safely at no
70 * cost. However, lacking information to the contrary, processing
71 * them without races would involve so much extra complexity as to
72 * make it unworthwhile to do so. In the end, a hybrid SW/HW
73 * implementation was made necessary --- see mc32_update_stats().
77 * It should be possible to use two or more cards, but at this stage
78 * only by loading two copies of the same module.
80 * The on-board 82586 NIC has trouble receiving multiple
81 * back-to-back frames and so is likely to drop packets from fast
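/* A minimal sketch (not part of the driver and kept out of the build) of
 * the deferral pattern described above: a multicast change arriving in
 * interrupt state while an exec command is pending simply records the
 * fact, and the command-completion interrupt replays the load. The
 * function name is hypothetical; the real logic lives in
 * do_mc32_set_multicast_list() and the EXEC branch of mc32_interrupt(). */
#if 0
static void example_deferred_reload(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);

	if (down_trylock(&lp->cmd_mutex)) {
		/* Exec mailbox busy: remember the request and bail out;
		   the completion interrupt will call us back. */
		lp->mc_reload_wait = 1;
		return;
	}
	/* ... otherwise build and issue the filter command here; the
	   completion interrupt releases cmd_mutex for us ... */
}
#endif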
85 #include <linux/module.h>
87 #include <linux/errno.h>
88 #include <linux/netdevice.h>
89 #include <linux/etherdevice.h>
90 #include <linux/if_ether.h>
91 #include <linux/init.h>
92 #include <linux/kernel.h>
93 #include <linux/types.h>
94 #include <linux/fcntl.h>
95 #include <linux/interrupt.h>
96 #include <linux/mca-legacy.h>
97 #include <linux/ioport.h>
99 #include <linux/skbuff.h>
100 #include <linux/slab.h>
101 #include <linux/string.h>
102 #include <linux/wait.h>
103 #include <linux/ethtool.h>
104 #include <linux/completion.h>
105 #include <linux/bitops.h>
106 #include <linux/semaphore.h>
108 #include <asm/uaccess.h>
109 #include <asm/system.h>
115 MODULE_LICENSE("GPL");
118 * The name of the card. It is used for messages and in requests for
119 * I/O regions, IRQs and DMA channels
121 static const char* cardname = DRV_NAME;
123 /* use 0 for production, 1 for verification, >2 for debug */
130 static unsigned int mc32_debug = NET_DEBUG;
132 /* The number of low I/O ports used by the ethercard. */
133 #define MC32_IO_EXTENT 8
135 /* As implemented, values must be a power-of-2 -- 4/8/16/32 */
136 #define TX_RING_LEN 32 /* Typically the card supports 37 */
137 #define RX_RING_LEN 8 /* " " " */
139 /* Copy break point, see above for details.
140 * Setting to > 1512 effectively disables this feature. */
141 #define RX_COPYBREAK 200 /* Value from 3c59x.c */
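/* A minimal sketch (not compiled into the driver) of the copy-break
 * decision applied per received frame in mc32_rx_ring() below: short
 * frames are copied into a fresh, right-sized skb so the big ring
 * buffer can be re-armed at once, while longer frames have the ring
 * buffer itself flipped up the stack. The helper name and the 2-byte
 * alignment reserve are illustrative assumptions only. */
#if 0
static struct sk_buff *example_copybreak(struct sk_buff *ring_skb, u16 length)
{
	struct sk_buff *skb;

	if (length > RX_COPYBREAK)
		return NULL;	/* caller passes ring_skb upstream instead */

	skb = dev_alloc_skb(length + 2);
	if (skb == NULL)
		return NULL;	/* no memory: caller drops the frame */

	skb_reserve(skb, 2);	/* longword-align the IP header */
	memcpy(skb_put(skb, length), ring_skb->data, length);
	return skb;
}
#endif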
143 /* Issue the 82586 workaround command - this is for "busy lans", but
144 * basically means for all LANs nowadays - it has a performance (latency)
145 * cost, but is best left enabled. */
146 static const int WORKAROUND_82586=1;
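/* When set, mc32_open() enables the workaround on the card via exec
   command 0x0D (see "82586 bug workaround on" below). */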
148 /* Pointers to buffers and their on-card records */
149 struct mc32_ring_desc
151 volatile struct skb_header *p;
155 /* Information that needs to be kept for each board. */
161 volatile struct mc32_mailbox *rx_box;
162 volatile struct mc32_mailbox *tx_box;
163 volatile struct mc32_mailbox *exec_box;
164 volatile struct mc32_stats *stats; /* Start of on-card statistics */
165 u16 tx_chain; /* Transmit list start offset */
166 u16 rx_chain; /* Receive list start offset */
167 u16 tx_len; /* Transmit list count */
168 u16 rx_len; /* Receive list count */
170 u16 xceiver_desired_state; /* HALTED or RUNNING */
171 u16 cmd_nonblocking; /* Thread is uninterested in command result */
172 u16 mc_reload_wait; /* A multicast load request is pending */
173 u32 mc_list_valid; /* True when the mclist is set */
175 struct mc32_ring_desc tx_ring[TX_RING_LEN]; /* Host Transmit ring */
176 struct mc32_ring_desc rx_ring[RX_RING_LEN]; /* Host Receive ring */
178 atomic_t tx_count; /* buffers left */
179 atomic_t tx_ring_head; /* index to tx en-queue end */
180 u16 tx_ring_tail; /* index to tx de-queue end */
182 u16 rx_ring_tail; /* index to rx de-queue end */
184 struct semaphore cmd_mutex; /* Serialises issuing of execute commands */
185 struct completion execution_cmd; /* Card has completed an execute command */
186 struct completion xceiver_cmd; /* Card has completed a tx or rx command */
189 /* The station (ethernet) address prefix, used for a sanity check. */
190 #define SA_ADDR0 0x02
191 #define SA_ADDR1 0x60
192 #define SA_ADDR2 0xAC
194 struct mca_adapters_t {
199 static const struct mca_adapters_t mc32_adapters[] = {
200 { 0x0041, "3COM EtherLink MC/32" },
201 { 0x8EF5, "IBM High Performance Lan Adapter" },
206 /* Macros for ring index manipulations */
207 static inline u16 next_rx(u16 rx) { return (rx+1)&(RX_RING_LEN-1); };
208 static inline u16 prev_rx(u16 rx) { return (rx-1)&(RX_RING_LEN-1); };
210 static inline u16 next_tx(u16 tx) { return (tx+1)&(TX_RING_LEN-1); };
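/* For example, with RX_RING_LEN == 8 the mask is 7, so next_rx(7) == 0
   and prev_rx(0) == 7; this masking trick is why the ring lengths above
   must be powers of two. */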
213 /* Index to functions, as function prototypes. */
214 static int mc32_probe1(struct net_device *dev, int ioaddr);
215 static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len);
216 static int mc32_open(struct net_device *dev);
217 static void mc32_timeout(struct net_device *dev);
218 static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev);
219 static irqreturn_t mc32_interrupt(int irq, void *dev_id);
220 static int mc32_close(struct net_device *dev);
221 static struct net_device_stats *mc32_get_stats(struct net_device *dev);
222 static void mc32_set_multicast_list(struct net_device *dev);
223 static void mc32_reset_multicast_list(struct net_device *dev);
224 static const struct ethtool_ops netdev_ethtool_ops;
226 static void cleanup_card(struct net_device *dev)
228 struct mc32_local *lp = netdev_priv(dev);
229 unsigned slot = lp->slot;
230 mca_mark_as_unused(slot);
231 mca_set_adapter_name(slot, NULL);
232 free_irq(dev->irq, dev);
233 release_region(dev->base_addr, MC32_IO_EXTENT);
237 * mc32_probe - Search for supported boards
238 * @unit: interface number to use
240 * Because the MCA bus is a real bus and we can scan for cards, we could do a
241 * single scan for all boards here. Right now we use the passed in device
242 * structure and scan for only one board. This needs fixing for modules
246 struct net_device *__init mc32_probe(int unit)
248 struct net_device *dev = alloc_etherdev(sizeof(struct mc32_local));
249 static int current_mca_slot = -1;
254 return ERR_PTR(-ENOMEM);
257 sprintf(dev->name, "eth%d", unit);
259 /* Do not check any supplied i/o locations.
260 POS registers usually don't fail :) */
262 /* MCA cards have POS registers.
263 Autodetecting MCA cards is extremely simple.
264 Just search for the card. */
266 for(i = 0; (mc32_adapters[i].name != NULL); i++) {
268 mca_find_unused_adapter(mc32_adapters[i].id, 0);
270 if(current_mca_slot != MCA_NOTFOUND) {
271 if(!mc32_probe1(dev, current_mca_slot))
273 mca_set_adapter_name(current_mca_slot,
274 mc32_adapters[i].name);
275 mca_mark_as_used(current_mca_slot);
276 err = register_netdev(dev);
288 return ERR_PTR(-ENODEV);
291 static const struct net_device_ops netdev_ops = {
292 .ndo_open = mc32_open,
293 .ndo_stop = mc32_close,
294 .ndo_start_xmit = mc32_send_packet,
295 .ndo_get_stats = mc32_get_stats,
296 .ndo_set_multicast_list = mc32_set_multicast_list,
297 .ndo_tx_timeout = mc32_timeout,
298 .ndo_change_mtu = eth_change_mtu,
299 .ndo_set_mac_address = eth_mac_addr,
300 .ndo_validate_addr = eth_validate_addr,
304 * mc32_probe1 - Check a given slot for a board and test the card
305 * @dev: Device structure to fill in
306 * @slot: The MCA bus slot being used by this card
308 * Decode the slot data and configure the card structures. Having done this we
309 * can reset the card and configure it. The card does a full self test cycle
310 * in firmware so we have to wait for it to return and post us either a
311 * failure case or some addresses we use to find the board internals.
314 static int __init mc32_probe1(struct net_device *dev, int slot)
316 static unsigned version_printed;
320 struct mc32_local *lp = netdev_priv(dev);
321 static u16 mca_io_bases[]={
327 static u32 mca_mem_bases[]={
337 static char *failures[]={
338 "Processor instruction",
339 "Processor data bus",
340 "Processor data bus",
341 "Processor data bus",
346 "82586 internal loopback",
347 "82586 initialisation failure",
348 "Adapter list configuration error"
351 /* Time to play MCA games */
353 if (mc32_debug && version_printed++ == 0)
354 printk(KERN_DEBUG "%s", version);
356 printk(KERN_INFO "%s: %s found in slot %d:", dev->name, cardname, slot);
358 POS = mca_read_stored_pos(slot, 2);
362 printk(" disabled.\n");
366 /* Fill in the 'dev' fields. */
367 dev->base_addr = mca_io_bases[(POS>>1)&7];
368 dev->mem_start = mca_mem_bases[(POS>>4)&7];
370 POS = mca_read_stored_pos(slot, 4);
373 printk("memory window disabled.\n");
377 POS = mca_read_stored_pos(slot, 5);
382 printk("invalid memory window.\n");
389 dev->mem_end=dev->mem_start + i;
391 dev->irq = ((POS>>2)&3)+9;
393 if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname))
395 printk("io 0x%3lX, which is busy.\n", dev->base_addr);
399 printk("io 0x%3lX irq %d mem 0x%lX (%dK)\n",
400 dev->base_addr, dev->irq, dev->mem_start, i/1024);
403 /* We ought to set the cache line size here.. */
410 /* Retrieve and print the ethernet address. */
411 for (i = 0; i < 6; i++)
413 mca_write_pos(slot, 6, i+12);
414 mca_write_pos(slot, 7, 0);
416 dev->dev_addr[i] = mca_read_pos(slot,3);
419 printk("%s: Address %pM", dev->name, dev->dev_addr);
421 mca_write_pos(slot, 6, 0);
422 mca_write_pos(slot, 7, 0);
424 POS = mca_read_stored_pos(slot, 4);
427 printk(" : BNC port selected.\n");
429 printk(" : AUI port selected.\n");
431 POS=inb(dev->base_addr+HOST_CTRL);
432 POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET;
433 POS&=~HOST_CTRL_INTE;
434 outb(POS, dev->base_addr+HOST_CTRL);
438 POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET);
439 outb(POS, dev->base_addr+HOST_CTRL);
447 err = request_irq(dev->irq, &mc32_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, DRV_NAME, dev);
449 release_region(dev->base_addr, MC32_IO_EXTENT);
450 printk(KERN_ERR "%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
454 memset(lp, 0, sizeof(struct mc32_local));
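/* The firmware now runs its self-test (see the kernel-doc above);
   poll until it posts a status byte: zero indicates success, while a
   small non-zero value indexes the failures[] table. */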
459 base = inb(dev->base_addr);
466 printk(KERN_ERR "%s: failed to boot adapter.\n", dev->name);
471 if(inb(dev->base_addr+2)&(1<<5))
472 base = inb(dev->base_addr);
478 printk(KERN_ERR "%s: %s%s.\n", dev->name, failures[base-1],
479 base<0x0A?" test failure":"");
481 printk(KERN_ERR "%s: unknown failure %d.\n", dev->name, base);
491 while(!(inb(dev->base_addr+2)&(1<<5)))
497 printk(KERN_ERR "%s: mailbox read fail (%d).\n", dev->name, i);
503 base|=(inb(dev->base_addr)<<(8*i));
506 lp->exec_box=isa_bus_to_virt(dev->mem_start+base);
508 base=lp->exec_box->data[1]<<16|lp->exec_box->data[0];
510 lp->base = dev->mem_start+base;
512 lp->rx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[2]);
513 lp->tx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[3]);
515 lp->stats = isa_bus_to_virt(lp->base + lp->exec_box->data[5]);
518 * Descriptor chains (card relative)
521 lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */
522 lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */
523 lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */
524 lp->rx_len = lp->exec_box->data[11]; /* Receive list count */
526 init_MUTEX_LOCKED(&lp->cmd_mutex);
527 init_completion(&lp->execution_cmd);
528 init_completion(&lp->xceiver_cmd);
530 printk("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
531 dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);
533 dev->netdev_ops = &netdev_ops;
534 dev->watchdog_timeo = HZ*5; /* Board does all the work */
535 dev->ethtool_ops = &netdev_ethtool_ops;
540 free_irq(dev->irq, dev);
542 release_region(dev->base_addr, MC32_IO_EXTENT);
548 * mc32_ready_poll - wait until we can feed it a command
549 * @dev: The device to wait for
551 * Wait until the card becomes ready to accept a command via the
552 * command register. This tells us nothing about the completion
553 * status of any pending commands and takes very little time at all.
556 static inline void mc32_ready_poll(struct net_device *dev)
558 int ioaddr = dev->base_addr;
559 while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR));
564 * mc32_command_nowait - send a command non blocking
565 * @dev: The 3c527 to issue the command to
566 * @cmd: The command word to write to the mailbox
567 * @data: A data block if the command expects one
568 * @len: Length of the data block
570 * Send a command from interrupt state. If there is a command
571 * currently being executed then we return an error of -1. It
572 * simply isn't viable to wait around as commands may be
573 * slow. This can theoretically be starved on SMP, but it's hard
574 * to see a realistic situation. We do not wait for the command
575 * to complete --- we rely on the interrupt handler to tidy up
579 static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int len)
581 struct mc32_local *lp = netdev_priv(dev);
582 int ioaddr = dev->base_addr;
585 if (down_trylock(&lp->cmd_mutex) == 0)
587 lp->cmd_nonblocking=1;
588 lp->exec_box->mbox=0;
589 lp->exec_box->mbox=cmd;
590 memcpy((void *)lp->exec_box->data, data, len);
591 barrier(); /* the memcpy forgot the volatile so be sure */
593 /* Send the command */
594 mc32_ready_poll(dev);
595 outb(1<<6, ioaddr+HOST_CMD);
599 /* Interrupt handler will signal mutex on completion */
607 * mc32_command - send a command and sleep until completion
608 * @dev: The 3c527 card to issue the command to
609 * @cmd: The command word to write to the mailbox
610 * @data: A data block if the command expects one
611 * @len: Length of the data block
613 * Sends exec commands in a user context. This permits us to wait around
614 * for the replies and also to wait for the command buffer to complete
615 * from a previous command before we execute our command. After our
616 * command completes we will attempt any pending multicast reload
617 * we blocked off by hogging the exec buffer.
619 * You feed the card a command, you wait, it interrupts and you get a
620 * reply. All well and good. The complication arises because you use
621 * commands for filter list changes which come in at bh level from things
622 * like IPV6 group stuff.
625 static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len)
627 struct mc32_local *lp = netdev_priv(dev);
628 int ioaddr = dev->base_addr;
631 down(&lp->cmd_mutex);
637 lp->cmd_nonblocking=0;
638 lp->exec_box->mbox=0;
639 lp->exec_box->mbox=cmd;
640 memcpy((void *)lp->exec_box->data, data, len);
641 barrier(); /* the memcpy forgot the volatile so be sure */
643 mc32_ready_poll(dev);
644 outb(1<<6, ioaddr+HOST_CMD);
646 wait_for_completion(&lp->execution_cmd);
648 if(lp->exec_box->mbox&(1<<13))
654 * A multicast set got blocked - try it now
657 if(lp->mc_reload_wait)
659 mc32_reset_multicast_list(dev);
667 * mc32_start_transceiver - tell board to restart tx/rx
668 * @dev: The 3c527 card to issue the command to
670 * This may be called from the interrupt state, where it is used
671 * to restart the rx ring if the card runs out of rx buffers.
673 * We must first check if it's ok to (re)start the transceiver. See
674 * mc32_close for details.
677 static void mc32_start_transceiver(struct net_device *dev) {
679 struct mc32_local *lp = netdev_priv(dev);
680 int ioaddr = dev->base_addr;
682 /* Ignore RX overflow on device closure */
683 if (lp->xceiver_desired_state==HALTED)
686 /* Give the card the offset to the post-EOL-bit RX descriptor */
687 mc32_ready_poll(dev);
689 lp->rx_box->data[0]=lp->rx_ring[prev_rx(lp->rx_ring_tail)].p->next;
690 outb(HOST_CMD_START_RX, ioaddr+HOST_CMD);
692 mc32_ready_poll(dev);
694 outb(HOST_CMD_RESTRT_TX, ioaddr+HOST_CMD); /* card ignores this on RX restart */
696 /* We are not interrupted on start completion */
701 * mc32_halt_transceiver - tell board to stop tx/rx
702 * @dev: The 3c527 card to issue the command to
704 * We issue the commands to halt the card's transceiver. In fact,
705 * after some experimenting we now simply tell the card to
706 * suspend; odd things occasionally happened when we issued aborts.
708 * We then sleep until the card has notified us that both rx and
709 * tx have been suspended.
712 static void mc32_halt_transceiver(struct net_device *dev)
714 struct mc32_local *lp = netdev_priv(dev);
715 int ioaddr = dev->base_addr;
717 mc32_ready_poll(dev);
719 outb(HOST_CMD_SUSPND_RX, ioaddr+HOST_CMD);
720 wait_for_completion(&lp->xceiver_cmd);
722 mc32_ready_poll(dev);
724 outb(HOST_CMD_SUSPND_TX, ioaddr+HOST_CMD);
725 wait_for_completion(&lp->xceiver_cmd);
730 * mc32_load_rx_ring - load the ring of receive buffers
731 * @dev: 3c527 to build the ring for
733 * This initialises the on-card and driver data structures to
734 * the point where mc32_start_transceiver() can be called.
736 * The card sets up the receive ring for us. We are required to use the
737 * ring it provides, although the size of the ring is configurable.
739 * We allocate an sk_buff for each ring entry in turn and
740 * initialise its house-keeping info. At the same time, we read
741 * each 'next' pointer in our rx_ring array. This reduces slow
742 * shared-memory reads and makes it easy to access predecessor
745 * We then set the end-of-list bit for the last entry so that the
746 * card will know when it has run out of buffers.
749 static int mc32_load_rx_ring(struct net_device *dev)
751 struct mc32_local *lp = netdev_priv(dev);
754 volatile struct skb_header *p;
756 rx_base=lp->rx_chain;
758 for(i=0; i<RX_RING_LEN; i++) {
759 lp->rx_ring[i].skb=alloc_skb(1532, GFP_KERNEL);
760 if (lp->rx_ring[i].skb==NULL) {
762 kfree_skb(lp->rx_ring[i].skb);
765 skb_reserve(lp->rx_ring[i].skb, 18);
767 p=isa_bus_to_virt(lp->base+rx_base);
770 p->data=isa_virt_to_bus(lp->rx_ring[i].skb->data);
778 lp->rx_ring[i-1].p->control |= CONTROL_EOL;
787 * mc32_flush_rx_ring - free the ring of receive buffers
788 * @lp: Local data of 3c527 to flush the rx ring of
790 * Free the buffer for each ring slot. This may be called
791 * before mc32_load_rx_ring(), e.g. on error in mc32_open().
792 * Requires rx skb pointers to point to a valid skb, or NULL.
795 static void mc32_flush_rx_ring(struct net_device *dev)
797 struct mc32_local *lp = netdev_priv(dev);
800 for(i=0; i < RX_RING_LEN; i++)
802 if (lp->rx_ring[i].skb) {
803 dev_kfree_skb(lp->rx_ring[i].skb);
804 lp->rx_ring[i].skb = NULL;
806 lp->rx_ring[i].p=NULL;
812 * mc32_load_tx_ring - load transmit ring
813 * @dev: The 3c527 card to issue the command to
815 * This sets up the host transmit data-structures.
817 * First, we obtain from the card its current position in the tx
818 * ring, so that we will know where to begin transmitting
821 * Then, we read the 'next' pointers from the on-card tx ring into
822 * our tx_ring array to reduce slow shared-mem reads. Finally, we
823 * initialise the tx house-keeping variables.
827 static void mc32_load_tx_ring(struct net_device *dev)
829 struct mc32_local *lp = netdev_priv(dev);
830 volatile struct skb_header *p;
834 tx_base=lp->tx_box->data[0];
836 for(i=0 ; i<TX_RING_LEN ; i++)
838 p=isa_bus_to_virt(lp->base+tx_base);
840 lp->tx_ring[i].skb=NULL;
845 /* -1 so that tx_ring_head cannot "lap" tx_ring_tail */
846 /* see mc32_tx_ring */
848 atomic_set(&lp->tx_count, TX_RING_LEN-1);
849 atomic_set(&lp->tx_ring_head, 0);
855 * mc32_flush_tx_ring - free transmit ring
856 * @lp: Local data of 3c527 to flush the tx ring of
858 * If the ring is non-empty, zip over it, freeing any
859 * allocated sk_buffs. The tx ring house-keeping variables are
860 * then reset. Requires tx skb pointers to point to a valid skb,
864 static void mc32_flush_tx_ring(struct net_device *dev)
866 struct mc32_local *lp = netdev_priv(dev);
869 for (i=0; i < TX_RING_LEN; i++)
871 if (lp->tx_ring[i].skb)
873 dev_kfree_skb(lp->tx_ring[i].skb);
874 lp->tx_ring[i].skb = NULL;
878 atomic_set(&lp->tx_count, 0);
879 atomic_set(&lp->tx_ring_head, 0);
885 * mc32_open - handle 'up' of card
886 * @dev: device to open
888 * The user is trying to bring the card into ready state. This requires
889 * a brief dialogue with the card. Firstly we enable interrupts and then
890 * 'indications'. Without these enabled the card doesn't bother telling
891 * us what it has done. This had me puzzled for a week.
893 * We configure the number of card descriptors, then load the network
894 * address and multicast filters. Turn on the workaround mode. This
895 * works around a bug in the 82586 - it asks the firmware to do
896 * so. It has a performance (latency) hit but is needed on busy
897 * [read most] lans. We load the ring with buffers then we kick it
901 static int mc32_open(struct net_device *dev)
903 int ioaddr = dev->base_addr;
904 struct mc32_local *lp = netdev_priv(dev);
907 u16 descnumbuffs[2] = {TX_RING_LEN, RX_RING_LEN};
913 regs=inb(ioaddr+HOST_CTRL);
914 regs|=HOST_CTRL_INTE;
915 outb(regs, ioaddr+HOST_CTRL);
918 * Allow ourselves to issue commands
925 * Send the indications on command
928 mc32_command(dev, 4, &one, 2);
931 * Poke it to make sure it's really dead.
934 mc32_halt_transceiver(dev);
935 mc32_flush_tx_ring(dev);
938 * Ask card to set up on-card descriptors to our spec
941 if(mc32_command(dev, 8, descnumbuffs, 4)) {
942 printk("%s: %s rejected our buffer configuration!\n",
943 dev->name, cardname);
948 /* Report new configuration */
949 mc32_command(dev, 6, NULL, 0);
951 lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */
952 lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */
953 lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */
954 lp->rx_len = lp->exec_box->data[11]; /* Receive list count */
956 /* Set Network Address */
957 mc32_command(dev, 1, dev->dev_addr, 6);
959 /* Set the filters */
960 mc32_set_multicast_list(dev);
962 if (WORKAROUND_82586) {
964 mc32_command(dev, 0x0D, &zero_word, 2); /* 82586 bug workaround on */
967 mc32_load_tx_ring(dev);
969 if(mc32_load_rx_ring(dev))
975 lp->xceiver_desired_state = RUNNING;
977 /* And finally, set the ball rolling... */
978 mc32_start_transceiver(dev);
980 netif_start_queue(dev);
987 * mc32_timeout - handle a timeout from the network layer
988 * @dev: 3c527 that timed out
990 * Handle a timeout on transmit from the 3c527. This normally means
991 * bad things as the hardware handles cable timeouts and mess for
996 static void mc32_timeout(struct net_device *dev)
998 printk(KERN_WARNING "%s: transmit timed out?\n", dev->name);
999 /* Try to restart the adaptor. */
1000 netif_wake_queue(dev);
1005 * mc32_send_packet - queue a frame for transmit
1006 * @skb: buffer to transmit
1007 * @dev: 3c527 to send it out of
1009 * Transmit a buffer. This normally means throwing the buffer onto
1010 * the transmit queue as the queue is quite large. If the queue is
1011 * full then we set tx_busy and return. Once the interrupt handler
1012 * gets messages telling it to reclaim transmit queue entries, we will
1013 * clear tx_busy and the kernel will start calling this again.
1015 * We do not disable interrupts or acquire any locks; this can
1016 * run concurrently with mc32_tx_ring(), and the function itself
1017 * is serialised at a higher layer. However, similarly for the
1018 * card itself, we must ensure that we update tx_ring_head only
1019 * after we've established a valid packet on the tx ring (and
1020 * before we let the card "see" it, to prevent it racing with the
1025 static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev)
1027 struct mc32_local *lp = netdev_priv(dev);
1028 u32 head = atomic_read(&lp->tx_ring_head);
1030 volatile struct skb_header *p, *np;
1032 netif_stop_queue(dev);
1034 if(atomic_read(&lp->tx_count)==0) {
1038 if (skb_padto(skb, ETH_ZLEN)) {
1039 netif_wake_queue(dev);
1043 atomic_dec(&lp->tx_count);
1045 /* P is the last sending/sent buffer as a pointer */
1046 p=lp->tx_ring[head].p;
1048 head = next_tx(head);
1050 /* NP is the buffer we will be loading */
1051 np=lp->tx_ring[head].p;
1053 /* We will need this to flush the buffer out */
1054 lp->tx_ring[head].skb=skb;
1056 np->length = unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
1057 np->data = isa_virt_to_bus(skb->data);
1059 np->control = CONTROL_EOP | CONTROL_EOL;
1063 * The new frame has been setup; we can now
1064 * let the interrupt handler and card "see" it
1067 atomic_set(&lp->tx_ring_head, head);
1068 p->control &= ~CONTROL_EOL;
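/* Clearing EOL on the previous descriptor is what lets the card
   advance onto the frame we just queued. */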
1070 netif_wake_queue(dev);
1076 * mc32_update_stats - pull off the on board statistics
1077 * @dev: 3c527 to service
1080 * Query and reset the on-card stats. There's the small possibility
1081 * of a race here, which would result in an underestimation of
1082 * actual errors. As such, we'd prefer to keep all our stats
1083 * collection in software. As a rule, we do. However it can't be
1084 * used for rx errors and collisions as, by default, the card discards
1087 * Setting the SAV BP in the rx filter command supposedly
1088 * stops this behaviour. However, testing shows that it only seems to
1089 * enable the collation of on-card rx statistics --- the driver
1090 * never sees an RX descriptor with an error status set.
1094 static void mc32_update_stats(struct net_device *dev)
1096 struct mc32_local *lp = netdev_priv(dev);
1097 volatile struct mc32_stats *st = lp->stats;
1101 rx_errors+=dev->stats.rx_crc_errors +=st->rx_crc_errors;
1102 st->rx_crc_errors=0;
1103 rx_errors+=dev->stats.rx_fifo_errors +=st->rx_overrun_errors;
1104 st->rx_overrun_errors=0;
1105 rx_errors+=dev->stats.rx_frame_errors +=st->rx_alignment_errors;
1106 st->rx_alignment_errors=0;
1107 rx_errors+=dev->stats.rx_length_errors+=st->rx_tooshort_errors;
1108 st->rx_tooshort_errors=0;
1109 rx_errors+=dev->stats.rx_missed_errors+=st->rx_outofresource_errors;
1110 st->rx_outofresource_errors=0;
1111 dev->stats.rx_errors=rx_errors;
1113 /* Number of packets which saw one collision */
1114 dev->stats.collisions+=st->dataC[10];
1117 /* Number of packets which saw 2--15 collisions */
1118 dev->stats.collisions+=st->dataC[11];
1124 * mc32_rx_ring - process the receive ring
1125 * @dev: 3c527 that needs its receive ring processing
1128 * We have received one or more indications from the card that a
1129 * receive has completed. The buffer ring thus contains dirty
1130 * entries. We walk the ring by iterating over the circular rx_ring
1131 * array, starting at the next dirty buffer (which happens to be the
1132 * one we finished up at last time around).
1134 * For each completed packet, we will either copy it and pass it up
1135 * the stack or, if the packet is near MTU sized, we allocate
1136 * another buffer and flip the old one up the stack.
1138 * We must succeed in keeping a buffer on the ring. If necessary we
1139 * will toss a received packet rather than lose a ring entry. Once
1140 * the first uncompleted descriptor is found, we move the
1141 * End-Of-List bit to include the buffers just processed.
1145 static void mc32_rx_ring(struct net_device *dev)
1147 struct mc32_local *lp = netdev_priv(dev);
1148 volatile struct skb_header *p;
1153 rx_old_tail = rx_ring_tail = lp->rx_ring_tail;
1157 p=lp->rx_ring[rx_ring_tail].p;
1159 if(!(p->status & (1<<7))) { /* Not COMPLETED */
1162 if(p->status & (1<<6)) /* COMPLETED_OK */
1165 u16 length=p->length;
1166 struct sk_buff *skb;
1167 struct sk_buff *newskb;
1169 /* Try to save time by avoiding a copy on big frames */
1171 if ((length > RX_COPYBREAK)
1172 && ((newskb=dev_alloc_skb(1532)) != NULL))
1174 skb=lp->rx_ring[rx_ring_tail].skb;
1175 skb_put(skb, length);
1177 skb_reserve(newskb,18);
1178 lp->rx_ring[rx_ring_tail].skb=newskb;
1179 p->data=isa_virt_to_bus(newskb->data);
1183 skb=dev_alloc_skb(length+2);
1186 dev->stats.rx_dropped++;
1191 memcpy(skb_put(skb, length),
1192 lp->rx_ring[rx_ring_tail].skb->data, length);
1195 skb->protocol=eth_type_trans(skb,dev);
1196 dev->stats.rx_packets++;
1197 dev->stats.rx_bytes += length;
1205 rx_ring_tail=next_rx(rx_ring_tail);
1209 /* If there was actually a frame to be processed, place the EOL bit */
1210 /* at the descriptor prior to the one to be filled next */
1212 if (rx_ring_tail != rx_old_tail)
1214 lp->rx_ring[prev_rx(rx_ring_tail)].p->control |= CONTROL_EOL;
1215 lp->rx_ring[prev_rx(rx_old_tail)].p->control &= ~CONTROL_EOL;
1217 lp->rx_ring_tail=rx_ring_tail;
1223 * mc32_tx_ring - process completed transmits
1224 * @dev: 3c527 that needs its transmit ring processing
1227 * This operates in a similar fashion to mc32_rx_ring. We iterate
1228 * over the transmit ring. For each descriptor which has been
1229 * processed by the card, we free its associated buffer and note
1230 * any errors. This continues until the transmit ring is emptied
1231 * or we reach a descriptor that hasn't yet been processed by the
1236 static void mc32_tx_ring(struct net_device *dev)
1238 struct mc32_local *lp = netdev_priv(dev);
1239 volatile struct skb_header *np;
1242 * We rely on head==tail to mean 'queue empty'.
1243 * This is why lp->tx_count=TX_RING_LEN-1: in order to prevent
1244 * tx_ring_head wrapping to tail and confusing a 'queue empty'
1245 * condition with 'queue full'
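* (With TX_RING_LEN of 32 this leaves at most 31 frames in flight,
* so tx_ring_head can never advance onto tx_ring_tail.)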
1248 while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head))
1252 t=next_tx(lp->tx_ring_tail);
1253 np=lp->tx_ring[t].p;
1255 if(!(np->status & (1<<7)))
1260 dev->stats.tx_packets++;
1261 if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
1263 dev->stats.tx_errors++;
1265 switch(np->status&0x0F)
1268 dev->stats.tx_aborted_errors++;
1269 break; /* Max collisions */
1271 dev->stats.tx_fifo_errors++;
1274 dev->stats.tx_carrier_errors++;
1277 dev->stats.tx_window_errors++;
1278 break; /* CTS Lost */
1280 dev->stats.tx_aborted_errors++;
1281 break; /* Transmit timeout */
1284 /* Packets are sent in order - this is
1285 basically a FIFO queue of buffers matching
1287 dev->stats.tx_bytes+=lp->tx_ring[t].skb->len;
1288 dev_kfree_skb_irq(lp->tx_ring[t].skb);
1289 lp->tx_ring[t].skb=NULL;
1290 atomic_inc(&lp->tx_count);
1291 netif_wake_queue(dev);
1300 * mc32_interrupt - handle an interrupt from a 3c527
1301 * @irq: Interrupt number
1302 * @dev_id: 3c527 that requires servicing
1303 * @regs: Registers (unused)
1306 * An interrupt is raised whenever the 3c527 writes to the command
1307 * register. This register contains the message it wishes to send us
1308 * packed into a single byte field. We keep reading status entries
1309 * until we have processed all the control items, but simply count
1310 * transmit and receive reports. When all reports are in we empty the
1311 * transceiver rings as appropriate. This saves the overhead of
1312 * multiple command requests.
1314 * Because MCA is level-triggered, we shouldn't miss indications.
1315 * Therefore, we needn't ask the card to suspend interrupts within
1316 * this handler. The card receives an implicit acknowledgment of the
1317 * current interrupt when we read the command register.
1321 static irqreturn_t mc32_interrupt(int irq, void *dev_id)
1323 struct net_device *dev = dev_id;
1324 struct mc32_local *lp;
1325 int ioaddr, status, boguscount = 0;
1329 ioaddr = dev->base_addr;
1330 lp = netdev_priv(dev);
1332 /* See what's cooking */
1334 while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000)
1336 status=inb(ioaddr+HOST_CMD);
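/* The status byte packs all outstanding reports: bits 0-2 carry the
   tx message, bits 3-5 the rx message, bit 6 exec completion and
   bit 7 counter overflow (decoded by the debug printk below). */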
1339 printk("Status TX%d RX%d EX%d OV%d BC%d\n",
1340 (status&7), (status>>3)&7, (status>>6)&1,
1341 (status>>7)&1, boguscount);
1348 case 6: /* TX fail */
1354 complete(&lp->xceiver_cmd);
1357 printk("%s: strange tx ack %d\n", dev->name, status&7);
1369 complete(&lp->xceiver_cmd);
1372 /* Out of RX buffers stat */
1373 /* Must restart rx */
1374 dev->stats.rx_dropped++;
1376 mc32_start_transceiver(dev);
1379 printk("%s: strange rx ack %d\n",
1380 dev->name, status&7);
1386 * No thread is waiting: we need to tidy
1390 if (lp->cmd_nonblocking) {
1392 if (lp->mc_reload_wait)
1393 mc32_reset_multicast_list(dev);
1395 else complete(&lp->execution_cmd);
1400 * We get interrupted once per
1401 * counter that is about to overflow.
1404 mc32_update_stats(dev);
1410 * Process the transmit and receive rings
1424 * mc32_close - user configuring the 3c527 down
1425 * @dev: 3c527 card to shut down
1427 * The 3c527 is a bus mastering device. We must be careful how we
1428 * shut it down. It may also be running a shared interrupt so we have
1429 * to be sure to silence it properly
1431 * We indicate that the card is closing to the rest of the
1432 * driver. Otherwise, it is possible that the card may run out
1433 * of receive buffers and restart the transceiver while we're
1434 * trying to close it.
1436 * We abort any receive and transmits going on and then wait until
1437 * any pending exec commands have completed in other code threads.
1438 * In theory we can't get here while that is true, in practice I am
1441 * We turn off the interrupt enable for the board to be sure it can't
1442 * interfere with other devices.
1445 static int mc32_close(struct net_device *dev)
1447 struct mc32_local *lp = netdev_priv(dev);
1448 int ioaddr = dev->base_addr;
1453 lp->xceiver_desired_state = HALTED;
1454 netif_stop_queue(dev);
1457 * Send the indications on command (handy debug check)
1460 mc32_command(dev, 4, &one, 2);
1462 /* Shut down the transceiver */
1464 mc32_halt_transceiver(dev);
1466 /* Ensure we issue no more commands beyond this point */
1468 down(&lp->cmd_mutex);
1470 /* Ok the card is now stopping */
1472 regs=inb(ioaddr+HOST_CTRL);
1473 regs&=~HOST_CTRL_INTE;
1474 outb(regs, ioaddr+HOST_CTRL);
1476 mc32_flush_rx_ring(dev);
1477 mc32_flush_tx_ring(dev);
1479 mc32_update_stats(dev);
1486 * mc32_get_stats - hand back stats to network layer
1487 * @dev: The 3c527 card to handle
1489 * We've collected all the stats we can in software already. Now
1490 * it's time to update those kept on-card and return the lot.
1494 static struct net_device_stats *mc32_get_stats(struct net_device *dev)
1496 mc32_update_stats(dev);
1502 * do_mc32_set_multicast_list - attempt to update multicasts
1503 * @dev: 3c527 device to load the list on
1504 * @retry: indicates this is not the first call.
1507 * Actually set or clear the multicast filter for this adaptor. The
1508 * locking issues are handled by this routine. We have to track
1509 * state as it may take multiple calls to get the command sequence
1510 * completed. We just keep trying to schedule the loads until we
1511 * manage to process them all.
1513 * num_addrs == -1 Promiscuous mode, receive all packets
1515 * num_addrs == 0 Normal mode, clear multicast list
1517 * num_addrs > 0 Multicast mode, receive normal and MC packets,
1518 * and do best-effort filtering.
1520 * See mc32_update_stats() regards setting the SAV BP bit.
1524 static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
1526 struct mc32_local *lp = netdev_priv(dev);
1527 u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */
1529 if ((dev->flags&IFF_PROMISC) ||
1530 (dev->flags&IFF_ALLMULTI) ||
1532 /* Enable promiscuous mode */
1534 else if(dev->mc_count)
1536 unsigned char block[62];
1538 struct dev_mc_list *dmc=dev->mc_list;
1543 lp->mc_list_valid = 0;
1544 if(!lp->mc_list_valid)
1547 block[0]=dev->mc_count;
1550 for(i=0;i<dev->mc_count;i++)
1552 memcpy(bp, dmc->dmi_addr, 6);
1556 if(mc32_command_nowait(dev, 2, block, 2+6*dev->mc_count)==-1)
1558 lp->mc_reload_wait = 1;
1561 lp->mc_list_valid=1;
1565 if(mc32_command_nowait(dev, 0, &filt, 2)==-1)
1567 lp->mc_reload_wait = 1;
1570 lp->mc_reload_wait = 0;
1576 * mc32_set_multicast_list - queue multicast list update
1577 * @dev: The 3c527 to use
1579 * Commence loading the multicast list. This is called when the kernel
1580 * changes the lists. It will override any pending list we are trying to
1584 static void mc32_set_multicast_list(struct net_device *dev)
1586 do_mc32_set_multicast_list(dev,0);
1591 * mc32_reset_multicast_list - reset multicast list
1592 * @dev: The 3c527 to use
1594 * Attempt the next step in loading the multicast lists. If this attempt
1595 * fails to complete then it will be scheduled and this function called
1596 * again later from elsewhere.
1599 static void mc32_reset_multicast_list(struct net_device *dev)
1601 do_mc32_set_multicast_list(dev,1);
1604 static void netdev_get_drvinfo(struct net_device *dev,
1605 struct ethtool_drvinfo *info)
1607 strcpy(info->driver, DRV_NAME);
1608 strcpy(info->version, DRV_VERSION);
1609 sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
1612 static u32 netdev_get_msglevel(struct net_device *dev)
1617 static void netdev_set_msglevel(struct net_device *dev, u32 level)
1622 static const struct ethtool_ops netdev_ethtool_ops = {
1623 .get_drvinfo = netdev_get_drvinfo,
1624 .get_msglevel = netdev_get_msglevel,
1625 .set_msglevel = netdev_set_msglevel,
1630 static struct net_device *this_device;
1633 * init_module - entry point
1635 * Probe and locate a 3c527 card. This really should probe and locate
1636 * all the 3c527 cards in the machine not just one of them. Yes you can
1637 * insmod multiple modules for now but it's a hack.
1640 int __init init_module(void)
1642 this_device = mc32_probe(-1);
1643 if (IS_ERR(this_device))
1644 return PTR_ERR(this_device);
1649 * cleanup_module - free resources for an unload
1651 * Unloading time. We release the MCA bus resources and the interrupt
1652 * at which point everything is ready to unload. The card must be stopped
1653 * at this point or we would not have been called. When we unload we
1654 * leave the card stopped but not totally shut down. When the card is
1655 * initialized it must be rebooted or the rings reloaded before any
1656 * transmit operations are allowed to start scribbling into memory.
1659 void __exit cleanup_module(void)
1661 unregister_netdev(this_device);
1662 cleanup_card(this_device);
1663 free_netdev(this_device);