/*
 * ni6510 (am7990 'lance' chip) driver for Linux-net-3
 * BETAcode v0.71 (96/09/29) for 2.0.0 (or later)
 * copyright (c) 1994,1995,1996 by M.Hipp
 *
 * This driver can handle the old ni6510 board and the newer ni6510
 * EtherBlaster. (It probably also works with every full NE2100
 * compatible card.)
 *
 * To compile as module, type:
 *     gcc -O2 -fomit-frame-pointer -m486 -D__KERNEL__ -DMODULE -c ni65.c
 * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7
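 *
 * to load the compiled module (an illustrative invocation; all three
 * parameters are optional, and io=0 falls back to the probe list above):
 *     insmod ni65.o io=0x360 irq=9 dma=5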
 *
 * This is an extension to the Linux operating system, and is covered by the
 * same GNU General Public License that covers the Linux-kernel.
 *
 * comments/bugs/suggestions can be sent to:
 *   email: hippm@informatik.uni-tuebingen.de
 *
 * some things are from the 'ni6510-packet-driver for dos by Russ Nelson'
 * and from the original drivers by D. Becker
 *
 * known problems:
 * - on some PCI boards (including my own) the card/board/ISA-bridge has
 *   problems with bus-master DMA. This results in lots of overruns.
 *   It may help to '#define RCV_PARANOIA_CHECK' or to #undef the
 *   XMT_VIA_SKB and RCV_VIA_SKB options .. this reduces driver performance.
 *   Or just play with your BIOS options to optimize ISA-DMA access.
 *   You may also want to play with the LOW_PERFORMANCE and MID_PERFORMANCE
 *   defines -> please report your experiences
 * - Harald reported that on ASUS SP3G mainboards you should use
 *   the 'optimal settings' from the user's manual on page 3-12!
 *
 * credits:
 * thanks to Jason Sullivan for sending me a ni6510 card!
 * lots of debug runs with ASUS SP3G boards (Intel Saturn) by Harald Koenig
 *
 * simple performance test: (486DX-33/Ni6510-EB receives from 486DX4-100/Ni6510-EB)
 *   average: FTP -> 8384421 bytes received in 8.5 seconds
 *            (no RCV_VIA_SKB, no XMT_VIA_SKB, PARANOIA_CHECK, 4 XMIT BUFS, 8 RCV BUFFS)
 *   peak:    FTP -> 8384421 bytes received in 7.5 seconds
 *            (RCV_VIA_SKB, XMT_VIA_SKB, no PARANOIA_CHECK, 1(!) XMIT BUF, 16 RCV BUFFS)
 *
 * version history:
 *   99.Jun.8: added support for /proc/net/dev byte count for xosview (HK)
 *   96.Sept.29: virt_to_bus stuff added for new memory model
 *   96.April.29: added Harald Koenig's patches (MH)
 *   96.April.13: enhanced error handling .. more tests (MH)
 *   96.April.5/6: a lot of performance tests. Got it stable now (hopefully) (MH)
 *   96.April.1: (no joke ;) .. added EtherBlaster and module support (MH)
 *   96.Feb.19: fixed a few bugs .. cleanups .. tested for 1.3.66 (MH)
 *              hopefully no more 16MB limit
 *   95.Nov.18: multicast tweaked (AC).
 *   94.Aug.22: changes in xmit_intr (ack more than one xmitted-packet), ni65_send_packet (p->lock) (MH)
 *   94.July.16: fixed bugs in recv_skb and skb-alloc stuff (MH)
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/dma.h>

#include "ni65.h"

/*
 * the current setting allows acceptable performance.
 * for 'RCV_PARANOIA_CHECK' read the 'known problems' part in
 * the header of this file.
 * 'invert' the defines for maximum performance. This may cause DMA problems
 * on some boards (e.g. on my ASUS SP3G).
 */
#define XMT_VIA_SKB
#define RCV_VIA_SKB
#define RCV_PARANOIA_CHECK

#define MID_PERFORMANCE

#if defined( LOW_PERFORMANCE )
 static int isa0=7,isa1=7,csr80=0x0c10;
#elif defined( MID_PERFORMANCE )
 static int isa0=5,isa1=5,csr80=0x2810;
#else /* high performance */
 static int isa0=4,isa1=4,csr80=0x0017;
#endif
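
/*
 * Worked example (illustrative, values from the table above): with
 * MID_PERFORMANCE the bus interface is programmed with isa0 = isa1 = 5,
 * i.e. 5 * 50ns = 250ns ISA-DMA read/write timing (see
 * ni65_set_performance() below), and 0x2810 goes into register 80 as the
 * FIFO watermarks.
 */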

/*
 * a few card/vendor specific defines
 */
#define NI65_ID0 0x00
#define NI65_ID1 0x55
#define NI65_EB_ID0 0x52
#define NI65_EB_ID1 0x44
#define NE2100_ID0 0x57
#define NE2100_ID1 0x57

#define PORT p->cmdr_addr

/*
 * buffer configuration
 */
#if 1
#define RMDNUM 16
#define RMDNUMMASK 0x80000000
#else
#define RMDNUM 8
#define RMDNUMMASK 0x60000000 /* log2(RMDNUM)<<29 */
#endif

#if 0
#define TMDNUM 1
#define TMDNUMMASK 0x00000000
#else
#define TMDNUM 4
#define TMDNUMMASK 0x40000000 /* log2(TMDNUM)<<29 */
#endif
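
/*
 * Sketch of the encoding above (not part of the original driver): the LANCE
 * expects log2(ring size) in the top three bits of each descriptor ring
 * pointer, so 16 RMDs -> 4<<29 = 0x80000000 and 4 TMDs -> 2<<29 = 0x40000000.
 * A hypothetical helper (unused, for illustration only) could compute it:
 */
#if 0
#define RING_LEN_BITS(num) ((u32)(ffs(num) - 1) << 29) /* num must be a power of 2 */
#endif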

/* slightly oversized */
#define R_BUF_SIZE 1544
#define T_BUF_SIZE 1544

/*
 * lance register defines
 */
#define L_DATAREG 0x00
#define L_ADDRREG 0x02
#define L_RESET   0x04
#define L_CONFIG  0x05
#define L_BUSIF   0x06

/*
 * to access the lance/am7990-regs, you have to write
 * the reg-number into L_ADDRREG, then you can access it through L_DATAREG
 */
#define CSR0 0x00
#define CSR1 0x01
#define CSR2 0x02
#define CSR3 0x03

#define INIT_RING_BEFORE_START	0x1
#define FULL_RESET_ON_ERROR	0x2

#if 0
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
                           outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
                       inw(PORT+L_DATAREG))
#if 0
#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#else
#define writedatareg(val) { writereg(val,CSR0); }
#endif
#else
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG))
#define writedatareg(val) { writereg(val,CSR0); }
#endif
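
/*
 * For readability, the same access protocol written out as functions.
 * This is only an illustrative sketch (the driver itself uses the fast
 * macro variants above); 'ioaddr' stands for what PORT expands to.
 */
#if 0
static inline void lance_writereg(int ioaddr, unsigned short val, unsigned short reg)
{
	outw(reg, ioaddr + L_ADDRREG);	/* select the register number */
	outw(val, ioaddr + L_DATAREG);	/* then write its contents */
}

static inline unsigned short lance_readreg(int ioaddr, unsigned short reg)
{
	outw(reg, ioaddr + L_ADDRREG);	/* select the register number */
	return inw(ioaddr + L_DATAREG);	/* then read its contents */
}
#endif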

static unsigned char ni_vendor[] = { 0x02,0x07,0x01 };

static struct card {
	unsigned char id0,id1;
	short id_offset;
	short total_size;
	short cmd_offset;
	short addr_offset;
	unsigned char *vendor_id;
	char *cardname;
	unsigned long config;
} cards[] = {
	{
		.vendor_id = ni_vendor,
		.cardname = "ni6510",
	},
	{
		.vendor_id = ni_vendor,
		.cardname = "ni6510 EtherBlaster",
	},
	{
		.cardname = "generic NE2100",
	},
};
#define NUM_CARDS 3

struct priv
{
	struct rmd rmdhead[RMDNUM];
	struct tmd tmdhead[TMDNUM];
	struct init_block ib;
	int rmdnum;
	int tmdnum,tmdlast;
#ifdef RCV_VIA_SKB
	struct sk_buff *recv_skb[RMDNUM];
#else
	void *recvbounce[RMDNUM];
#endif
#ifdef XMT_VIA_SKB
	struct sk_buff *tmd_skb[TMDNUM];
#endif
	void *tmdbounce[TMDNUM];
	int tmdbouncenum;
	int lock,xmit_queued;
	struct net_device_stats stats;
	void *self;
	int cmdr_addr;
	int cardno;
	int features;
	spinlock_t ring_lock;
};

static int ni65_probe1(struct net_device *dev,int);
static irqreturn_t ni65_interrupt(int irq, void * dev_id, struct pt_regs *regs);
static void ni65_recv_intr(struct net_device *dev,int);
static void ni65_xmit_intr(struct net_device *dev,int);
static int ni65_open(struct net_device *dev);
static int ni65_lance_reinit(struct net_device *dev);
static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev);
static void ni65_timeout(struct net_device *dev);
static int ni65_close(struct net_device *dev);
static int ni65_alloc_buffer(struct net_device *dev);
static void ni65_free_buffer(struct priv *p);
static struct net_device_stats *ni65_get_stats(struct net_device *);
static void set_multicast_list(struct net_device *dev);

static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */
static int dmatab[] __initdata = { 0,3,5,6,7 }; /* dma config-translate and autodetect */
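
/*
 * Worked example (illustrative): a card config word of 0x0b decodes as
 * irq = irqtab[(0x0b >> 2) & 3] = irqtab[2] = 15 and
 * dma = dmatab[0x0b & 3] = dmatab[3] = 6 (see ni65_probe1() below).
 */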

static int debuglevel = 1;

/*
 * set 'performance' registers .. we must STOP lance for that
 */
static void ni65_set_performance(struct priv *p)
{
	writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP */

	if( !(cards[p->cardno].config & 0x02) )
		return;

	outw(80,PORT+L_ADDRREG);
	if(inw(PORT+L_ADDRREG) != 80)
		return;

	writereg( (csr80 & 0x3fff) ,80); /* FIFO watermarks */
	outw(0,PORT+L_ADDRREG);
	outw((short)isa0,PORT+L_BUSIF); /* write ISA 0: DMA_R : isa0 * 50ns */
	outw(1,PORT+L_ADDRREG);
	outw((short)isa1,PORT+L_BUSIF); /* write ISA 1: DMA_W : isa1 * 50ns */

	outw(CSR0,PORT+L_ADDRREG); /* switch back to CSR0 */
}

/*
 * open interface (up)
 */
static int ni65_open(struct net_device *dev)
{
	struct priv *p = (struct priv *) dev->priv;
	int irqval = request_irq(dev->irq, &ni65_interrupt,0,
	                         cards[p->cardno].cardname,dev);
	if (irqval) {
		printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n",
		       dev->name,dev->irq, irqval);
		return -EAGAIN;
	}

	if(ni65_lance_reinit(dev))
	{
		netif_start_queue(dev);
		return 0;
	}
	else
	{
		free_irq(dev->irq,dev);
		return -EAGAIN;
	}
}

/*
 * close interface (down)
 */
static int ni65_close(struct net_device *dev)
{
	struct priv *p = (struct priv *) dev->priv;

	netif_stop_queue(dev);

	outw(inw(PORT+L_RESET),PORT+L_RESET); /* that's the hard way */

#ifdef XMT_VIA_SKB
	{
		int i;
		for(i=0;i<TMDNUM;i++)
		{
			if(p->tmd_skb[i]) {
				dev_kfree_skb(p->tmd_skb[i]);
				p->tmd_skb[i] = NULL;
			}
		}
	}
#endif
	free_irq(dev->irq,dev);
	return 0;
}

static void cleanup_card(struct net_device *dev)
{
	struct priv *p = (struct priv *) dev->priv;
	disable_dma(dev->dma);
	free_dma(dev->dma);
	release_region(dev->base_addr, cards[p->cardno].total_size);
	ni65_free_buffer(p);
}

/* set: io,irq,dma or set it when calling insmod */
static int irq;
static int io;
static int dma;

/*
 * Probe The Card (not the lance-chip)
 */
struct net_device * __init ni65_probe(int unit)
{
	struct net_device *dev = alloc_etherdev(0);
	static int ports[] = {0x360,0x300,0x320,0x340, 0};
	int *port;
	int err = 0;

	if (!dev)
		return ERR_PTR(-ENOMEM);

	if (unit >= 0) {
		sprintf(dev->name, "eth%d", unit);
		netdev_boot_setup_check(dev);
	} else {
		dev->irq = irq;
		dev->base_addr = io;
		dev->dma = dma;
	}

	if (dev->base_addr > 0x1ff) { /* Check a single specified location. */
		err = ni65_probe1(dev, dev->base_addr);
	} else if (dev->base_addr > 0) { /* Don't probe at all. */
		err = -ENXIO;
	} else {
		for (port = ports; *port && ni65_probe1(dev, *port); port++)
			;
		if (!*port)
			err = -ENODEV;
	}
	if (err)
		goto out;

	err = register_netdev(dev);
	if (err)
		goto out1;
	return dev;
out1:
	cleanup_card(dev);
out:
	free_netdev(dev);
	return ERR_PTR(err);
}

/*
 * this is the real card probe ..
 */
static int __init ni65_probe1(struct net_device *dev,int ioaddr)
{
	int i,j;
	struct priv *p;
	unsigned long flags;

	for(i=0;i<NUM_CARDS;i++) {
		if(!request_region(ioaddr, cards[i].total_size, cards[i].cardname))
			continue;
		if(cards[i].id_offset >= 0) {
			if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 ||
			   inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) {
				release_region(ioaddr, cards[i].total_size);
				continue;
			}
		}
		if(cards[i].vendor_id) {
			for(j=0;j<3;j++)
				if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j]) {
					release_region(ioaddr, cards[i].total_size);
					break;
				}
			if(j < 3)
				continue;
		}
		break;
	}
	if(i == NUM_CARDS)
		return -ENODEV;

	for(j=0;j<6;j++)
		dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j);

	if( (j=ni65_alloc_buffer(dev)) < 0) {
		release_region(ioaddr, cards[i].total_size);
		return j;
	}
	p = (struct priv *) dev->priv;
	p->cmdr_addr = ioaddr + cards[i].cmd_offset;
	p->cardno = i;
	spin_lock_init(&p->ring_lock);

	printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr);

	outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
	if( (j=readreg(CSR0)) != 0x4)
	{
		printk(KERN_ERR "%s: Can't RESET card: %04x\n", dev->name, j);
		ni65_free_buffer(p);
		release_region(ioaddr, cards[p->cardno].total_size);
		return -EAGAIN;
	}

	outw(88,PORT+L_ADDRREG);
	if(inw(PORT+L_ADDRREG) == 88) {
		unsigned long v;
		v = inw(PORT+L_DATAREG);
		v <<= 16;
		outw(89,PORT+L_ADDRREG);
		v |= inw(PORT+L_DATAREG);
		printk("Version %#08lx, ",v);
		p->features = INIT_RING_BEFORE_START;
	}
	else {
		printk("ancient LANCE, ");
		p->features = 0x0;
	}

	if(test_bit(0,&cards[i].config)) {
		dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3];
		dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3];
		printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma);
	}
	else {
		if(dev->dma == 0) {
			/* 'stuck test' from lance.c: a set request bit means the
			   channel is already in use and must be skipped */
			long dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
					    (inb(DMA2_STAT_REG) & 0xf0);
			for(i=1;i<5;i++) {
				int dma = dmatab[i];
				if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510"))
					continue;

				flags=claim_dma_lock();
				disable_dma(dma);
				set_dma_mode(dma,DMA_MODE_CASCADE);
				enable_dma(dma);
				release_dma_lock(flags);

				ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */

				flags=claim_dma_lock();
				disable_dma(dma);
				free_dma(dma);
				release_dma_lock(flags);

				if(readreg(CSR0) & CSR0_IDON)
					break;
			}
			if(i == 5) {
				printk(KERN_ERR "%s: Can't detect DMA channel!\n", dev->name);
				ni65_free_buffer(p);
				release_region(ioaddr, cards[p->cardno].total_size);
				return -EAGAIN;
			}
			dev->dma = dmatab[i];
			printk("DMA %d (autodetected), ",dev->dma);
		}
		else
			printk("DMA %d (assigned), ",dev->dma);

		if(dev->irq < 2)
		{
			unsigned long irq_mask;

			ni65_init_lance(p,dev->dev_addr,0,0);
			irq_mask = probe_irq_on();
			writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */
			msleep(20);
			dev->irq = probe_irq_off(irq_mask);
			if(!dev->irq)
			{
				printk("Failed to detect IRQ line!\n");
				ni65_free_buffer(p);
				release_region(ioaddr, cards[p->cardno].total_size);
				return -EAGAIN;
			}
			printk("IRQ %d (autodetected).\n",dev->irq);
		}
		else
			printk("IRQ %d (assigned).\n",dev->irq);
	}

	if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0)
	{
		printk(KERN_ERR "%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
		ni65_free_buffer(p);
		release_region(ioaddr, cards[p->cardno].total_size);
		return -EAGAIN;
	}

	dev->base_addr = ioaddr;
	SET_MODULE_OWNER(dev);
	dev->open = ni65_open;
	dev->stop = ni65_close;
	dev->hard_start_xmit = ni65_send_packet;
	dev->tx_timeout = ni65_timeout;
	dev->watchdog_timeo = HZ/2;
	dev->get_stats = ni65_get_stats;
	dev->set_multicast_list = set_multicast_list;
	return 0; /* everything is OK */
}

/*
 * set lance register and trigger init
 */
static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode)
{
	int i;
	u32 pib;

	writereg(CSR0_CLRALL|CSR0_STOP,CSR0);

	for(i=0;i<6;i++)
		p->ib.eaddr[i] = daddr[i];

	for(i=0;i<8;i++)
		p->ib.filter[i] = filter;
	p->ib.mode = mode;

	p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK;
	p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK;
	writereg(0,CSR3); /* busmaster/no word-swap */
	pib = (u32) isa_virt_to_bus(&p->ib);
	writereg(pib & 0xffff,CSR1);
	writereg(pib >> 16,CSR2);

	writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */

	for(i=0;i<32;i++)
	{
		mdelay(4);
		if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) )
			break; /* init ok ? */
	}
}
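
/*
 * Worked example (illustrative): the init block address is split across
 * CSR1/CSR2 as low/high halves, so pib = 0x00123456 is programmed as
 * CSR1 = 0x3456 and CSR2 = 0x0012.
 */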

/*
 * allocate memory area and check the 16MB border
 * (the LANCE can only generate 24-bit bus addresses)
 */
static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type)
{
	struct sk_buff *skb=NULL;
	unsigned char *ptr;
	void *ret;

	if(type) {
		ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA);
		if(!skb) {
			printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
			return NULL;
		}
		skb_reserve(skb,2+16);
		skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */
		ptr = skb->data;
	}
	else {
		ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA);
		if(!ptr) {
			printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
			return NULL;
		}
	}
	if( (u32) virt_to_phys(ptr+size) > 0x1000000) {
		printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what);
		if(type)
			kfree_skb(skb);
		else
			kfree(ptr);
		return NULL;
	}
	return ret;
}
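
/*
 * The same 16MB test written as a small predicate, as a sketch only (this
 * helper is hypothetical and not used by the driver):
 */
#if 0
static inline int ni65_dma_reachable(void *ptr, int size)
{
	/* the LANCE emits 24-bit bus addresses, so the whole buffer must
	   lie below the 16MB ISA-DMA boundary */
	return (u32) virt_to_phys((char *) ptr + size) <= 0x1000000;
}
#endif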

/*
 * allocate all memory structures .. send/recv buffers etc ...
 */
static int ni65_alloc_buffer(struct net_device *dev)
{
	unsigned char *ptr;
	struct priv *p;
	int i;

	/*
	 * we need 8-aligned memory ..
	 */
	ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0);
	if(!ptr)
		return -ENOMEM;

	p = dev->priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7);
	memset((char *) dev->priv,0,sizeof(struct priv));
	p->self = ptr; /* keep the unaligned pointer for kfree() */

	for(i=0;i<TMDNUM;i++)
	{
#ifdef XMT_VIA_SKB
		p->tmd_skb[i] = NULL;
#endif
		p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0);
		if(!p->tmdbounce[i]) {
			ni65_free_buffer(p);
			return -ENOMEM;
		}
	}

	for(i=0;i<RMDNUM;i++)
	{
#ifdef RCV_VIA_SKB
		p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1);
		if(!p->recv_skb[i]) {
			ni65_free_buffer(p);
			return -ENOMEM;
		}
#else
		p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0);
		if(!p->recvbounce[i]) {
			ni65_free_buffer(p);
			return -ENOMEM;
		}
#endif
	}

	return 0; /* everything is OK */
}

/*
 * free buffers and private struct
 */
static void ni65_free_buffer(struct priv *p)
{
	int i;

	if(!p)
		return;

	for(i=0;i<TMDNUM;i++) {
		kfree(p->tmdbounce[i]);
#ifdef XMT_VIA_SKB
		if(p->tmd_skb[i])
			dev_kfree_skb(p->tmd_skb[i]);
#endif
	}

	for(i=0;i<RMDNUM;i++)
	{
#ifdef RCV_VIA_SKB
		if(p->recv_skb[i])
			dev_kfree_skb(p->recv_skb[i]);
#else
		kfree(p->recvbounce[i]);
#endif
	}
	kfree(p->self);
}

/*
 * stop and (re)start lance .. e.g. after an error
 */
static void ni65_stop_start(struct net_device *dev,struct priv *p)
{
	int csr0 = CSR0_INEA;

	writedatareg(CSR0_STOP);

	if(debuglevel > 1)
		printk(KERN_DEBUG "ni65_stop_start\n");

	if(p->features & INIT_RING_BEFORE_START) {
		int i;
#ifdef XMT_VIA_SKB
		struct sk_buff *skb_save[TMDNUM];
#endif
		unsigned long buffer[TMDNUM];
		short blen[TMDNUM];

		if(p->xmit_queued) {
			while(1) {
				if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN))
					break;
				p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
				if(p->tmdlast == p->tmdnum)
					break;
			}
		}

		for(i=0;i<TMDNUM;i++) {
			struct tmd *tmdp = p->tmdhead + i;
#ifdef XMT_VIA_SKB
			skb_save[i] = p->tmd_skb[i];
#endif
			buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer);
			blen[i] = tmdp->blen;
			tmdp->u.s.status = 0x0;
		}

		for(i=0;i<RMDNUM;i++) {
			struct rmd *rmdp = p->rmdhead + i;
			rmdp->u.s.status = RCV_OWN;
		}
		p->tmdnum = p->xmit_queued = 0;
		writedatareg(CSR0_STRT | csr0);

		for(i=0;i<TMDNUM;i++) {
			int num = (i + p->tmdlast) & (TMDNUM-1);
			p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]); /* status is part of buffer field */
			p->tmdhead[i].blen = blen[num];
			if(p->tmdhead[i].u.s.status & XMIT_OWN) {
				p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
				p->xmit_queued = 1;
				writedatareg(CSR0_TDMD | CSR0_INEA | csr0);
			}
#ifdef XMT_VIA_SKB
			p->tmd_skb[i] = skb_save[num];
#endif
		}
		p->rmdnum = p->tmdlast = 0;
		if(!p->lock)
			if (p->tmdnum || !p->xmit_queued)
				netif_wake_queue(dev);
		dev->trans_start = jiffies;
	}
	else
		writedatareg(CSR0_STRT | csr0);
}

/*
 * init lance (write init-values .. init-buffers) (open-helper)
 */
static int ni65_lance_reinit(struct net_device *dev)
{
	int i;
	struct priv *p = (struct priv *) dev->priv;
	unsigned long flags;

	p->lock = 0;
	p->xmit_queued = 0;

	flags=claim_dma_lock();
	disable_dma(dev->dma); /* I've never worked with dma, but we do it like the packetdriver */
	set_dma_mode(dev->dma,DMA_MODE_CASCADE);
	enable_dma(dev->dma);
	release_dma_lock(flags);

	outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
	if( (i=readreg(CSR0) ) != 0x4)
	{
		printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name,
		       cards[p->cardno].cardname,(int) i);
		flags=claim_dma_lock();
		disable_dma(dev->dma);
		release_dma_lock(flags);
		return 0;
	}

	p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0;
	for(i=0;i<TMDNUM;i++)
	{
		struct tmd *tmdp = p->tmdhead + i;
#ifdef XMT_VIA_SKB
		if(p->tmd_skb[i]) {
			dev_kfree_skb(p->tmd_skb[i]);
			p->tmd_skb[i] = NULL;
		}
#endif
		tmdp->u.buffer = 0x0;
		tmdp->u.s.status = XMIT_START | XMIT_END;
		tmdp->blen = tmdp->status2 = 0;
	}

	for(i=0;i<RMDNUM;i++)
	{
		struct rmd *rmdp = p->rmdhead + i;
#ifdef RCV_VIA_SKB
		rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data);
#else
		rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]);
#endif
		rmdp->blen = -(R_BUF_SIZE-8);
		rmdp->mlen = 0;
		rmdp->u.s.status = RCV_OWN;
	}

	if(dev->flags & IFF_PROMISC)
		ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
	else if(dev->mc_count || dev->flags & IFF_ALLMULTI)
		ni65_init_lance(p,dev->dev_addr,0xff,0x0);
	else
		ni65_init_lance(p,dev->dev_addr,0x00,0x00);

	/*
	 * ni65_init_lance() leaves L_ADDRREG pointing at CSR0.
	 * NOW, WE WILL NEVER CHANGE THE L_ADDRREG, CSR0 IS ALWAYS SELECTED
	 */

	if(inw(PORT+L_DATAREG) & CSR0_IDON) {
		ni65_set_performance(p);
		/* init OK: start lance, enable interrupts */
		writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);
		return 1; /* ->OK */
	}
	printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
	flags=claim_dma_lock();
	disable_dma(dev->dma);
	release_dma_lock(flags);
	return 0; /* ->Error */
}
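
/*
 * Note on the negative buffer length above (illustrative arithmetic): the
 * LANCE takes buffer byte counts as negative two's-complement values, so
 * blen = -(R_BUF_SIZE-8) = -1536 ends up as 0xfa00 in the 16-bit field.
 */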

/*
 * interrupt handler
 */
static irqreturn_t ni65_interrupt(int irq, void * dev_id, struct pt_regs * regs)
{
	int csr0 = 0;
	struct net_device *dev = dev_id;
	struct priv *p;
	int bcnt = 32;

	p = (struct priv *) dev->priv;

	spin_lock(&p->ring_lock);

	while(--bcnt) {
		csr0 = inw(PORT+L_DATAREG);

#if 0
		writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */
#else
		writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */
#endif

		if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT)))
			break;

		if(csr0 & CSR0_RINT) /* RECV-int? */
			ni65_recv_intr(dev,csr0);
		if(csr0 & CSR0_TINT) /* XMIT-int? */
			ni65_xmit_intr(dev,csr0);

		if(csr0 & CSR0_ERR)
		{
			struct priv *p = (struct priv *) dev->priv;
			if(debuglevel > 1)
				printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
			if(csr0 & CSR0_BABL)
				p->stats.tx_errors++;
			if(csr0 & CSR0_MISS) {
				int i;
				for(i=0;i<RMDNUM;i++)
					printk("%02x ",p->rmdhead[i].u.s.status);
				printk("\n");
				p->stats.rx_errors++;
			}
			if(csr0 & CSR0_MERR) {
				if(debuglevel > 1)
					printk(KERN_ERR "%s: Ooops .. memory error: %04x.\n",dev->name,csr0);
				ni65_stop_start(dev,p);
			}
		}
	}

#ifdef RCV_PARANOIA_CHECK
{
	int j;
	for(j=0;j<RMDNUM;j++)
	{
		struct priv *p = (struct priv *) dev->priv;
		int i,k,num1,num2;
		for(i=RMDNUM-1;i>0;i--) {
			num2 = (p->rmdnum + i) & (RMDNUM-1);
			if(!(p->rmdhead[num2].u.s.status & RCV_OWN))
				break;
		}

		if(i) {
			for(k=0;k<RMDNUM;k++) {
				num1 = (p->rmdnum + k) & (RMDNUM-1);
				if(!(p->rmdhead[num1].u.s.status & RCV_OWN))
					break;
			}
			if(!k)
				break;

			if(debuglevel > 0)
			{
				char buf[1024],*buf1;
				buf1 = buf;
				for(k=0;k<RMDNUM;k++) {
					sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status)); /* & RCV_OWN) ); */
					buf1 += 3;
				}
				*buf1 = 0;
				printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf);
			}

			p->rmdnum = num1;
			ni65_recv_intr(dev,csr0);
			if((p->rmdhead[num2].u.s.status & RCV_OWN))
				break; /* ok, we are 'in sync' again */
		}
		else
			break;
	}
}
#endif

	if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) {
		printk(KERN_DEBUG "%s: RX or TX was offline -> restart\n",dev->name);
		ni65_stop_start(dev,p);
	}
	else
		writedatareg(CSR0_INEA);

	spin_unlock(&p->ring_lock);
	return IRQ_HANDLED;
}
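
/*
 * Note (illustrative): the CSR0 status/error bits are write-one-to-clear,
 * which is why the handler above acknowledges them by writing the bits it
 * just read back with writedatareg(csr0 & CSR0_CLRALL).
 */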

/*
 * We have received an Xmit-Interrupt ..
 * send a new packet if necessary
 */
static void ni65_xmit_intr(struct net_device *dev,int csr0)
{
	struct priv *p = (struct priv *) dev->priv;

	while(p->xmit_queued)
	{
		struct tmd *tmdp = p->tmdhead + p->tmdlast;
		int tmdstat = tmdp->u.s.status;

		if(tmdstat & XMIT_OWN)
			break;

		if(tmdstat & XMIT_ERR)
		{
#if 0
			if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3)
				printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name);
#endif
			/* checking some errors */
			if(tmdp->status2 & XMIT_RTRY)
				p->stats.tx_aborted_errors++;
			if(tmdp->status2 & XMIT_LCAR)
				p->stats.tx_carrier_errors++;
			if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
				/* this stops the xmitter */
				p->stats.tx_fifo_errors++;
				if(debuglevel > 0)
					printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
				if(p->features & INIT_RING_BEFORE_START) {
					tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; /* test: resend this frame */
					ni65_stop_start(dev,p);
					break; /* no more Xmit processing .. */
				}
				else
					ni65_stop_start(dev,p);
			}
			if(debuglevel > 2)
				printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
			if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
				p->stats.tx_errors++;
			tmdp->status2 = 0;
		}
		else {
			p->stats.tx_bytes -= (short)(tmdp->blen);
			p->stats.tx_packets++;
		}

#ifdef XMT_VIA_SKB
		if(p->tmd_skb[p->tmdlast]) {
			dev_kfree_skb_irq(p->tmd_skb[p->tmdlast]);
			p->tmd_skb[p->tmdlast] = NULL;
		}
#endif

		p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
		if(p->tmdlast == p->tmdnum)
			p->xmit_queued = 0;
	}
	netif_wake_queue(dev);
}

/*
 * We have received a packet
 */
static void ni65_recv_intr(struct net_device *dev,int csr0)
{
	struct rmd *rmdp;
	int rmdstat,len;
	int cnt=0;
	struct priv *p = (struct priv *) dev->priv;

	rmdp = p->rmdhead + p->rmdnum;
	while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
	{
		cnt++;
		if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? */
		{
			if(!(rmdstat & RCV_ERR)) {
				if(rmdstat & RCV_START)
				{
					p->stats.rx_length_errors++;
					printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
				}
			}
			else {
				if(debuglevel > 2)
					printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
					       dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
				if(rmdstat & RCV_FRAM)
					p->stats.rx_frame_errors++;
				if(rmdstat & RCV_OFLO)
					p->stats.rx_over_errors++;
				if(rmdstat & RCV_CRC)
					p->stats.rx_crc_errors++;
				if(rmdstat & RCV_BUF_ERR)
					p->stats.rx_fifo_errors++;
			}
			if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
				p->stats.rx_errors++;
		}
		else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
		{
#ifdef RCV_VIA_SKB
			struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC);
			if (skb)
				skb_reserve(skb,16);
#else
			struct sk_buff *skb = dev_alloc_skb(len+2);
#endif
			if(skb)
			{
				skb_reserve(skb,2);
#ifdef RCV_VIA_SKB
				if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
					/* the new skb sits above 16MB: copy out of the
					   DMA-safe ring buffer instead of swapping it in */
					skb_put(skb,len);
					eth_copy_and_sum(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len,0);
				}
				else {
					struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
					skb_put(skb,R_BUF_SIZE);
					p->recv_skb[p->rmdnum] = skb;
					rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
					skb = skb1;
					skb_trim(skb,len);
				}
#else
				skb_put(skb,len);
				eth_copy_and_sum(skb, (unsigned char *) p->recvbounce[p->rmdnum],len,0);
#endif
				p->stats.rx_packets++;
				p->stats.rx_bytes += len;
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
				dev->last_rx = jiffies;
			}
			else
			{
				printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
				p->stats.rx_dropped++;
			}
		}
		else {
			printk(KERN_INFO "%s: received runt packet\n",dev->name);
			p->stats.rx_errors++;
		}
		rmdp->blen = -(R_BUF_SIZE-8);
		rmdp->mlen = 0;
		rmdp->u.s.status = RCV_OWN; /* change owner */
		p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1);
		rmdp = p->rmdhead + p->rmdnum;
	}
}

/*
 * kick xmitter ..
 */
static void ni65_timeout(struct net_device *dev)
{
	int i;
	struct priv *p = (struct priv *) dev->priv;

	printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name);
	for(i=0;i<TMDNUM;i++)
		printk("%02x ",p->tmdhead[i].u.s.status);
	printk("\n");
	ni65_lance_reinit(dev);
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}

/*
 * Send a packet
 */
static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct priv *p = (struct priv *) dev->priv;

	netif_stop_queue(dev);

	if (test_and_set_bit(0, (void*)&p->lock)) {
		printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
		return 1;
	}

	{
		short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
		struct tmd *tmdp;
		unsigned long flags;

#ifdef XMT_VIA_SKB
		if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
#endif
			/* the data sits above the 16MB ISA-DMA limit (or bouncing
			   is compiled in unconditionally): copy it into one of the
			   pre-allocated low bounce buffers */
			memcpy((char *) p->tmdbounce[p->tmdbouncenum] ,(char *)skb->data,
			       (skb->len > T_BUF_SIZE) ? T_BUF_SIZE : skb->len);
			if (len > skb->len)
				memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
			dev_kfree_skb (skb);

			spin_lock_irqsave(&p->ring_lock, flags);
			tmdp = p->tmdhead + p->tmdnum;
			tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]);
			p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1);

#ifdef XMT_VIA_SKB
		}
		else {
			spin_lock_irqsave(&p->ring_lock, flags);

			tmdp = p->tmdhead + p->tmdnum;
			tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
			p->tmd_skb[p->tmdnum] = skb;
		}
#endif
		tmdp->blen = -len;

		tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;
		writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */

		p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);

		if(p->tmdnum != p->tmdlast)
			netif_wake_queue(dev);

		p->lock = 0;
		dev->trans_start = jiffies;

		spin_unlock_irqrestore(&p->ring_lock, flags);
	}

	return 0;
}

static struct net_device_stats *ni65_get_stats(struct net_device *dev)
{
#if 0
	int i;
	struct priv *p = (struct priv *) dev->priv;
	for(i=0;i<RMDNUM;i++)
	{
		struct rmd *rmdp = p->rmdhead + ((p->rmdnum + i) & (RMDNUM-1));
		printk("%02x ",rmdp->u.s.status);
	}
	printk("\n");
#endif

	return &((struct priv *) dev->priv)->stats;
}

static void set_multicast_list(struct net_device *dev)
{
	if(!ni65_lance_reinit(dev))
		printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name);
	netif_wake_queue(dev);
}

#ifdef MODULE
static struct net_device *dev_ni65;

module_param(irq, int, 0);
module_param(io, int, 0);
module_param(dma, int, 0);
MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
MODULE_PARM_DESC(io, "ni6510 I/O base address");
MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
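
/*
 * When the driver is compiled in rather than built as a module, the same
 * settings can come from the kernel command line and are picked up via
 * netdev_boot_setup_check(), e.g. ether=9,0x360,eth0 (illustrative).
 */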

int __init init_module(void)
{
	dev_ni65 = ni65_probe(-1);
	return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
}

void cleanup_module(void)
{
	unregister_netdev(dev_ni65);
	cleanup_card(dev_ni65);
	free_netdev(dev_ni65);
}
#endif /* MODULE */

MODULE_LICENSE("GPL");