/*
	drivers/net/tulip/interrupt.c

	Copyright 2000,2001 The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
	for more information on this driver.
	Please submit bugs to http://bugzilla.kernel.org/ .
*/
#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>
int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;
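/* Both knobs are set at module-init time from the driver's module
 * parameters (rx_copybreak and max_interrupt_work, in tulip_core.c):
 * tulip_rx_copybreak selects the copy-vs-flip threshold used in the RX
 * paths below, and tulip_max_interrupt_work bounds how many event loops
 * one invocation of tulip_interrupt() may run. */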
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
	/* CSR11 21143 hardware Interrupt Mitigation Control.
	   We use only RX mitigation; other techniques for
	   interrupt mitigation are in use.

	   31    Cycle Size (timer control)
	   30:27 TX timer in 16 * Cycle size
	   26:24 TX No pkts before Int.
	   23:20 RX timer in Cycle size
	   19:17 RX No pkts before Int.
	   16    Continuous Mode (CM)
	*/
	0x0,		/* IM disabled */
	0x80150000,	/* RX time = 1, RX pkts = 2, CM = 1 */
//	0x80FF0000	/* RX time = 16, RX pkts = 7, CM = 1 */
	0x80F10000	/* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
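/* Worked example against the mit_table field layout above:
 * 0x80150000 >> 16 == 0x8015 == 1000 0000 0001 0101b, so bit 31
 * (Cycle Size) = 1, bits 23:20 (RX timer) = 1, bits 19:17 (RX pkts) = 2,
 * and bit 16 (CM) = 1 -- matching that entry's "RX time = 1, RX pkts = 2,
 * CM = 1" annotation. */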
int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	int refilled = 0;

	/* Refill the Rx ring buffers. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;

			mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
						 PCI_DMA_FROMDEVICE);
			tp->rx_buffers[entry].mapping = mapping;

			skb->dev = dev; /* Mark as being used by this device. */
			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	if (tp->chip_id == LC82C168) {
		if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
			/* Rx stopped due to out of buffers,
			 * restart it
			 */
			iowrite32(0x01, tp->base_addr + CSR2);
		}
	}
	return refilled;
}
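/* tulip_refill_rx() above is the producer half of the RX ring: dirty_rx
 * chases cur_rx, and each slot refilled is handed back to the chip by
 * setting DescOwned. The CSR2 write is the receive poll demand, which
 * kicks an LC82C168 whose receive process (CSR5 state field == 4)
 * suspended for lack of buffers. */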
#ifdef CONFIG_TULIP_NAPI
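/* Fires when tulip_poll() could not refill the RX ring (see the oom:
 * path below); it simply puts the device back on the NAPI poll list so
 * the skb allocation can be retried from softirq context. */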
void oom_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct tulip_private *tp = netdev_priv(dev);
	netif_rx_schedule(dev, &tp->napi);
}
int tulip_poll(struct napi_struct *napi, int budget)
{
	struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
	struct net_device *dev = tp->dev;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

/* that one buffer is needed for mit activation; or might be a
   bug in the ring buffer code; check later -- JHS */

	if (budget >= RX_RING_SIZE)
		budget--;
#endif
	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);

	do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
			break;
		}
		/* Acknowledge current RX interrupt sources. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
		/* If we own the next entry, it is a new packet. Send it up. */
		while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);

			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			if (tulip_debug > 5)
				printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
				       dev->name, entry, status);

			if (++work_done >= budget)
				goto not_done;
			if ((status & 0x38008300) != 0x0300) {
				if ((status & 0x38000300) != 0x0300) {
					/* Ignore earlier buffers. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							printk(KERN_WARNING "%s: Oversized Ethernet frame "
							       "spanned multiple buffers, status %8.8x!\n",
							       dev->name, status);
						tp->stats.rx_length_errors++;
					}
				} else if (status & RxDescFatalErr) {
					/* There was a fatal error. */
					if (tulip_debug > 2)
						printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						       dev->name, status);
					tp->stats.rx_errors++; /* end of a packet. */
					if (status & 0x0890) tp->stats.rx_length_errors++;
					if (status & 0x0004) tp->stats.rx_frame_errors++;
					if (status & 0x0002) tp->stats.rx_crc_errors++;
					if (status & 0x0001) tp->stats.rx_fifo_errors++;
				}
			} else {
				/* Omit the four octet CRC from the length. */
				short pkt_len = ((status >> 16) & 0x7ff) - 4;
				struct sk_buff *skb;

#ifndef final_version
				if (pkt_len > 1518) {
					printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					       dev->name, pkt_len, pkt_len);
					pkt_len = 1518;
					tp->stats.rx_length_errors++;
				}
#endif
				/* Check if the packet is long enough to accept without copying
				   to a minimally-sized skbuff. */
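				/* E.g. with tulip_rx_copybreak == 200 (an
				 * illustrative value), a 60-byte ARP reply is
				 * copied into a fresh small skb and the large
				 * PKT_BUF_SZ buffer stays in the ring, while a
				 * 1500-byte frame is passed up in place and
				 * its ring slot is refilled later. */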
				if (pkt_len < tulip_rx_copybreak
				    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
					skb_reserve(skb, 2); /* 16 byte align the IP header */
					pci_dma_sync_single_for_cpu(tp->pdev,
								    tp->rx_buffers[entry].mapping,
								    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
					skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
								pkt_len);
					skb_put(skb, pkt_len);
#else
					memcpy(skb_put(skb, pkt_len),
					       tp->rx_buffers[entry].skb->data,
					       pkt_len);
#endif
					pci_dma_sync_single_for_device(tp->pdev,
								       tp->rx_buffers[entry].mapping,
								       pkt_len, PCI_DMA_FROMDEVICE);
				} else { /* Pass up the skb already on the Rx ring. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
						       "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
						       dev->name,
						       le32_to_cpu(tp->rx_ring[entry].buffer1),
						       (unsigned long long)tp->rx_buffers[entry].mapping,
						       skb->head, temp);
					}
#endif

					pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				dev->last_rx = jiffies;
				tp->stats.rx_packets++;
				tp->stats.rx_bytes += pkt_len;
			}
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
			received++;
#endif
			entry = (++tp->cur_rx) % RX_RING_SIZE;
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);
		}
		/* New ack strategy... irq does not ack Rx any longer
		   hopefully this helps */

		/* Really bad things can happen here... If a new packet arrives
		 * and an irq arrives (tx or just due to an occasionally unset
		 * mask), it will be acked by the irq handler, but the new thread
		 * is not scheduled. It is a major hole in the design.
		 * No idea how to fix this if "playing with fire" fails
		 * tomorrow (night 011029). If it does not fail, we have finally
		 * won: the amount of IO did not increase at all. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* We use this simplistic scheme for IM. It's proven by
	   real life installations. We could have IM enabled
	   continuously, but this would cause unnecessary latency.
	   Unfortunately we can't use all the NET_RX_* feedback here.
	   That would turn on IM for devices that are not contributing
	   to backlog congestion, with unnecessary latency.

	   We monitor the device RX-ring and have:

	   HW Interrupt Mitigation either ON or OFF.

	   ON:  More than 1 pkt received (per intr.) OR we are dropping
	   OFF: Only 1 pkt received

	   Note: We only use the min and max (0, 15) settings from mit_table */
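	/* In effect a two-state hysteresis, evaluated once per poll:
	 *
	 *    received > 1  ->  CSR11 = mit_table[MIT_TABLE]  (IM on)
	 *    received <= 1 ->  CSR11 = 0                     (IM off)
	 *
	 * so a loaded device batches RX interrupts while an idle one keeps
	 * minimum latency. */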
	if (tp->flags & HAS_INTR_MITIGATION) {
		if (received > 1) {
			if (!tp->mit_on) {
				tp->mit_on = 1;
				iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
			}
		} else {
			if (tp->mit_on) {
				tp->mit_on = 0;
				iowrite32(0, tp->base_addr + CSR11);
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

	tulip_refill_rx(dev);

	/* If RX ring is not full we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Remove us from polling list and enable RX intr. */
	netif_rx_complete(dev, napi);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
	/* The last op happens after poll completion. Which means the following:
	 * 1. it can race with disabling irqs in irq handler
	 * 2. it can race with disabling/enabling irqs in other poll threads
	 * 3. if an irq raised after the beginning of the loop, it will be
	 *    immediately triggered here.
	 *
	 * Summarizing: the logic results in some redundant irqs both
	 * due to races in masking and due to too late acking of already
	 * processed irqs. But it must not result in losing events.
	 */

	return work_done;

 not_done:
	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return work_done;
 oom: /* Executed with RX ints disabled */

	/* Start timer, stop polling, but do not enable rx interrupts. */
	mod_timer(&tp->oom_timer, jiffies+1);

	/* Think: timer_pending() was an explicit signature of bug.
	 * Timer can be pending now but fired and completed
	 * before we did netif_rx_complete(). See? We would lose it. */

	/* remove ourselves from the polling list */
	netif_rx_complete(dev, napi);

	return work_done;
}
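/* All exit paths above honour the NAPI contract of this kernel
 * generation: return how much work was done. Only a poll that completes
 * (work_done < budget) calls netif_rx_complete() and re-enables RX
 * interrupts; the not_done and oom paths leave the device polled or
 * timer-driven instead. */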
#else /* CONFIG_TULIP_NAPI */
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
			       dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
						       "spanned multiple buffers, status %8.8x!\n",
						       dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, status);
				tp->stats.rx_errors++; /* end of a packet. */
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
				       dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
			    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2); /* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
							pkt_len);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else { /* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       (long long)tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			dev->last_rx = jiffies;
			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
#endif /* CONFIG_TULIP_NAPI */
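/* Shadow-compare scheme for the PHY interrupt: the low byte of CSR12
 * (the general-purpose port) is compared against the last value seen;
 * a change means a link event, which is acked via bit 1 and handled by
 * tulip_check_duplex() under tp->lock. The body is compiled in only on
 * platforms where the PHY interrupt is wired through the GP port. */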
static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;
		/* do link change stuff */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);
		/* clear irq ack bit */
		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}
#endif

	return 0;
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;
	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt (dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {
#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
			netif_rx_schedule(dev, &tp->napi);

			if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}

		/* Acknowledge the interrupt sources we handle here ASAP
		   the poll function does Rx and RxNoBuf acking */

		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);
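		/* 0x0001ffff would ack every CSR5 status bit; clearing 0xc0
		 * from the mask (RxIntr | RxNoBuf, bits 6 and 7) leaves the
		 * RX sources asserted for tulip_poll() to ack. */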
#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /* CONFIG_TULIP_NAPI */
		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
			       dev->name, csr5, ioread32(ioaddr + CSR5));
		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break; /* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
								 tp->tx_buffers[entry].mapping,
								 sizeof(tp->setup_frame),
								 PCI_DMA_TODEVICE);
					continue;
				}
				if (status & 0x8000) {
					/* There was a major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
						       dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						/* There was a heartbeat error. */
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}
				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}
#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
				       dev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					printk(KERN_WARNING "%s: The transmitter stopped."
					       " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
					       dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}
		if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000; /* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000; /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
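			/* The Tx threshold bits above live at CSR6<15:14>, so
			 * each += 0x4000 walks 0x0000 -> 0x4000 -> 0x8000 ->
			 * 0xC000; once saturated, bit 21 (0x00200000) switches
			 * the chip to store-and-forward, which cannot
			 * underflow. */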
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) { /* Missed a Rx frame. */
				tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error. The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
				       dev->name, tp->nir, error);
			}
			/* Clear all error sources, including undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {

			if (tulip_debug > 2)
				printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
				       dev->name, csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				printk(KERN_WARNING "%s: Too much work during an interrupt, "
				       "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting. */
				iowrite32(0x8b240000, ioaddr + CSR11);
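				/* Decoded per the mit_table field layout above:
				 * 0x8b24 = TX timer 1 (x16 cycles), TX pkts 3,
				 * RX timer 2, RX pkts 2, CM off. */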
			} else if (tp->chip_id == LC82C168) {
				/* the LC82C168 doesn't have a hw timer. */
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources, set timer to
				   re-enable. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;
		csr5 = ioread32(ioaddr + CSR5);
#ifdef CONFIG_TULIP_NAPI
		if (rxd)
			csr5 &= ~RxPollInt;
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* Abnormal intr. */
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SystemError)) != 0);
#else
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
	tulip_refill_rx(dev);
	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
			       dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n",
					       dev->name, tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					  ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI */
	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}
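	/* CSR8 is the missed-frames counter: the low 16 bits count frames
	 * dropped for lack of RX buffers, and bit 16 (0x10000) is the
	 * counter-overflow flag, hence the saturation to 0x10000 above. */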
	if (tulip_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
		       dev->name, ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}