/*
	drivers/net/tulip/interrupt.c

	Maintained by Valerie Henson <val_henson@linux.intel.com>
	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
	for more information on this driver, or visit the project
	Web page at http://sourceforge.net/projects/tulip/
*/
#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>

int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;
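/* Module tunables (assigned from tulip_core.c): packets shorter than
   tulip_rx_copybreak are copied into a freshly allocated skb instead of
   passing the ring buffer up the stack, and tulip_max_interrupt_work
   bounds how many events a single interrupt may service. */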
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
	/* CSR11 21143 hardware Mitigation Control Interrupt register.
	   We use only RX mitigation; other techniques handle TX
	   interrupt mitigation.

	   31    Cycle Size (timer control)
	   30:27 TX timer in 16 * Cycle size
	   26:24 TX No pkts before Int.
	   23:20 RX timer in Cycle size
	   19:17 RX No pkts before Int.
	   16    Continuous Mode (CM)
	*/
	0x0,		/* IM disabled */
	0x80150000,	/* RX time = 1, RX pkts = 2, CM = 1 */
	/* (Intermediate CSR11 settings elided here; only
	    mit_table[MIT_TABLE] is ever loaded into CSR11, per the
	    note above.) */
//	[MIT_TABLE] = 0x80FF0000  /* RX time = 16, RX pkts = 7, CM = 1 */
	[MIT_TABLE] = 0x80F10000  /* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
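/* Worked example: 0x80F10000 decodes, per the layout above, as
   bit 31 = 1 (cycle size), TX fields 30:24 = 0, RX timer 23:20 = 0xF
   (maximum), RX packet count 19:17 = 0, and CM bit 16 = 1. */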
int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry, refilled = 0;

	/* Refill the Rx ring buffers. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;
			mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
						 PCI_DMA_FROMDEVICE);
			tp->rx_buffers[entry].mapping = mapping;
			skb->dev = dev;	/* Mark as being used by this device. */
			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	if (tp->chip_id == LC82C168) {
		if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
			/* Rx stopped due to out of buffers, restart it. */
			iowrite32(0x01, tp->base_addr + CSR2);
		}
	}
	return refilled;
}
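/* On the LC82C168 the write to CSR2 (the receive poll demand register)
   above is needed because that chip suspends its receiver when it runs
   out of descriptors; any write tells it to rescan the RX list. */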
#ifdef CONFIG_TULIP_NAPI

void oom_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	netif_rx_schedule(dev);
}
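/* tulip_poll() arms tp->oom_timer instead of re-enabling RX interrupts
   when skb allocation fails; this handler simply puts the device back
   on the poll list so the refill can be retried. */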
int tulip_poll(struct net_device *dev, int *budget)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = *budget;
	int received = 0;

	if (!netif_running(dev))
		goto done;

	if (rx_work_limit > dev->quota)
		rx_work_limit = dev->quota;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* That one buffer is needed for mit activation; or it might be a
	   bug in the ring buffer code; check later -- JHS */
	if (rx_work_limit >= RX_RING_SIZE)
		rx_work_limit--;
#endif
	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);

	do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
			break;
		}
		/* Acknowledge current RX interrupt sources. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
		/* If we own the next entry, it is a new packet. Send it up. */
		while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);

			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			if (tulip_debug > 5)
				printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
				       dev->name, entry, status);
			if (--rx_work_limit < 0)
				goto not_done;
			if ((status & 0x38008300) != 0x0300) {
				if ((status & 0x38000300) != 0x0300) {
					/* Ignore earlier buffers. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							printk(KERN_WARNING "%s: Oversized Ethernet frame "
							       "spanned multiple buffers, status %8.8x!\n",
							       dev->name, status);
						tp->stats.rx_length_errors++;
					}
				} else if (status & RxDescFatalErr) {
					/* There was a fatal error. */
					if (tulip_debug > 2)
						printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						       dev->name, status);
					tp->stats.rx_errors++; /* end of a packet.*/
					if (status & 0x0890) tp->stats.rx_length_errors++;
					if (status & 0x0004) tp->stats.rx_frame_errors++;
					if (status & 0x0002) tp->stats.rx_crc_errors++;
					if (status & 0x0001) tp->stats.rx_fifo_errors++;
				}
			} else {
				/* Omit the four octet CRC from the length. */
				short pkt_len = ((status >> 16) & 0x7ff) - 4;
				struct sk_buff *skb;

#ifndef final_version
				if (pkt_len > 1518) {
					printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					       dev->name, pkt_len, pkt_len);
					pkt_len = 1518;
					tp->stats.rx_length_errors++;
				}
#endif
				/* Check if the packet is long enough to accept without copying
				   to a minimally-sized skbuff. */
				if (pkt_len < tulip_rx_copybreak
				    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
					skb_reserve(skb, 2);	/* 16 byte align the IP header */
					pci_dma_sync_single_for_cpu(tp->pdev,
								    tp->rx_buffers[entry].mapping,
								    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
					skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
								pkt_len);
					skb_put(skb, pkt_len);
#else
					memcpy(skb_put(skb, pkt_len),
					       tp->rx_buffers[entry].skb->data,
					       pkt_len);
#endif
					pci_dma_sync_single_for_device(tp->pdev,
								       tp->rx_buffers[entry].mapping,
								       pkt_len, PCI_DMA_FROMDEVICE);
				} else {	/* Pass up the skb already on the Rx ring. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
						       "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
						       dev->name,
						       le32_to_cpu(tp->rx_ring[entry].buffer1),
						       (unsigned long long)tp->rx_buffers[entry].mapping,
						       skb->head, temp);
					}
#endif

					pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				dev->last_rx = jiffies;
				tp->stats.rx_packets++;
				tp->stats.rx_bytes += pkt_len;
			}
			received++;

			entry = (++tp->cur_rx) % RX_RING_SIZE;
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);
		}
		/* New ack strategy... the irq does not ack Rx any longer;
		   hopefully this helps. */

		/* Really bad things can happen here... If a new packet arrives
		 * and an irq arrives (tx or just due to an occasionally unset
		 * mask), it will be acked by the irq handler, but a new thread
		 * is not scheduled. It is a major hole in the design.
		 * No idea how to fix this if "playing with fire" fails
		 * tomorrow (night 011029). If it does not fail, we have won
		 * finally: the amount of IO did not increase at all. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));

done:
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* We use this simplistic scheme for IM. It's proven by
	   real life installations. We could have IM enabled
	   continuously, but this would cause unnecessary latency.
	   Unfortunately we can't use all the NET_RX_* feedback here;
	   that would turn on IM for devices that are not contributing
	   to backlog congestion, with unnecessary latency.

	   We monitor the device RX ring and have:

	   HW Interrupt Mitigation either ON or OFF.

	   ON:  More than 1 pkt received (per intr.) OR we are dropping
	   OFF: Only 1 pkt received

	   Note: we only use the min and max (0, 15) settings from mit_table. */
	if (tp->flags & HAS_INTR_MITIGATION) {
		if (received > 1) {
			if (!tp->mit_on) {
				tp->mit_on = 1;
				iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
			}
		} else {
			if (tp->mit_on) {
				tp->mit_on = 0;
				iowrite32(0, tp->base_addr + CSR11);
			}
		}
	}

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
	dev->quota -= received;
	*budget -= received;

	tulip_refill_rx(dev);

	/* If RX ring is not full we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Remove us from polling list and enable RX intr. */
	netif_rx_complete(dev);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
	/* The last op happens after poll completion. Which means the following:
	 * 1. it can race with disabling irqs in the irq handler
	 * 2. it can race with dis/enabling irqs in other poll threads
	 * 3. if an irq raised after the beginning of the loop, it will be
	 *    immediately triggered here after poll completion
	 *
	 * Summarizing: the logic results in some redundant irqs both
	 * due to races in masking and due to too late acking of already
	 * processed irqs. But it must not result in losing events.
	 */

	return 0;
not_done:
	if (!received)
		received = dev->quota; /* Not to happen */
	dev->quota -= received;
	*budget -= received;

	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return 1;
oom:	/* Executed with RX ints disabled */

	/* Start timer, stop polling, but do not enable rx interrupts. */
	mod_timer(&tp->oom_timer, jiffies+1);

	/* Think: timer_pending() was an explicit signature of a bug.
	 * The timer can be pending now, but have fired and completed
	 * before we did netif_rx_complete(). See? We would lose it. */

	/* Remove ourselves from the polling list. */
	netif_rx_complete(dev);

	return 0;
}

#else /* CONFIG_TULIP_NAPI */
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;
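	/* The work limit is the number of ring slots the host has not yet
	   consumed, so one call can never wrap past its own refill point. */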
	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);
372 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
375 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
376 dev->name, entry, status);
377 if (--rx_work_limit < 0)
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
						       "spanned multiple buffers, status %8.8x!\n",
						       dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
				       dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
			    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
							pkt_len);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       (long long)tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			dev->last_rx = jiffies;
			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
#endif  /* CONFIG_TULIP_NAPI */
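/* Detect a PHY-signalled link change by comparing CSR12 with a shadow
   copy; on a change, ack it and re-check the duplex setting. */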
static inline unsigned int phy_interrupt (struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;
		/* do link change stuff */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);
		/* clear irq ack bit */
		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}

	return 0;
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;
	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt (dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {
#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
			netif_rx_schedule(dev);

			if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}

		/* Acknowledge the interrupt sources we handle here ASAP;
		   the poll function does the Rx and RxNoBuf acking. */

		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);
		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /* CONFIG_TULIP_NAPI */
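		/* Note the asymmetry: with NAPI, RxIntr and RxNoBuf are left
		   unacked above (mask 0x0001ff3f vs 0x0001ffff) so that the
		   poll routine can observe and ack them itself. */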
		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
			       dev->name, csr5, ioread32(ioaddr + CSR5));
		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;	/* It still has not been Txed */
				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
							 tp->tx_buffers[entry].mapping,
							 sizeof(tp->setup_frame),
							 PCI_DMA_TODEVICE);
					continue;
				}
				if (status & 0x8000) {
					/* There was a major error; log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
						       dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}
				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}
#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
				       dev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif
			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					printk(KERN_WARNING "%s: The transmitter stopped."
					       "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
					       dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}
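		/* Tx FIFO underflow recovery below: bump the CSR6 transmit
		   threshold (bits 15:14) one step at a time and, once it is
		   maxed out, fall back to store-and-forward (CSR6 bit 21)
		   before restarting the transmitter. */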
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {	/* Missed a Rx frame. */
				tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be
			 * careful if this call is ever done under the spinlock.
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced
				 * corresponds to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 */
				printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
				       dev->name, tp->nir, error);
			}
			/* Clear all error sources, including undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {
			if (tulip_debug > 2)
				printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
				       dev->name, csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				printk(KERN_WARNING "%s: Too much work during an interrupt, "
				       "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n",
				       dev->name, csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting. */
				iowrite32(0x8b240000, ioaddr + CSR11);
			} else if (tp->chip_id == LC82C168) {
				/* the LC82C168 doesn't have a hw timer. */
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources, set timer to
				   re-enable. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt,
					  ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;
		csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
		if (rxd)
			csr5 &= ~RxPollInt;
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* Abnormal intr. */
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SystemError)) != 0);
#else
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
	tulip_refill_rx(dev);

	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
			       dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n",
					       dev->name, tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					  ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI */
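	/* CSR8 below is the missed-frames counter: the low 16 bits count
	   frames dropped for lack of descriptors, and bit 16 flags a
	   counter overflow, hence the clamp to 0x10000. */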
	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}
	if (tulip_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
		       dev->name, ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}