/*
	drivers/net/tulip/interrupt.c

	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
	for more information on this driver.
	Please submit bugs to http://bugzilla.kernel.org/ .
*/
 
#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>

int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;
 
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
 
	/*  CSR11 21143 hardware Mitigation Control Interrupt
	    We use only RX mitigation; other techniques are used for
	    TX interrupt mitigation.

	   31    Cycle Size (timer control)
	   30:27 TX timer in 16 * Cycle size
	   26:24 TX No pkts before Int.
	   23:20 RX timer in Cycle size
	   19:17 RX No pkts before Int.
	   16    Continuous Mode (CM)
	*/
 
	0x0,             /* IM disabled */
	0x80150000,      /* RX time = 1, RX pkts = 2, CM = 1 */

//	0x80FF0000       /* RX time = 16, RX pkts = 7, CM = 1 */
	0x80F10000       /* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
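/* Illustrative only (not from the original driver): a hedged sketch of how
 * the mit_table values above decompose into the CSR11 fields listed in the
 * comment.  MIT_CSR11() is a hypothetical helper; for example
 * MIT_CSR11(1, 1, 2, 1) == 0x80150000, the second table entry
 * (cycle size = 1, RX timer = 1, RX pkts = 2, CM = 1).
 */
#define MIT_CSR11(cs, rxtimer, rxpkts, cm)		\
	(((u32)(cs) << 31) |				\
	 (((u32)(rxtimer) & 0xf) << 20) |		\
	 (((u32)(rxpkts)  & 0x7) << 17) |		\
	 (((u32)(cm)      & 0x1) << 16))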
 
int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	int refilled = 0;

	/* Refill the Rx ring buffers. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;

			mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
						 PCI_DMA_FROMDEVICE);
			tp->rx_buffers[entry].mapping = mapping;

			skb->dev = dev;			/* Mark as being used by this device. */
			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
 
	if (tp->chip_id == LC82C168) {
		if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
			/* Rx stopped due to out of buffers, restart it. */
			iowrite32(0x01, tp->base_addr + CSR2);
		}
	}
	return refilled;
}
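/* A minimal sketch (assumption, not driver code): cur_rx and dirty_rx above
 * are free-running indices, so their difference is the number of descriptors
 * the chip has consumed but we have not yet refilled; the modulo by
 * RX_RING_SIZE happens only when touching the ring itself. */
static inline unsigned int tulip_rx_ring_pending(const struct tulip_private *tp)
{
	return tp->cur_rx - tp->dirty_rx;	/* unsigned math wraps safely */
}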
 
#ifdef CONFIG_TULIP_NAPI
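/* Fires only after tulip_poll() failed to refill the RX ring (see the oom:
   path below); it simply re-enters NAPI polling so the allocation can be
   retried, leaving RX interrupts disabled in the meantime. */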
 
void oom_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct tulip_private *tp = netdev_priv(dev);

	netif_rx_schedule(&tp->napi);
}
 
int tulip_poll(struct napi_struct *napi, int budget)
{
	struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
	struct net_device *dev = tp->dev;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

/* That one buffer is needed for mit activation; or it might be a
   bug in the ring buffer code; check later -- JHS */

	if (budget >= RX_RING_SIZE)
		budget--;
#endif
 
	do {
		if (tulip_debug > 4)
			printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
			       tp->rx_ring[entry].status);

		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
			break;
		}
		/* Acknowledge current RX interrupt sources. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
 
		/* If we own the next entry, it is a new packet. Send it up. */
		while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);

			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			if (tulip_debug > 5)
				printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
				       dev->name, entry, status);

			if (++work_done >= budget)
				goto not_done;
 
			if ((status & 0x38008300) != 0x0300) {
				if ((status & 0x38000300) != 0x0300) {
					/* Ignore earlier buffers. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							printk(KERN_WARNING "%s: Oversized Ethernet frame "
							       "spanned multiple buffers, status %8.8x!\n",
							       dev->name, status);
						tp->stats.rx_length_errors++;
					}
 
				} else if (status & RxDescFatalErr) {
					/* There was a fatal error. */
					if (tulip_debug > 2)
						printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						       dev->name, status);
					tp->stats.rx_errors++; /* end of a packet. */
					if (status & 0x0890) tp->stats.rx_length_errors++;
					if (status & 0x0004) tp->stats.rx_frame_errors++;
					if (status & 0x0002) tp->stats.rx_crc_errors++;
					if (status & 0x0001) tp->stats.rx_fifo_errors++;
				}
 
			} else {
				/* Omit the four octet CRC from the length. */
				short pkt_len = ((status >> 16) & 0x7ff) - 4;
				struct sk_buff *skb;

#ifndef final_version
				if (pkt_len > 1518) {
					printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					       dev->name, pkt_len, pkt_len);
					pkt_len = 1518;
					tp->stats.rx_length_errors++;
				}
#endif
 
				/* Check if the packet is long enough to accept without copying
				   to a minimally-sized skbuff. */
				if (pkt_len < tulip_rx_copybreak &&
				    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
					skb_reserve(skb, 2);	/* 16 byte align the IP header */
					pci_dma_sync_single_for_cpu(tp->pdev,
								    tp->rx_buffers[entry].mapping,
								    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
					skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
								pkt_len);
					skb_put(skb, pkt_len);
#else
					memcpy(skb_put(skb, pkt_len),
					       tp->rx_buffers[entry].skb->data,
					       pkt_len);
#endif
					pci_dma_sync_single_for_device(tp->pdev,
								       tp->rx_buffers[entry].mapping,
								       pkt_len, PCI_DMA_FROMDEVICE);
 
				} else {	/* Pass up the skb already on the Rx ring. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
						       "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
						       dev->name,
						       le32_to_cpu(tp->rx_ring[entry].buffer1),
						       (unsigned long long)tp->rx_buffers[entry].mapping,
						       skb->head, temp);
					}
#endif

					pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
 
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				tp->stats.rx_packets++;
				tp->stats.rx_bytes += pkt_len;
			}
 
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
			received++;
#endif
			entry = (++tp->cur_rx) % RX_RING_SIZE;
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);
		}
 
		/* New ack strategy... irq does not ack Rx any longer;
		   hopefully this helps */

		/* Really bad things can happen here... If a new packet arrives
		 * and an irq arrives (tx or just due to an occasionally unset
		 * mask), it will be acked by the irq handler, but the new poll
		 * thread is not scheduled. It is a major hole in the design.
		 * No idea how to fix this if "playing with fire" fails
		 * tomorrow (night 011029). If it does not fail, we have
		 * finally won: the amount of IO did not increase at all. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));
 
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
 
	/* We use this simplistic scheme for IM. It's proven by
	   real life installations. We can have IM enabled
	   continuously, but this would cause unnecessary latency.
	   Unfortunately we can't use all the NET_RX_* feedback here.
	   That would turn on IM for devices that are not contributing
	   to backlog congestion, with unnecessary latency.

	   We monitor the device RX-ring and have:

	   HW Interrupt Mitigation either ON or OFF.

	   ON:  More than 1 pkt received (per intr.) OR we are dropping
	   OFF: Only 1 pkt received

	   Note: We only use the min and max (0, 15) settings from mit_table. */
 
	if (tp->flags & HAS_INTR_MITIGATION) {
		if (received > 1 && !tp->mit_on) {
			tp->mit_on = 1;
			iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
		} else if (received <= 1 && tp->mit_on) {
			tp->mit_on = 0;
			iowrite32(0, tp->base_addr + CSR11);
		}
	}
 
#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
 
	tulip_refill_rx(dev);

	/* If the RX ring is still not full, we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Remove us from the polling list and enable RX intr. */
	netif_rx_complete(napi);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr + CSR7);
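	/* CSR7 is the interrupt-enable mask: restoring the full valid_intrs
	   set re-arms the RX interrupts that tulip_interrupt() masked off
	   before scheduling this poll. */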
 
	/* The last op happens after poll completion. Which means the following:
	 * 1. it can race with disabling irqs in the irq handler
	 * 2. it can race with disabling/enabling irqs in other poll threads
	 * 3. if an irq is raised after the loop begins, it will be immediately
	 *    triggered here, after netif_rx_complete().
	 *
	 * Summarizing: the logic results in some redundant irqs both
	 * due to races in masking and due to too-late acking of already
	 * processed irqs. But it must not result in losing events. */

	return work_done;
 
not_done:
	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return work_done;
 
oom:	/* Executed with RX ints disabled */

	/* Start the timer and stop polling, but do not enable RX interrupts. */
	mod_timer(&tp->oom_timer, jiffies + 1);

	/* Think: timer_pending() was an explicit signature of a bug.
	 * The timer can be pending now, but have fired and completed
	 * before we did netif_rx_complete(). See? We would lose it. */

	/* Remove ourselves from the polling list. */
	netif_rx_complete(napi);

	return work_done;
}
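/* Hedged usage sketch (the actual registration lives elsewhere in the
 * driver, e.g. tulip_core.c): tulip_poll() is attached to the device at
 * probe time roughly as
 *
 *	netif_napi_add(dev, &tp->napi, tulip_poll, 16);
 *
 * after which netif_rx_schedule(&tp->napi) queues this poll routine and
 * netif_rx_complete(napi) retires it. */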
 
#else /* CONFIG_TULIP_NAPI */
 
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);
 
	/* If we own the next entry, it is a new packet. Send it up. */
	while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
			       dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
 
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
						       "spanned multiple buffers, status %8.8x!\n",
						       dev->name, status);
					tp->stats.rx_length_errors++;
				}
 
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, status);
				tp->stats.rx_errors++; /* end of a packet. */
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
 
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
				       dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif
 
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
							pkt_len);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
 
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       (long long)tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
 
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
 
#endif /* CONFIG_TULIP_NAPI */
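/* A self-contained sketch (assumption, not driver code) of the copybreak
 * policy both RX paths above implement: packets shorter than
 * tulip_rx_copybreak are copied into a small freshly allocated skb so the
 * full-sized, DMA-mapped ring buffer can stay in place; larger packets hand
 * the ring skb itself upstream.  tulip_copybreak_skb() is a hypothetical
 * helper, not part of the driver. */
static struct sk_buff *tulip_copybreak_skb(const u8 *ring_data, int pkt_len)
{
	struct sk_buff *skb = NULL;

	if (pkt_len < tulip_rx_copybreak)
		skb = dev_alloc_skb(pkt_len + 2);
	if (skb == NULL)
		return NULL;		/* caller passes the ring skb up instead */

	skb_reserve(skb, 2);		/* 16 byte align the IP header */
	skb_copy_to_linear_data(skb, ring_data, pkt_len);
	skb_put(skb, pkt_len);
	return skb;
}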
 
static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;
		/* do link change stuff */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);
		/* clear irq ack bit */
		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}
#endif

	return 0;
}
 
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
 
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;

	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt(dev);

	if ((csr5 & (NormalIntr | AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {
 
#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to the poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs & ~RxPollInt,
				  ioaddr + CSR7);
			netif_rx_schedule(&tp->napi);

			if (!(csr5 & ~(AbnormalIntr | NormalIntr | RxPollInt | TPLnkPass)))
				break;
		}
 
		/* Acknowledge the interrupt sources we handle here ASAP;
		   the poll function does Rx and RxNoBuf acking. */
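		/* CSR5 status bits are write-one-to-clear, which is why the
		   freshly read value is written straight back to ack them. */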
 
		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}
 
#endif /* CONFIG_TULIP_NAPI */
 
		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
			       dev->name, csr5, ioread32(ioaddr + CSR5));
 
		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);
 
				if (status < 0)
					break;		/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
								 tp->tx_buffers[entry].mapping,
								 sizeof(tp->setup_frame),
								 PCI_DMA_TODEVICE);
					continue;
				}
 
				if (status & 0x8000) {
					/* There was a major error; log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
						       dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}
 
				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}
 
#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
				       dev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif
 
			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
 
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					printk(KERN_WARNING "%s: The transmitter stopped."
					       "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
					       dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}
 
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
 
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
 
			if (csr5 & RxDied) {		/* Missed a Rx frame. */
				tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
 
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful
			 * if this call is ever done under the spinlock.
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
 
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced
				 * corresponds to the code field of the PCI error.
				 * Note that on parity error, we should do a software
				 * reset of the chip to get it back into a sane state
				 * (according to the 21142/3 docs that is).
				 */
				printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
				       dev->name, tp->nir, error);
			}
 
			/* Clear all error sources, including undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
 
		if (csr5 & TimerInt) {
			if (tulip_debug > 2)
				printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
				       dev->name, csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
 
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				printk(KERN_WARNING "%s: Too much work during an interrupt, "
				       "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n",
				       dev->name, csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting. */
				iowrite32(0x8b240000, ioaddr + CSR11);
			} else if (tp->chip_id == LC82C168) {
				/* the LC82C168 doesn't have a hw timer. */
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources, set timer to
				   re-enable. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt,
					  ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;
 
		csr5 = ioread32(ioaddr + CSR5);
 
#ifdef CONFIG_TULIP_NAPI
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* Abnormal intr. */
			  RxDied |
			  RxNoBuf |
			  RxIntr |
			  TPLnkFail |
			  SystemError)) != 0);
#else
	} while ((csr5 & (NormalIntr | AbnormalIntr)) != 0);
 
	tulip_refill_rx(dev);
 
	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
			       dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n",
					       dev->name, tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					  ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
 
#endif /* CONFIG_TULIP_NAPI */
 
	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}

	if (tulip_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
		       dev->name, ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}
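/* Hedged usage sketch (the real hookup lives in the driver's open path,
 * e.g. tulip_open() in tulip_core.c): the handler above is registered as a
 * shared PCI interrupt roughly as
 *
 *	if (request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
 *			dev->name, dev))
 *		return -EBUSY;
 *
 * dev_instance then arrives back as the void * argument above. */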