drivers/infiniband/hw/ipath/ipath_intr.c
1 /*
2  * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33
34 #include <linux/pci.h>
35 #include <linux/delay.h>
36
37 #include "ipath_kernel.h"
38 #include "ipath_verbs.h"
39 #include "ipath_common.h"
40
41
42 /*
43  * Called when we might have an error that is specific to a particular
44  * PIO buffer, and may need to cancel that buffer, so it can be re-used.
45  */
46 void ipath_disarm_senderrbufs(struct ipath_devdata *dd)
47 {
48         u32 piobcnt;
49         unsigned long sbuf[4];
50         /*
51          * It's possible that sendbuffererror already has bits set; we might
52          * have already done this as a result of hardware error handling.
53          */
54         piobcnt = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
55         /* read these before writing errorclear */
56         sbuf[0] = ipath_read_kreg64(
57                 dd, dd->ipath_kregs->kr_sendbuffererror);
58         sbuf[1] = ipath_read_kreg64(
59                 dd, dd->ipath_kregs->kr_sendbuffererror + 1);
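           /*
            * Each 64-bit sendbuffererror word covers 64 PIO buffers, so words
            * 2 and 3 only need to be read on chips with more than 128 and 192
            * buffers respectively.
            */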
60         if (piobcnt > 128)
61                 sbuf[2] = ipath_read_kreg64(
62                         dd, dd->ipath_kregs->kr_sendbuffererror + 2);
63         if (piobcnt > 192)
64                 sbuf[3] = ipath_read_kreg64(
65                         dd, dd->ipath_kregs->kr_sendbuffererror + 3);
66         else
67                 sbuf[3] = 0;
68
69         if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {
70                 int i;
71                 if (ipath_debug & (__IPATH_PKTDBG|__IPATH_DBG) &&
72                         dd->ipath_lastcancel > jiffies) {
73                         __IPATH_DBG_WHICH(__IPATH_PKTDBG|__IPATH_DBG,
74                                           "SendbufErrs %lx %lx", sbuf[0],
75                                           sbuf[1]);
76                         if (ipath_debug & __IPATH_PKTDBG && piobcnt > 128)
77                                 printk(" %lx %lx ", sbuf[2], sbuf[3]);
78                         printk("\n");
79                 }
80
81                 for (i = 0; i < piobcnt; i++)
82                         if (test_bit(i, sbuf))
83                                 ipath_disarm_piobufs(dd, i, 1);
84                 /* ignore armlaunch errs for a bit */
85                 dd->ipath_lastcancel = jiffies+3;
86         }
87 }
88
89
90 /* These are all rcv-related errors which we want to count for stats */
91 #define E_SUM_PKTERRS \
92         (INFINIPATH_E_RHDRLEN | INFINIPATH_E_RBADTID | \
93          INFINIPATH_E_RBADVERSION | INFINIPATH_E_RHDR | \
94          INFINIPATH_E_RLONGPKTLEN | INFINIPATH_E_RSHORTPKTLEN | \
95          INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RMINPKTLEN | \
96          INFINIPATH_E_RFORMATERR | INFINIPATH_E_RUNSUPVL | \
97          INFINIPATH_E_RUNEXPCHAR | INFINIPATH_E_REBP)
98
99 /* These are all send-related errors which we want to count for stats */
100 #define E_SUM_ERRS \
101         (INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | \
102          INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
103          INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNSUPVL | \
104          INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
105          INFINIPATH_E_INVALIDADDR)
106
107 /*
108  * This is similar to E_SUM_ERRS, but we can't ignore armlaunch errors and
109  * we don't ignore errors unrelated to freeze and cancelling buffers.
110  * Armlaunch can't be ignored, because more can occur while we are still
111  * cleaning up, and those need to be cancelled as they happen.
112  */
113 #define E_SPKT_ERRS_IGNORE \
114          (INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
115          INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SMINPKTLEN | \
116          INFINIPATH_E_SPKTLEN)
117
118 /*
119  * These are errors that can occur when the link changes state while
120  * a packet is being sent or received.  This doesn't cover things
121  * like EBP or VCRC errors that can result from the link changing state
122  * during a send, in which case we receive a "known bad" packet.
123  */
124 #define E_SUM_LINK_PKTERRS \
125         (INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
126          INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
127          INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RMINPKTLEN | \
128          INFINIPATH_E_RUNEXPCHAR)
129
130 static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
131 {
132         u64 ignore_this_time = 0;
133
134         ipath_disarm_senderrbufs(dd);
135         if ((errs & E_SUM_LINK_PKTERRS) &&
136             !(dd->ipath_flags & IPATH_LINKACTIVE)) {
137                 /*
138                  * This can happen when SMA is trying to bring the link
139                  * up, but the IB link changes state at the "wrong" time.
140                  * The IB logic then complains that the packet isn't
141                  * valid.  We don't want to confuse people, so we just
142                  * don't print them, except at debug
143                  */
144                 ipath_dbg("Ignoring packet errors %llx, because link not "
145                           "ACTIVE\n", (unsigned long long) errs);
146                 ignore_this_time = errs & E_SUM_LINK_PKTERRS;
147         }
148
149         return ignore_this_time;
150 }
151
152 /* generic hw error messages... */
153 #define INFINIPATH_HWE_TXEMEMPARITYERR_MSG(a) \
154         { \
155                 .mask = ( INFINIPATH_HWE_TXEMEMPARITYERR_##a <<    \
156                           INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT ),   \
157                 .msg = "TXE " #a " Memory Parity"            \
158         }
159 #define INFINIPATH_HWE_RXEMEMPARITYERR_MSG(a) \
160         { \
161                 .mask = ( INFINIPATH_HWE_RXEMEMPARITYERR_##a <<    \
162                           INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT ),   \
163                 .msg = "RXE " #a " Memory Parity"            \
164         }
165
166 static const struct ipath_hwerror_msgs ipath_generic_hwerror_msgs[] = {
167         INFINIPATH_HWE_MSG(IBCBUSFRSPCPARITYERR, "IPATH2IB Parity"),
168         INFINIPATH_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2IPATH Parity"),
169
170         INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOBUF),
171         INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOPBC),
172         INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOLAUNCHFIFO),
173
174         INFINIPATH_HWE_RXEMEMPARITYERR_MSG(RCVBUF),
175         INFINIPATH_HWE_RXEMEMPARITYERR_MSG(LOOKUPQ),
176         INFINIPATH_HWE_RXEMEMPARITYERR_MSG(EAGERTID),
177         INFINIPATH_HWE_RXEMEMPARITYERR_MSG(EXPTID),
178         INFINIPATH_HWE_RXEMEMPARITYERR_MSG(FLAGBUF),
179         INFINIPATH_HWE_RXEMEMPARITYERR_MSG(DATAINFO),
180         INFINIPATH_HWE_RXEMEMPARITYERR_MSG(HDRINFO),
181 };
182
183 /**
184  * ipath_format_hwmsg - format a single hwerror message
185  * @msg: message buffer
186  * @msgl: length of message buffer
187  * @hwmsg: message to add to message buffer
188  */
189 static void ipath_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
190 {
191         strlcat(msg, "[", msgl);
192         strlcat(msg, hwmsg, msgl);
193         strlcat(msg, "]", msgl);
194 }
195
196 /**
197  * ipath_format_hwerrors - format hardware error messages for display
198  * @hwerrs: hardware errors bit vector
199  * @hwerrmsgs: hardware error descriptions
200  * @nhwerrmsgs: number of hwerrmsgs
201  * @msg: message buffer
202  * @msgl: message buffer length
203  */
204 void ipath_format_hwerrors(u64 hwerrs,
205                            const struct ipath_hwerror_msgs *hwerrmsgs,
206                            size_t nhwerrmsgs,
207                            char *msg, size_t msgl)
208 {
209         int i;
210         const int glen =
211             sizeof(ipath_generic_hwerror_msgs) /
212             sizeof(ipath_generic_hwerror_msgs[0]);
213
214         for (i = 0; i < glen; i++) {
215                 if (hwerrs & ipath_generic_hwerror_msgs[i].mask) {
216                         ipath_format_hwmsg(msg, msgl,
217                                            ipath_generic_hwerror_msgs[i].msg);
218                 }
219         }
220
221         for (i = 0; i < nhwerrmsgs; i++) {
222                 if (hwerrs & hwerrmsgs[i].mask) {
223                         ipath_format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
224                 }
225         }
226 }
227
228 /* return the strings for the most common link states */
229 static char *ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
230 {
231         char *ret;
232         u32 state;
233
234         state = ipath_ib_state(dd, ibcs);
235         if (state == dd->ib_init)
236                 ret = "Init";
237         else if (state == dd->ib_arm)
238                 ret = "Arm";
239         else if (state == dd->ib_active)
240                 ret = "Active";
241         else
242                 ret = "Down";
243         return ret;
244 }
245
246 void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev)
247 {
248         struct ib_event event;
249
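            /* these HCAs expose a single IB port, so the event always refers to port 1 */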
250         event.device = &dd->verbs_dev->ibdev;
251         event.element.port_num = 1;
252         event.event = ev;
253         ib_dispatch_event(&event);
254 }
255
256 static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
257                                      ipath_err_t errs)
258 {
259         u32 ltstate, lstate, ibstate, lastlstate;
260         u32 init = dd->ib_init;
261         u32 arm = dd->ib_arm;
262         u32 active = dd->ib_active;
263         const u64 ibcs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
264
265         lstate = ipath_ib_linkstate(dd, ibcs); /* linkstate */
266         ibstate = ipath_ib_state(dd, ibcs);
267         /* linkstate at last interrupt */
268         lastlstate = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
269         ltstate = ipath_ib_linktrstate(dd, ibcs); /* link training state */
270
271         /*
272          * Since going into a recovery state causes the link state to go
273          * down and since recovery is transitory, it is better if we "miss"
274          * ever seeing the link training state go into recovery (i.e.,
275          * ignore this transition for link state special handling purposes)
276          * without even updating ipath_lastibcstat.
277          */
278         if ((ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN) ||
279             (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT) ||
280             (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERIDLE))
281                 goto done;
282
283         /*
284          * if linkstate transitions into INIT from any of the various down
285          * states, or if it transitions from any of the up (INIT or better)
286          * states into any of the down states (except link recovery), then
287          * call the chip-specific code to take appropriate actions.
288          */
289         if (lstate >= INFINIPATH_IBCS_L_STATE_INIT &&
290                 lastlstate == INFINIPATH_IBCS_L_STATE_DOWN) {
291                 /* transitioned to UP */
292                 if (dd->ipath_f_ib_updown(dd, 1, ibcs)) {
293                         /* link came up, so we must no longer be disabled */
294                         dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
295                         ipath_cdbg(LINKVERB, "LinkUp handled, skipped\n");
296                         goto skip_ibchange; /* chip-code handled */
297                 }
298         } else if ((lastlstate >= INFINIPATH_IBCS_L_STATE_INIT ||
299                 (dd->ipath_flags & IPATH_IB_FORCE_NOTIFY)) &&
300                 ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT &&
301                 ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
302                 int handled;
303                 handled = dd->ipath_f_ib_updown(dd, 0, ibcs);
304                 dd->ipath_flags &= ~IPATH_IB_FORCE_NOTIFY;
305                 if (handled) {
306                         ipath_cdbg(LINKVERB, "LinkDown handled, skipped\n");
307                         goto skip_ibchange; /* chip-code handled */
308                 }
309         }
310
311         /*
312          * Significant enough to always print and get into logs, if it was
313          * unexpected.  If it was a requested state change, we'll have
314          * already cleared the flags, so we won't print this warning
315          */
316         if ((ibstate != arm && ibstate != active) &&
317             (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
318                 dev_info(&dd->pcidev->dev, "Link state changed from %s "
319                          "to %s\n", (dd->ipath_flags & IPATH_LINKARMED) ?
320                          "ARM" : "ACTIVE", ib_linkstate(dd, ibcs));
321         }
322
323         if (ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
324             ltstate == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
325                 u32 lastlts;
326                 lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
327                 /*
328                  * Ignore cycling back and forth from Polling.Active to
329                  * Polling.Quiet while waiting for the other end of the link
330                  * to come up, except to try and decide if we are connected
331                  * to a live IB device or not.  We will cycle back and
332                  * forth between them if no cable is plugged in, the other
333                  * device is powered off or disabled, etc.
334                  */
335                 if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
336                     lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
337                         if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
338                              (++dd->ipath_ibpollcnt == 40)) {
339                                 dd->ipath_flags |= IPATH_NOCABLE;
340                                 *dd->ipath_statusp |=
341                                         IPATH_STATUS_IB_NOCABLE;
342                                 ipath_cdbg(LINKVERB, "Set NOCABLE\n");
343                         }
344                         ipath_cdbg(LINKVERB, "POLL change to %s (%x)\n",
345                                 ipath_ibcstatus_str[ltstate], ibstate);
346                         goto skip_ibchange;
347                 }
348         }
349
350         dd->ipath_ibpollcnt = 0; /* no longer in a Polling state */
351         ipath_stats.sps_iblink++;
352
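            /*
             * If link-recovery monitoring is enabled and the IB link
             * error-recovery counter has advanced since the link last went
             * active, force the link down and wait until it is active again.
             */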
353         if (ibstate != init && dd->ipath_lastlinkrecov && ipath_linkrecovery) {
354                 u64 linkrecov;
355                 linkrecov = ipath_snap_cntr(dd,
356                         dd->ipath_cregs->cr_iblinkerrrecovcnt);
357                 if (linkrecov != dd->ipath_lastlinkrecov) {
358                         ipath_dbg("IB linkrecov up %Lx (%s %s) recov %Lu\n",
359                                 (unsigned long long) ibcs,
360                                 ib_linkstate(dd, ibcs),
361                                 ipath_ibcstatus_str[ltstate],
362                                 (unsigned long long) linkrecov);
363                         /* and no more until active again */
364                         dd->ipath_lastlinkrecov = 0;
365                         ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
366                         goto skip_ibchange;
367                 }
368         }
369
370         if (ibstate == init || ibstate == arm || ibstate == active) {
371                 *dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
372                 if (ibstate == init || ibstate == arm) {
373                         *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
374                         if (dd->ipath_flags & IPATH_LINKACTIVE)
375                                 signal_ib_event(dd, IB_EVENT_PORT_ERR);
376                 }
377                 if (ibstate == arm) {
378                         dd->ipath_flags |= IPATH_LINKARMED;
379                         dd->ipath_flags &= ~(IPATH_LINKUNK |
380                                 IPATH_LINKINIT | IPATH_LINKDOWN |
381                                 IPATH_LINKACTIVE | IPATH_NOCABLE);
382                         ipath_hol_down(dd);
383                 } else if (ibstate == init) {
384                         /*
385                          * set INIT and DOWN.  Down is checked by
386                          * most of the other code, but INIT is
387                          * useful to know in a few places.
388                          */
389                         dd->ipath_flags |= IPATH_LINKINIT |
390                                 IPATH_LINKDOWN;
391                         dd->ipath_flags &= ~(IPATH_LINKUNK |
392                                 IPATH_LINKARMED | IPATH_LINKACTIVE |
393                                 IPATH_NOCABLE);
394                         ipath_hol_down(dd);
395                 } else {  /* active */
396                         dd->ipath_lastlinkrecov = ipath_snap_cntr(dd,
397                                 dd->ipath_cregs->cr_iblinkerrrecovcnt);
398                         *dd->ipath_statusp |=
399                                 IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
400                         dd->ipath_flags |= IPATH_LINKACTIVE;
401                         dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
402                                 | IPATH_LINKDOWN | IPATH_LINKARMED |
403                                 IPATH_NOCABLE);
404                         if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
405                                 ipath_restart_sdma(dd);
406                         signal_ib_event(dd, IB_EVENT_PORT_ACTIVE);
407                         /* LED active not handled in chip _f_updown */
408                         dd->ipath_f_setextled(dd, lstate, ltstate);
409                         ipath_hol_up(dd);
410                 }
411
412                 /*
413                  * print after we've already done the work, so as not to
414                  * delay the state changes and notifications, for debugging
415                  */
416                 if (lstate == lastlstate)
417                         ipath_cdbg(LINKVERB, "Unchanged from last: %s "
418                                 "(%x)\n", ib_linkstate(dd, ibcs), ibstate);
419                 else
420                         ipath_cdbg(VERBOSE, "Unit %u: link up to %s %s (%x)\n",
421                                   dd->ipath_unit, ib_linkstate(dd, ibcs),
422                                   ipath_ibcstatus_str[ltstate],  ibstate);
423         } else { /* down */
424                 if (dd->ipath_flags & IPATH_LINKACTIVE)
425                         signal_ib_event(dd, IB_EVENT_PORT_ERR);
426                 dd->ipath_flags |= IPATH_LINKDOWN;
427                 dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
428                                      | IPATH_LINKACTIVE |
429                                      IPATH_LINKARMED);
430                 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
431                 dd->ipath_lli_counter = 0;
432
433                 if (lastlstate != INFINIPATH_IBCS_L_STATE_DOWN)
434                         ipath_cdbg(VERBOSE, "Unit %u link state down "
435                                    "(state 0x%x), from %s\n",
436                                    dd->ipath_unit, lstate,
437                                    ib_linkstate(dd, dd->ipath_lastibcstat));
438                 else
439                         ipath_cdbg(LINKVERB, "Unit %u link state changed "
440                                    "to %s (0x%x) from down (%x)\n",
441                                    dd->ipath_unit,
442                                    ipath_ibcstatus_str[ltstate],
443                                    ibstate, lastlstate);
444         }
445
446 skip_ibchange:
447         dd->ipath_lastibcstat = ibcs;
448 done:
449         return;
450 }
451
452 static void handle_supp_msgs(struct ipath_devdata *dd,
453                              unsigned supp_msgs, char *msg, u32 msgsz)
454 {
455         /*
456          * Print the message unless it's ibc status change only, which
457          * happens so often we never want to count it.
458          */
459         if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
460                 int iserr;
461                 ipath_err_t mask;
462                 iserr = ipath_decode_err(dd, msg, msgsz,
463                                          dd->ipath_lasterror &
464                                          ~INFINIPATH_E_IBSTATUSCHANGED);
465
466                 mask = INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
467                         INFINIPATH_E_PKTERRS | INFINIPATH_E_SDMADISABLED;
468
469                 /* if we're in debug, then don't mask SDMADISABLED msgs */
470                 if (ipath_debug & __IPATH_DBG)
471                         mask &= ~INFINIPATH_E_SDMADISABLED;
472
473                 if (dd->ipath_lasterror & ~mask)
474                         ipath_dev_err(dd, "Suppressed %u messages for "
475                                       "fast-repeating errors (%s) (%llx)\n",
476                                       supp_msgs, msg,
477                                       (unsigned long long)
478                                       dd->ipath_lasterror);
479                 else {
480                         /*
481                          * rcvegrfull and rcvhdrqfull are "normal", for some
482                          * types of processes (mostly benchmarks) that send
483                          * huge numbers of messages, while not processing
484                          * them. So only complain about these at debug
485                          * level.
486                          */
487                         if (iserr)
488                                 ipath_dbg("Suppressed %u messages for %s\n",
489                                           supp_msgs, msg);
490                         else
491                                 ipath_cdbg(ERRPKT,
492                                         "Suppressed %u messages for %s\n",
493                                           supp_msgs, msg);
494                 }
495         }
496 }
497
498 static unsigned handle_frequent_errors(struct ipath_devdata *dd,
499                                        ipath_err_t errs, char *msg,
500                                        u32 msgsz, int *noprint)
501 {
502         unsigned long nc;
503         static unsigned long nextmsg_time;
504         static unsigned nmsgs, supp_msgs;
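            /* note: these are static, so the throttling below is global across all units */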
505
506         /*
507          * Throttle back "fast" messages to no more than 10 per 5 seconds.
508          * This isn't perfect, but it's a reasonable heuristic. If we get
509          * more than 10, give a 6x longer delay.
510          */
511         nc = jiffies;
512         if (nmsgs > 10) {
513                 if (time_before(nc, nextmsg_time)) {
514                         *noprint = 1;
515                         if (!supp_msgs++)
516                                 nextmsg_time = nc + HZ * 3;
517                 } else if (supp_msgs) {
519                         handle_supp_msgs(dd, supp_msgs, msg, msgsz);
520                         supp_msgs = 0;
521                         nmsgs = 0;
522                 }
523         } else if (!nmsgs++ || time_after(nc, nextmsg_time))
525                 nextmsg_time = nc + HZ / 2;
526
527         return supp_msgs;
528 }
529
530 static void handle_sdma_errors(struct ipath_devdata *dd, ipath_err_t errs)
531 {
532         unsigned long flags;
533         int expected;
534
535         if (ipath_debug & __IPATH_DBG) {
536                 char msg[128];
537                 ipath_decode_err(dd, msg, sizeof msg, errs &
538                         INFINIPATH_E_SDMAERRS);
539                 ipath_dbg("errors %lx (%s)\n", (unsigned long)errs, msg);
540         }
541         if (ipath_debug & __IPATH_VERBDBG) {
542                 unsigned long tl, hd, status, lengen;
543                 tl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
544                 hd = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
545                 status = ipath_read_kreg64(dd,
546                         dd->ipath_kregs->kr_senddmastatus);
547                 lengen = ipath_read_kreg64(dd,
548                         dd->ipath_kregs->kr_senddmalengen);
549                 ipath_cdbg(VERBOSE, "sdma tl 0x%lx hd 0x%lx status 0x%lx "
550                         "lengen 0x%lx\n", tl, hd, status, lengen);
551         }
552
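            /*
             * Mark SDMA as disabled; if an abort was not already in progress,
             * also cancel any outstanding sends.
             */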
553         spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
554         __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
555         expected = test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
556         spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
557         if (!expected)
558                 ipath_cancel_sends(dd, 1);
559 }
560
561 static void handle_sdma_intr(struct ipath_devdata *dd, u64 istat)
562 {
563         unsigned long flags;
564         int expected;
565
566         if ((istat & INFINIPATH_I_SDMAINT) &&
567             !test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
568                 ipath_sdma_intr(dd);
569
570         if (istat & INFINIPATH_I_SDMADISABLED) {
571                 expected = test_bit(IPATH_SDMA_ABORTING,
572                         &dd->ipath_sdma_status);
573                 ipath_dbg("%s SDmaDisabled intr\n",
574                         expected ? "expected" : "unexpected");
575                 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
576                 __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
577                 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
578                 if (!expected)
579                         ipath_cancel_sends(dd, 1);
580                 if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
581                         tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
582         }
583 }
584
585 static int handle_hdrq_full(struct ipath_devdata *dd)
586 {
587         int chkerrpkts = 0;
588         u32 hd, tl;
589         u32 i;
590
591         ipath_stats.sps_hdrqfull++;
592         for (i = 0; i < dd->ipath_cfgports; i++) {
593                 struct ipath_portdata *pd = dd->ipath_pd[i];
594
595                 if (i == 0) {
596                         /*
597                          * For kernel receive queues, we just want to know
598                          * if there are packets in the queue that we can
599                          * process.
600                          */
601                         if (pd->port_head != ipath_get_hdrqtail(pd))
602                                 chkerrpkts |= 1 << i;
603                         continue;
604                 }
605
606                 /* Skip if user context is not open */
607                 if (!pd || !pd->port_cnt)
608                         continue;
609
610                 /* Don't report the same tail position multiple times. */
611                 if (dd->ipath_flags & IPATH_NODMA_RTAIL)
612                         tl = ipath_read_ureg32(dd, ur_rcvhdrtail, i);
613                 else
614                         tl = ipath_get_rcvhdrtail(pd);
615                 if (tl == pd->port_lastrcvhdrqtail)
616                         continue;
617
618                 hd = ipath_read_ureg32(dd, ur_rcvhdrhead, i);
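                    /* check whether the queue has really filled, including the wraparound case */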
619                 if (hd == (tl + 1) || (!hd && tl == dd->ipath_hdrqlast)) {
620                         pd->port_lastrcvhdrqtail = tl;
621                         pd->port_hdrqfull++;
622                         /* flush hdrqfull so that poll() sees it */
623                         wmb();
624                         wake_up_interruptible(&pd->port_wait);
625                 }
626         }
627
628         return chkerrpkts;
629 }
630
631 static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
632 {
633         char msg[128];
634         u64 ignore_this_time = 0;
635         u64 iserr = 0;
636         int chkerrpkts = 0, noprint = 0;
637         unsigned supp_msgs;
638         int log_idx;
639
640         /*
641          * don't report errors that are masked, either at init
642          * (not set in ipath_errormask), or temporarily (set in
643          * ipath_maskederrs)
644          */
645         errs &= dd->ipath_errormask & ~dd->ipath_maskederrs;
646
647         supp_msgs = handle_frequent_errors(dd, errs, msg, (u32)sizeof msg,
648                 &noprint);
649
650         /* do these first, they are most important */
651         if (errs & INFINIPATH_E_HARDWARE) {
652                 /* reuse same msg buf */
653                 dd->ipath_f_handle_hwerrors(dd, msg, sizeof msg);
654         } else {
655                 u64 mask;
656                 for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx) {
657                         mask = dd->ipath_eep_st_masks[log_idx].errs_to_log;
658                         if (errs & mask)
659                                 ipath_inc_eeprom_err(dd, log_idx, 1);
660                 }
661         }
662
663         if (errs & INFINIPATH_E_SDMAERRS)
664                 handle_sdma_errors(dd, errs);
665
666         if (!noprint && (errs & ~dd->ipath_e_bitsextant))
667                 ipath_dev_err(dd, "error interrupt with unknown errors "
668                               "%llx set\n", (unsigned long long)
669                               (errs & ~dd->ipath_e_bitsextant));
670
671         if (errs & E_SUM_ERRS)
672                 ignore_this_time = handle_e_sum_errs(dd, errs);
673         else if ((errs & E_SUM_LINK_PKTERRS) &&
674             !(dd->ipath_flags & IPATH_LINKACTIVE)) {
675                 /*
676                  * This can happen when SMA is trying to bring the link
677                  * up, but the IB link changes state at the "wrong" time.
678                  * The IB logic then complains that the packet isn't
679                  * valid.  We don't want to confuse people, so we just
680                  * don't print them, except at debug
681                  */
682                 ipath_dbg("Ignoring packet errors %llx, because link not "
683                           "ACTIVE\n", (unsigned long long) errs);
684                 ignore_this_time = errs & E_SUM_LINK_PKTERRS;
685         }
686
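            /*
             * Once this many messages have been suppressed, assume the errors
             * are essentially continuous and mask them temporarily; they are
             * re-enabled later from ipath_get_faststats().
             */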
687         if (supp_msgs == 250000) {
688                 int s_iserr;
689                 /*
690                  * It's not entirely reasonable to assume that the errors set
691                  * in the last clear period are all responsible for the
692                  * problem, but the alternative is to assume they are the only
693                  * ones on this particular interrupt, which also isn't great.
694                  */
695                 dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
696
697                 dd->ipath_errormask &= ~dd->ipath_maskederrs;
698                 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
699                                  dd->ipath_errormask);
700                 s_iserr = ipath_decode_err(dd, msg, sizeof msg,
701                                            dd->ipath_maskederrs);
702
703                 if (dd->ipath_maskederrs &
704                     ~(INFINIPATH_E_RRCVEGRFULL |
705                       INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
706                         ipath_dev_err(dd, "Temporarily disabling "
707                             "error(s) %llx reporting; too frequent (%s)\n",
708                                 (unsigned long long) dd->ipath_maskederrs,
709                                 msg);
710                 else {
711                         /*
712                          * rcvegrfull and rcvhdrqfull are "normal",
713                          * for some types of processes (mostly benchmarks)
714                          * that send huge numbers of messages, while not
715                          * processing them.  So only complain about
716                          * these at debug level.
717                          */
718                         if (s_iserr)
719                                 ipath_dbg("Temporarily disabling reporting "
720                                     "too frequent queue full errors (%s)\n",
721                                     msg);
722                         else
723                                 ipath_cdbg(ERRPKT,
724                                     "Temporarily disabling reporting too"
725                                     " frequent packet errors (%s)\n",
726                                     msg);
727                 }
728
729                 /*
730                  * Re-enable the masked errors after around 3 minutes, in
731                  * ipath_get_faststats().  If we have a series of fast
732                  * repeating but different errors, the interval will keep
733                  * stretching out, but that's OK, since that situation is
734                  * pretty catastrophic anyway.
735                  */
736                 dd->ipath_unmasktime = jiffies + HZ * 180;
737         }
738
739         ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, errs);
740         if (ignore_this_time)
741                 errs &= ~ignore_this_time;
742         if (errs & ~dd->ipath_lasterror) {
743                 errs &= ~dd->ipath_lasterror;
744                 /* never suppress duplicate hwerrors or ibstatuschange */
745                 dd->ipath_lasterror |= errs &
746                         ~(INFINIPATH_E_HARDWARE |
747                           INFINIPATH_E_IBSTATUSCHANGED);
748         }
749
750         if (errs & INFINIPATH_E_SENDSPECIALTRIGGER) {
751                 dd->ipath_spectriggerhit++;
752                 ipath_dbg("%lu special trigger hits\n",
753                         dd->ipath_spectriggerhit);
754         }
755
756         /* likely due to cancel; so suppress message unless verbose */
757         if ((errs & (INFINIPATH_E_SPKTLEN | INFINIPATH_E_SPIOARMLAUNCH)) &&
758                 dd->ipath_lastcancel > jiffies) {
759                 /* armlaunch takes precedence; it often causes both. */
760                 ipath_cdbg(VERBOSE,
761                         "Suppressed %s error (%llx) after sendbuf cancel\n",
762                         (errs &  INFINIPATH_E_SPIOARMLAUNCH) ?
763                         "armlaunch" : "sendpktlen", (unsigned long long)errs);
764                 errs &= ~(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SPKTLEN);
765         }
766
767         if (!errs)
768                 return 0;
769
770         if (!noprint) {
771                 ipath_err_t mask;
772                 /*
773                  * The ones we mask off are handled specially below
774                  * or above.  Also mask SDMADISABLED by default as it
775                  * is too chatty.
776                  */
777                 mask = INFINIPATH_E_IBSTATUSCHANGED |
778                         INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
779                         INFINIPATH_E_HARDWARE | INFINIPATH_E_SDMADISABLED;
780
781                 /* if we're in debug, then don't mask SDMADISABLED msgs */
782                 if (ipath_debug & __IPATH_DBG)
783                         mask &= ~INFINIPATH_E_SDMADISABLED;
784
785                 ipath_decode_err(dd, msg, sizeof msg, errs & ~mask);
786         } else
787                 /* so we don't need if (!noprint) at strlcat's below */
788                 *msg = 0;
789
790         if (errs & E_SUM_PKTERRS) {
791                 ipath_stats.sps_pkterrs++;
792                 chkerrpkts = 1;
793         }
794         if (errs & E_SUM_ERRS)
795                 ipath_stats.sps_errs++;
796
797         if (errs & (INFINIPATH_E_RICRC | INFINIPATH_E_RVCRC)) {
798                 ipath_stats.sps_crcerrs++;
799                 chkerrpkts = 1;
800         }
801         iserr = errs & ~(E_SUM_PKTERRS | INFINIPATH_E_PKTERRS);
802
803
804         /*
805          * We don't want to print these two as they happen, or we can make
806          * the situation even worse, because it takes so long to print
807          * messages to serial consoles.  Kernel ports get printed from
808          * fast_stats, no more than every 5 seconds, user ports get printed
809          * on close
810          */
811         if (errs & INFINIPATH_E_RRCVHDRFULL)
812                 chkerrpkts |= handle_hdrq_full(dd);
813         if (errs & INFINIPATH_E_RRCVEGRFULL) {
814                 struct ipath_portdata *pd = dd->ipath_pd[0];
815
816                 /*
817                  * since this is of less importance and not likely to
818                  * happen without also getting hdrfull, only count
819                  * occurrences; don't check each port (or even the kernel
820                  * vs user)
821                  */
822                 ipath_stats.sps_etidfull++;
823                 if (pd->port_head != ipath_get_hdrqtail(pd))
824                         chkerrpkts |= 1;
825         }
826
827         /*
828          * Do this before IBSTATUSCHANGED, in case both bits are set in a single
829          * interrupt; we want the STATUSCHANGE to "win", so that our
830          * internal copy of the state machine is updated correctly.
831          */
832         if (errs & INFINIPATH_E_RIBLOSTLINK) {
833                 /*
834                  * force through block below
835                  */
836                 errs |= INFINIPATH_E_IBSTATUSCHANGED;
837                 ipath_stats.sps_iblink++;
838                 dd->ipath_flags |= IPATH_LINKDOWN;
839                 dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
840                                      | IPATH_LINKARMED | IPATH_LINKACTIVE);
841                 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
842
843                 ipath_dbg("Lost link, link now down (%s)\n",
844                         ipath_ibcstatus_str[ipath_read_kreg64(dd,
845                         dd->ipath_kregs->kr_ibcstatus) & 0xf]);
846         }
847         if (errs & INFINIPATH_E_IBSTATUSCHANGED)
848                 handle_e_ibstatuschanged(dd, errs);
849
850         if (errs & INFINIPATH_E_RESET) {
851                 if (!noprint)
852                         ipath_dev_err(dd, "Got reset, requires re-init "
853                                       "(unload and reload driver)\n");
854                 dd->ipath_flags &= ~IPATH_INITTED;      /* needs re-init */
855                 /* mark as having had error */
856                 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
857                 *dd->ipath_statusp &= ~IPATH_STATUS_IB_CONF;
858         }
859
860         if (!noprint && *msg) {
861                 if (iserr)
862                         ipath_dev_err(dd, "%s error\n", msg);
863         }
864         if (dd->ipath_state_wanted & dd->ipath_flags) {
865                 ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, "
866                            "waking\n", dd->ipath_state_wanted,
867                            dd->ipath_flags);
868                 wake_up_interruptible(&ipath_state_wait);
869         }
870
871         return chkerrpkts;
872 }
873
874 /*
875  * try to cleanup as much as possible for anything that might have gone
876  * wrong while in freeze mode, such as pio buffers being written by user
877  * processes (causing armlaunch), send errors due to going into freeze mode,
878  * etc., and try to avoid causing extra interrupts while doing so.
879  * Forcibly update the in-memory pioavail register copies after cleanup
880  * because the chip won't do it while in freeze mode (the register values
881  * themselves are kept correct).
882  * Make sure that we don't lose any important interrupts by using the chip
883  * feature that says that writing 0 to a bit in *clear that is set in
884  * *status will cause an interrupt to be generated again (if allowed by
885  * the *mask value).
886  */
887 void ipath_clear_freeze(struct ipath_devdata *dd)
888 {
889         /* disable error interrupts, to avoid confusion */
890         ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
891
892         /* also disable interrupts; errormask is sometimes overwritten */
893         ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
894
895         ipath_cancel_sends(dd, 1);
896
897         /* clear the freeze, and be sure chip saw it */
898         ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
899                          dd->ipath_control);
900         ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
901
902         /* force in-memory update now we are out of freeze */
903         ipath_force_pio_avail_update(dd);
904
905         /*
906          * force new interrupt if any hwerr, error or interrupt bits are
907          * still set, and clear "safe" send packet errors related to freeze
908          * and cancelling sends.  Re-enable error interrupts before possible
909          * force of re-interrupt on pending interrupts.
910          */
911         ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 0ULL);
912         ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
913                 E_SPKT_ERRS_IGNORE);
914         ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
915                 dd->ipath_errormask);
916         ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, -1LL);
917         ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
918 }
919
920
921 /* this is separate to allow for better optimization of ipath_intr() */
922
923 static noinline void ipath_bad_intr(struct ipath_devdata *dd, u32 *unexpectp)
924 {
925         /*
926          * These sometimes happen during driver init and unload; we don't want
927          * to process any interrupts at that point.
928          */
929
930         /* this is just a bandaid, not a fix, if something goes badly
931          * wrong */
932         if (++*unexpectp > 100) {
933                 if (++*unexpectp > 105) {
934                         /*
935                          * ok, we must be taking somebody else's interrupts,
936                          * due to a messed up mptable and/or PIRQ table, so
937                          * unregister the interrupt.  We've seen this during
938                          * linuxbios development work, and it may happen in
939                          * the future again.
940                          */
941                         if (dd->pcidev && dd->ipath_irq) {
942                                 ipath_dev_err(dd, "Now %u unexpected "
943                                               "interrupts, unregistering "
944                                               "interrupt handler\n",
945                                               *unexpectp);
946                                 ipath_dbg("free_irq of irq %d\n",
947                                           dd->ipath_irq);
948                                 dd->ipath_f_free_irq(dd);
949                         }
950                 }
951                 if (ipath_read_ireg(dd, dd->ipath_kregs->kr_intmask)) {
952                         ipath_dev_err(dd, "%u unexpected interrupts, "
953                                       "disabling interrupts completely\n",
954                                       *unexpectp);
955                         /*
956                          * disable all interrupts, something is very wrong
957                          */
958                         ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask,
959                                          0ULL);
960                 }
961         } else if (*unexpectp > 1)
962                 ipath_dbg("Interrupt when not ready, should not happen, "
963                           "ignoring\n");
964 }
965
966 static noinline void ipath_bad_regread(struct ipath_devdata *dd)
967 {
968         static int allbits;
969
970         /* separate routine, for better optimization of ipath_intr() */
971
972         /*
973          * We print the message and disable interrupts, in hope of
974          * having a better chance of debugging the problem.
975          */
976         ipath_dev_err(dd,
977                       "Read of interrupt status failed (all bits set)\n");
978         if (allbits++) {
979                 /* disable all interrupts, something is very wrong */
980                 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
981                 if (allbits == 2) {
982                         ipath_dev_err(dd, "Still bad interrupt status, "
983                                       "unregistering interrupt\n");
984                         dd->ipath_f_free_irq(dd);
985                 } else if (allbits > 2) {
986                         if ((allbits % 10000) == 0)
987                                 printk(".");
988                 } else
989                         ipath_dev_err(dd, "Disabling interrupts, "
990                                       "multiple errors\n");
991         }
992 }
993
994 static void handle_layer_pioavail(struct ipath_devdata *dd)
995 {
996         unsigned long flags;
997         int ret;
998
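            /*
             * Tell the verbs layer that PIO buffers are available; a positive
             * return is taken to mean more are still wanted, in which case the
             * buffer-available interrupt is re-enabled below.
             */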
999         ret = ipath_ib_piobufavail(dd->verbs_dev);
1000         if (ret > 0)
1001                 goto set;
1002
1003         return;
1004 set:
1005         spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1006         dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
1007         ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1008                          dd->ipath_sendctrl);
1009         ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1010         spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1011 }
1012
1013 /*
1014  * Handle receive interrupts for user ports; this means a user
1015  * process was waiting for a packet to arrive, and didn't want
1016  * to poll
1017  */
1018 static void handle_urcv(struct ipath_devdata *dd, u64 istat)
1019 {
1020         u64 portr;
1021         int i;
1022         int rcvdint = 0;
1023
1024         /*
1025          * test_and_clear_bit(IPATH_PORT_WAITING_RCV) and
1026          * test_and_clear_bit(IPATH_PORT_WAITING_URG) below
1027          * would both like timely updates of the bits so that
1028          * we don't pass them by unnecessarily.  the rmb()
1029          * here ensures that we see them promptly -- the
1030          * corresponding wmb()'s are in ipath_poll_urgent()
1031          * and ipath_poll_next()...
1032          */
1033         rmb();
1034         portr = ((istat >> dd->ipath_i_rcvavail_shift) &
1035                  dd->ipath_i_rcvavail_mask) |
1036                 ((istat >> dd->ipath_i_rcvurg_shift) &
1037                  dd->ipath_i_rcvurg_mask);
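            /* portr now has one bit per port, combining the rcvavail and rcvurg interrupt bits */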
1038         for (i = 1; i < dd->ipath_cfgports; i++) {
1039                 struct ipath_portdata *pd = dd->ipath_pd[i];
1040
1041                 if (portr & (1 << i) && pd && pd->port_cnt) {
1042                         if (test_and_clear_bit(IPATH_PORT_WAITING_RCV,
1043                                                &pd->port_flag)) {
1044                                 clear_bit(i + dd->ipath_r_intravail_shift,
1045                                           &dd->ipath_rcvctrl);
1046                                 wake_up_interruptible(&pd->port_wait);
1047                                 rcvdint = 1;
1048                         } else if (test_and_clear_bit(IPATH_PORT_WAITING_URG,
1049                                                       &pd->port_flag)) {
1050                                 pd->port_urgent++;
1051                                 wake_up_interruptible(&pd->port_wait);
1052                         }
1053                 }
1054         }
1055         if (rcvdint) {
1056                 /* only want to take one interrupt, so turn off the rcv
1057                  * interrupt for all the ports that had rcv_waiting set
1058                  * (but never for the kernel port)
1059                  */
1060                 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1061                                  dd->ipath_rcvctrl);
1062         }
1063 }
1064
1065 irqreturn_t ipath_intr(int irq, void *data)
1066 {
1067         struct ipath_devdata *dd = data;
1068         u64 istat, chk0rcv = 0;
1069         ipath_err_t estat = 0;
1070         irqreturn_t ret;
1071         static unsigned unexpected = 0;
1072         u64 kportrbits;
1073
1074         ipath_stats.sps_ints++;
1075
1076         if (dd->ipath_int_counter != (u32) -1)
1077                 dd->ipath_int_counter++;
1078
1079         if (!(dd->ipath_flags & IPATH_PRESENT)) {
1080                 /*
1081                  * This return value is not great, but we do not want the
1082                  * interrupt core code to remove our interrupt handler
1083                  * because we don't appear to be handling an interrupt
1084                  * during a chip reset.
1085                  */
1086                 return IRQ_HANDLED;
1087         }
1088
1089         /*
1090          * this needs to be flags&initted, not statusp, so we keep
1091          * taking interrupts even after link goes down, etc.
1092          * Also, we *must* clear the interrupt at some point, or we won't
1093          * take it again, which can be real bad for errors, etc...
1094          */
1095
1096         if (!(dd->ipath_flags & IPATH_INITTED)) {
1097                 ipath_bad_intr(dd, &unexpected);
1098                 ret = IRQ_NONE;
1099                 goto bail;
1100         }
1101
1102         istat = ipath_read_ireg(dd, dd->ipath_kregs->kr_intstatus);
1103
1104         if (unlikely(!istat)) {
1105                 ipath_stats.sps_nullintr++;
1106                 ret = IRQ_NONE; /* not our interrupt, or already handled */
1107                 goto bail;
1108         }
1109         if (unlikely(istat == -1)) {
1110                 ipath_bad_regread(dd);
1111                 /* don't know if it was our interrupt or not */
1112                 ret = IRQ_NONE;
1113                 goto bail;
1114         }
1115
1116         if (unexpected)
1117                 unexpected = 0;
1118
1119         if (unlikely(istat & ~dd->ipath_i_bitsextant))
1120                 ipath_dev_err(dd,
1121                               "interrupt with unknown interrupts %Lx set\n",
1122                               (unsigned long long)
1123                               istat & ~dd->ipath_i_bitsextant);
1124         else if (istat & ~INFINIPATH_I_ERROR) /* errors do own printing */
1125                 ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n",
1126                         (unsigned long long) istat);
1127
1128         if (istat & INFINIPATH_I_ERROR) {
1129                 ipath_stats.sps_errints++;
1130                 estat = ipath_read_kreg64(dd,
1131                                           dd->ipath_kregs->kr_errorstatus);
1132                 if (!estat)
1133                         dev_info(&dd->pcidev->dev, "error interrupt (%Lx), "
1134                                  "but no error bits set!\n",
1135                                  (unsigned long long) istat);
1136                 else if (estat == -1LL)
1137                         /*
1138                          * should we try clearing all, or hope next read
1139                          * works?
1140                          */
1141                         ipath_dev_err(dd, "Read of error status failed "
1142                                       "(all bits set); ignoring\n");
1143                 else
1144                         chk0rcv |= handle_errors(dd, estat);
1145         }
1146
1147         if (istat & INFINIPATH_I_GPIO) {
1148                 /*
1149                  * GPIO interrupts fall in two broad classes:
1150                  * GPIO_2 indicates (on some HT4xx boards) that a packet
1151                  *        has arrived for Port 0. Checking for this
1152                  *        is controlled by flag IPATH_GPIO_INTR.
1153                  * GPIO_3..5 on IBA6120 Rev2 and IBA6110 Rev4 chips indicate
1154                  *        errors that we need to count. Checking for this
1155                  *        is controlled by flag IPATH_GPIO_ERRINTRS.
1156                  */
1157                 u32 gpiostatus;
1158                 u32 to_clear = 0;
1159
1160                 gpiostatus = ipath_read_kreg32(
1161                         dd, dd->ipath_kregs->kr_gpio_status);
1162                 /* First the error-counter case. */
1163                 if ((gpiostatus & IPATH_GPIO_ERRINTR_MASK) &&
1164                     (dd->ipath_flags & IPATH_GPIO_ERRINTRS)) {
1165                         /* want to clear the bits we see asserted. */
1166                         to_clear |= (gpiostatus & IPATH_GPIO_ERRINTR_MASK);
1167
1168                         /*
1169                          * Count appropriately, clear bits out of our copy,
1170                          * as they have been "handled".
1171                          */
1172                         if (gpiostatus & (1 << IPATH_GPIO_RXUVL_BIT)) {
1173                                 ipath_dbg("FlowCtl on UnsupVL\n");
1174                                 dd->ipath_rxfc_unsupvl_errs++;
1175                         }
1176                         if (gpiostatus & (1 << IPATH_GPIO_OVRUN_BIT)) {
1177                                 ipath_dbg("Overrun Threshold exceeded\n");
1178                                 dd->ipath_overrun_thresh_errs++;
1179                         }
1180                         if (gpiostatus & (1 << IPATH_GPIO_LLI_BIT)) {
1181                                 ipath_dbg("Local Link Integrity error\n");
1182                                 dd->ipath_lli_errs++;
1183                         }
1184                         gpiostatus &= ~IPATH_GPIO_ERRINTR_MASK;
1185                 }
1186                 /* Now the Port0 Receive case */
1187                 if ((gpiostatus & (1 << IPATH_GPIO_PORT0_BIT)) &&
1188                     (dd->ipath_flags & IPATH_GPIO_INTR)) {
1189                         /*
1190                          * GPIO status bit 2 is set, and we expected it.
1191                          * Clear it, and note that a Port0 packet may be
1192                          * pending.  This probably only happens if a Port0 pkt
1193                          * arrives at _just_ the wrong time, and we
1194                          * handle that by setting chk0rcv.
1195                          */
1196                         to_clear |= (1 << IPATH_GPIO_PORT0_BIT);
1197                         gpiostatus &= ~(1 << IPATH_GPIO_PORT0_BIT);
1198                         chk0rcv = 1;
1199                 }
1200                 if (gpiostatus) {
1201                         /*
1202                          * Some unexpected bits remain. If they could have
1203                          * caused the interrupt, complain and clear.
1204                          * To avoid repetition of this condition, also clear
1205                          * the mask. It is almost certainly due to error.
1206                          */
1207                         const u32 mask = (u32) dd->ipath_gpio_mask;
1208
1209                         if (mask & gpiostatus) {
1210                                 ipath_dbg("Unexpected GPIO IRQ bits %x\n",
1211                                   gpiostatus & mask);
1212                                 to_clear |= (gpiostatus & mask);
1213                                 dd->ipath_gpio_mask &= ~(gpiostatus & mask);
1214                                 ipath_write_kreg(dd,
1215                                         dd->ipath_kregs->kr_gpio_mask,
1216                                         dd->ipath_gpio_mask);
1217                         }
1218                 }
1219                 if (to_clear) {
1220                         ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
1221                                         (u64) to_clear);
1222                 }
1223         }
1224
1225         /*
1226          * Clear the interrupt bits we found set, unless they are receive
1227          * related, in which case we already cleared them above, and don't
1228          * want to clear them again, because we might lose an interrupt.
1229          * Clear it early, so we "know" the chip will have seen this by
1230          * the time we process the queue, and will re-interrupt if necessary.
1231          * The processor itself won't take the interrupt again until we return.
1232          */
1233         ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
1234
1235         /*
1236          * Handle kernel receive queues before checking for pio buffers
1237          * available since receives can overflow; piobuf waiters can afford
1238          * a few extra cycles, since they were waiting anyway, and users
1239          * waiting for receive are at the bottom.
1240          */
1241         kportrbits = (1ULL << dd->ipath_i_rcvavail_shift) |
1242                 (1ULL << dd->ipath_i_rcvurg_shift);
1243         if (chk0rcv || (istat & kportrbits)) {
1244                 istat &= ~kportrbits;
1245                 ipath_kreceive(dd->ipath_pd[0]);
1246         }
1247
1248         if (istat & ((dd->ipath_i_rcvavail_mask << dd->ipath_i_rcvavail_shift) |
1249                      (dd->ipath_i_rcvurg_mask << dd->ipath_i_rcvurg_shift)))
1250                 handle_urcv(dd, istat);
1251
1252         if (istat & (INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED))
1253                 handle_sdma_intr(dd, istat);
1254
1255         if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
1256                 unsigned long flags;
1257
1258                 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1259                 dd->ipath_sendctrl &= ~INFINIPATH_S_PIOINTBUFAVAIL;
1260                 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1261                                  dd->ipath_sendctrl);
1262                 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1263                 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1264
1265                 /* always process; sdma verbs uses PIO for acks and VL15  */
1266                 handle_layer_pioavail(dd);
1267         }
1268
1269         ret = IRQ_HANDLED;
1270
1271 bail:
1272         return ret;
1273 }