drivers/infiniband/hw/ipath/ipath_verbs.c
1 /*
2  * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33
34 #include <rdma/ib_mad.h>
35 #include <rdma/ib_user_verbs.h>
36 #include <linux/io.h>
37 #include <linux/utsname.h>
38
39 #include "ipath_kernel.h"
40 #include "ipath_verbs.h"
41 #include "ipath_common.h"
42
43 static unsigned int ib_ipath_qp_table_size = 251;
44 module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
45 MODULE_PARM_DESC(qp_table_size, "QP table size");
46
47 unsigned int ib_ipath_lkey_table_size = 12;
48 module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint,
49                    S_IRUGO);
50 MODULE_PARM_DESC(lkey_table_size,
51                  "LKEY table size in bits (2^n, 1 <= n <= 23)");
52
53 static unsigned int ib_ipath_max_pds = 0xFFFF;
54 module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO);
55 MODULE_PARM_DESC(max_pds,
56                  "Maximum number of protection domains to support");
57
58 static unsigned int ib_ipath_max_ahs = 0xFFFF;
59 module_param_named(max_ahs, ib_ipath_max_ahs, uint, S_IWUSR | S_IRUGO);
60 MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
61
62 unsigned int ib_ipath_max_cqes = 0x2FFFF;
63 module_param_named(max_cqes, ib_ipath_max_cqes, uint, S_IWUSR | S_IRUGO);
64 MODULE_PARM_DESC(max_cqes,
65                  "Maximum number of completion queue entries to support");
66
67 unsigned int ib_ipath_max_cqs = 0x1FFFF;
68 module_param_named(max_cqs, ib_ipath_max_cqs, uint, S_IWUSR | S_IRUGO);
69 MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
70
71 unsigned int ib_ipath_max_qp_wrs = 0x3FFF;
72 module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint,
73                    S_IWUSR | S_IRUGO);
74 MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
75
76 unsigned int ib_ipath_max_qps = 16384;
77 module_param_named(max_qps, ib_ipath_max_qps, uint, S_IWUSR | S_IRUGO);
78 MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
79
80 unsigned int ib_ipath_max_sges = 0x60;
81 module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO);
82 MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
83
84 unsigned int ib_ipath_max_mcast_grps = 16384;
85 module_param_named(max_mcast_grps, ib_ipath_max_mcast_grps, uint,
86                    S_IWUSR | S_IRUGO);
87 MODULE_PARM_DESC(max_mcast_grps,
88                  "Maximum number of multicast groups to support");
89
90 unsigned int ib_ipath_max_mcast_qp_attached = 16;
91 module_param_named(max_mcast_qp_attached, ib_ipath_max_mcast_qp_attached,
92                    uint, S_IWUSR | S_IRUGO);
93 MODULE_PARM_DESC(max_mcast_qp_attached,
94                  "Maximum number of attached QPs to support");
95
96 unsigned int ib_ipath_max_srqs = 1024;
97 module_param_named(max_srqs, ib_ipath_max_srqs, uint, S_IWUSR | S_IRUGO);
98 MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
99
100 unsigned int ib_ipath_max_srq_sges = 128;
101 module_param_named(max_srq_sges, ib_ipath_max_srq_sges,
102                    uint, S_IWUSR | S_IRUGO);
103 MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
104
105 unsigned int ib_ipath_max_srq_wrs = 0x1FFFF;
106 module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs,
107                    uint, S_IWUSR | S_IRUGO);
108 MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
109
110 static unsigned int ib_ipath_disable_sma;
111 module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO);
112 MODULE_PARM_DESC(disable_sma, "Disable the SMA");
113
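/*
 * Table of which post/process operations are allowed in each QP state,
 * indexed by the ib_qp_state value.
 */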
114 const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
115         [IB_QPS_RESET] = 0,
116         [IB_QPS_INIT] = IPATH_POST_RECV_OK,
117         [IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
118         [IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
119             IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK,
120         [IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
121             IPATH_POST_SEND_OK,
122         [IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
123         [IB_QPS_ERR] = 0,
124 };
125
126 struct ipath_ucontext {
127         struct ib_ucontext ibucontext;
128 };
129
130 static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
131                                                   *ibucontext)
132 {
133         return container_of(ibucontext, struct ipath_ucontext, ibucontext);
134 }
135
136 /*
137  * Translate ib_wr_opcode into ib_wc_opcode.
138  */
139 const enum ib_wc_opcode ib_ipath_wc_opcode[] = {
140         [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
141         [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
142         [IB_WR_SEND] = IB_WC_SEND,
143         [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
144         [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
145         [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
146         [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
147 };
148
149 /*
150  * System image GUID.
151  */
152 static __be64 sys_image_guid;
153
154 /**
155  * ipath_copy_sge - copy data to SGE memory
156  * @ss: the SGE state
157  * @data: the data to copy
158  * @length: the length of the data
159  */
160 void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length)
161 {
162         struct ipath_sge *sge = &ss->sge;
163
164         while (length) {
165                 u32 len = sge->length;
166
167                 if (len > length)
168                         len = length;
169                 if (len > sge->sge_length)
170                         len = sge->sge_length;
171                 BUG_ON(len == 0);
172                 memcpy(sge->vaddr, data, len);
173                 sge->vaddr += len;
174                 sge->length -= len;
175                 sge->sge_length -= len;
176                 if (sge->sge_length == 0) {
177                         if (--ss->num_sge)
178                                 *sge = *ss->sg_list++;
179                 } else if (sge->length == 0 && sge->mr != NULL) {
180                         if (++sge->n >= IPATH_SEGSZ) {
181                                 if (++sge->m >= sge->mr->mapsz)
182                                         break;
183                                 sge->n = 0;
184                         }
185                         sge->vaddr =
186                                 sge->mr->map[sge->m]->segs[sge->n].vaddr;
187                         sge->length =
188                                 sge->mr->map[sge->m]->segs[sge->n].length;
189                 }
190                 data += len;
191                 length -= len;
192         }
193 }
194
195 /**
196  * ipath_skip_sge - skip over SGE memory (nearly a duplicate of ipath_copy_sge)
197  * @ss: the SGE state
198  * @length: the number of bytes to skip
199  */
200 void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
201 {
202         struct ipath_sge *sge = &ss->sge;
203
204         while (length) {
205                 u32 len = sge->length;
206
207                 if (len > length)
208                         len = length;
209                 if (len > sge->sge_length)
210                         len = sge->sge_length;
211                 BUG_ON(len == 0);
212                 sge->vaddr += len;
213                 sge->length -= len;
214                 sge->sge_length -= len;
215                 if (sge->sge_length == 0) {
216                         if (--ss->num_sge)
217                                 *sge = *ss->sg_list++;
218                 } else if (sge->length == 0 && sge->mr != NULL) {
219                         if (++sge->n >= IPATH_SEGSZ) {
220                                 if (++sge->m >= sge->mr->mapsz)
221                                         break;
222                                 sge->n = 0;
223                         }
224                         sge->vaddr =
225                                 sge->mr->map[sge->m]->segs[sge->n].vaddr;
226                         sge->length =
227                                 sge->mr->map[sge->m]->segs[sge->n].length;
228                 }
229                 length -= len;
230         }
231 }
232
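/*
 * Generate a flushed (IB_WC_WR_FLUSH_ERR) completion on the send CQ for a
 * work request posted while the QP is in the SQE or error state (see the
 * C10-96 note in ipath_post_one_send()).
 */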
233 static void ipath_flush_wqe(struct ipath_qp *qp, struct ib_send_wr *wr)
234 {
235         struct ib_wc wc;
236
237         memset(&wc, 0, sizeof(wc));
238         wc.wr_id = wr->wr_id;
239         wc.status = IB_WC_WR_FLUSH_ERR;
240         wc.opcode = ib_ipath_wc_opcode[wr->opcode];
241         wc.qp = &qp->ibqp;
242         ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
243 }
244
245 /**
246  * ipath_post_one_send - post one RC, UC, or UD send work request
247  * @qp: the QP to post on
248  * @wr: the work request to send
249  */
250 static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
251 {
252         struct ipath_swqe *wqe;
253         u32 next;
254         int i;
255         int j;
256         int acc;
257         int ret;
258         unsigned long flags;
259
260         spin_lock_irqsave(&qp->s_lock, flags);
261
262         /* Check that state is OK to post send. */
263         if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))) {
264                 if (qp->state != IB_QPS_SQE && qp->state != IB_QPS_ERR)
265                         goto bail_inval;
266                 /* C10-96 says generate a flushed completion entry. */
267                 ipath_flush_wqe(qp, wr);
268                 ret = 0;
269                 goto bail;
270         }
271
272         /* IB spec says that num_sge == 0 is OK. */
273         if (wr->num_sge > qp->s_max_sge)
274                 goto bail_inval;
275
276         /*
277          * Don't allow RDMA reads or atomic operations on UC QPs, and
278          * reject undefined opcodes.
279          * Make sure the buffer is large enough to hold the result for atomics.
280          */
281         if (qp->ibqp.qp_type == IB_QPT_UC) {
282                 if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
283                         goto bail_inval;
284         } else if (qp->ibqp.qp_type == IB_QPT_UD) {
285                 /* Check UD opcode */
286                 if (wr->opcode != IB_WR_SEND &&
287                     wr->opcode != IB_WR_SEND_WITH_IMM)
288                         goto bail_inval;
289                 /* Check UD destination address PD */
290                 if (qp->ibqp.pd != wr->wr.ud.ah->pd)
291                         goto bail_inval;
292         } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
293                 goto bail_inval;
294         else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
295                    (wr->num_sge == 0 ||
296                     wr->sg_list[0].length < sizeof(u64) ||
297                     wr->sg_list[0].addr & (sizeof(u64) - 1)))
298                 goto bail_inval;
299         else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
300                 goto bail_inval;
301
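        /*
         * The send work queue is a circular buffer; it is full when
         * advancing s_head would catch up with s_last.
         */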
302         next = qp->s_head + 1;
303         if (next >= qp->s_size)
304                 next = 0;
305         if (next == qp->s_last) {
306                 ret = -ENOMEM;
307                 goto bail;
308         }
309
310         wqe = get_swqe_ptr(qp, qp->s_head);
311         wqe->wr = *wr;
312         wqe->ssn = qp->s_ssn++;
313         wqe->length = 0;
314         if (wr->num_sge) {
315                 acc = wr->opcode >= IB_WR_RDMA_READ ?
316                         IB_ACCESS_LOCAL_WRITE : 0;
317                 for (i = 0, j = 0; i < wr->num_sge; i++) {
318                         u32 length = wr->sg_list[i].length;
319                         int ok;
320
321                         if (length == 0)
322                                 continue;
323                         ok = ipath_lkey_ok(qp, &wqe->sg_list[j],
324                                            &wr->sg_list[i], acc);
325                         if (!ok)
326                                 goto bail_inval;
327                         wqe->length += length;
328                         j++;
329                 }
330                 wqe->wr.num_sge = j;
331         }
332         if (qp->ibqp.qp_type == IB_QPT_UC ||
333             qp->ibqp.qp_type == IB_QPT_RC) {
334                 if (wqe->length > 0x80000000U)
335                         goto bail_inval;
336         } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu)
337                 goto bail_inval;
338         qp->s_head = next;
339
340         ret = 0;
341         goto bail;
342
343 bail_inval:
344         ret = -EINVAL;
345 bail:
346         spin_unlock_irqrestore(&qp->s_lock, flags);
347         return ret;
348 }
349
350 /**
351  * ipath_post_send - post a send on a QP
352  * @ibqp: the QP to post the send on
353  * @wr: the list of work requests to post
354  * @bad_wr: the first bad WR is put here
355  *
356  * This may be called from interrupt context.
357  */
358 static int ipath_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
359                            struct ib_send_wr **bad_wr)
360 {
361         struct ipath_qp *qp = to_iqp(ibqp);
362         int err = 0;
363
364         for (; wr; wr = wr->next) {
365                 err = ipath_post_one_send(qp, wr);
366                 if (err) {
367                         *bad_wr = wr;
368                         goto bail;
369                 }
370         }
371
372         /* Try to do the send work in the caller's context. */
373         ipath_do_send((unsigned long) qp);
374
375 bail:
376         return err;
377 }
378
379 /**
380  * ipath_post_receive - post a receive on a QP
381  * @ibqp: the QP to post the receive on
382  * @wr: the WR to post
383  * @bad_wr: the first bad WR is put here
384  *
385  * This may be called from interrupt context.
386  */
387 static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
388                               struct ib_recv_wr **bad_wr)
389 {
390         struct ipath_qp *qp = to_iqp(ibqp);
391         struct ipath_rwq *wq = qp->r_rq.wq;
392         unsigned long flags;
393         int ret;
394
395         /* Check that state is OK to post receive. */
396         if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) {
397                 *bad_wr = wr;
398                 ret = -EINVAL;
399                 goto bail;
400         }
401
402         for (; wr; wr = wr->next) {
403                 struct ipath_rwqe *wqe;
404                 u32 next;
405                 int i;
406
407                 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
408                         *bad_wr = wr;
409                         ret = -EINVAL;
410                         goto bail;
411                 }
412
413                 spin_lock_irqsave(&qp->r_rq.lock, flags);
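                /*
                 * The receive work queue is a circular buffer; it is full
                 * when advancing the head would catch up with the tail.
                 */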
414                 next = wq->head + 1;
415                 if (next >= qp->r_rq.size)
416                         next = 0;
417                 if (next == wq->tail) {
418                         spin_unlock_irqrestore(&qp->r_rq.lock, flags);
419                         *bad_wr = wr;
420                         ret = -ENOMEM;
421                         goto bail;
422                 }
423
424                 wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
425                 wqe->wr_id = wr->wr_id;
426                 wqe->num_sge = wr->num_sge;
427                 for (i = 0; i < wr->num_sge; i++)
428                         wqe->sg_list[i] = wr->sg_list[i];
429                 /* Make sure queue entry is written before the head index. */
430                 smp_wmb();
431                 wq->head = next;
432                 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
433         }
434         ret = 0;
435
436 bail:
437         return ret;
438 }
439
440 /**
441  * ipath_qp_rcv - process an incoming packet on a QP
442  * @dev: the device the packet came on
443  * @hdr: the packet header
444  * @has_grh: true if the packet has a GRH
445  * @data: the packet data
446  * @tlen: the packet length
447  * @qp: the QP the packet came on
448  *
449  * This is called from ipath_ib_rcv() to process an incoming packet
450  * for the given QP.
451  * Called at interrupt level.
452  */
453 static void ipath_qp_rcv(struct ipath_ibdev *dev,
454                          struct ipath_ib_header *hdr, int has_grh,
455                          void *data, u32 tlen, struct ipath_qp *qp)
456 {
457         /* Check for valid receive state. */
458         if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
459                 dev->n_pkt_drops++;
460                 return;
461         }
462
463         switch (qp->ibqp.qp_type) {
464         case IB_QPT_SMI:
465         case IB_QPT_GSI:
466                 if (ib_ipath_disable_sma)
467                         break;
468                 /* FALLTHROUGH */
469         case IB_QPT_UD:
470                 ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
471                 break;
472
473         case IB_QPT_RC:
474                 ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp);
475                 break;
476
477         case IB_QPT_UC:
478                 ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp);
479                 break;
480
481         default:
482                 break;
483         }
484 }
485
486 /**
487  * ipath_ib_rcv - process an incoming packet
488  * @dev: the device pointer
489  * @rhdr: the header of the packet
490  * @data: the packet data
491  * @tlen: the packet length
492  *
493  * This is called from ipath_kreceive() to process an incoming packet at
494  * interrupt level. Tlen is the length of the header + data + CRC in bytes.
495  */
496 void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
497                   u32 tlen)
498 {
499         struct ipath_ib_header *hdr = rhdr;
500         struct ipath_other_headers *ohdr;
501         struct ipath_qp *qp;
502         u32 qp_num;
503         int lnh;
504         u8 opcode;
505         u16 lid;
506
507         if (unlikely(dev == NULL))
508                 goto bail;
509
510         if (unlikely(tlen < 24)) {      /* LRH+BTH+CRC */
511                 dev->rcv_errors++;
512                 goto bail;
513         }
514
515         /* Check for a valid destination LID (see ch. 7.11.1). */
516         lid = be16_to_cpu(hdr->lrh[1]);
517         if (lid < IPATH_MULTICAST_LID_BASE) {
518                 lid &= ~((1 << dev->dd->ipath_lmc) - 1);
519                 if (unlikely(lid != dev->dd->ipath_lid)) {
520                         dev->rcv_errors++;
521                         goto bail;
522                 }
523         }
524
525         /* Check for GRH */
526         lnh = be16_to_cpu(hdr->lrh[0]) & 3;
527         if (lnh == IPATH_LRH_BTH)
528                 ohdr = &hdr->u.oth;
529         else if (lnh == IPATH_LRH_GRH)
530                 ohdr = &hdr->u.l.oth;
531         else {
532                 dev->rcv_errors++;
533                 goto bail;
534         }
535
536         opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
537         dev->opstats[opcode].n_bytes += tlen;
538         dev->opstats[opcode].n_packets++;
539
540         /* Get the destination QP number. */
541         qp_num = be32_to_cpu(ohdr->bth[1]) & IPATH_QPN_MASK;
542         if (qp_num == IPATH_MULTICAST_QPN) {
543                 struct ipath_mcast *mcast;
544                 struct ipath_mcast_qp *p;
545
546                 if (lnh != IPATH_LRH_GRH) {
547                         dev->n_pkt_drops++;
548                         goto bail;
549                 }
550                 mcast = ipath_mcast_find(&hdr->u.l.grh.dgid);
551                 if (mcast == NULL) {
552                         dev->n_pkt_drops++;
553                         goto bail;
554                 }
555                 dev->n_multicast_rcv++;
556                 list_for_each_entry_rcu(p, &mcast->qp_list, list)
557                         ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp);
558                 /*
559                  * Notify ipath_multicast_detach() if it is waiting for us
560                  * to finish.
561                  */
562                 if (atomic_dec_return(&mcast->refcount) <= 1)
563                         wake_up(&mcast->wait);
564         } else {
565                 qp = ipath_lookup_qpn(&dev->qp_table, qp_num);
566                 if (qp) {
567                         dev->n_unicast_rcv++;
568                         ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data,
569                                      tlen, qp);
570                         /*
571                          * Notify ipath_destroy_qp() if it is waiting
572                          * for us to finish.
573                          */
574                         if (atomic_dec_and_test(&qp->refcount))
575                                 wake_up(&qp->wait);
576                 } else
577                         dev->n_pkt_drops++;
578         }
579
580 bail:;
581 }
582
583 /**
584  * ipath_ib_timer - verbs timer
585  * @dev: the device pointer
586  *
587  * This is called from ipath_do_rcv_timer() at interrupt level to check for
588  * QPs which need retransmits and to collect performance numbers.
589  */
590 static void ipath_ib_timer(struct ipath_ibdev *dev)
591 {
592         struct ipath_qp *resend = NULL;
593         struct list_head *last;
594         struct ipath_qp *qp;
595         unsigned long flags;
596
597         if (dev == NULL)
598                 return;
599
600         spin_lock_irqsave(&dev->pending_lock, flags);
601         /* Start filling the next pending queue. */
602         if (++dev->pending_index >= ARRAY_SIZE(dev->pending))
603                 dev->pending_index = 0;
604         /* Save any requests still in the new queue; they have timed out. */
605         last = &dev->pending[dev->pending_index];
606         while (!list_empty(last)) {
607                 qp = list_entry(last->next, struct ipath_qp, timerwait);
608                 list_del_init(&qp->timerwait);
609                 qp->timer_next = resend;
610                 resend = qp;
611                 atomic_inc(&qp->refcount);
612         }
613         last = &dev->rnrwait;
614         if (!list_empty(last)) {
615                 qp = list_entry(last->next, struct ipath_qp, timerwait);
616                 if (--qp->s_rnr_timeout == 0) {
617                         do {
618                                 list_del_init(&qp->timerwait);
619                                 tasklet_hi_schedule(&qp->s_task);
620                                 if (list_empty(last))
621                                         break;
622                                 qp = list_entry(last->next, struct ipath_qp,
623                                                 timerwait);
624                         } while (qp->s_rnr_timeout == 0);
625                 }
626         }
627         /*
628          * We should only be in the started state if pma_sample_start != 0
629          */
630         if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
631             --dev->pma_sample_start == 0) {
632                 dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
633                 ipath_snapshot_counters(dev->dd, &dev->ipath_sword,
634                                         &dev->ipath_rword,
635                                         &dev->ipath_spkts,
636                                         &dev->ipath_rpkts,
637                                         &dev->ipath_xmit_wait);
638         }
639         if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
640                 if (dev->pma_sample_interval == 0) {
641                         u64 ta, tb, tc, td, te;
642
643                         dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
644                         ipath_snapshot_counters(dev->dd, &ta, &tb,
645                                                 &tc, &td, &te);
646
647                         dev->ipath_sword = ta - dev->ipath_sword;
648                         dev->ipath_rword = tb - dev->ipath_rword;
649                         dev->ipath_spkts = tc - dev->ipath_spkts;
650                         dev->ipath_rpkts = td - dev->ipath_rpkts;
651                         dev->ipath_xmit_wait = te - dev->ipath_xmit_wait;
652                 }
653                 else
654                         dev->pma_sample_interval--;
655         }
656         spin_unlock_irqrestore(&dev->pending_lock, flags);
657
658         /* XXX What if timer fires again while this is running? */
659         for (qp = resend; qp != NULL; qp = qp->timer_next) {
660                 struct ib_wc wc;
661
662                 spin_lock_irqsave(&qp->s_lock, flags);
663                 if (qp->s_last != qp->s_tail && qp->state == IB_QPS_RTS) {
664                         dev->n_timeouts++;
665                         ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
666                 }
667                 spin_unlock_irqrestore(&qp->s_lock, flags);
668
669                 /* Notify ipath_destroy_qp() if it is waiting. */
670                 if (atomic_dec_and_test(&qp->refcount))
671                         wake_up(&qp->wait);
672         }
673 }
674
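/*
 * Advance the SGE state by 'length' bytes, stepping to the next SGE or the
 * next memory region segment as needed.  This is the same walk as
 * ipath_copy_sge() without the data copy.
 */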
675 static void update_sge(struct ipath_sge_state *ss, u32 length)
676 {
677         struct ipath_sge *sge = &ss->sge;
678
679         sge->vaddr += length;
680         sge->length -= length;
681         sge->sge_length -= length;
682         if (sge->sge_length == 0) {
683                 if (--ss->num_sge)
684                         *sge = *ss->sg_list++;
685         } else if (sge->length == 0 && sge->mr != NULL) {
686                 if (++sge->n >= IPATH_SEGSZ) {
687                         if (++sge->m >= sge->mr->mapsz)
688                                 return;
689                         sge->n = 0;
690                 }
691                 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
692                 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
693         }
694 }
695
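/*
 * Helpers used by copy_io() to assemble 32-bit words from byte-unaligned
 * source data; the shift directions differ by host byte order so the bytes
 * land in the right lanes of the PIO word.
 */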
696 #ifdef __LITTLE_ENDIAN
697 static inline u32 get_upper_bits(u32 data, u32 shift)
698 {
699         return data >> shift;
700 }
701
702 static inline u32 set_upper_bits(u32 data, u32 shift)
703 {
704         return data << shift;
705 }
706
707 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
708 {
709         data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
710         data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
711         return data;
712 }
713 #else
714 static inline u32 get_upper_bits(u32 data, u32 shift)
715 {
716         return data << shift;
717 }
718
719 static inline u32 set_upper_bits(u32 data, u32 shift)
720 {
721         return data >> shift;
722 }
723
724 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
725 {
726         data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
727         data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
728         return data;
729 }
730 #endif
731
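/*
 * Copy an SGE payload to a PIO buffer one 32-bit word at a time, handling
 * byte-unaligned sources.  The final word is held back in 'last' and written
 * only after everything else, with write-combining flushes when flush_wc is
 * set, so the trigger word goes out last.
 */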
732 static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
733                     u32 length, unsigned flush_wc)
734 {
735         u32 extra = 0;
736         u32 data = 0;
737         u32 last;
738
739         while (1) {
740                 u32 len = ss->sge.length;
741                 u32 off;
742
743                 if (len > length)
744                         len = length;
745                 if (len > ss->sge.sge_length)
746                         len = ss->sge.sge_length;
747                 BUG_ON(len == 0);
748                 /* If the source address is not aligned, try to align it. */
749                 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
750                 if (off) {
751                         u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
752                                             ~(sizeof(u32) - 1));
753                         u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
754                         u32 y;
755
756                         y = sizeof(u32) - off;
757                         if (len > y)
758                                 len = y;
759                         if (len + extra >= sizeof(u32)) {
760                                 data |= set_upper_bits(v, extra *
761                                                        BITS_PER_BYTE);
762                                 len = sizeof(u32) - extra;
763                                 if (len == length) {
764                                         last = data;
765                                         break;
766                                 }
767                                 __raw_writel(data, piobuf);
768                                 piobuf++;
769                                 extra = 0;
770                                 data = 0;
771                         } else {
772                                 /* Clear unused upper bytes */
773                                 data |= clear_upper_bytes(v, len, extra);
774                                 if (len == length) {
775                                         last = data;
776                                         break;
777                                 }
778                                 extra += len;
779                         }
780                 } else if (extra) {
781                         /* Source address is aligned. */
782                         u32 *addr = (u32 *) ss->sge.vaddr;
783                         int shift = extra * BITS_PER_BYTE;
784                         int ushift = 32 - shift;
785                         u32 l = len;
786
787                         while (l >= sizeof(u32)) {
788                                 u32 v = *addr;
789
790                                 data |= set_upper_bits(v, shift);
791                                 __raw_writel(data, piobuf);
792                                 data = get_upper_bits(v, ushift);
793                                 piobuf++;
794                                 addr++;
795                                 l -= sizeof(u32);
796                         }
797                         /*
798                          * We still have 'l' bytes (less than a full dword) left over.
799                          */
800                         if (l) {
801                                 u32 v = *addr;
802
803                                 if (l + extra >= sizeof(u32)) {
804                                         data |= set_upper_bits(v, shift);
805                                         len -= l + extra - sizeof(u32);
806                                         if (len == length) {
807                                                 last = data;
808                                                 break;
809                                         }
810                                         __raw_writel(data, piobuf);
811                                         piobuf++;
812                                         extra = 0;
813                                         data = 0;
814                                 } else {
815                                         /* Clear unused upper bytes */
816                                         data |= clear_upper_bytes(v, l,
817                                                                   extra);
818                                         if (len == length) {
819                                                 last = data;
820                                                 break;
821                                         }
822                                         extra += l;
823                                 }
824                         } else if (len == length) {
825                                 last = data;
826                                 break;
827                         }
828                 } else if (len == length) {
829                         u32 w;
830
831                         /*
832                          * Need to round up for the last dword in the
833                          * packet.
834                          */
835                         w = (len + 3) >> 2;
836                         __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
837                         piobuf += w - 1;
838                         last = ((u32 *) ss->sge.vaddr)[w - 1];
839                         break;
840                 } else {
841                         u32 w = len >> 2;
842
843                         __iowrite32_copy(piobuf, ss->sge.vaddr, w);
844                         piobuf += w;
845
846                         extra = len & (sizeof(u32) - 1);
847                         if (extra) {
848                                 u32 v = ((u32 *) ss->sge.vaddr)[w];
849
850                                 /* Clear unused upper bytes */
851                                 data = clear_upper_bytes(v, extra, 0);
852                         }
853                 }
854                 update_sge(ss, len);
855                 length -= len;
856         }
857         /* Update address before sending packet. */
858         update_sge(ss, length);
859         if (flush_wc) {
860                 /* must flush everything before writing the trigger word */
861                 ipath_flush_wc();
862                 __raw_writel(last, piobuf);
863                 /* be sure trigger word is written */
864                 ipath_flush_wc();
865         } else
866                 __raw_writel(last, piobuf);
867 }
868
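/*
 * Write a packet to a PIO buffer: the PBC control qword first, then the
 * header, then the payload, ordering the writes with write-combining flushes
 * where the hardware requires the last (trigger) word to be written last.
 */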
869 static int ipath_verbs_send_pio(struct ipath_qp *qp, u32 *hdr, u32 hdrwords,
870                                 struct ipath_sge_state *ss, u32 len,
871                                 u32 plen, u32 dwords)
872 {
873         struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
874         u32 __iomem *piobuf;
875         unsigned flush_wc;
876         int ret;
877
878         piobuf = ipath_getpiobuf(dd, NULL);
879         if (unlikely(piobuf == NULL)) {
880                 ret = -EBUSY;
881                 goto bail;
882         }
883
884         /*
885          * Write the length to the PBC control qword, with no flags set.
886          * We have to flush after the PBC for correctness on some CPUs,
887          * or the WC buffer can be written out of order.
888          */
889         writeq(plen, piobuf);
890         piobuf += 2;
891
892         flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC;
893         if (len == 0) {
894                 /*
895                  * If there is just the header portion, must flush before
896                  * writing last word of header for correctness, and after
897                  * the last header word (trigger word).
898                  */
899                 if (flush_wc) {
900                         ipath_flush_wc();
901                         __iowrite32_copy(piobuf, hdr, hdrwords - 1);
902                         ipath_flush_wc();
903                         __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
904                         ipath_flush_wc();
905                 } else
906                         __iowrite32_copy(piobuf, hdr, hdrwords);
907                 goto done;
908         }
909
910         if (flush_wc)
911                 ipath_flush_wc();
912         __iowrite32_copy(piobuf, hdr, hdrwords);
913         piobuf += hdrwords;
914
915         /* The common case is aligned and contained in one segment. */
916         if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
917                    !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
918                 u32 *addr = (u32 *) ss->sge.vaddr;
919
920                 /* Update address before sending packet. */
921                 update_sge(ss, len);
922                 if (flush_wc) {
923                         __iowrite32_copy(piobuf, addr, dwords - 1);
924                         /* must flush everything before writing the trigger word */
925                         ipath_flush_wc();
926                         __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
927                         /* be sure trigger word is written */
928                         ipath_flush_wc();
929                 } else
930                         __iowrite32_copy(piobuf, addr, dwords);
931                 goto done;
932         }
933         copy_io(piobuf, ss, len, flush_wc);
934 done:
935         if (qp->s_wqe)
936                 ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
937         ret = 0;
938 bail:
939         return ret;
940 }
941
942 /**
943  * ipath_verbs_send - send a packet
944  * @qp: the QP to send on
945  * @hdr: the packet header
946  * @hdrwords: the number of 32-bit words in the header
947  * @ss: the SGE to send
948  * @len: the length of the packet in bytes
949  */
950 int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
951                      u32 hdrwords, struct ipath_sge_state *ss, u32 len)
952 {
953         struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
954         u32 plen;
955         int ret;
956         u32 dwords = (len + 3) >> 2;
957
958         /*
959          * Calculate the send buffer trigger address.
960          * The +1 accounts for the PBC control dword that follows the PBC length.
961          */
962         plen = hdrwords + dwords + 1;
963
964         /* Drop non-VL15 packets if we are not in the active state */
965         if (!(dd->ipath_flags & IPATH_LINKACTIVE) &&
966             qp->ibqp.qp_type != IB_QPT_SMI) {
967                 if (qp->s_wqe)
968                         ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
969                 ret = 0;
970         } else
971                 ret = ipath_verbs_send_pio(qp, (u32 *) hdr, hdrwords,
972                                            ss, len, plen, dwords);
973
974         return ret;
975 }
976
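/*
 * Snapshot the raw send/receive word and packet counters and the transmit
 * wait (send stall) counter; used by the verbs timer for PMA sampling.
 */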
977 int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
978                             u64 *rwords, u64 *spkts, u64 *rpkts,
979                             u64 *xmit_wait)
980 {
981         int ret;
982
983         if (!(dd->ipath_flags & IPATH_INITTED)) {
984                 /* no hardware, freeze, etc. */
985                 ret = -EINVAL;
986                 goto bail;
987         }
988         *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
989         *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
990         *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
991         *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
992         *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
993
994         ret = 0;
995
996 bail:
997         return ret;
998 }
999
1000 /**
1001  * ipath_get_counters - get various chip counters
1002  * @dd: the infinipath device
1003  * @cntrs: counters are placed here
1004  *
1005  * Return the counters needed by recv_pma_get_portcounters().
1006  */
1007 int ipath_get_counters(struct ipath_devdata *dd,
1008                        struct ipath_verbs_counters *cntrs)
1009 {
1010         struct ipath_cregs const *crp = dd->ipath_cregs;
1011         int ret;
1012
1013         if (!(dd->ipath_flags & IPATH_INITTED)) {
1014                 /* no hardware, freeze, etc. */
1015                 ret = -EINVAL;
1016                 goto bail;
1017         }
1018         cntrs->symbol_error_counter =
1019                 ipath_snap_cntr(dd, crp->cr_ibsymbolerrcnt);
1020         cntrs->link_error_recovery_counter =
1021                 ipath_snap_cntr(dd, crp->cr_iblinkerrrecovcnt);
1022         /*
1023          * The link downed counter counts when the other side downs the
1024          * connection.  We add in the number of times we downed the link
1025          * due to local link integrity errors to compensate.
1026          */
1027         cntrs->link_downed_counter =
1028                 ipath_snap_cntr(dd, crp->cr_iblinkdowncnt);
1029         cntrs->port_rcv_errors =
1030                 ipath_snap_cntr(dd, crp->cr_rxdroppktcnt) +
1031                 ipath_snap_cntr(dd, crp->cr_rcvovflcnt) +
1032                 ipath_snap_cntr(dd, crp->cr_portovflcnt) +
1033                 ipath_snap_cntr(dd, crp->cr_err_rlencnt) +
1034                 ipath_snap_cntr(dd, crp->cr_invalidrlencnt) +
1035                 ipath_snap_cntr(dd, crp->cr_errlinkcnt) +
1036                 ipath_snap_cntr(dd, crp->cr_erricrccnt) +
1037                 ipath_snap_cntr(dd, crp->cr_errvcrccnt) +
1038                 ipath_snap_cntr(dd, crp->cr_errlpcrccnt) +
1039                 ipath_snap_cntr(dd, crp->cr_badformatcnt) +
1040                 dd->ipath_rxfc_unsupvl_errs;
1041         cntrs->port_rcv_remphys_errors =
1042                 ipath_snap_cntr(dd, crp->cr_rcvebpcnt);
1043         cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt);
1044         cntrs->port_xmit_data = ipath_snap_cntr(dd, crp->cr_wordsendcnt);
1045         cntrs->port_rcv_data = ipath_snap_cntr(dd, crp->cr_wordrcvcnt);
1046         cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt);
1047         cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt);
1048         cntrs->local_link_integrity_errors =
1049                 (dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
1050                 dd->ipath_lli_errs : dd->ipath_lli_errors;
1051         cntrs->excessive_buffer_overrun_errors = dd->ipath_overrun_thresh_errs;
1052
1053         ret = 0;
1054
1055 bail:
1056         return ret;
1057 }
1058
1059 /**
1060  * ipath_ib_piobufavail - callback when a PIO buffer is available
1061  * @dev: the device pointer
1062  *
1063  * This is called from ipath_intr() at interrupt level when a PIO buffer is
1064  * available after ipath_verbs_send() returned an error that no buffers were
1065  * available.  Return 1 if we consumed all the PIO buffers and we still have
1066  * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and
1067  * return zero).
1068  */
1069 int ipath_ib_piobufavail(struct ipath_ibdev *dev)
1070 {
1071         struct ipath_qp *qp;
1072         unsigned long flags;
1073
1074         if (dev == NULL)
1075                 goto bail;
1076
1077         spin_lock_irqsave(&dev->pending_lock, flags);
1078         while (!list_empty(&dev->piowait)) {
1079                 qp = list_entry(dev->piowait.next, struct ipath_qp,
1080                                 piowait);
1081                 list_del_init(&qp->piowait);
1082                 clear_bit(IPATH_S_BUSY, &qp->s_busy);
1083                 tasklet_hi_schedule(&qp->s_task);
1084         }
1085         spin_unlock_irqrestore(&dev->pending_lock, flags);
1086
1087 bail:
1088         return 0;
1089 }
1090
1091 static int ipath_query_device(struct ib_device *ibdev,
1092                               struct ib_device_attr *props)
1093 {
1094         struct ipath_ibdev *dev = to_idev(ibdev);
1095
1096         memset(props, 0, sizeof(*props));
1097
1098         props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
1099                 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
1100                 IB_DEVICE_SYS_IMAGE_GUID;
1101         props->page_size_cap = PAGE_SIZE;
1102         props->vendor_id = dev->dd->ipath_vendorid;
1103         props->vendor_part_id = dev->dd->ipath_deviceid;
1104         props->hw_ver = dev->dd->ipath_pcirev;
1105
1106         props->sys_image_guid = dev->sys_image_guid;
1107
1108         props->max_mr_size = ~0ull;
1109         props->max_qp = ib_ipath_max_qps;
1110         props->max_qp_wr = ib_ipath_max_qp_wrs;
1111         props->max_sge = ib_ipath_max_sges;
1112         props->max_cq = ib_ipath_max_cqs;
1113         props->max_ah = ib_ipath_max_ahs;
1114         props->max_cqe = ib_ipath_max_cqes;
1115         props->max_mr = dev->lk_table.max;
1116         props->max_fmr = dev->lk_table.max;
1117         props->max_map_per_fmr = 32767;
1118         props->max_pd = ib_ipath_max_pds;
1119         props->max_qp_rd_atom = IPATH_MAX_RDMA_ATOMIC;
1120         props->max_qp_init_rd_atom = 255;
1121         /* props->max_res_rd_atom */
1122         props->max_srq = ib_ipath_max_srqs;
1123         props->max_srq_wr = ib_ipath_max_srq_wrs;
1124         props->max_srq_sge = ib_ipath_max_srq_sges;
1125         /* props->local_ca_ack_delay */
1126         props->atomic_cap = IB_ATOMIC_GLOB;
1127         props->max_pkeys = ipath_get_npkeys(dev->dd);
1128         props->max_mcast_grp = ib_ipath_max_mcast_grps;
1129         props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
1130         props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
1131                 props->max_mcast_grp;
1132
1133         return 0;
1134 }
1135
1136 const u8 ipath_cvt_physportstate[32] = {
1137         [INFINIPATH_IBCS_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
1138         [INFINIPATH_IBCS_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
1139         [INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
1140         [INFINIPATH_IBCS_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
1141         [INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
1142         [INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
1143         [INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] =
1144                 IB_PHYSPORTSTATE_CFG_TRAIN,
1145         [INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] =
1146                 IB_PHYSPORTSTATE_CFG_TRAIN,
1147         [INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] =
1148                 IB_PHYSPORTSTATE_CFG_TRAIN,
1149         [INFINIPATH_IBCS_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
1150         [INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] =
1151                 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
1152         [INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] =
1153                 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
1154         [INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] =
1155                 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
1156         [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
1157         [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
1158         [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
1159         [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
1160         [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
1161         [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
1162         [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
1163         [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
1164 };
1165
1166 u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
1167 {
1168         return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
1169 }
1170
1171 static int ipath_query_port(struct ib_device *ibdev,
1172                             u8 port, struct ib_port_attr *props)
1173 {
1174         struct ipath_ibdev *dev = to_idev(ibdev);
1175         struct ipath_devdata *dd = dev->dd;
1176         enum ib_mtu mtu;
1177         u16 lid = dd->ipath_lid;
1178         u64 ibcstat;
1179
1180         memset(props, 0, sizeof(*props));
1181         props->lid = lid ? lid : __constant_be16_to_cpu(IB_LID_PERMISSIVE);
1182         props->lmc = dd->ipath_lmc;
1183         props->sm_lid = dev->sm_lid;
1184         props->sm_sl = dev->sm_sl;
1185         ibcstat = dd->ipath_lastibcstat;
1186         props->state = ((ibcstat >> 4) & 0x3) + 1;
1187         /* See phys_state_show() */
1188         props->phys_state = /* MEA: assumes shift == 0 */
1189                 ipath_cvt_physportstate[dd->ipath_lastibcstat &
1190                 dd->ibcs_lts_mask];
1191         props->port_cap_flags = dev->port_cap_flags;
1192         props->gid_tbl_len = 1;
1193         props->max_msg_sz = 0x80000000;
1194         props->pkey_tbl_len = ipath_get_npkeys(dd);
1195         props->bad_pkey_cntr = ipath_get_cr_errpkey(dd) -
1196                 dev->z_pkey_violations;
1197         props->qkey_viol_cntr = dev->qkey_violations;
1198         props->active_width = IB_WIDTH_4X;
1199         /* See rate_show() */
1200         props->active_speed = 1;        /* SDR: 2.5 Gb/s per lane */
1201         props->max_vl_num = 1;          /* VLCap = VL0 */
1202         props->init_type_reply = 0;
1203
1204         /*
1205          * Note: the chip supports a maximum MTU of 4096, but the driver
1206          * hasn't implemented this feature yet, so set the maximum value
1207          * to 2048.
1208          */
1209         props->max_mtu = IB_MTU_2048;
1210         switch (dd->ipath_ibmtu) {
1211         case 4096:
1212                 mtu = IB_MTU_4096;
1213                 break;
1214         case 2048:
1215                 mtu = IB_MTU_2048;
1216                 break;
1217         case 1024:
1218                 mtu = IB_MTU_1024;
1219                 break;
1220         case 512:
1221                 mtu = IB_MTU_512;
1222                 break;
1223         case 256:
1224                 mtu = IB_MTU_256;
1225                 break;
1226         default:
1227                 mtu = IB_MTU_2048;
1228         }
1229         props->active_mtu = mtu;
1230         props->subnet_timeout = dev->subnet_timeout;
1231
1232         return 0;
1233 }
1234
1235 static int ipath_modify_device(struct ib_device *device,
1236                                int device_modify_mask,
1237                                struct ib_device_modify *device_modify)
1238 {
1239         int ret;
1240
1241         if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
1242                                    IB_DEVICE_MODIFY_NODE_DESC)) {
1243                 ret = -EOPNOTSUPP;
1244                 goto bail;
1245         }
1246
1247         if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC)
1248                 memcpy(device->node_desc, device_modify->node_desc, 64);
1249
1250         if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
1251                 to_idev(device)->sys_image_guid =
1252                         cpu_to_be64(device_modify->sys_image_guid);
1253
1254         ret = 0;
1255
1256 bail:
1257         return ret;
1258 }
1259
1260 static int ipath_modify_port(struct ib_device *ibdev,
1261                              u8 port, int port_modify_mask,
1262                              struct ib_port_modify *props)
1263 {
1264         struct ipath_ibdev *dev = to_idev(ibdev);
1265
1266         dev->port_cap_flags |= props->set_port_cap_mask;
1267         dev->port_cap_flags &= ~props->clr_port_cap_mask;
1268         if (port_modify_mask & IB_PORT_SHUTDOWN)
1269                 ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
1270         if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
1271                 dev->qkey_violations = 0;
1272         return 0;
1273 }
1274
1275 static int ipath_query_gid(struct ib_device *ibdev, u8 port,
1276                            int index, union ib_gid *gid)
1277 {
1278         struct ipath_ibdev *dev = to_idev(ibdev);
1279         int ret;
1280
1281         if (index >= 1) {
1282                 ret = -EINVAL;
1283                 goto bail;
1284         }
1285         gid->global.subnet_prefix = dev->gid_prefix;
1286         gid->global.interface_id = dev->dd->ipath_guid;
1287
1288         ret = 0;
1289
1290 bail:
1291         return ret;
1292 }
1293
1294 static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
1295                                     struct ib_ucontext *context,
1296                                     struct ib_udata *udata)
1297 {
1298         struct ipath_ibdev *dev = to_idev(ibdev);
1299         struct ipath_pd *pd;
1300         struct ib_pd *ret;
1301
1302         /*
1303          * This is actually totally arbitrary.  Some correctness tests
1304          * assume there's a maximum number of PDs that can be allocated.
1305          * We don't actually have this limit, but we fail the test if
1306          * we allow allocations of more than we report for this value.
1307          */
1308
1309         pd = kmalloc(sizeof *pd, GFP_KERNEL);
1310         if (!pd) {
1311                 ret = ERR_PTR(-ENOMEM);
1312                 goto bail;
1313         }
1314
1315         spin_lock(&dev->n_pds_lock);
1316         if (dev->n_pds_allocated == ib_ipath_max_pds) {
1317                 spin_unlock(&dev->n_pds_lock);
1318                 kfree(pd);
1319                 ret = ERR_PTR(-ENOMEM);
1320                 goto bail;
1321         }
1322
1323         dev->n_pds_allocated++;
1324         spin_unlock(&dev->n_pds_lock);
1325
1326         /* ib_alloc_pd() will initialize pd->ibpd. */
1327         pd->user = udata != NULL;
1328
1329         ret = &pd->ibpd;
1330
1331 bail:
1332         return ret;
1333 }
1334
1335 static int ipath_dealloc_pd(struct ib_pd *ibpd)
1336 {
1337         struct ipath_pd *pd = to_ipd(ibpd);
1338         struct ipath_ibdev *dev = to_idev(ibpd->device);
1339
1340         spin_lock(&dev->n_pds_lock);
1341         dev->n_pds_allocated--;
1342         spin_unlock(&dev->n_pds_lock);
1343
1344         kfree(pd);
1345
1346         return 0;
1347 }
1348
1349 /**
1350  * ipath_create_ah - create an address handle
1351  * @pd: the protection domain
1352  * @ah_attr: the attributes of the AH
1353  *
1354  * This may be called from interrupt context.
1355  */
1356 static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
1357                                      struct ib_ah_attr *ah_attr)
1358 {
1359         struct ipath_ah *ah;
1360         struct ib_ah *ret;
1361         struct ipath_ibdev *dev = to_idev(pd->device);
1362         unsigned long flags;
1363
1364         /* A multicast address requires a GRH (see ch. 8.4.1). */
1365         if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
1366             ah_attr->dlid != IPATH_PERMISSIVE_LID &&
1367             !(ah_attr->ah_flags & IB_AH_GRH)) {
1368                 ret = ERR_PTR(-EINVAL);
1369                 goto bail;
1370         }
1371
1372         if (ah_attr->dlid == 0) {
1373                 ret = ERR_PTR(-EINVAL);
1374                 goto bail;
1375         }
1376
1377         if (ah_attr->port_num < 1 ||
1378             ah_attr->port_num > pd->device->phys_port_cnt) {
1379                 ret = ERR_PTR(-EINVAL);
1380                 goto bail;
1381         }
1382
1383         ah = kmalloc(sizeof *ah, GFP_ATOMIC);
1384         if (!ah) {
1385                 ret = ERR_PTR(-ENOMEM);
1386                 goto bail;
1387         }
1388
1389         spin_lock_irqsave(&dev->n_ahs_lock, flags);
1390         if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
1391                 spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1392                 kfree(ah);
1393                 ret = ERR_PTR(-ENOMEM);
1394                 goto bail;
1395         }
1396
1397         dev->n_ahs_allocated++;
1398         spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1399
1400         /* ib_create_ah() will initialize ah->ibah. */
1401         ah->attr = *ah_attr;
1402
1403         ret = &ah->ibah;
1404
1405 bail:
1406         return ret;
1407 }
1408
1409 /**
1410  * ipath_destroy_ah - destroy an address handle
1411  * @ibah: the AH to destroy
1412  *
1413  * This may be called from interrupt context.
1414  */
1415 static int ipath_destroy_ah(struct ib_ah *ibah)
1416 {
1417         struct ipath_ibdev *dev = to_idev(ibah->device);
1418         struct ipath_ah *ah = to_iah(ibah);
1419         unsigned long flags;
1420
1421         spin_lock_irqsave(&dev->n_ahs_lock, flags);
1422         dev->n_ahs_allocated--;
1423         spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1424
1425         kfree(ah);
1426
1427         return 0;
1428 }
1429
1430 static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
1431 {
1432         struct ipath_ah *ah = to_iah(ibah);
1433
1434         *ah_attr = ah->attr;
1435
1436         return 0;
1437 }
1438
1439 /**
1440  * ipath_get_npkeys - return the size of the PKEY table for port 0
1441  * @dd: the infinipath device
1442  */
1443 unsigned ipath_get_npkeys(struct ipath_devdata *dd)
1444 {
1445         return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
1446 }
1447
1448 /**
1449  * ipath_get_pkey - return the indexed PKEY from the port 0 PKEY table
1450  * @dd: the infinipath device
1451  * @index: the PKEY index
1452  */
1453 unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
1454 {
1455         unsigned ret;
1456
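             /* An out-of-range index reads as 0, which is never a valid PKEY. */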
1457         if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
1458                 ret = 0;
1459         else
1460                 ret = dd->ipath_pd[0]->port_pkeys[index];
1461
1462         return ret;
1463 }
1464
1465 static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
1466                             u16 *pkey)
1467 {
1468         struct ipath_ibdev *dev = to_idev(ibdev);
1469         int ret;
1470
1471         if (index >= ipath_get_npkeys(dev->dd)) {
1472                 ret = -EINVAL;
1473                 goto bail;
1474         }
1475
1476         *pkey = ipath_get_pkey(dev->dd, index);
1477         ret = 0;
1478
1479 bail:
1480         return ret;
1481 }
1482
1483 /**
1484  * ipath_alloc_ucontext - allocate a ucontext
1485  * @ibdev: the infiniband device
1486  * @udata: not used by the InfiniPath driver
1487  */
1489 static struct ib_ucontext *ipath_alloc_ucontext(struct ib_device *ibdev,
1490                                                 struct ib_udata *udata)
1491 {
1492         struct ipath_ucontext *context;
1493         struct ib_ucontext *ret;
1494
1495         context = kmalloc(sizeof *context, GFP_KERNEL);
1496         if (!context) {
1497                 ret = ERR_PTR(-ENOMEM);
1498                 goto bail;
1499         }
1500
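             /* As with the PD and AH above, the caller initializes context->ibucontext. */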
1501         ret = &context->ibucontext;
1502
1503 bail:
1504         return ret;
1505 }
1506
1507 static int ipath_dealloc_ucontext(struct ib_ucontext *context)
1508 {
1509         kfree(to_iucontext(context));
1510         return 0;
1511 }
1512
1513 static int ipath_verbs_register_sysfs(struct ib_device *dev);
1514
1515 static void __verbs_timer(unsigned long arg)
1516 {
1517         struct ipath_devdata *dd = (struct ipath_devdata *) arg;
1518
1519         /* Handle verbs layer timeouts. */
1520         ipath_ib_timer(dd->verbs_dev);
1521
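             /* Re-arm so we run again on the next jiffy. */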
1522         mod_timer(&dd->verbs_timer, jiffies + 1);
1523 }
1524
1525 static int enable_timer(struct ipath_devdata *dd)
1526 {
1527         /*
1528          * Early chips had a design flaw where the chip's and the kernel's
1529          * idea of the tail register don't always agree, so we won't get
1530          * an interrupt on the next packet received.
1531          * If the board supports per-packet receive interrupts, use them.
1532          * Otherwise, the timer function periodically checks for packets
1533          * to cover this case.
1534          * Either way, the timer is needed for verbs-layer related
1535          * processing.
1536          */
1537         if (dd->ipath_flags & IPATH_GPIO_INTR) {
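                     /*
                      * Chip-specific (opaque) debug port mux value, used with
                      * the GPIO bit 2 receive interrupt enabled below.
                      */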
1538                 ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
1539                                  0x2074076542310ULL);
1540                 /* Enable GPIO bit 2 interrupt */
1541                 dd->ipath_gpio_mask |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
1542                 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
1543                                  dd->ipath_gpio_mask);
1544         }
1545
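             /* The timer runs in either case; see the comment above. */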
1546         init_timer(&dd->verbs_timer);
1547         dd->verbs_timer.function = __verbs_timer;
1548         dd->verbs_timer.data = (unsigned long)dd;
1549         dd->verbs_timer.expires = jiffies + 1;
1550         add_timer(&dd->verbs_timer);
1551
1552         return 0;
1553 }
1554
1555 static int disable_timer(struct ipath_devdata *dd)
1556 {
1557         /* Undo the GPIO receive-interrupt setup done in enable_timer(). */
1558         if (dd->ipath_flags & IPATH_GPIO_INTR) {
1559                 /* Disable GPIO bit 2 interrupt */
1560                 dd->ipath_gpio_mask &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
1561                 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
1562                                  dd->ipath_gpio_mask);
1563                 /*
1564                  * We might want to undo changes to debugportselect,
1565                  * but how?
1566                  */
1567         }
1568
1569         del_timer_sync(&dd->verbs_timer);
1570
1571         return 0;
1572 }
1573
1574 /**
1575  * ipath_register_ib_device - register our device with the infiniband core
1576  * @dd: the device data structure
1577  * Return 0 on success or a negative errno.  On success, dd->verbs_dev
      * points to the allocated ipath_ibdev; on error it is set to NULL.
1578  */
1579 int ipath_register_ib_device(struct ipath_devdata *dd)
1580 {
1581         struct ipath_verbs_counters cntrs;
1582         struct ipath_ibdev *idev;
1583         struct ib_device *dev;
1584         int ret;
1585
1586         idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
1587         if (idev == NULL) {
1588                 ret = -ENOMEM;
1589                 goto bail;
1590         }
1591
1592         dev = &idev->ibdev;
1593
1594         /* Only need to initialize non-zero fields. */
1595         spin_lock_init(&idev->n_pds_lock);
1596         spin_lock_init(&idev->n_ahs_lock);
1597         spin_lock_init(&idev->n_cqs_lock);
1598         spin_lock_init(&idev->n_qps_lock);
1599         spin_lock_init(&idev->n_srqs_lock);
1600         spin_lock_init(&idev->n_mcast_grps_lock);
1601
1602         spin_lock_init(&idev->qp_table.lock);
1603         spin_lock_init(&idev->lk_table.lock);
1604         idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
1605         /* Set the prefix to the default value (see ch. 4.1.1) */
1606         idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL);
1607
1608         ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
1609         if (ret)
1610                 goto err_qp;
1611
1612         /*
1613          * The top ib_ipath_lkey_table_size bits are used to index the
1614          * table.  The lower 8 bits can be owned by the user (copied from
1615          * the LKEY).  The remaining bits act as a generation number or tag.
1616          */
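             /*
              * For example, with the default lkey_table_size of 12, a 32-bit
              * LKEY splits into bits [31:20] (table index), [19:8] (generation
              * tag) and [7:0] (user-owned).
              */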
1617         idev->lk_table.max = 1 << ib_ipath_lkey_table_size;
1618         idev->lk_table.table = kzalloc(idev->lk_table.max *
1619                                        sizeof(*idev->lk_table.table),
1620                                        GFP_KERNEL);
1621         if (idev->lk_table.table == NULL) {
1622                 ret = -ENOMEM;
1623                 goto err_lk;
1624         }
1625         INIT_LIST_HEAD(&idev->pending_mmaps);
1626         spin_lock_init(&idev->pending_lock);
1627         idev->mmap_offset = PAGE_SIZE;
1628         spin_lock_init(&idev->mmap_offset_lock);
1629         INIT_LIST_HEAD(&idev->pending[0]);
1630         INIT_LIST_HEAD(&idev->pending[1]);
1631         INIT_LIST_HEAD(&idev->pending[2]);
1632         INIT_LIST_HEAD(&idev->piowait);
1633         INIT_LIST_HEAD(&idev->rnrwait);
1634         idev->pending_index = 0;
1635         idev->port_cap_flags =
1636                 IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
1637         idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
1638         idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1639         idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1640         idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1641         idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
1642         idev->link_width_enabled = 3;   /* 1x or 4x */
1643
1644         /* Snapshot current HW counters to "clear" them. */
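             /*
              * The z_* fields hold this baseline; later counter queries
              * subtract it so the counters appear to start from zero.
              */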
1645         ipath_get_counters(dd, &cntrs);
1646         idev->z_symbol_error_counter = cntrs.symbol_error_counter;
1647         idev->z_link_error_recovery_counter =
1648                 cntrs.link_error_recovery_counter;
1649         idev->z_link_downed_counter = cntrs.link_downed_counter;
1650         idev->z_port_rcv_errors = cntrs.port_rcv_errors;
1651         idev->z_port_rcv_remphys_errors =
1652                 cntrs.port_rcv_remphys_errors;
1653         idev->z_port_xmit_discards = cntrs.port_xmit_discards;
1654         idev->z_port_xmit_data = cntrs.port_xmit_data;
1655         idev->z_port_rcv_data = cntrs.port_rcv_data;
1656         idev->z_port_xmit_packets = cntrs.port_xmit_packets;
1657         idev->z_port_rcv_packets = cntrs.port_rcv_packets;
1658         idev->z_local_link_integrity_errors =
1659                 cntrs.local_link_integrity_errors;
1660         idev->z_excessive_buffer_overrun_errors =
1661                 cntrs.excessive_buffer_overrun_errors;
1662         idev->z_vl15_dropped = cntrs.vl15_dropped;
1663
1664         /*
1665          * The system image GUID is supposed to be the same for all
1666          * IB HCAs in a single system but since there can be other
1667          * device types in the system, we can't be sure this is unique.
1668          */
1669         if (!sys_image_guid)
1670                 sys_image_guid = dd->ipath_guid;
1671         idev->sys_image_guid = sys_image_guid;
1672         idev->ib_unit = dd->ipath_unit;
1673         idev->dd = dd;
1674
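             /* ib_register_device() replaces the "%d" with the next free index. */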
1675         strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
1676         dev->owner = THIS_MODULE;
1677         dev->node_guid = dd->ipath_guid;
1678         dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
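             /* Userspace verbs commands this driver implements. */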
1679         dev->uverbs_cmd_mask =
1680                 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
1681                 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
1682                 (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
1683                 (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
1684                 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
1685                 (1ull << IB_USER_VERBS_CMD_CREATE_AH)           |
1686                 (1ull << IB_USER_VERBS_CMD_DESTROY_AH)          |
1687                 (1ull << IB_USER_VERBS_CMD_QUERY_AH)            |
1688                 (1ull << IB_USER_VERBS_CMD_REG_MR)              |
1689                 (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
1690                 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
1691                 (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
1692                 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
1693                 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
1694                 (1ull << IB_USER_VERBS_CMD_POLL_CQ)             |
1695                 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)       |
1696                 (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
1697                 (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
1698                 (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
1699                 (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
1700                 (1ull << IB_USER_VERBS_CMD_POST_SEND)           |
1701                 (1ull << IB_USER_VERBS_CMD_POST_RECV)           |
1702                 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
1703                 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
1704                 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
1705                 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
1706                 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
1707                 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
1708                 (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
1709         dev->node_type = RDMA_NODE_IB_CA;
1710         dev->phys_port_cnt = 1;
1711         dev->num_comp_vectors = 1;
1712         dev->dma_device = &dd->pcidev->dev;
1713         dev->query_device = ipath_query_device;
1714         dev->modify_device = ipath_modify_device;
1715         dev->query_port = ipath_query_port;
1716         dev->modify_port = ipath_modify_port;
1717         dev->query_pkey = ipath_query_pkey;
1718         dev->query_gid = ipath_query_gid;
1719         dev->alloc_ucontext = ipath_alloc_ucontext;
1720         dev->dealloc_ucontext = ipath_dealloc_ucontext;
1721         dev->alloc_pd = ipath_alloc_pd;
1722         dev->dealloc_pd = ipath_dealloc_pd;
1723         dev->create_ah = ipath_create_ah;
1724         dev->destroy_ah = ipath_destroy_ah;
1725         dev->query_ah = ipath_query_ah;
1726         dev->create_srq = ipath_create_srq;
1727         dev->modify_srq = ipath_modify_srq;
1728         dev->query_srq = ipath_query_srq;
1729         dev->destroy_srq = ipath_destroy_srq;
1730         dev->create_qp = ipath_create_qp;
1731         dev->modify_qp = ipath_modify_qp;
1732         dev->query_qp = ipath_query_qp;
1733         dev->destroy_qp = ipath_destroy_qp;
1734         dev->post_send = ipath_post_send;
1735         dev->post_recv = ipath_post_receive;
1736         dev->post_srq_recv = ipath_post_srq_receive;
1737         dev->create_cq = ipath_create_cq;
1738         dev->destroy_cq = ipath_destroy_cq;
1739         dev->resize_cq = ipath_resize_cq;
1740         dev->poll_cq = ipath_poll_cq;
1741         dev->req_notify_cq = ipath_req_notify_cq;
1742         dev->get_dma_mr = ipath_get_dma_mr;
1743         dev->reg_phys_mr = ipath_reg_phys_mr;
1744         dev->reg_user_mr = ipath_reg_user_mr;
1745         dev->dereg_mr = ipath_dereg_mr;
1746         dev->alloc_fmr = ipath_alloc_fmr;
1747         dev->map_phys_fmr = ipath_map_phys_fmr;
1748         dev->unmap_fmr = ipath_unmap_fmr;
1749         dev->dealloc_fmr = ipath_dealloc_fmr;
1750         dev->attach_mcast = ipath_multicast_attach;
1751         dev->detach_mcast = ipath_multicast_detach;
1752         dev->process_mad = ipath_process_mad;
1753         dev->mmap = ipath_mmap;
1754         dev->dma_ops = &ipath_dma_mapping_ops;
1755
1756         snprintf(dev->node_desc, sizeof(dev->node_desc),
1757                  IPATH_IDSTR " %s", init_utsname()->nodename);
1758
1759         ret = ib_register_device(dev);
1760         if (ret)
1761                 goto err_reg;
1762
1763         ret = ipath_verbs_register_sysfs(dev);
1764         if (ret)
                     goto err_class;
1765
1766         enable_timer(dd);
1767
1768         goto bail;
1769
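             /* Unwind in the reverse order of the setup above. */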
1770 err_class:
1771         ib_unregister_device(dev);
1772 err_reg:
1773         kfree(idev->lk_table.table);
1774 err_lk:
1775         kfree(idev->qp_table.table);
1776 err_qp:
1777         ib_dealloc_device(dev);
1778         ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
1779         idev = NULL;
1780
1781 bail:
1782         dd->verbs_dev = idev;
1783         return ret;
1784 }
1785
1786 void ipath_unregister_ib_device(struct ipath_ibdev *dev)
1787 {
1788         struct ib_device *ibdev = &dev->ibdev;
1789
1790         disable_timer(dev->dd);
1791
1792         ib_unregister_device(ibdev);
1793
1794         if (!list_empty(&dev->pending[0]) ||
1795             !list_empty(&dev->pending[1]) ||
1796             !list_empty(&dev->pending[2]))
1797                 ipath_dev_err(dev->dd, "pending list not empty!\n");
1798         if (!list_empty(&dev->piowait))
1799                 ipath_dev_err(dev->dd, "piowait list not empty!\n");
1800         if (!list_empty(&dev->rnrwait))
1801                 ipath_dev_err(dev->dd, "rnrwait list not empty!\n");
1802         if (!ipath_mcast_tree_empty())
1803                 ipath_dev_err(dev->dd, "multicast table memory leak!\n");
1804         /*
1805          * Note that ipath_unregister_ib_device() can be called before all
1806          * the QPs are destroyed!
1807          */
1808         ipath_free_all_qps(&dev->qp_table);
1809         kfree(dev->qp_table.table);
1810         kfree(dev->lk_table.table);
1811         ib_dealloc_device(ibdev);
1812 }
1813
1814 static ssize_t show_rev(struct class_device *cdev, char *buf)
1815 {
1816         struct ipath_ibdev *dev =
1817                 container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
1818
1819         return sprintf(buf, "%x\n", dev->dd->ipath_pcirev);
1820 }
1821
1822 static ssize_t show_hca(struct class_device *cdev, char *buf)
1823 {
1824         struct ipath_ibdev *dev =
1825                 container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
1826         int ret;
1827
1828         ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128);
1829         if (ret < 0)
1830                 goto bail;
1831         strcat(buf, "\n");
1832         ret = strlen(buf);
1833
1834 bail:
1835         return ret;
1836 }
1837
1838 static ssize_t show_stats(struct class_device *cdev, char *buf)
1839 {
1840         struct ipath_ibdev *dev =
1841                 container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
1842         int i;
1843         int len;
1844
1845         len = sprintf(buf,
1846                       "RC resends  %d\n"
1847                       "RC no QACK  %d\n"
1848                       "RC ACKs     %d\n"
1849                       "RC SEQ NAKs %d\n"
1850                       "RC RDMA seq %d\n"
1851                       "RC RNR NAKs %d\n"
1852                       "RC OTH NAKs %d\n"
1853                       "RC timeouts %d\n"
1854                       "RC RDMA dup %d\n"
1855                       "RC stalls   %d\n"
1856                       "piobuf wait %d\n"
1857                       "no piobuf   %d\n"
1858                       "PKT drops   %d\n"
1859                       "WQE errs    %d\n",
1860                       dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
1861                       dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
1862                       dev->n_other_naks, dev->n_timeouts,
1863                       dev->n_rdma_dup_busy, dev->n_rc_stalls, dev->n_piowait,
1864                       dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs);
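             /* Append per-opcode packet/byte counts, skipping opcodes never seen. */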
1865         for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
1866                 const struct ipath_opcode_stats *si = &dev->opstats[i];
1867
1868                 if (!si->n_packets && !si->n_bytes)
1869                         continue;
1870                 len += sprintf(buf + len, "%02x %llu/%llu\n", i,
1871                                (unsigned long long) si->n_packets,
1872                                (unsigned long long) si->n_bytes);
1873         }
1874         return len;
1875 }
1876
1877 static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
1878 static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
1879 static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
1880 static CLASS_DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);
1881
1882 static struct class_device_attribute *ipath_class_attributes[] = {
1883         &class_device_attr_hw_rev,
1884         &class_device_attr_hca_type,
1885         &class_device_attr_board_id,
1886         &class_device_attr_stats
1887 };
1888
1889 static int ipath_verbs_register_sysfs(struct ib_device *dev)
1890 {
1891         int i;
1892         int ret;
1893
1894         for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) {
1895                 ret = class_device_create_file(&dev->class_dev,
1896                                                ipath_class_attributes[i]);
1897                 if (ret)
1898                         goto bail;
1899         }
1900
1901         ret = 0;
1902
1903 bail:
1904         return ret;
1905 }