/* drivers/infiniband/hw/mthca/mthca_qp.c */
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
 */

#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
        MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
        MTHCA_ACK_REQ_FREQ       = 10,
        MTHCA_FLIGHT_LIMIT       = 9,
        MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
        MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
        MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};

enum {
        MTHCA_QP_STATE_RST  = 0,
        MTHCA_QP_STATE_INIT = 1,
        MTHCA_QP_STATE_RTR  = 2,
        MTHCA_QP_STATE_RTS  = 3,
        MTHCA_QP_STATE_SQE  = 4,
        MTHCA_QP_STATE_SQD  = 5,
        MTHCA_QP_STATE_ERR  = 6,
        MTHCA_QP_STATE_DRAINING = 7
};

enum {
        MTHCA_QP_ST_RC  = 0x0,
        MTHCA_QP_ST_UC  = 0x1,
        MTHCA_QP_ST_RD  = 0x2,
        MTHCA_QP_ST_UD  = 0x3,
        MTHCA_QP_ST_MLX = 0x7
};

enum {
        MTHCA_QP_PM_MIGRATED = 0x3,
        MTHCA_QP_PM_ARMED    = 0x0,
        MTHCA_QP_PM_REARM    = 0x1
};

enum {
        /* qp_context flags */
        MTHCA_QP_BIT_DE  = 1 <<  8,
        /* params1 */
        MTHCA_QP_BIT_SRE = 1 << 15,
        MTHCA_QP_BIT_SWE = 1 << 14,
        MTHCA_QP_BIT_SAE = 1 << 13,
        MTHCA_QP_BIT_SIC = 1 <<  4,
        MTHCA_QP_BIT_SSC = 1 <<  3,
        /* params2 */
        MTHCA_QP_BIT_RRE = 1 << 15,
        MTHCA_QP_BIT_RWE = 1 << 14,
        MTHCA_QP_BIT_RAE = 1 << 13,
        MTHCA_QP_BIT_RIC = 1 <<  4,
        MTHCA_QP_BIT_RSC = 1 <<  3
};
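
/*
 * In the params1/params2 bit names above, SRE/SWE/SAE enable RDMA
 * reads, writes and atomics on the send (requester) side, while
 * RRE/RWE/RAE are the responder-side equivalents, which
 * mthca_modify_qp() below ties to the IB_ACCESS_REMOTE_* flags.
 */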

struct mthca_qp_path {
        __be32 port_pkey;
        u8     rnr_retry;
        u8     g_mylmc;
        __be16 rlid;
        u8     ackto;
        u8     mgid_index;
        u8     static_rate;
        u8     hop_limit;
        __be32 sl_tclass_flowlabel;
        u8     rgid[16];
} __attribute__((packed));

struct mthca_qp_context {
        __be32 flags;
        __be32 tavor_sched_queue; /* Reserved on Arbel */
        u8     mtu_msgmax;
        u8     rq_size_stride;  /* Reserved on Tavor */
        u8     sq_size_stride;  /* Reserved on Tavor */
        u8     rlkey_arbel_sched_queue; /* Reserved on Tavor */
        __be32 usr_page;
        __be32 local_qpn;
        __be32 remote_qpn;
        u32    reserved1[2];
        struct mthca_qp_path pri_path;
        struct mthca_qp_path alt_path;
        __be32 rdd;
        __be32 pd;
        __be32 wqe_base;
        __be32 wqe_lkey;
        __be32 params1;
        __be32 reserved2;
        __be32 next_send_psn;
        __be32 cqn_snd;
        __be32 snd_wqe_base_l;  /* Next send WQE on Tavor */
        __be32 snd_db_index;    /* (debugging only entries) */
        __be32 last_acked_psn;
        __be32 ssn;
        __be32 params2;
        __be32 rnr_nextrecvpsn;
        __be32 ra_buff_indx;
        __be32 cqn_rcv;
        __be32 rcv_wqe_base_l;  /* Next recv WQE on Tavor */
        __be32 rcv_db_index;    /* (debugging only entries) */
        __be32 qkey;
        __be32 srqn;
        __be32 rmsn;
        __be16 rq_wqe_counter;  /* reserved on Tavor */
        __be16 sq_wqe_counter;  /* reserved on Tavor */
        u32    reserved3[18];
} __attribute__((packed));

struct mthca_qp_param {
        __be32 opt_param_mask;
        u32    reserved1;
        struct mthca_qp_context context;
        u32    reserved2[62];
} __attribute__((packed));

enum {
        MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
        MTHCA_QP_OPTPAR_RRE               = 1 << 1,
        MTHCA_QP_OPTPAR_RAE               = 1 << 2,
        MTHCA_QP_OPTPAR_RWE               = 1 << 3,
        MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
        MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
        MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
        MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
        MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
        MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
        MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
        MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
        MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
        MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
        MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
        MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
        MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};

static const u8 mthca_opcode[] = {
        [IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
        [IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
        [IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
        [IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
        [IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
        [IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
        [IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};

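/*
 * The four special QPs occupy consecutive QP numbers starting at
 * sqp_start: QP0 for ports 1 and 2 first, then QP1 for ports 1 and 2.
 */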
static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
        return qp->qpn >= dev->qp_table.sqp_start &&
                qp->qpn <= dev->qp_table.sqp_start + 3;
}

static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
        return qp->qpn >= dev->qp_table.sqp_start &&
                qp->qpn <= dev->qp_table.sqp_start + 1;
}

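/*
 * WQE n starts (n << wqe_shift) bytes into its queue.  For a direct
 * buffer that is a simple offset; otherwise the high bits of the
 * offset select a page in the page list and the low bits give the
 * offset within that page.
 */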
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
        if (qp->is_direct)
                return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
        else
                return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
                        ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}

static void *get_send_wqe(struct mthca_qp *qp, int n)
{
        if (qp->is_direct)
                return qp->queue.direct.buf + qp->send_wqe_offset +
                        (n << qp->sq.wqe_shift);
        else
                return qp->queue.page_list[(qp->send_wqe_offset +
                                            (n << qp->sq.wqe_shift)) >>
                                           PAGE_SHIFT].buf +
                        ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
                         (PAGE_SIZE - 1));
}

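/*
 * Reset a work queue to empty.  last_comp starts at max - 1 so that,
 * in ring-index terms, no WQE counts as completed yet.
 */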
static void mthca_wq_init(struct mthca_wq *wq)
{
        spin_lock_init(&wq->lock);
        wq->next_ind  = 0;
        wq->last_comp = wq->max - 1;
        wq->head      = 0;
        wq->tail      = 0;
}

void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
                    enum ib_event_type event_type)
{
        struct mthca_qp *qp;
        struct ib_event event;

        spin_lock(&dev->qp_table.lock);
        qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
        if (qp)
                atomic_inc(&qp->refcount);
        spin_unlock(&dev->qp_table.lock);

        if (!qp) {
                mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
                return;
        }

        event.device      = &dev->ib_dev;
        event.event       = event_type;
        event.element.qp  = &qp->ibqp;
        if (qp->ibqp.event_handler)
                qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}

static int to_mthca_state(enum ib_qp_state ib_state)
{
        switch (ib_state) {
        case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
        case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
        case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
        case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
        case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
        case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
        case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
        default:           return -1;
        }
}

enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };

static int to_mthca_st(int transport)
{
        switch (transport) {
        case RC:  return MTHCA_QP_ST_RC;
        case UC:  return MTHCA_QP_ST_UC;
        case UD:  return MTHCA_QP_ST_UD;
        case RD:  return MTHCA_QP_ST_RD;
        case MLX: return MTHCA_QP_ST_MLX;
        default:  return -1;
        }
}

static const struct {
        int trans;
        u32 req_param[NUM_TRANS];
        u32 opt_param[NUM_TRANS];
} state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = {
                [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
                [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
                [IB_QPS_INIT]  = {
                        .trans = MTHCA_TRANS_RST2INIT,
                        .req_param = {
                                [UD]  = (IB_QP_PKEY_INDEX |
                                         IB_QP_PORT       |
                                         IB_QP_QKEY),
                                [UC]  = (IB_QP_PKEY_INDEX |
                                         IB_QP_PORT       |
                                         IB_QP_ACCESS_FLAGS),
                                [RC]  = (IB_QP_PKEY_INDEX |
                                         IB_QP_PORT       |
                                         IB_QP_ACCESS_FLAGS),
                                [MLX] = (IB_QP_PKEY_INDEX |
                                         IB_QP_QKEY),
                        },
                        /* bug-for-bug compatibility with VAPI: */
                        .opt_param = {
                                [MLX] = IB_QP_PORT
                        }
                },
        },
        [IB_QPS_INIT]  = {
                [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
                [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
                [IB_QPS_INIT]  = {
                        .trans = MTHCA_TRANS_INIT2INIT,
                        .opt_param = {
                                [UD]  = (IB_QP_PKEY_INDEX |
                                         IB_QP_PORT       |
                                         IB_QP_QKEY),
                                [UC]  = (IB_QP_PKEY_INDEX |
                                         IB_QP_PORT       |
                                         IB_QP_ACCESS_FLAGS),
                                [RC]  = (IB_QP_PKEY_INDEX |
                                         IB_QP_PORT       |
                                         IB_QP_ACCESS_FLAGS),
                                [MLX] = (IB_QP_PKEY_INDEX |
                                         IB_QP_QKEY),
                        }
                },
                [IB_QPS_RTR]   = {
                        .trans = MTHCA_TRANS_INIT2RTR,
                        .req_param = {
                                [UC]  = (IB_QP_AV                  |
                                         IB_QP_PATH_MTU            |
                                         IB_QP_DEST_QPN            |
                                         IB_QP_RQ_PSN),
                                [RC]  = (IB_QP_AV                  |
                                         IB_QP_PATH_MTU            |
                                         IB_QP_DEST_QPN            |
                                         IB_QP_RQ_PSN              |
                                         IB_QP_MAX_DEST_RD_ATOMIC  |
                                         IB_QP_MIN_RNR_TIMER),
                        },
                        .opt_param = {
                                [UD]  = (IB_QP_PKEY_INDEX |
                                         IB_QP_QKEY),
                                [UC]  = (IB_QP_ALT_PATH     |
                                         IB_QP_ACCESS_FLAGS |
                                         IB_QP_PKEY_INDEX),
                                [RC]  = (IB_QP_ALT_PATH     |
                                         IB_QP_ACCESS_FLAGS |
                                         IB_QP_PKEY_INDEX),
                                [MLX] = (IB_QP_PKEY_INDEX |
                                         IB_QP_QKEY),
                        }
                }
        },
        [IB_QPS_RTR]   = {
                [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
                [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
                [IB_QPS_RTS]   = {
                        .trans = MTHCA_TRANS_RTR2RTS,
                        .req_param = {
                                [UD]  = IB_QP_SQ_PSN,
                                [UC]  = IB_QP_SQ_PSN,
                                [RC]  = (IB_QP_TIMEOUT           |
                                         IB_QP_RETRY_CNT         |
                                         IB_QP_RNR_RETRY         |
                                         IB_QP_SQ_PSN            |
                                         IB_QP_MAX_QP_RD_ATOMIC),
                                [MLX] = IB_QP_SQ_PSN,
                        },
                        .opt_param = {
                                [UD]  = (IB_QP_CUR_STATE             |
                                         IB_QP_QKEY),
                                [UC]  = (IB_QP_CUR_STATE             |
                                         IB_QP_ALT_PATH              |
                                         IB_QP_ACCESS_FLAGS          |
                                         IB_QP_PKEY_INDEX            |
                                         IB_QP_PATH_MIG_STATE),
                                [RC]  = (IB_QP_CUR_STATE             |
                                         IB_QP_ALT_PATH              |
                                         IB_QP_ACCESS_FLAGS          |
                                         IB_QP_PKEY_INDEX            |
                                         IB_QP_MIN_RNR_TIMER         |
                                         IB_QP_PATH_MIG_STATE),
                                [MLX] = (IB_QP_CUR_STATE             |
                                         IB_QP_QKEY),
                        }
                }
        },
        [IB_QPS_RTS]   = {
                [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
                [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
                [IB_QPS_RTS]   = {
                        .trans = MTHCA_TRANS_RTS2RTS,
                        .opt_param = {
                                [UD]  = (IB_QP_CUR_STATE             |
                                         IB_QP_QKEY),
                                [UC]  = (IB_QP_ACCESS_FLAGS          |
                                         IB_QP_ALT_PATH              |
                                         IB_QP_PATH_MIG_STATE),
                                [RC]  = (IB_QP_ACCESS_FLAGS          |
                                         IB_QP_ALT_PATH              |
                                         IB_QP_PATH_MIG_STATE        |
                                         IB_QP_MIN_RNR_TIMER),
                                [MLX] = (IB_QP_CUR_STATE             |
                                         IB_QP_QKEY),
                        }
                },
                [IB_QPS_SQD]   = {
                        .trans = MTHCA_TRANS_RTS2SQD,
                },
        },
        [IB_QPS_SQD]   = {
                [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
                [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
                [IB_QPS_RTS]   = {
                        .trans = MTHCA_TRANS_SQD2RTS,
                        .opt_param = {
                                [UD]  = (IB_QP_CUR_STATE             |
                                         IB_QP_QKEY),
                                [UC]  = (IB_QP_CUR_STATE             |
                                         IB_QP_ALT_PATH              |
                                         IB_QP_ACCESS_FLAGS          |
                                         IB_QP_PATH_MIG_STATE),
                                [RC]  = (IB_QP_CUR_STATE             |
                                         IB_QP_ALT_PATH              |
                                         IB_QP_ACCESS_FLAGS          |
                                         IB_QP_MIN_RNR_TIMER         |
                                         IB_QP_PATH_MIG_STATE),
                                [MLX] = (IB_QP_CUR_STATE             |
                                         IB_QP_QKEY),
                        }
                },
                [IB_QPS_SQD]   = {
                        .trans = MTHCA_TRANS_SQD2SQD,
                        .opt_param = {
                                [UD]  = (IB_QP_PKEY_INDEX            |
                                         IB_QP_QKEY),
                                [UC]  = (IB_QP_AV                    |
                                         IB_QP_CUR_STATE             |
                                         IB_QP_ALT_PATH              |
                                         IB_QP_ACCESS_FLAGS          |
                                         IB_QP_PKEY_INDEX            |
                                         IB_QP_PATH_MIG_STATE),
                                [RC]  = (IB_QP_AV                    |
                                         IB_QP_TIMEOUT               |
                                         IB_QP_RETRY_CNT             |
                                         IB_QP_RNR_RETRY             |
                                         IB_QP_MAX_QP_RD_ATOMIC      |
                                         IB_QP_MAX_DEST_RD_ATOMIC    |
                                         IB_QP_CUR_STATE             |
                                         IB_QP_ALT_PATH              |
                                         IB_QP_ACCESS_FLAGS          |
                                         IB_QP_PKEY_INDEX            |
                                         IB_QP_MIN_RNR_TIMER         |
                                         IB_QP_PATH_MIG_STATE),
                                [MLX] = (IB_QP_PKEY_INDEX            |
                                         IB_QP_QKEY),
                        }
                }
        },
        [IB_QPS_SQE]   = {
                [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
                [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
                [IB_QPS_RTS]   = {
                        .trans = MTHCA_TRANS_SQERR2RTS,
                        .opt_param = {
                                [UD]  = (IB_QP_CUR_STATE             |
                                         IB_QP_QKEY),
                                [UC]  = IB_QP_CUR_STATE,
                                [RC]  = (IB_QP_CUR_STATE             |
                                         IB_QP_MIN_RNR_TIMER),
                                [MLX] = (IB_QP_CUR_STATE             |
                                         IB_QP_QKEY),
                        }
                }
        },
        [IB_QPS_ERR] = {
                [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
                [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR }
        }
};
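
/*
 * Illustrative sketch (not driver code): per the RC column of
 * state_table above, a consumer's INIT->RTR transition must supply
 * IB_QP_AV, IB_QP_PATH_MTU, IB_QP_DEST_QPN, IB_QP_RQ_PSN,
 * IB_QP_MAX_DEST_RD_ATOMIC and IB_QP_MIN_RNR_TIMER, e.g.:
 *
 *      struct ib_qp_attr attr = {
 *              .qp_state           = IB_QPS_RTR,
 *              .path_mtu           = IB_MTU_1024,
 *              .dest_qp_num        = remote_qpn,
 *              .rq_psn             = remote_psn,
 *              .max_dest_rd_atomic = 1,
 *              .min_rnr_timer      = 12,
 *              .ah_attr            = remote_ah_attr,
 *      };
 *
 *      ret = ib_modify_qp(qp, &attr,
 *                         IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
 *                         IB_QP_DEST_QPN | IB_QP_RQ_PSN |
 *                         IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
 *
 * where remote_qpn, remote_psn and remote_ah_attr stand for values
 * obtained out of band.  mthca_modify_qp() below rejects the call if
 * any required attribute is missing from the mask.
 */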

static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
                        int attr_mask)
{
        if (attr_mask & IB_QP_PKEY_INDEX)
                sqp->pkey_index = attr->pkey_index;
        if (attr_mask & IB_QP_QKEY)
                sqp->qkey = attr->qkey;
        if (attr_mask & IB_QP_SQ_PSN)
                sqp->send_psn = attr->sq_psn;
}

static void init_port(struct mthca_dev *dev, int port)
{
        int err;
        u8 status;
        struct mthca_init_ib_param param;

        memset(&param, 0, sizeof param);

        param.port_width = dev->limits.port_width_cap;
        param.vl_cap     = dev->limits.vl_cap;
        param.mtu_cap    = dev->limits.mtu_cap;
        param.gid_cap    = dev->limits.gid_table_len;
        param.pkey_cap   = dev->limits.pkey_table_len;

        err = mthca_INIT_IB(dev, &param, port, &status);
        if (err)
                mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
        if (status)
                mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}

int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
{
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        struct mthca_mailbox *mailbox;
        struct mthca_qp_param *qp_param;
        struct mthca_qp_context *qp_context;
        u32 req_param, opt_param;
        u8 status;
        int err;

        if (attr_mask & IB_QP_CUR_STATE) {
                if (attr->cur_qp_state != IB_QPS_RTR &&
                    attr->cur_qp_state != IB_QPS_RTS &&
                    attr->cur_qp_state != IB_QPS_SQD &&
                    attr->cur_qp_state != IB_QPS_SQE)
                        return -EINVAL;
                else
                        cur_state = attr->cur_qp_state;
        } else {
                spin_lock_irq(&qp->sq.lock);
                spin_lock(&qp->rq.lock);
                cur_state = qp->state;
                spin_unlock(&qp->rq.lock);
                spin_unlock_irq(&qp->sq.lock);
        }

        if (attr_mask & IB_QP_STATE) {
                if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR)
                        return -EINVAL;
                new_state = attr->qp_state;
        } else
                new_state = cur_state;

        if (state_table[cur_state][new_state].trans == MTHCA_TRANS_INVALID) {
                mthca_dbg(dev, "Illegal QP transition "
                          "%d->%d\n", cur_state, new_state);
                return -EINVAL;
        }

        req_param = state_table[cur_state][new_state].req_param[qp->transport];
        opt_param = state_table[cur_state][new_state].opt_param[qp->transport];

        if ((req_param & attr_mask) != req_param) {
                mthca_dbg(dev, "QP transition "
                          "%d->%d missing req attr 0x%08x\n",
                          cur_state, new_state,
                          req_param & ~attr_mask);
                return -EINVAL;
        }

        if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) {
                mthca_dbg(dev, "QP transition (transport %d) "
                          "%d->%d has extra attr 0x%08x\n",
                          qp->transport,
                          cur_state, new_state,
                          attr_mask & ~(req_param | opt_param |
                                        IB_QP_STATE));
                return -EINVAL;
        }

        if ((attr_mask & IB_QP_PKEY_INDEX) &&
            attr->pkey_index >= dev->limits.pkey_table_len) {
                mthca_dbg(dev, "PKey index (%u) too large. max is %d\n",
                          attr->pkey_index, dev->limits.pkey_table_len - 1);
                return -EINVAL;
        }

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        qp_param = mailbox->buf;
        qp_context = &qp_param->context;
        memset(qp_param, 0, sizeof *qp_param);

        qp_context->flags      = cpu_to_be32((to_mthca_state(new_state) << 28) |
                                             (to_mthca_st(qp->transport) << 16));
        qp_context->flags     |= cpu_to_be32(MTHCA_QP_BIT_DE);
        if (!(attr_mask & IB_QP_PATH_MIG_STATE))
                qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
        else {
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
                switch (attr->path_mig_state) {
                case IB_MIG_MIGRATED:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
                        break;
                case IB_MIG_REARM:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
                        break;
                case IB_MIG_ARMED:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
                        break;
                }
        }

        /* leave tavor_sched_queue as 0 */

        if (qp->transport == MLX || qp->transport == UD)
                qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
        else if (attr_mask & IB_QP_PATH_MTU)
                qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;

        if (mthca_is_memfree(dev)) {
                if (qp->rq.max)
                        qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
                qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

                if (qp->sq.max)
                        qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
                qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
        }

        /* leave arbel_sched_queue as 0 */

        if (qp->ibqp.uobject)
                qp_context->usr_page =
                        cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
        else
                qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
        qp_context->local_qpn  = cpu_to_be32(qp->qpn);
        if (attr_mask & IB_QP_DEST_QPN)
                qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);

        if (qp->transport == MLX)
                qp_context->pri_path.port_pkey |=
                        cpu_to_be32(to_msqp(qp)->port << 24);
        else {
                if (attr_mask & IB_QP_PORT) {
                        qp_context->pri_path.port_pkey |=
                                cpu_to_be32(attr->port_num << 24);
                        qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
                }
        }

        if (attr_mask & IB_QP_PKEY_INDEX) {
                qp_context->pri_path.port_pkey |=
                        cpu_to_be32(attr->pkey_index);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
        }

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp_context->pri_path.rnr_retry = attr->rnr_retry << 5;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY);
        }

        if (attr_mask & IB_QP_AV) {
                qp_context->pri_path.g_mylmc     = attr->ah_attr.src_path_bits & 0x7f;
                qp_context->pri_path.rlid        = cpu_to_be16(attr->ah_attr.dlid);
                qp_context->pri_path.static_rate = !!attr->ah_attr.static_rate;
                if (attr->ah_attr.ah_flags & IB_AH_GRH) {
                        qp_context->pri_path.g_mylmc |= 1 << 7;
                        qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index;
                        qp_context->pri_path.hop_limit = attr->ah_attr.grh.hop_limit;
                        qp_context->pri_path.sl_tclass_flowlabel =
                                cpu_to_be32((attr->ah_attr.sl << 28)                |
                                            (attr->ah_attr.grh.traffic_class << 20) |
                                            (attr->ah_attr.grh.flow_label));
                        memcpy(qp_context->pri_path.rgid,
                               attr->ah_attr.grh.dgid.raw, 16);
                } else {
                        qp_context->pri_path.sl_tclass_flowlabel =
                                cpu_to_be32(attr->ah_attr.sl << 28);
                }
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
        }

        if (attr_mask & IB_QP_TIMEOUT) {
                qp_context->pri_path.ackto = attr->timeout << 3;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
        }

        /* XXX alt_path */

        /* leave rdd as 0 */
        qp_context->pd         = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
        /* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
        qp_context->wqe_lkey   = cpu_to_be32(qp->mr.ibmr.lkey);
        qp_context->params1    = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
                                             (MTHCA_FLIGHT_LIMIT << 24) |
                                             MTHCA_QP_BIT_SRE           |
                                             MTHCA_QP_BIT_SWE           |
                                             MTHCA_QP_BIT_SAE);
        if (qp->sq_policy == IB_SIGNAL_ALL_WR)
                qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
        if (attr_mask & IB_QP_RETRY_CNT) {
                qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
                qp_context->params1 |= cpu_to_be32(min(attr->max_rd_atomic ?
                                                       ffs(attr->max_rd_atomic) - 1 : 0,
                                                       7) << 21);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
        }

        if (attr_mask & IB_QP_SQ_PSN)
                qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
        qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

        if (mthca_is_memfree(dev)) {
                qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
                qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
        }

        if (attr_mask & IB_QP_ACCESS_FLAGS) {
                qp_context->params2 |=
                        cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
                                    MTHCA_QP_BIT_RWE : 0);

                /*
                 * Only enable RDMA reads and atomics if we have
                 * responder resources set to a non-zero value.
                 */
                if (qp->resp_depth) {
                        qp_context->params2 |=
                                cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ?
                                            MTHCA_QP_BIT_RRE : 0);
                        qp_context->params2 |=
                                cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC ?
                                            MTHCA_QP_BIT_RAE : 0);
                }

                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
                                                        MTHCA_QP_OPTPAR_RRE |
                                                        MTHCA_QP_OPTPAR_RAE);

                qp->atomic_rd_en = attr->qp_access_flags;
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
                u8 rra_max;

                if (qp->resp_depth && !attr->max_dest_rd_atomic) {
                        /*
                         * Lowering our responder resources to zero.
                         * Turn off RDMA reads and atomics as responder
                         * (RRE/RAE in params2 are already zero).
                         */
                        qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRE |
                                                                MTHCA_QP_OPTPAR_RAE);
                }

                if (!qp->resp_depth && attr->max_dest_rd_atomic) {
                        /*
                         * Increasing our responder resources from
                         * zero.  Turn on RDMA reads and atomics as
                         * appropriate.
                         */
                        qp_context->params2 |=
                                cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ?
                                            MTHCA_QP_BIT_RRE : 0);
                        qp_context->params2 |=
                                cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ?
                                            MTHCA_QP_BIT_RAE : 0);

                        qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRE |
                                                                MTHCA_QP_OPTPAR_RAE);
                }

                for (rra_max = 0;
                     1 << rra_max < attr->max_dest_rd_atomic &&
                             rra_max < dev->qp_table.rdb_shift;
                     ++rra_max)
                        ; /* nothing */

                qp_context->params2      |= cpu_to_be32(rra_max << 21);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);

                qp->resp_depth = attr->max_dest_rd_atomic;
        }

        qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

        if (ibqp->srq)
                qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

        if (attr_mask & IB_QP_MIN_RNR_TIMER) {
                qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
        }
        if (attr_mask & IB_QP_RQ_PSN)
                qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

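        /*
         * Each QP owns a block of 1 << rdb_shift RDB entries used for
         * responder resources; point the context at this QP's block.
         */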
        qp_context->ra_buff_indx =
                cpu_to_be32(dev->qp_table.rdb_base +
                            ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
                             dev->qp_table.rdb_shift));

        qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

        if (mthca_is_memfree(dev))
                qp_context->rcv_db_index   = cpu_to_be32(qp->rq.db_index);

        if (attr_mask & IB_QP_QKEY) {
                qp_context->qkey = cpu_to_be32(attr->qkey);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
        }

        if (ibqp->srq)
                qp_context->srqn = cpu_to_be32(1 << 24 |
                                               to_msrq(ibqp->srq)->srqn);

        err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
                              qp->qpn, 0, mailbox, 0, &status);
        if (status) {
                mthca_warn(dev, "modify QP %d returned status %02x.\n",
                           state_table[cur_state][new_state].trans, status);
                err = -EINVAL;
        }

        if (!err)
                qp->state = new_state;

        mthca_free_mailbox(dev, mailbox);

        if (is_sqp(dev, qp))
                store_attrs(to_msqp(qp), attr, attr_mask);

        /*
         * If we moved QP0 to RTR, bring the IB link up; if we moved
         * QP0 to RESET or ERROR, bring the link back down.
         */
        if (is_qp0(dev, qp)) {
                if (cur_state != IB_QPS_RTR &&
                    new_state == IB_QPS_RTR)
                        init_port(dev, to_msqp(qp)->port);

                if (cur_state != IB_QPS_RESET &&
                    cur_state != IB_QPS_ERR &&
                    (new_state == IB_QPS_RESET ||
                     new_state == IB_QPS_ERR))
                        mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
        }

        /*
         * If we moved a kernel QP to RESET, clean up all old CQ
         * entries and reinitialize the QP.
         */
        if (!err && new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
                mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
                               qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
                        mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
                                       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

                mthca_wq_init(&qp->sq);
                qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

                mthca_wq_init(&qp->rq);
                qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

                if (mthca_is_memfree(dev)) {
                        *qp->sq.db = 0;
                        *qp->rq.db = 0;
                }
        }

        return err;
}

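/*
 * Shrink the reported queue capabilities to what actually fits in a
 * WQE once the transport-specific control segments are accounted for.
 */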
static void mthca_adjust_qp_caps(struct mthca_dev *dev,
                                 struct mthca_pd *pd,
                                 struct mthca_qp *qp)
{
        int max_data_size;

        /*
         * Calculate the maximum size of WQE s/g segments, excluding
         * the next segment and other non-data segments.
         */
        max_data_size = min(dev->limits.max_desc_sz, 1 << qp->sq.wqe_shift) -
                sizeof (struct mthca_next_seg);

        switch (qp->transport) {
        case MLX:
                max_data_size -= 2 * sizeof (struct mthca_data_seg);
                break;

        case UD:
                if (mthca_is_memfree(dev))
                        max_data_size -= sizeof (struct mthca_arbel_ud_seg);
                else
                        max_data_size -= sizeof (struct mthca_tavor_ud_seg);
                break;

        default:
                max_data_size -= sizeof (struct mthca_raddr_seg);
                break;
        }

        /* We don't support inline data for kernel QPs (yet). */
        if (!pd->ibpd.uobject)
                qp->max_inline_data = 0;
        else
                qp->max_inline_data = max_data_size - MTHCA_INLINE_HEADER_SIZE;

        qp->sq.max_gs = min_t(int, dev->limits.max_sg,
                              max_data_size / sizeof (struct mthca_data_seg));
        qp->rq.max_gs = min_t(int, dev->limits.max_sg,
                              (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
                               sizeof (struct mthca_next_seg)) /
                              sizeof (struct mthca_data_seg));
}

/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
                               struct mthca_pd *pd,
                               struct mthca_qp *qp)
{
        int size;
        int err = -ENOMEM;

        size = sizeof (struct mthca_next_seg) +
                qp->rq.max_gs * sizeof (struct mthca_data_seg);

        if (size > dev->limits.max_desc_sz)
                return -EINVAL;

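        /* Round the receive WQE size up to a power of two (minimum 64 bytes). */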
        for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
             qp->rq.wqe_shift++)
                ; /* nothing */

        size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
        switch (qp->transport) {
        case MLX:
                size += 2 * sizeof (struct mthca_data_seg);
                break;

        case UD:
                size += mthca_is_memfree(dev) ?
                        sizeof (struct mthca_arbel_ud_seg) :
                        sizeof (struct mthca_tavor_ud_seg);
                break;

        case UC:
                size += sizeof (struct mthca_raddr_seg);
                break;

        case RC:
                size += sizeof (struct mthca_raddr_seg);
                /*
                 * An atomic op will require an atomic segment, a
                 * remote address segment and one scatter entry.
                 */
                size = max_t(int, size,
                             sizeof (struct mthca_atomic_seg) +
                             sizeof (struct mthca_raddr_seg) +
                             sizeof (struct mthca_data_seg));
                break;

        default:
                break;
        }

        /* Make sure that we have enough space for a bind request */
        size = max_t(int, size, sizeof (struct mthca_bind_seg));

        size += sizeof (struct mthca_next_seg);

        if (size > dev->limits.max_desc_sz)
                return -EINVAL;

        for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
             qp->sq.wqe_shift++)
                ; /* nothing */

        qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
                                    1 << qp->sq.wqe_shift);

        /*
         * If this is a userspace QP, we don't actually have to
         * allocate anything.  All we need is to calculate the WQE
         * sizes and the send_wqe_offset, so we're done now.
         */
        if (pd->ibpd.uobject)
                return 0;

        size = PAGE_ALIGN(qp->send_wqe_offset +
                          (qp->sq.max << qp->sq.wqe_shift));

        qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
                           GFP_KERNEL);
        if (!qp->wrid)
                goto err_out;

        err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
                              &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
        if (err)
                goto err_out;

        return 0;

err_out:
        kfree(qp->wrid);
        return err;
}

static void mthca_free_wqe_buf(struct mthca_dev *dev,
                               struct mthca_qp *qp)
{
        mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
                                       (qp->sq.max << qp->sq.wqe_shift)),
                       &qp->queue, qp->is_direct, &qp->mr);
        kfree(qp->wrid);
}

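/*
 * On mem-free (Arbel-mode) HCAs, the QP context, extended QP context
 * and RDB entries live in ICM tables in host memory, which must be
 * mapped before the QP number can be used.
 */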
static int mthca_map_memfree(struct mthca_dev *dev,
                             struct mthca_qp *qp)
{
        int ret;

        if (mthca_is_memfree(dev)) {
                ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
                if (ret)
                        return ret;

                ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
                if (ret)
                        goto err_qpc;

                ret = mthca_table_get(dev, dev->qp_table.rdb_table,
                                      qp->qpn << dev->qp_table.rdb_shift);
                if (ret)
                        goto err_eqpc;
        }

        return 0;

err_eqpc:
        mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
        mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

        return ret;
}

static void mthca_unmap_memfree(struct mthca_dev *dev,
                                struct mthca_qp *qp)
{
        mthca_table_put(dev, dev->qp_table.rdb_table,
                        qp->qpn << dev->qp_table.rdb_shift);
        mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
        mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}

static int mthca_alloc_memfree(struct mthca_dev *dev,
                               struct mthca_qp *qp)
{
        if (mthca_is_memfree(dev)) {
                qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
                                                 qp->qpn, &qp->rq.db);
                if (qp->rq.db_index < 0)
                        return qp->rq.db_index;

                qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
                                                 qp->qpn, &qp->sq.db);
                if (qp->sq.db_index < 0) {
                        mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
                        return qp->sq.db_index;
                }
        }

        return 0;
}

static void mthca_free_memfree(struct mthca_dev *dev,
                               struct mthca_qp *qp)
{
        if (mthca_is_memfree(dev)) {
                mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
                mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
        }
}

static int mthca_alloc_qp_common(struct mthca_dev *dev,
                                 struct mthca_pd *pd,
                                 struct mthca_cq *send_cq,
                                 struct mthca_cq *recv_cq,
                                 enum ib_sig_type send_policy,
                                 struct mthca_qp *qp)
{
        int ret;
        int i;

        atomic_set(&qp->refcount, 1);
        init_waitqueue_head(&qp->wait);
        qp->state        = IB_QPS_RESET;
        qp->atomic_rd_en = 0;
        qp->resp_depth   = 0;
        qp->sq_policy    = send_policy;
        mthca_wq_init(&qp->sq);
        mthca_wq_init(&qp->rq);

        ret = mthca_map_memfree(dev, qp);
        if (ret)
                return ret;

        ret = mthca_alloc_wqe_buf(dev, pd, qp);
        if (ret) {
                mthca_unmap_memfree(dev, qp);
                return ret;
        }

        mthca_adjust_qp_caps(dev, pd, qp);

        /*
         * If this is a userspace QP, we're done now.  The doorbells
         * will be allocated and buffers will be initialized in
         * userspace.
         */
        if (pd->ibpd.uobject)
                return 0;

        ret = mthca_alloc_memfree(dev, qp);
        if (ret) {
                mthca_free_wqe_buf(dev, qp);
                mthca_unmap_memfree(dev, qp);
                return ret;
        }

        if (mthca_is_memfree(dev)) {
                struct mthca_next_seg *next;
                struct mthca_data_seg *scatter;
                int size = (sizeof (struct mthca_next_seg) +
                            qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

                for (i = 0; i < qp->rq.max; ++i) {
                        next = get_recv_wqe(qp, i);
                        next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
                                                   qp->rq.wqe_shift);
                        next->ee_nds = cpu_to_be32(size);

                        for (scatter = (void *) (next + 1);
                             (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
                             ++scatter)
                                scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
                }

                for (i = 0; i < qp->sq.max; ++i) {
                        next = get_send_wqe(qp, i);
                        next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
                                                    qp->sq.wqe_shift) +
                                                   qp->send_wqe_offset);
                }
        }

        qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
        qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

        return 0;
}

static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
                             struct mthca_qp *qp)
{
        /* Sanity check QP size before proceeding */
        if (cap->max_send_wr  > dev->limits.max_wqes ||
            cap->max_recv_wr  > dev->limits.max_wqes ||
            cap->max_send_sge > dev->limits.max_sg   ||
            cap->max_recv_sge > dev->limits.max_sg)
                return -EINVAL;

        if (mthca_is_memfree(dev)) {
                qp->rq.max = cap->max_recv_wr ?
                        roundup_pow_of_two(cap->max_recv_wr) : 0;
                qp->sq.max = cap->max_send_wr ?
                        roundup_pow_of_two(cap->max_send_wr) : 0;
        } else {
                qp->rq.max = cap->max_recv_wr;
                qp->sq.max = cap->max_send_wr;
        }

        qp->rq.max_gs = cap->max_recv_sge;
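        /*
         * Budget send gather entries for the worst case of inline
         * data: each 16-byte inline chunk takes the space of one data
         * segment, e.g. 60 bytes of inline data cost
         * ALIGN(60 + 4, 16) / 16 = 4 entries.
         */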
        qp->sq.max_gs = max_t(int, cap->max_send_sge,
                              ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
                                    MTHCA_INLINE_CHUNK_SIZE) /
                              sizeof (struct mthca_data_seg));

        /*
         * For MLX transport we need 2 extra S/G entries:
         * one for the header and one for the checksum at the end
         */
        if ((qp->transport == MLX && qp->sq.max_gs + 2 > dev->limits.max_sg) ||
            qp->sq.max_gs > dev->limits.max_sg || qp->rq.max_gs > dev->limits.max_sg)
                return -EINVAL;

        return 0;
}

int mthca_alloc_qp(struct mthca_dev *dev,
                   struct mthca_pd *pd,
                   struct mthca_cq *send_cq,
                   struct mthca_cq *recv_cq,
                   enum ib_qp_type type,
                   enum ib_sig_type send_policy,
                   struct ib_qp_cap *cap,
                   struct mthca_qp *qp)
{
        int err;

        err = mthca_set_qp_size(dev, cap, qp);
        if (err)
                return err;

        switch (type) {
        case IB_QPT_RC: qp->transport = RC; break;
        case IB_QPT_UC: qp->transport = UC; break;
        case IB_QPT_UD: qp->transport = UD; break;
        default: return -EINVAL;
        }

        qp->qpn = mthca_alloc(&dev->qp_table.alloc);
        if (qp->qpn == -1)
                return -ENOMEM;

        err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
                                    send_policy, qp);
        if (err) {
                mthca_free(&dev->qp_table.alloc, qp->qpn);
                return err;
        }

        spin_lock_irq(&dev->qp_table.lock);
        mthca_array_set(&dev->qp_table.qp,
                        qp->qpn & (dev->limits.num_qps - 1), qp);
        spin_unlock_irq(&dev->qp_table.lock);

        return 0;
}

int mthca_alloc_sqp(struct mthca_dev *dev,
                    struct mthca_pd *pd,
                    struct mthca_cq *send_cq,
                    struct mthca_cq *recv_cq,
                    enum ib_sig_type send_policy,
                    struct ib_qp_cap *cap,
                    int qpn,
                    int port,
                    struct mthca_sqp *sqp)
{
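        /*
         * qpn is 0 for QP0 or 1 for QP1; map it onto the right one of
         * the four special QP slots starting at sqp_start (see
         * is_sqp() above).
         */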
1276         u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
1277         int err;
1278
1279         err = mthca_set_qp_size(dev, cap, &sqp->qp);
1280         if (err)
1281                 return err;
1282
1283         sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
1284         sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
1285                                              &sqp->header_dma, GFP_KERNEL);
1286         if (!sqp->header_buf)
1287                 return -ENOMEM;
1288
1289         spin_lock_irq(&dev->qp_table.lock);
1290         if (mthca_array_get(&dev->qp_table.qp, mqpn))
1291                 err = -EBUSY;
1292         else
1293                 mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
1294         spin_unlock_irq(&dev->qp_table.lock);
1295
1296         if (err)
1297                 goto err_out;
1298
1299         sqp->port = port;
1300         sqp->qp.qpn       = mqpn;
1301         sqp->qp.transport = MLX;
1302
1303         err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
1304                                     send_policy, &sqp->qp);
1305         if (err)
1306                 goto err_out_free;
1307
1308         atomic_inc(&pd->sqp_count);
1309
1310         return 0;
1311
1312  err_out_free:
1313         /*
1314          * Lock CQs here, so that CQ polling code can do QP lookup
1315          * without taking a lock.
1316          */
1317         spin_lock_irq(&send_cq->lock);
1318         if (send_cq != recv_cq)
1319                 spin_lock(&recv_cq->lock);
1320
1321         spin_lock(&dev->qp_table.lock);
1322         mthca_array_clear(&dev->qp_table.qp, mqpn);
1323         spin_unlock(&dev->qp_table.lock);
1324
1325         if (send_cq != recv_cq)
1326                 spin_unlock(&recv_cq->lock);
1327         spin_unlock_irq(&send_cq->lock);
1328
1329  err_out:
1330         dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
1331                           sqp->header_buf, sqp->header_dma);
1332
1333         return err;
1334 }
1335
1336 void mthca_free_qp(struct mthca_dev *dev,
1337                    struct mthca_qp *qp)
1338 {
1339         u8 status;
1340         struct mthca_cq *send_cq;
1341         struct mthca_cq *recv_cq;
1342
1343         send_cq = to_mcq(qp->ibqp.send_cq);
1344         recv_cq = to_mcq(qp->ibqp.recv_cq);
1345
1346         /*
1347          * Lock CQs here, so that CQ polling code can do QP lookup
1348          * without taking a lock.
1349          */
1350         spin_lock_irq(&send_cq->lock);
1351         if (send_cq != recv_cq)
1352                 spin_lock(&recv_cq->lock);
1353
1354         spin_lock(&dev->qp_table.lock);
1355         mthca_array_clear(&dev->qp_table.qp,
1356                           qp->qpn & (dev->limits.num_qps - 1));
1357         spin_unlock(&dev->qp_table.lock);
1358
1359         if (send_cq != recv_cq)
1360                 spin_unlock(&recv_cq->lock);
1361         spin_unlock_irq(&send_cq->lock);
1362
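         /*
          * Drop the initial reference and wait until everyone else who
          * still holds one (e.g. the async event handler, which takes a
          * reference for each QP it looks up) has let go.
          */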
1363         atomic_dec(&qp->refcount);
1364         wait_event(qp->wait, !atomic_read(&qp->refcount));
1365
1366         if (qp->state != IB_QPS_RESET)
1367                 mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status);
1368
1369         /*
1370          * If this is a userspace QP, the buffers, MR, CQs and so on
1371          * will be cleaned up in userspace, so all we have to do is
1372          * unref the mem-free tables and free the QPN in our table.
1373          */
1374         if (!qp->ibqp.uobject) {
1375                 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
1376                                qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1377                 if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
1378                         mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
1379                                        qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1380
1381                 mthca_free_memfree(dev, qp);
1382                 mthca_free_wqe_buf(dev, qp);
1383         }
1384
1385         mthca_unmap_memfree(dev, qp);
1386
1387         if (is_sqp(dev, qp)) {
1388                 atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
1389                 dma_free_coherent(&dev->pdev->dev,
1390                                   to_msqp(qp)->header_buf_size,
1391                                   to_msqp(qp)->header_buf,
1392                                   to_msqp(qp)->header_dma);
1393         } else
1394                 mthca_free(&dev->qp_table.alloc, qp->qpn);
1395 }
1396
1397 /* Create UD header for an MLX send and build a data segment for it */
1398 static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
1399                             int ind, struct ib_send_wr *wr,
1400                             struct mthca_mlx_seg *mlx,
1401                             struct mthca_data_seg *data)
1402 {
1403         int header_size;
1404         int err;
1405         u16 pkey;
1406
1407         ib_ud_header_init(256, /* assume a MAD */
1408                           sqp->ud_header.grh_present,
1409                           &sqp->ud_header);
1410
1411         err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
1412         if (err)
1413                 return err;
1414         mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
1415         mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
1416                                   (sqp->ud_header.lrh.destination_lid ==
1417                                    IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
1418                                   (sqp->ud_header.lrh.service_level << 8));
1419         mlx->rlid = sqp->ud_header.lrh.destination_lid;
1420         mlx->vcrc = 0;
1421
1422         switch (wr->opcode) {
1423         case IB_WR_SEND:
1424                 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1425                 sqp->ud_header.immediate_present = 0;
1426                 break;
1427         case IB_WR_SEND_WITH_IMM:
1428                 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1429                 sqp->ud_header.immediate_present = 1;
1430                 sqp->ud_header.immediate_data = wr->imm_data;
1431                 break;
1432         default:
1433                 return -EINVAL;
1434         }
1435
1436         sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
1437         if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
1438                 sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
1439         sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
1440         if (!sqp->qp.ibqp.qp_num)
1441                 ib_get_cached_pkey(&dev->ib_dev, sqp->port,
1442                                    sqp->pkey_index, &pkey);
1443         else
1444                 ib_get_cached_pkey(&dev->ib_dev, sqp->port,
1445                                    wr->wr.ud.pkey_index, &pkey);
1446         sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
1447         sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
1448         sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
1449         sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
1450                                                sqp->qkey : wr->wr.ud.remote_qkey);
1451         sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
1452
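         /*
          * Pack the header into this WQE's slot in header_buf and point
          * the gather entry at that slot's DMA address; header_buf was
          * sized to hold one UD header per send WQE.
          */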
1453         header_size = ib_ud_header_pack(&sqp->ud_header,
1454                                         sqp->header_buf +
1455                                         ind * MTHCA_UD_HEADER_SIZE);
1456
1457         data->byte_count = cpu_to_be32(header_size);
1458         data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
1459         data->addr       = cpu_to_be64(sqp->header_dma +
1460                                        ind * MTHCA_UD_HEADER_SIZE);
1461
1462         return 0;
1463 }
1464
1465 static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
1466                                     struct ib_cq *ib_cq)
1467 {
1468         unsigned cur;
1469         struct mthca_cq *cq;
1470
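         /*
          * head and tail are free-running counters, so unsigned
          * subtraction gives the number of outstanding WQEs even after
          * either counter wraps.
          */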
1471         cur = wq->head - wq->tail;
1472         if (likely(cur + nreq < wq->max))
1473                 return 0;
1474
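         /*
          * Looks full: recheck under the CQ lock, since completion
          * processing advances wq->tail while holding that lock and may
          * have freed entries in the meantime.
          */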
1475         cq = to_mcq(ib_cq);
1476         spin_lock(&cq->lock);
1477         cur = wq->head - wq->tail;
1478         spin_unlock(&cq->lock);
1479
1480         return cur + nreq >= wq->max;
1481 }
1482
1483 int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1484                           struct ib_send_wr **bad_wr)
1485 {
1486         struct mthca_dev *dev = to_mdev(ibqp->device);
1487         struct mthca_qp *qp = to_mqp(ibqp);
1488         void *wqe;
1489         void *prev_wqe;
1490         unsigned long flags;
1491         int err = 0;
1492         int nreq;
1493         int i;
1494         int size;
1495         int size0 = 0;
1496         u32 f0 = 0;
1497         int ind;
1498         u8 op0 = 0;
1499
1500         spin_lock_irqsave(&qp->sq.lock, flags);
1501
1502         /* XXX check that state is OK to post send */
1503
1504         ind = qp->sq.next_ind;
1505
1506         for (nreq = 0; wr; ++nreq, wr = wr->next) {
1507                 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1508                         mthca_err(dev, "SQ %06x full (%u head, %u tail,"
1509                                         " %d max, %d nreq)\n", qp->qpn,
1510                                         qp->sq.head, qp->sq.tail,
1511                                         qp->sq.max, nreq);
1512                         err = -ENOMEM;
1513                         *bad_wr = wr;
1514                         goto out;
1515                 }
1516
1517                 wqe = get_send_wqe(qp, ind);
1518                 prev_wqe = qp->sq.last;
1519                 qp->sq.last = wqe;
1520
1521                 ((struct mthca_next_seg *) wqe)->nda_op = 0;
1522                 ((struct mthca_next_seg *) wqe)->ee_nds = 0;
1523                 ((struct mthca_next_seg *) wqe)->flags =
1524                         ((wr->send_flags & IB_SEND_SIGNALED) ?
1525                          cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
1526                         ((wr->send_flags & IB_SEND_SOLICITED) ?
1527                          cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
1528                         cpu_to_be32(1);
1529                 if (wr->opcode == IB_WR_SEND_WITH_IMM ||
1530                     wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
1531                         ((struct mthca_next_seg *) wqe)->imm = wr->imm_data;
1532
1533                 wqe += sizeof (struct mthca_next_seg);
1534                 size = sizeof (struct mthca_next_seg) / 16;
1535
1536                 switch (qp->transport) {
1537                 case RC:
1538                         switch (wr->opcode) {
1539                         case IB_WR_ATOMIC_CMP_AND_SWP:
1540                         case IB_WR_ATOMIC_FETCH_AND_ADD:
1541                                 ((struct mthca_raddr_seg *) wqe)->raddr =
1542                                         cpu_to_be64(wr->wr.atomic.remote_addr);
1543                                 ((struct mthca_raddr_seg *) wqe)->rkey =
1544                                         cpu_to_be32(wr->wr.atomic.rkey);
1545                                 ((struct mthca_raddr_seg *) wqe)->reserved = 0;
1546
1547                                 wqe += sizeof (struct mthca_raddr_seg);
1548
1549                                 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
1550                                         ((struct mthca_atomic_seg *) wqe)->swap_add =
1551                                                 cpu_to_be64(wr->wr.atomic.swap);
1552                                         ((struct mthca_atomic_seg *) wqe)->compare =
1553                                                 cpu_to_be64(wr->wr.atomic.compare_add);
1554                                 } else {
1555                                         ((struct mthca_atomic_seg *) wqe)->swap_add =
1556                                                 cpu_to_be64(wr->wr.atomic.compare_add);
1557                                         ((struct mthca_atomic_seg *) wqe)->compare = 0;
1558                                 }
1559
1560                                 wqe += sizeof (struct mthca_atomic_seg);
1561                                 size += (sizeof (struct mthca_raddr_seg) +
1562                                          sizeof (struct mthca_atomic_seg)) / 16;
1563                                 break;
1564
1565                         case IB_WR_RDMA_WRITE:
1566                         case IB_WR_RDMA_WRITE_WITH_IMM:
1567                         case IB_WR_RDMA_READ:
1568                                 ((struct mthca_raddr_seg *) wqe)->raddr =
1569                                         cpu_to_be64(wr->wr.rdma.remote_addr);
1570                                 ((struct mthca_raddr_seg *) wqe)->rkey =
1571                                         cpu_to_be32(wr->wr.rdma.rkey);
1572                                 ((struct mthca_raddr_seg *) wqe)->reserved = 0;
1573                                 wqe += sizeof (struct mthca_raddr_seg);
1574                                 size += sizeof (struct mthca_raddr_seg) / 16;
1575                                 break;
1576
1577                         default:
1578                                 /* No extra segments required for sends */
1579                                 break;
1580                         }
1581
1582                         break;
1583
1584                 case UC:
1585                         switch (wr->opcode) {
1586                         case IB_WR_RDMA_WRITE:
1587                         case IB_WR_RDMA_WRITE_WITH_IMM:
1588                                 ((struct mthca_raddr_seg *) wqe)->raddr =
1589                                         cpu_to_be64(wr->wr.rdma.remote_addr);
1590                                 ((struct mthca_raddr_seg *) wqe)->rkey =
1591                                         cpu_to_be32(wr->wr.rdma.rkey);
1592                                 ((struct mthca_raddr_seg *) wqe)->reserved = 0;
1593                                 wqe += sizeof (struct mthca_raddr_seg);
1594                                 size += sizeof (struct mthca_raddr_seg) / 16;
1595                                 break;
1596
1597                         default:
1598                                 /* No extra segments required for sends */
1599                                 break;
1600                         }
1601
1602                         break;
1603
1604                 case UD:
1605                         ((struct mthca_tavor_ud_seg *) wqe)->lkey =
1606                                 cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
1607                         ((struct mthca_tavor_ud_seg *) wqe)->av_addr =
1608                                 cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
1609                         ((struct mthca_tavor_ud_seg *) wqe)->dqpn =
1610                                 cpu_to_be32(wr->wr.ud.remote_qpn);
1611                         ((struct mthca_tavor_ud_seg *) wqe)->qkey =
1612                                 cpu_to_be32(wr->wr.ud.remote_qkey);
1613
1614                         wqe += sizeof (struct mthca_tavor_ud_seg);
1615                         size += sizeof (struct mthca_tavor_ud_seg) / 16;
1616                         break;
1617
1618                 case MLX:
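                             /*
                              * For MLX QPs the leading control segment
                              * doubles as the MLX segment, so pass the
                              * start of the WQE; the packed UD header
                              * goes in the data segment following it.
                              */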
1619                         err = build_mlx_header(dev, to_msqp(qp), ind, wr,
1620                                                wqe - sizeof (struct mthca_next_seg),
1621                                                wqe);
1622                         if (err) {
1623                                 *bad_wr = wr;
1624                                 goto out;
1625                         }
1626                         wqe += sizeof (struct mthca_data_seg);
1627                         size += sizeof (struct mthca_data_seg) / 16;
1628                         break;
1629                 }
1630
1631                 if (wr->num_sge > qp->sq.max_gs) {
1632                         mthca_err(dev, "too many gathers\n");
1633                         err = -EINVAL;
1634                         *bad_wr = wr;
1635                         goto out;
1636                 }
1637
1638                 for (i = 0; i < wr->num_sge; ++i) {
1639                         ((struct mthca_data_seg *) wqe)->byte_count =
1640                                 cpu_to_be32(wr->sg_list[i].length);
1641                         ((struct mthca_data_seg *) wqe)->lkey =
1642                                 cpu_to_be32(wr->sg_list[i].lkey);
1643                         ((struct mthca_data_seg *) wqe)->addr =
1644                                 cpu_to_be64(wr->sg_list[i].addr);
1645                         wqe += sizeof (struct mthca_data_seg);
1646                         size += sizeof (struct mthca_data_seg) / 16;
1647                 }
1648
1649                 /* Add one more inline data segment for ICRC */
1650                 if (qp->transport == MLX) {
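                             /*
                              * Bit 31 of byte_count marks the segment as
                              * inline data; the 4 zero bytes reserve room
                              * for the ICRC at the end of the packet.
                              */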
1651                         ((struct mthca_data_seg *) wqe)->byte_count =
1652                                 cpu_to_be32((1 << 31) | 4);
1653                         ((u32 *) wqe)[1] = 0;
1654                         wqe += sizeof (struct mthca_data_seg);
1655                         size += sizeof (struct mthca_data_seg) / 16;
1656                 }
1657
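                 /*
                  * The wrid array is shared between the two work queues:
                  * slots 0..rq.max-1 hold receive WRIDs, slots rq.max
                  * and up hold send WRIDs.
                  */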
1658                 qp->wrid[ind + qp->rq.max] = wr->wr_id;
1659
1660                 if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
1661                         mthca_err(dev, "opcode invalid\n");
1662                         err = -EINVAL;
1663                         *bad_wr = wr;
1664                         goto out;
1665                 }
1666
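                 /*
                  * Link the previous WQE to this one: write the next-WQE
                  * address and opcode first, then the barrier makes sure
                  * they are visible before the size/DBD word marks the
                  * descriptor as valid.
                  */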
1667                 ((struct mthca_next_seg *) prev_wqe)->nda_op =
1668                         cpu_to_be32(((ind << qp->sq.wqe_shift) +
1669                                      qp->send_wqe_offset) |
1670                                     mthca_opcode[wr->opcode]);
1671                 wmb();
1672                 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
1673                         cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
1674
1675                 if (!size0) {
1676                         size0 = size;
1677                         op0   = mthca_opcode[wr->opcode];
1678                 }
1679
1680                 ++ind;
1681                 if (unlikely(ind >= qp->sq.max))
1682                         ind -= qp->sq.max;
1683         }
1684
1685 out:
1686         if (likely(nreq)) {
1687                 __be32 doorbell[2];
1688
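                 /*
                  * Tavor send doorbell: the first word carries the offset
                  * of the first new WQE together with its fence bit (f0)
                  * and opcode, the second the QPN and that WQE's size in
                  * 16-byte chunks.
                  */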
1689                 doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) +
1690                                            qp->send_wqe_offset) | f0 | op0);
1691                 doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);
1692
1693                 wmb();
1694
1695                 mthca_write64(doorbell,
1696                               dev->kar + MTHCA_SEND_DOORBELL,
1697                               MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1698         }
1699
1700         qp->sq.next_ind = ind;
1701         qp->sq.head    += nreq;
1702
1703         spin_unlock_irqrestore(&qp->sq.lock, flags);
1704         return err;
1705 }
1706
1707 int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1708                              struct ib_recv_wr **bad_wr)
1709 {
1710         struct mthca_dev *dev = to_mdev(ibqp->device);
1711         struct mthca_qp *qp = to_mqp(ibqp);
1712         __be32 doorbell[2];
1713         unsigned long flags;
1714         int err = 0;
1715         int nreq;
1716         int i;
1717         int size;
1718         int size0 = 0;
1719         int ind;
1720         void *wqe;
1721         void *prev_wqe;
1722
1723         spin_lock_irqsave(&qp->rq.lock, flags);
1724
1725         /* XXX check that state is OK to post receive */
1726
1727         ind = qp->rq.next_ind;
1728
1729         for (nreq = 0; wr; ++nreq, wr = wr->next) {
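                 /*
                  * The WQE count in a receive doorbell is only 8 bits
                  * wide, so ring the doorbell in batches of at most
                  * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB WQEs (a count of
                  * zero presumably encoding a full batch).
                  */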
1730                 if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
1731                         nreq = 0;
1732
1733                         doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
1734                         doorbell[1] = cpu_to_be32(qp->qpn << 8);
1735
1736                         wmb();
1737
1738                         mthca_write64(doorbell,
1739                                       dev->kar + MTHCA_RECEIVE_DOORBELL,
1740                                       MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1741
1742                         qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
1743                         size0 = 0;
1744                 }
1745
1746                 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
1747                         mthca_err(dev, "RQ %06x full (%u head, %u tail,"
1748                                         " %d max, %d nreq)\n", qp->qpn,
1749                                         qp->rq.head, qp->rq.tail,
1750                                         qp->rq.max, nreq);
1751                         err = -ENOMEM;
1752                         *bad_wr = wr;
1753                         goto out;
1754                 }
1755
1756                 wqe = get_recv_wqe(qp, ind);
1757                 prev_wqe = qp->rq.last;
1758                 qp->rq.last = wqe;
1759
1760                 ((struct mthca_next_seg *) wqe)->nda_op = 0;
1761                 ((struct mthca_next_seg *) wqe)->ee_nds =
1762                         cpu_to_be32(MTHCA_NEXT_DBD);
1763                 ((struct mthca_next_seg *) wqe)->flags = 0;
1764
1765                 wqe += sizeof (struct mthca_next_seg);
1766                 size = sizeof (struct mthca_next_seg) / 16;
1767
1768                 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
1769                         err = -EINVAL;
1770                         *bad_wr = wr;
1771                         goto out;
1772                 }
1773
1774                 for (i = 0; i < wr->num_sge; ++i) {
1775                         ((struct mthca_data_seg *) wqe)->byte_count =
1776                                 cpu_to_be32(wr->sg_list[i].length);
1777                         ((struct mthca_data_seg *) wqe)->lkey =
1778                                 cpu_to_be32(wr->sg_list[i].lkey);
1779                         ((struct mthca_data_seg *) wqe)->addr =
1780                                 cpu_to_be64(wr->sg_list[i].addr);
1781                         wqe += sizeof (struct mthca_data_seg);
1782                         size += sizeof (struct mthca_data_seg) / 16;
1783                 }
1784
1785                 qp->wrid[ind] = wr->wr_id;
1786
1787                 ((struct mthca_next_seg *) prev_wqe)->nda_op =
1788                         cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
1789                 wmb();
1790                 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
1791                         cpu_to_be32(MTHCA_NEXT_DBD | size);
1792
1793                 if (!size0)
1794                         size0 = size;
1795
1796                 ++ind;
1797                 if (unlikely(ind >= qp->rq.max))
1798                         ind -= qp->rq.max;
1799         }
1800
1801 out:
1802         if (likely(nreq)) {
1803                 doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
1804                 doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);
1805
1806                 wmb();
1807
1808                 mthca_write64(doorbell,
1809                               dev->kar + MTHCA_RECEIVE_DOORBELL,
1810                               MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1811         }
1812
1813         qp->rq.next_ind = ind;
1814         qp->rq.head    += nreq;
1815
1816         spin_unlock_irqrestore(&qp->rq.lock, flags);
1817         return err;
1818 }
1819
1820 int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1821                           struct ib_send_wr **bad_wr)
1822 {
1823         struct mthca_dev *dev = to_mdev(ibqp->device);
1824         struct mthca_qp *qp = to_mqp(ibqp);
1825         __be32 doorbell[2];
1826         void *wqe;
1827         void *prev_wqe;
1828         unsigned long flags;
1829         int err = 0;
1830         int nreq;
1831         int i;
1832         int size;
1833         int size0 = 0;
1834         u32 f0 = 0;
1835         int ind;
1836         u8 op0 = 0;
1837
1838         spin_lock_irqsave(&qp->sq.lock, flags);
1839
1840         /* XXX check that state is OK to post send */
1841
1842         ind = qp->sq.head & (qp->sq.max - 1);
1843
1844         for (nreq = 0; wr; ++nreq, wr = wr->next) {
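                 /*
                  * The request count in doorbell[0] is an 8-bit field,
                  * so flush a full batch of
                  * MTHCA_ARBEL_MAX_WQES_PER_SEND_DB requests whenever we
                  * hit that limit.
                  */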
1845                 if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
1846                         nreq = 0;
1847
1848                         doorbell[0] = cpu_to_be32((MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
1849                                                   ((qp->sq.head & 0xffff) << 8) |
1850                                                   f0 | op0);
1851                         doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);
1852
1853                         qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
1854                         size0 = 0;
1855
1856                         /*
1857                          * Make sure that descriptors are written before
1858                          * doorbell record.
1859                          */
1860                         wmb();
1861                         *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);
1862
1863                         /*
1864                          * Make sure doorbell record is written before we
1865                          * write MMIO send doorbell.
1866                          */
1867                         wmb();
1868                         mthca_write64(doorbell,
1869                                       dev->kar + MTHCA_SEND_DOORBELL,
1870                                       MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1871                 }
1872
1873                 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1874                         mthca_err(dev, "SQ %06x full (%u head, %u tail,"
1875                                         " %d max, %d nreq)\n", qp->qpn,
1876                                         qp->sq.head, qp->sq.tail,
1877                                         qp->sq.max, nreq);
1878                         err = -ENOMEM;
1879                         *bad_wr = wr;
1880                         goto out;
1881                 }
1882
1883                 wqe = get_send_wqe(qp, ind);
1884                 prev_wqe = qp->sq.last;
1885                 qp->sq.last = wqe;
1886
1887                 ((struct mthca_next_seg *) wqe)->flags =
1888                         ((wr->send_flags & IB_SEND_SIGNALED) ?
1889                          cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
1890                         ((wr->send_flags & IB_SEND_SOLICITED) ?
1891                          cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
1892                         cpu_to_be32(1);
1893                 if (wr->opcode == IB_WR_SEND_WITH_IMM ||
1894                     wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
1895                         ((struct mthca_next_seg *) wqe)->imm = wr->imm_data;
1896
1897                 wqe += sizeof (struct mthca_next_seg);
1898                 size = sizeof (struct mthca_next_seg) / 16;
1899
1900                 switch (qp->transport) {
1901                 case RC:
1902                         switch (wr->opcode) {
1903                         case IB_WR_ATOMIC_CMP_AND_SWP:
1904                         case IB_WR_ATOMIC_FETCH_AND_ADD:
1905                                 ((struct mthca_raddr_seg *) wqe)->raddr =
1906                                         cpu_to_be64(wr->wr.atomic.remote_addr);
1907                                 ((struct mthca_raddr_seg *) wqe)->rkey =
1908                                         cpu_to_be32(wr->wr.atomic.rkey);
1909                                 ((struct mthca_raddr_seg *) wqe)->reserved = 0;
1910
1911                                 wqe += sizeof (struct mthca_raddr_seg);
1912
1913                                 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
1914                                         ((struct mthca_atomic_seg *) wqe)->swap_add =
1915                                                 cpu_to_be64(wr->wr.atomic.swap);
1916                                         ((struct mthca_atomic_seg *) wqe)->compare =
1917                                                 cpu_to_be64(wr->wr.atomic.compare_add);
1918                                 } else {
1919                                         ((struct mthca_atomic_seg *) wqe)->swap_add =
1920                                                 cpu_to_be64(wr->wr.atomic.compare_add);
1921                                         ((struct mthca_atomic_seg *) wqe)->compare = 0;
1922                                 }
1923
1924                                 wqe += sizeof (struct mthca_atomic_seg);
1925                                 size += (sizeof (struct mthca_raddr_seg) +
1926                                          sizeof (struct mthca_atomic_seg)) / 16;
1927                                 break;
1928
1929                         case IB_WR_RDMA_READ:
1930                         case IB_WR_RDMA_WRITE:
1931                         case IB_WR_RDMA_WRITE_WITH_IMM:
1932                                 ((struct mthca_raddr_seg *) wqe)->raddr =
1933                                         cpu_to_be64(wr->wr.rdma.remote_addr);
1934                                 ((struct mthca_raddr_seg *) wqe)->rkey =
1935                                         cpu_to_be32(wr->wr.rdma.rkey);
1936                                 ((struct mthca_raddr_seg *) wqe)->reserved = 0;
1937                                 wqe += sizeof (struct mthca_raddr_seg);
1938                                 size += sizeof (struct mthca_raddr_seg) / 16;
1939                                 break;
1940
1941                         default:
1942                                 /* No extra segments required for sends */
1943                                 break;
1944                         }
1945
1946                         break;
1947
1948                 case UC:
1949                         switch (wr->opcode) {
1950                         case IB_WR_RDMA_WRITE:
1951                         case IB_WR_RDMA_WRITE_WITH_IMM:
1952                                 ((struct mthca_raddr_seg *) wqe)->raddr =
1953                                         cpu_to_be64(wr->wr.rdma.remote_addr);
1954                                 ((struct mthca_raddr_seg *) wqe)->rkey =
1955                                         cpu_to_be32(wr->wr.rdma.rkey);
1956                                 ((struct mthca_raddr_seg *) wqe)->reserved = 0;
1957                                 wqe += sizeof (struct mthca_raddr_seg);
1958                                 size += sizeof (struct mthca_raddr_seg) / 16;
1959                                 break;
1960
1961                         default:
1962                                 /* No extra segments required for sends */
1963                                 break;
1964                         }
1965
1966                         break;
1967
1968                 case UD:
1969                         memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
1970                                to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
1971                         ((struct mthca_arbel_ud_seg *) wqe)->dqpn =
1972                                 cpu_to_be32(wr->wr.ud.remote_qpn);
1973                         ((struct mthca_arbel_ud_seg *) wqe)->qkey =
1974                                 cpu_to_be32(wr->wr.ud.remote_qkey);
1975
1976                         wqe += sizeof (struct mthca_arbel_ud_seg);
1977                         size += sizeof (struct mthca_arbel_ud_seg) / 16;
1978                         break;
1979
1980                 case MLX:
1981                         err = build_mlx_header(dev, to_msqp(qp), ind, wr,
1982                                                wqe - sizeof (struct mthca_next_seg),
1983                                                wqe);
1984                         if (err) {
1985                                 *bad_wr = wr;
1986                                 goto out;
1987                         }
1988                         wqe += sizeof (struct mthca_data_seg);
1989                         size += sizeof (struct mthca_data_seg) / 16;
1990                         break;
1991                 }
1992
1993                 if (wr->num_sge > qp->sq.max_gs) {
1994                         mthca_err(dev, "too many gathers\n");
1995                         err = -EINVAL;
1996                         *bad_wr = wr;
1997                         goto out;
1998                 }
1999
2000                 for (i = 0; i < wr->num_sge; ++i) {
2001                         ((struct mthca_data_seg *) wqe)->byte_count =
2002                                 cpu_to_be32(wr->sg_list[i].length);
2003                         ((struct mthca_data_seg *) wqe)->lkey =
2004                                 cpu_to_be32(wr->sg_list[i].lkey);
2005                         ((struct mthca_data_seg *) wqe)->addr =
2006                                 cpu_to_be64(wr->sg_list[i].addr);
2007                         wqe += sizeof (struct mthca_data_seg);
2008                         size += sizeof (struct mthca_data_seg) / 16;
2009                 }
2010
2011                 /* Add one more inline data segment for ICRC */
2012                 if (qp->transport == MLX) {
2013                         ((struct mthca_data_seg *) wqe)->byte_count =
2014                                 cpu_to_be32((1 << 31) | 4);
2015                         ((u32 *) wqe)[1] = 0;
2016                         wqe += sizeof (struct mthca_data_seg);
2017                         size += sizeof (struct mthca_data_seg) / 16;
2018                 }
2019
2020                 qp->wrid[ind + qp->rq.max] = wr->wr_id;
2021
2022                 if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
2023                         mthca_err(dev, "opcode invalid\n");
2024                         err = -EINVAL;
2025                         *bad_wr = wr;
2026                         goto out;
2027                 }
2028
2029                 ((struct mthca_next_seg *) prev_wqe)->nda_op =
2030                         cpu_to_be32(((ind << qp->sq.wqe_shift) +
2031                                      qp->send_wqe_offset) |
2032                                     mthca_opcode[wr->opcode]);
2033                 wmb();
2034                 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
2035                         cpu_to_be32(MTHCA_NEXT_DBD | size);
2036
2037                 if (!size0) {
2038                         size0 = size;
2039                         op0   = mthca_opcode[wr->opcode];
2040                 }
2041
2042                 ++ind;
2043                 if (unlikely(ind >= qp->sq.max))
2044                         ind -= qp->sq.max;
2045         }
2046
2047 out:
2048         if (likely(nreq)) {
2049                 doorbell[0] = cpu_to_be32((nreq << 24)                  |
2050                                           ((qp->sq.head & 0xffff) << 8) |
2051                                           f0 | op0);
2052                 doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);
2053
2054                 qp->sq.head += nreq;
2055
2056                 /*
2057                  * Make sure that descriptors are written before
2058                  * doorbell record.
2059                  */
2060                 wmb();
2061                 *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);
2062
2063                 /*
2064                  * Make sure doorbell record is written before we
2065                  * write MMIO send doorbell.
2066                  */
2067                 wmb();
2068                 mthca_write64(doorbell,
2069                               dev->kar + MTHCA_SEND_DOORBELL,
2070                               MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
2071         }
2072
2073         spin_unlock_irqrestore(&qp->sq.lock, flags);
2074         return err;
2075 }
2076
2077 int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2078                              struct ib_recv_wr **bad_wr)
2079 {
2080         struct mthca_dev *dev = to_mdev(ibqp->device);
2081         struct mthca_qp *qp = to_mqp(ibqp);
2082         unsigned long flags;
2083         int err = 0;
2084         int nreq;
2085         int ind;
2086         int i;
2087         void *wqe;
2088
2089         spin_lock_irqsave(&qp->rq.lock, flags);
2090
2091         /* XXX check that state is OK to post receive */
2092
2093         ind = qp->rq.head & (qp->rq.max - 1);
2094
2095         for (nreq = 0; wr; ++nreq, wr = wr->next) {
2096                 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
2097                         mthca_err(dev, "RQ %06x full (%u head, %u tail,"
2098                                         " %d max, %d nreq)\n", qp->qpn,
2099                                         qp->rq.head, qp->rq.tail,
2100                                         qp->rq.max, nreq);
2101                         err = -ENOMEM;
2102                         *bad_wr = wr;
2103                         goto out;
2104                 }
2105
2106                 wqe = get_recv_wqe(qp, ind);
2107
2108                 ((struct mthca_next_seg *) wqe)->flags = 0;
2109
2110                 wqe += sizeof (struct mthca_next_seg);
2111
2112                 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
2113                         err = -EINVAL;
2114                         *bad_wr = wr;
2115                         goto out;
2116                 }
2117
2118                 for (i = 0; i < wr->num_sge; ++i) {
2119                         ((struct mthca_data_seg *) wqe)->byte_count =
2120                                 cpu_to_be32(wr->sg_list[i].length);
2121                         ((struct mthca_data_seg *) wqe)->lkey =
2122                                 cpu_to_be32(wr->sg_list[i].lkey);
2123                         ((struct mthca_data_seg *) wqe)->addr =
2124                                 cpu_to_be64(wr->sg_list[i].addr);
2125                         wqe += sizeof (struct mthca_data_seg);
2126                 }
2127
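                 /*
                  * Mem-free receive WQEs carry no size field, so when
                  * fewer than max_gs scatter entries are used, terminate
                  * the list with a zero-length segment marked with an
                  * invalid lkey.
                  */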
2128                 if (i < qp->rq.max_gs) {
2129                         ((struct mthca_data_seg *) wqe)->byte_count = 0;
2130                         ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
2131                         ((struct mthca_data_seg *) wqe)->addr = 0;
2132                 }
2133
2134                 qp->wrid[ind] = wr->wr_id;
2135
2136                 ++ind;
2137                 if (unlikely(ind >= qp->rq.max))
2138                         ind -= qp->rq.max;
2139         }
2140 out:
2141         if (likely(nreq)) {
2142                 qp->rq.head += nreq;
2143
2144                 /*
2145                  * Make sure that descriptors are written before
2146                  * doorbell record.
2147                  */
2148                 wmb();
2149                 *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
2150         }
2151
2152         spin_unlock_irqrestore(&qp->rq.lock, flags);
2153         return err;
2154 }
2155
2156 int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
2157                        int index, int *dbd, __be32 *new_wqe)
2158 {
2159         struct mthca_next_seg *next;
2160
2161         /*
2162          * For SRQs, all WQEs generate a CQE, so we're always at the
2163          * end of the doorbell chain.
2164          */
2165         if (qp->ibqp.srq) {
2166                 *new_wqe = 0;
2167                 return 0;
2168         }
2169
2170         if (is_send)
2171                 next = get_send_wqe(qp, index);
2172         else
2173                 next = get_recv_wqe(qp, index);
2174
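         /*
          * Report whether this WQE owned a doorbell (DBD bit); if the
          * size field of its next-WQE word is nonzero the chain
          * continues, so hand back the 64-byte-aligned next address
          * combined with that 6-bit size for CQ error handling to
          * repair the chain.
          */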
2175         *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
2176         if (next->ee_nds & cpu_to_be32(0x3f))
2177                 *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
2178                         (next->ee_nds & cpu_to_be32(0x3f));
2179         else
2180                 *new_wqe = 0;
2181
2182         return 0;
2183 }
2184
2185 int __devinit mthca_init_qp_table(struct mthca_dev *dev)
2186 {
2187         int err;
2188         u8 status;
2189         int i;
2190
2191         spin_lock_init(&dev->qp_table.lock);
2192
2193         /*
2194          * We reserve 2 extra QPs per port for the special QPs.  The
2195          * special QP for port 1 has to be even, so round up.
2196          */
2197         dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
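         /*
          * QP numbers are 24 bits wide, hence the (1 << 24) - 1 mask;
          * the first sqp_start + MTHCA_MAX_PORTS * 2 QPNs (firmware-
          * reserved QPs plus the special QPs) are kept out of the
          * allocator.
          */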
2198         err = mthca_alloc_init(&dev->qp_table.alloc,
2199                                dev->limits.num_qps,
2200                                (1 << 24) - 1,
2201                                dev->qp_table.sqp_start +
2202                                MTHCA_MAX_PORTS * 2);
2203         if (err)
2204                 return err;
2205
2206         err = mthca_array_init(&dev->qp_table.qp,
2207                                dev->limits.num_qps);
2208         if (err) {
2209                 mthca_alloc_cleanup(&dev->qp_table.alloc);
2210                 return err;
2211         }
2212
2213         for (i = 0; i < 2; ++i) {
2214                 err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
2215                                             dev->qp_table.sqp_start + i * 2,
2216                                             &status);
2217                 if (err)
2218                         goto err_out;
2219                 if (status) {
2220                         mthca_warn(dev, "CONF_SPECIAL_QP returned "
2221                                    "status %02x, aborting.\n",
2222                                    status);
2223                         err = -EINVAL;
2224                         goto err_out;
2225                 }
2226         }
2227         return 0;
2228
2229  err_out:
2230         for (i = 0; i < 2; ++i)
2231                 mthca_CONF_SPECIAL_QP(dev, i, 0, &status);
2232
2233         mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
2234         mthca_alloc_cleanup(&dev->qp_table.alloc);
2235
2236         return err;
2237 }
2238
2239 void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
2240 {
2241         int i;
2242         u8 status;
2243
2244         for (i = 0; i < 2; ++i)
2245                 mthca_CONF_SPECIAL_QP(dev, i, 0, &status);
2246
2247         mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
2248         mthca_alloc_cleanup(&dev->qp_table.alloc);
2249 }