/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
 */
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"
enum {
        MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
        MTHCA_ACK_REQ_FREQ       = 10,
        MTHCA_FLIGHT_LIMIT       = 9,
        MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
        MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
        MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};
enum {
        MTHCA_QP_STATE_RST      = 0,
        MTHCA_QP_STATE_INIT     = 1,
        MTHCA_QP_STATE_RTR      = 2,
        MTHCA_QP_STATE_RTS      = 3,
        MTHCA_QP_STATE_SQE      = 4,
        MTHCA_QP_STATE_SQD      = 5,
        MTHCA_QP_STATE_ERR      = 6,
        MTHCA_QP_STATE_DRAINING = 7
};
enum {
        MTHCA_QP_ST_RC  = 0x0,
        MTHCA_QP_ST_UC  = 0x1,
        MTHCA_QP_ST_RD  = 0x2,
        MTHCA_QP_ST_UD  = 0x3,
        MTHCA_QP_ST_MLX = 0x7
};

enum {
        MTHCA_QP_PM_MIGRATED = 0x3,
        MTHCA_QP_PM_ARMED    = 0x0,
        MTHCA_QP_PM_REARM    = 0x1
};
enum {
        /* qp_context flags */
        MTHCA_QP_BIT_DE  = 1 <<  8,

        /* params1 */
        MTHCA_QP_BIT_SRE = 1 << 15,
        MTHCA_QP_BIT_SWE = 1 << 14,
        MTHCA_QP_BIT_SAE = 1 << 13,
        MTHCA_QP_BIT_SIC = 1 <<  4,
        MTHCA_QP_BIT_SSC = 1 <<  3,

        /* params2 */
        MTHCA_QP_BIT_RRE = 1 << 15,
        MTHCA_QP_BIT_RWE = 1 << 14,
        MTHCA_QP_BIT_RAE = 1 << 13,
        MTHCA_QP_BIT_RIC = 1 <<  4,
        MTHCA_QP_BIT_RSC = 1 <<  3
};
struct mthca_qp_path {
        __be32 port_pkey;
        u8     rnr_retry;
        u8     g_mylmc;
        __be16 rlid;
        u8     ackto;
        u8     mgid_index;
        u8     static_rate;
        u8     hop_limit;
        __be32 sl_tclass_flowlabel;
        u8     rgid[16];
} __attribute__((packed));
struct mthca_qp_context {
        __be32 flags;
        __be32 tavor_sched_queue; /* Reserved on Arbel */
        u8     mtu_msgmax;
        u8     rq_size_stride;    /* Reserved on Tavor */
        u8     sq_size_stride;    /* Reserved on Tavor */
        u8     rlkey_arbel_sched_queue; /* Reserved on Tavor */
        __be32 usr_page;
        __be32 local_qpn;
        __be32 remote_qpn;
        u32    reserved1[2];
        struct mthca_qp_path pri_path;
        struct mthca_qp_path alt_path;
        __be32 rdd;
        __be32 pd;
        __be32 wqe_base;
        __be32 wqe_lkey;
        __be32 params1;
        __be32 reserved2;
        __be32 next_send_psn;
        __be32 cqn_snd;
        __be32 snd_wqe_base_l;    /* Next send WQE on Tavor */
        __be32 snd_db_index;      /* (debugging only entries) */
        __be32 last_acked_psn;
        __be32 ssn;
        __be32 params2;
        __be32 rnr_nextrecvpsn;
        __be32 ra_buff_indx;
        __be32 cqn_rcv;
        __be32 rcv_wqe_base_l;    /* Next recv WQE on Tavor */
        __be32 rcv_db_index;      /* (debugging only entries) */
        __be32 qkey;
        __be32 srqn;
        __be32 rmsn;
        __be16 rq_wqe_counter;    /* reserved on Tavor */
        __be16 sq_wqe_counter;    /* reserved on Tavor */
        u32    reserved3[18];
} __attribute__((packed));
struct mthca_qp_param {
        __be32 opt_param_mask;
        u32    reserved1;
        struct mthca_qp_context context;
        u32    reserved2[62];
} __attribute__((packed));
enum {
        MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
        MTHCA_QP_OPTPAR_RRE               = 1 << 1,
        MTHCA_QP_OPTPAR_RAE               = 1 << 2,
        MTHCA_QP_OPTPAR_RWE               = 1 << 3,
        MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
        MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
        MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
        MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
        MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
        MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
        MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
        MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
        MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
        MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
        MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
        MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
        MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};
static const u8 mthca_opcode[] = {
        [IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
        [IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
        [IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
        [IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
        [IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
        [IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
        [IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};
static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
        return qp->qpn >= dev->qp_table.sqp_start &&
                qp->qpn <= dev->qp_table.sqp_start + 3;
}
static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
        return qp->qpn >= dev->qp_table.sqp_start &&
                qp->qpn <= dev->qp_table.sqp_start + 1;
}
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
        if (qp->is_direct)
                return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
        else
                return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
                        ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}
static void *get_send_wqe(struct mthca_qp *qp, int n)
{
        if (qp->is_direct)
                return qp->queue.direct.buf + qp->send_wqe_offset +
                        (n << qp->sq.wqe_shift);
        else
                return qp->queue.page_list[(qp->send_wqe_offset +
                                            (n << qp->sq.wqe_shift)) >>
                                           PAGE_SHIFT].buf +
                        ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
                         (PAGE_SIZE - 1));
}
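/*
 * Worked example of the addressing above (illustrative values,
 * assuming 4K pages): with qp->sq.wqe_shift == 6 (64-byte WQEs),
 * send_wqe_offset == 16384 and n == 5, the byte offset is
 * 16384 + (5 << 6) == 16704, so the WQE lives at
 * page_list[16704 >> PAGE_SHIFT].buf + (16704 & (PAGE_SIZE - 1)),
 * i.e. page 4, offset 320.  A direct buffer is just buf + 16704.
 */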
static void mthca_wq_init(struct mthca_wq *wq)
{
        spin_lock_init(&wq->lock);
        wq->next_ind  = 0;
        wq->last_comp = wq->max - 1;
        wq->head      = 0;
        wq->tail      = 0;
}
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
                    enum ib_event_type event_type)
{
        struct mthca_qp *qp;
        struct ib_event event;

        spin_lock(&dev->qp_table.lock);
        qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
        if (qp)
                ++qp->refcount;
        spin_unlock(&dev->qp_table.lock);

        if (!qp) {
                mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
                return;
        }

        if (event_type == IB_EVENT_PATH_MIG)
                qp->port = qp->alt_port;

        event.device      = &dev->ib_dev;
        event.event       = event_type;
        event.element.qp  = &qp->ibqp;
        if (qp->ibqp.event_handler)
                qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

        spin_lock(&dev->qp_table.lock);
        if (!--qp->refcount)
                wake_up(&qp->wait);
        spin_unlock(&dev->qp_table.lock);
}
static int to_mthca_state(enum ib_qp_state ib_state)
{
        switch (ib_state) {
        case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
        case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
        case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
        case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
        case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
        case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
        case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
        default:           return -1;
        }
}
enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };

static int to_mthca_st(int transport)
{
        switch (transport) {
        case RC:  return MTHCA_QP_ST_RC;
        case UC:  return MTHCA_QP_ST_UC;
        case UD:  return MTHCA_QP_ST_UD;
        case RD:  return MTHCA_QP_ST_RD;
        case MLX: return MTHCA_QP_ST_MLX;
        default:  return -1;
        }
}
static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
                        int attr_mask)
{
        if (attr_mask & IB_QP_PKEY_INDEX)
                sqp->pkey_index = attr->pkey_index;
        if (attr_mask & IB_QP_QKEY)
                sqp->qkey = attr->qkey;
        if (attr_mask & IB_QP_SQ_PSN)
                sqp->send_psn = attr->sq_psn;
}
static void init_port(struct mthca_dev *dev, int port)
{
        int err;
        u8 status;
        struct mthca_init_ib_param param;

        memset(&param, 0, sizeof param);

        param.port_width = dev->limits.port_width_cap;
        param.vl_cap     = dev->limits.vl_cap;
        param.mtu_cap    = dev->limits.mtu_cap;
        param.gid_cap    = dev->limits.gid_table_len;
        param.pkey_cap   = dev->limits.pkey_table_len;

        err = mthca_INIT_IB(dev, &param, port, &status);
        if (err)
                mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
        if (status)
                mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}
static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
                                  int attr_mask)
{
        u8 dest_rd_atomic;
        u32 access_flags;
        u32 hw_access_flags = 0;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                dest_rd_atomic = attr->max_dest_rd_atomic;
        else
                dest_rd_atomic = qp->resp_depth;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                access_flags = attr->qp_access_flags;
        else
                access_flags = qp->atomic_rd_en;

        if (!dest_rd_atomic)
                access_flags &= IB_ACCESS_REMOTE_WRITE;

        if (access_flags & IB_ACCESS_REMOTE_READ)
                hw_access_flags |= MTHCA_QP_BIT_RRE;
        if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
                hw_access_flags |= MTHCA_QP_BIT_RAE;
        if (access_flags & IB_ACCESS_REMOTE_WRITE)
                hw_access_flags |= MTHCA_QP_BIT_RWE;

        return cpu_to_be32(hw_access_flags);
}
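/*
 * Example of the masking above (illustrative): if the caller asks for
 * IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE but max_dest_rd_atomic
 * is 0, the QP has no responder resources to service RDMA reads, so
 * access_flags is masked down to IB_ACCESS_REMOTE_WRITE and only
 * MTHCA_QP_BIT_RWE ends up set in the returned hardware flags.
 */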
static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
{
        switch (mthca_state) {
        case MTHCA_QP_STATE_RST:      return IB_QPS_RESET;
        case MTHCA_QP_STATE_INIT:     return IB_QPS_INIT;
        case MTHCA_QP_STATE_RTR:      return IB_QPS_RTR;
        case MTHCA_QP_STATE_RTS:      return IB_QPS_RTS;
        case MTHCA_QP_STATE_DRAINING:
        case MTHCA_QP_STATE_SQD:      return IB_QPS_SQD;
        case MTHCA_QP_STATE_SQE:      return IB_QPS_SQE;
        case MTHCA_QP_STATE_ERR:      return IB_QPS_ERR;
        default:                      return -1;
        }
}
static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
{
        switch (mthca_mig_state) {
        case 0:  return IB_MIG_ARMED;
        case 1:  return IB_MIG_REARM;
        case 3:  return IB_MIG_MIGRATED;
        default: return -1;
        }
}
static int to_ib_qp_access_flags(int mthca_flags)
{
        int ib_flags = 0;

        if (mthca_flags & MTHCA_QP_BIT_RRE)
                ib_flags |= IB_ACCESS_REMOTE_READ;
        if (mthca_flags & MTHCA_QP_BIT_RWE)
                ib_flags |= IB_ACCESS_REMOTE_WRITE;
        if (mthca_flags & MTHCA_QP_BIT_RAE)
                ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

        return ib_flags;
}
static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
                          struct mthca_qp_path *path)
{
        memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
        ib_ah_attr->port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;

        if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
                return;

        ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
        ib_ah_attr->sl            = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
        ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
        ib_ah_attr->static_rate   = mthca_rate_to_ib(dev,
                                                     path->static_rate & 0x7,
                                                     ib_ah_attr->port_num);
        ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
        if (ib_ah_attr->ah_flags) {
                ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
                ib_ah_attr->grh.hop_limit  = path->hop_limit;
                ib_ah_attr->grh.traffic_class =
                        (be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
                ib_ah_attr->grh.flow_label =
                        be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
                memcpy(ib_ah_attr->grh.dgid.raw,
                       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
        }
}
int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
                   struct ib_qp_init_attr *qp_init_attr)
{
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
        int err;
        struct mthca_mailbox *mailbox;
        struct mthca_qp_param *qp_param;
        struct mthca_qp_context *context;
        int mthca_state;
        u8 status;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
        if (err)
                goto out;
        if (status) {
                mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
                err = -EINVAL;
                goto out;
        }

        qp_param    = mailbox->buf;
        context     = &qp_param->context;
        mthca_state = be32_to_cpu(context->flags) >> 28;

        qp_attr->qp_state        = to_ib_qp_state(mthca_state);
        qp_attr->cur_qp_state    = qp_attr->qp_state;
        qp_attr->path_mtu        = context->mtu_msgmax >> 5;
        qp_attr->path_mig_state  =
                to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
        qp_attr->qkey            = be32_to_cpu(context->qkey);
        qp_attr->rq_psn          = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
        qp_attr->sq_psn          = be32_to_cpu(context->next_send_psn) & 0xffffff;
        qp_attr->dest_qp_num     = be32_to_cpu(context->remote_qpn) & 0xffffff;
        qp_attr->qp_access_flags =
                to_ib_qp_access_flags(be32_to_cpu(context->params2));
        qp_attr->cap.max_send_wr     = qp->sq.max;
        qp_attr->cap.max_recv_wr     = qp->rq.max;
        qp_attr->cap.max_send_sge    = qp->sq.max_gs;
        qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
        qp_attr->cap.max_inline_data = qp->max_inline_data;

        if (qp->transport == RC || qp->transport == UC) {
                to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
                to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
        }

        qp_attr->pkey_index     = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
        qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f;

        /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
        qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;

        qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

        qp_attr->max_dest_rd_atomic =
                1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);

        qp_attr->min_rnr_timer =
                (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
        qp_attr->port_num      = qp_attr->ah_attr.port_num;
        qp_attr->timeout       = context->pri_path.ackto >> 3;
        qp_attr->retry_cnt     = (be32_to_cpu(context->params1) >> 16) & 0x7;
        qp_attr->rnr_retry     = context->pri_path.rnr_retry >> 5;
        qp_attr->alt_port_num  = qp_attr->alt_ah_attr.port_num;
        qp_attr->alt_timeout   = context->alt_path.ackto >> 3;
        qp_init_attr->cap      = qp_attr->cap;

out:
        mthca_free_mailbox(dev, mailbox);
        return err;
}
static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
                          struct mthca_qp_path *path, u8 port)
{
        path->g_mylmc     = ah->src_path_bits & 0x7f;
        path->rlid        = cpu_to_be16(ah->dlid);
        path->static_rate = mthca_get_rate(dev, ah->static_rate, port);

        if (ah->ah_flags & IB_AH_GRH) {
                if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
                        mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
                                  ah->grh.sgid_index, dev->limits.gid_table_len-1);
                        return -1;
                }

                path->g_mylmc   |= 1 << 7;
                path->mgid_index = ah->grh.sgid_index;
                path->hop_limit  = ah->grh.hop_limit;
                path->sl_tclass_flowlabel =
                        cpu_to_be32((ah->sl << 28)                |
                                    (ah->grh.traffic_class << 20) |
                                    (ah->grh.flow_label));
                memcpy(path->rgid, ah->grh.dgid.raw, 16);
        } else
                path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);

        return 0;
}
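/*
 * sl_tclass_flowlabel packs three fields into one big-endian word:
 * bits 31:28 carry the SL, bits 27:20 the traffic class and bits 19:0
 * the flow label.  Worked example (illustrative values): sl = 3,
 * traffic_class = 0x22, flow_label = 0x12345 gives
 * (3 << 28) | (0x22 << 20) | 0x12345 == 0x32212345.
 */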
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
{
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        struct mthca_mailbox *mailbox;
        struct mthca_qp_param *qp_param;
        struct mthca_qp_context *qp_context;
        u32 sqd_event = 0;
        u8 status;
        int err;

        if (attr_mask & IB_QP_CUR_STATE) {
                cur_state = attr->cur_qp_state;
        } else {
                spin_lock_irq(&qp->sq.lock);
                spin_lock(&qp->rq.lock);
                cur_state = qp->state;
                spin_unlock(&qp->rq.lock);
                spin_unlock_irq(&qp->sq.lock);
        }

        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
                mthca_dbg(dev, "Bad QP transition (transport %d) "
                          "%d->%d with attr 0x%08x\n",
                          qp->transport, cur_state, new_state,
                          attr_mask);
                return -EINVAL;
        }

        if ((attr_mask & IB_QP_PKEY_INDEX) &&
            attr->pkey_index >= dev->limits.pkey_table_len) {
                mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
                          attr->pkey_index, dev->limits.pkey_table_len-1);
                return -EINVAL;
        }

        if ((attr_mask & IB_QP_PORT) &&
            (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
                mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
                return -EINVAL;
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
            attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
                mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
                          attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
                return -EINVAL;
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
            attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
                mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
                          attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
                return -EINVAL;
        }

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        qp_param = mailbox->buf;
        qp_context = &qp_param->context;
        memset(qp_param, 0, sizeof *qp_param);

        qp_context->flags = cpu_to_be32((to_mthca_state(new_state) << 28) |
                                        (to_mthca_st(qp->transport) << 16));
        qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE);
        if (!(attr_mask & IB_QP_PATH_MIG_STATE))
                qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
        else {
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
                switch (attr->path_mig_state) {
                case IB_MIG_MIGRATED:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
                        break;
                case IB_MIG_REARM:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
                        break;
                case IB_MIG_ARMED:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
                        break;
                }
        }

        /* leave tavor_sched_queue as 0 */

        if (qp->transport == MLX || qp->transport == UD)
                qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
        else if (attr_mask & IB_QP_PATH_MTU) {
                if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
                        mthca_dbg(dev, "path MTU (%u) is invalid\n",
                                  attr->path_mtu);
                        err = -EINVAL;
                        goto out;
                }
                qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
        }

        if (mthca_is_memfree(dev)) {
                if (qp->rq.max)
                        qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
                qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

                if (qp->sq.max)
                        qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
                qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
        }

        /* leave arbel_sched_queue as 0 */

        if (qp->ibqp.uobject)
                qp_context->usr_page =
                        cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
        else
                qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
        qp_context->local_qpn = cpu_to_be32(qp->qpn);
        if (attr_mask & IB_QP_DEST_QPN)
                qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);

        if (qp->transport == MLX)
                qp_context->pri_path.port_pkey |=
                        cpu_to_be32(qp->port << 24);
        else {
                if (attr_mask & IB_QP_PORT) {
                        qp_context->pri_path.port_pkey |=
                                cpu_to_be32(attr->port_num << 24);
                        qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
                }
        }

        if (attr_mask & IB_QP_PKEY_INDEX) {
                qp_context->pri_path.port_pkey |=
                        cpu_to_be32(attr->pkey_index);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
        }

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
                        attr->rnr_retry << 5;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
                                                        MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
        }

        if (attr_mask & IB_QP_AV) {
                if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
                                   attr_mask & IB_QP_PORT ? attr->port_num : qp->port)) {
                        err = -EINVAL;
                        goto out;
                }

                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
        }

        if (attr_mask & IB_QP_TIMEOUT) {
                qp_context->pri_path.ackto = attr->timeout << 3;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
                        mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
                                  attr->alt_pkey_index, dev->limits.pkey_table_len-1);
                        err = -EINVAL;
                        goto out;
                }

                if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
                        mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
                                  attr->alt_port_num);
                        err = -EINVAL;
                        goto out;
                }

                if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
                                   attr->alt_ah_attr.port_num)) {
                        err = -EINVAL;
                        goto out;
                }

                qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
                                                              attr->alt_port_num << 24);
                qp_context->alt_path.ackto = attr->alt_timeout << 3;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
        }

        /* leave rdd as 0 */
        qp_context->pd       = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
        /* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
        qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey);
        qp_context->params1  = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
                                           (MTHCA_FLIGHT_LIMIT << 24) |
                                           MTHCA_QP_BIT_SWE);
        if (qp->sq_policy == IB_SIGNAL_ALL_WR)
                qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
        if (attr_mask & IB_QP_RETRY_CNT) {
                qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
                if (attr->max_rd_atomic) {
                        qp_context->params1 |=
                                cpu_to_be32(MTHCA_QP_BIT_SRE |
                                            MTHCA_QP_BIT_SAE);
                        qp_context->params1 |=
                                cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
                }
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
        }

        if (attr_mask & IB_QP_SQ_PSN)
                qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
        qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

        if (mthca_is_memfree(dev)) {
                qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
                qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
                if (attr->max_dest_rd_atomic)
                        qp_context->params2 |=
                                cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
        }

        if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
                qp_context->params2      |= get_hw_access_flags(qp, attr, attr_mask);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
                                                        MTHCA_QP_OPTPAR_RRE |
                                                        MTHCA_QP_OPTPAR_RAE);
        }

        qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

        if (ibqp->srq)
                qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

        if (attr_mask & IB_QP_MIN_RNR_TIMER) {
                qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
        }
        if (attr_mask & IB_QP_RQ_PSN)
                qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

        qp_context->ra_buff_indx =
                cpu_to_be32(dev->qp_table.rdb_base +
                            ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
                             dev->qp_table.rdb_shift));

        qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

        if (mthca_is_memfree(dev))
                qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);

        if (attr_mask & IB_QP_QKEY) {
                qp_context->qkey = cpu_to_be32(attr->qkey);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
        }

        if (ibqp->srq)
                qp_context->srqn = cpu_to_be32(1 << 24 |
                                               to_msrq(ibqp->srq)->srqn);

        if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
            attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY &&
            attr->en_sqd_async_notify)
                sqd_event = 1 << 31;

        err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
                              mailbox, sqd_event, &status);
        if (err)
                goto out;
        if (status) {
                mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
                           cur_state, new_state, status);
                err = -EINVAL;
                goto out;
        }

        qp->state = new_state;
        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->atomic_rd_en = attr->qp_access_flags;
        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->resp_depth = attr->max_dest_rd_atomic;
        if (attr_mask & IB_QP_PORT)
                qp->port = attr->port_num;
        if (attr_mask & IB_QP_ALT_PATH)
                qp->alt_port = attr->alt_port_num;

        if (is_sqp(dev, qp))
                store_attrs(to_msqp(qp), attr, attr_mask);

        /*
         * If we moved QP0 to RTR, bring the IB link up; if we moved
         * QP0 to RESET or ERROR, bring the link back down.
         */
        if (is_qp0(dev, qp)) {
                if (cur_state != IB_QPS_RTR &&
                    new_state == IB_QPS_RTR)
                        init_port(dev, qp->port);

                if (cur_state != IB_QPS_RESET &&
                    cur_state != IB_QPS_ERR &&
                    (new_state == IB_QPS_RESET ||
                     new_state == IB_QPS_ERR))
                        mthca_CLOSE_IB(dev, qp->port, &status);
        }

        /*
         * If we moved a kernel QP to RESET, clean up all old CQ
         * entries and reinitialize the QP.
         */
        if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
                mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
                               qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
                        mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
                                       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

                mthca_wq_init(&qp->sq);
                qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

                mthca_wq_init(&qp->rq);
                qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

                if (mthca_is_memfree(dev)) {
                        *qp->sq.db = 0;
                        *qp->rq.db = 0;
                }
        }

out:
        mthca_free_mailbox(dev, mailbox);
        return err;
}
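/*
 * Worked example for the ra_buff_indx computation above (values are
 * illustrative): with rdb_base == 0, rdb_shift == 2 and a QP whose
 * masked QPN is 5, the QP's RDB area starts at byte offset
 * (5 * MTHCA_RDB_ENTRY_SIZE) << 2, i.e. 20 entries into the table,
 * leaving 1 << rdb_shift == 4 entries per QP.  That is also why
 * max_dest_rd_atomic is capped at 1 << rdb_shift in the checks above.
 */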
static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
        /*
         * Calculate the maximum size of WQE s/g segments, excluding
         * the next segment and other non-data segments.
         */
        int max_data_size = desc_sz - sizeof (struct mthca_next_seg);

        switch (qp->transport) {
        case MLX:
                max_data_size -= 2 * sizeof (struct mthca_data_seg);
                break;

        case UD:
                if (mthca_is_memfree(dev))
                        max_data_size -= sizeof (struct mthca_arbel_ud_seg);
                else
                        max_data_size -= sizeof (struct mthca_tavor_ud_seg);
                break;

        default:
                max_data_size -= sizeof (struct mthca_raddr_seg);
                break;
        }

        return max_data_size;
}
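/*
 * Worked example (illustrative sizes): next, data and remote-address
 * segments are 16 bytes each on this hardware.  For an RC QP with a
 * 256-byte descriptor the computation above leaves
 * 256 - 16 (next) - 16 (raddr) == 224 bytes, i.e. room for 14
 * scatter/gather entries per WQE.
 */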
static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
        /* We don't support inline data for kernel QPs (yet). */
        return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}
static void mthca_adjust_qp_caps(struct mthca_dev *dev,
                                 struct mthca_pd *pd,
                                 struct mthca_qp *qp)
{
        int max_data_size = mthca_max_data_size(dev, qp,
                                                min(dev->limits.max_desc_sz,
                                                    1 << qp->sq.wqe_shift));

        qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

        qp->sq.max_gs = min_t(int, dev->limits.max_sg,
                              max_data_size / sizeof (struct mthca_data_seg));
        qp->rq.max_gs = min_t(int, dev->limits.max_sg,
                              (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
                               sizeof (struct mthca_next_seg)) /
                              sizeof (struct mthca_data_seg));
}
/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
                               struct mthca_pd *pd,
                               struct mthca_qp *qp)
{
        int size;
        int err = -ENOMEM;

        size = sizeof (struct mthca_next_seg) +
                qp->rq.max_gs * sizeof (struct mthca_data_seg);

        if (size > dev->limits.max_desc_sz)
                return -EINVAL;

        for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
             qp->rq.wqe_shift++)
                ; /* nothing */

        size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
        switch (qp->transport) {
        case MLX:
                size += 2 * sizeof (struct mthca_data_seg);
                break;

        case UD:
                size += mthca_is_memfree(dev) ?
                        sizeof (struct mthca_arbel_ud_seg) :
                        sizeof (struct mthca_tavor_ud_seg);
                break;

        case UC:
                size += sizeof (struct mthca_raddr_seg);
                break;

        case RC:
                size += sizeof (struct mthca_raddr_seg);
                /*
                 * An atomic op will require an atomic segment, a
                 * remote address segment and one scatter entry.
                 */
                size = max_t(int, size,
                             sizeof (struct mthca_atomic_seg) +
                             sizeof (struct mthca_raddr_seg) +
                             sizeof (struct mthca_data_seg));
                break;

        default:
                break;
        }

        /* Make sure that we have enough space for a bind request */
        size = max_t(int, size, sizeof (struct mthca_bind_seg));

        size += sizeof (struct mthca_next_seg);

        if (size > dev->limits.max_desc_sz)
                return -EINVAL;

        for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
             qp->sq.wqe_shift++)
                ; /* nothing */

        qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
                                    1 << qp->sq.wqe_shift);

        /*
         * If this is a userspace QP, we don't actually have to
         * allocate anything.  All we need is to calculate the WQE
         * sizes and the send_wqe_offset, so we're done now.
         */
        if (pd->ibpd.uobject)
                return 0;

        size = PAGE_ALIGN(qp->send_wqe_offset +
                          (qp->sq.max << qp->sq.wqe_shift));

        qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
                           GFP_KERNEL);
        if (!qp->wrid)
                goto err_out;

        err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
                              &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
        if (err)
                goto err_out;

        return 0;

err_out:
        kfree(qp->wrid);
        return err;
}
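/*
 * Example of the shift computation above (illustrative): a descriptor
 * of size == 112 bytes makes the loop settle on wqe_shift == 7, the
 * smallest power of two (starting from 64) that can hold it, so every
 * WQE occupies 128 bytes.  With rq.max == 256 and rq.wqe_shift == 6,
 * send_wqe_offset becomes ALIGN(256 << 6, 128) == 16384: the send
 * queue starts right after the receive queue, aligned to an SQ WQE.
 */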
static void mthca_free_wqe_buf(struct mthca_dev *dev,
                               struct mthca_qp *qp)
{
        mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
                                       (qp->sq.max << qp->sq.wqe_shift)),
                       &qp->queue, qp->is_direct, &qp->mr);
        kfree(qp->wrid);
}
static int mthca_map_memfree(struct mthca_dev *dev,
                             struct mthca_qp *qp)
{
        int ret;

        if (mthca_is_memfree(dev)) {
                ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
                if (ret)
                        return ret;

                ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
                if (ret)
                        goto err_qpc;

                ret = mthca_table_get(dev, dev->qp_table.rdb_table,
                                      qp->qpn << dev->qp_table.rdb_shift);
                if (ret)
                        goto err_eqpc;
        }

        return 0;

err_eqpc:
        mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
        mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

        return ret;
}
static void mthca_unmap_memfree(struct mthca_dev *dev,
                                struct mthca_qp *qp)
{
        mthca_table_put(dev, dev->qp_table.rdb_table,
                        qp->qpn << dev->qp_table.rdb_shift);
        mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
        mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}
static int mthca_alloc_memfree(struct mthca_dev *dev,
                               struct mthca_qp *qp)
{
        int ret = 0;

        if (mthca_is_memfree(dev)) {
                qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
                                                 qp->qpn, &qp->rq.db);
                if (qp->rq.db_index < 0)
                        return -ENOMEM;

                qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
                                                 qp->qpn, &qp->sq.db);
                if (qp->sq.db_index < 0) {
                        mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
                        return -ENOMEM;
                }
        }

        return ret;
}
static void mthca_free_memfree(struct mthca_dev *dev,
                               struct mthca_qp *qp)
{
        if (mthca_is_memfree(dev)) {
                mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
                mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
        }
}
static int mthca_alloc_qp_common(struct mthca_dev *dev,
                                 struct mthca_pd *pd,
                                 struct mthca_cq *send_cq,
                                 struct mthca_cq *recv_cq,
                                 enum ib_sig_type send_policy,
                                 struct mthca_qp *qp)
{
        int ret;
        int i;

        qp->refcount = 1;
        init_waitqueue_head(&qp->wait);
        qp->state        = IB_QPS_RESET;
        qp->atomic_rd_en = 0;
        qp->resp_depth   = 0;
        qp->sq_policy    = send_policy;
        mthca_wq_init(&qp->sq);
        mthca_wq_init(&qp->rq);

        ret = mthca_map_memfree(dev, qp);
        if (ret)
                return ret;

        ret = mthca_alloc_wqe_buf(dev, pd, qp);
        if (ret) {
                mthca_unmap_memfree(dev, qp);
                return ret;
        }

        mthca_adjust_qp_caps(dev, pd, qp);

        /*
         * If this is a userspace QP, we're done now.  The doorbells
         * will be allocated and buffers will be initialized in
         * userspace.
         */
        if (pd->ibpd.uobject)
                return 0;

        ret = mthca_alloc_memfree(dev, qp);
        if (ret) {
                mthca_free_wqe_buf(dev, qp);
                mthca_unmap_memfree(dev, qp);
                return ret;
        }

        if (mthca_is_memfree(dev)) {
                struct mthca_next_seg *next;
                struct mthca_data_seg *scatter;
                int size = (sizeof (struct mthca_next_seg) +
                            qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

                for (i = 0; i < qp->rq.max; ++i) {
                        next = get_recv_wqe(qp, i);
                        next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
                                                   qp->rq.wqe_shift);
                        next->ee_nds = cpu_to_be32(size);

                        for (scatter = (void *) (next + 1);
                             (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
                             ++scatter)
                                scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
                }

                for (i = 0; i < qp->sq.max; ++i) {
                        next = get_send_wqe(qp, i);
                        next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
                                                    qp->sq.wqe_shift) +
                                                   qp->send_wqe_offset);
                }
        }

        qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
        qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

        return 0;
}
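/*
 * Example of the nda_op chaining set up above (illustrative): with
 * rq.max == 8 and rq.wqe_shift == 6, receive WQE i == 3 gets
 * nda_op == (4 & 7) << 6 == 256, pointing at WQE 4; the last WQE
 * (i == 7) wraps back to offset 0.  Stamping every scatter entry with
 * MTHCA_INVAL_LKEY lets mem-free hardware treat unused entries as
 * list terminators.
 */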
static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
                             struct mthca_pd *pd, struct mthca_qp *qp)
{
        int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);

        /* Sanity check QP size before proceeding */
        if (cap->max_send_wr     > dev->limits.max_wqes ||
            cap->max_recv_wr     > dev->limits.max_wqes ||
            cap->max_send_sge    > dev->limits.max_sg   ||
            cap->max_recv_sge    > dev->limits.max_sg   ||
            cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
                return -EINVAL;

        /*
         * For MLX transport we need 2 extra S/G entries:
         * one for the header and one for the checksum at the end
         */
        if (qp->transport == MLX && cap->max_recv_sge + 2 > dev->limits.max_sg)
                return -EINVAL;

        if (mthca_is_memfree(dev)) {
                qp->rq.max = cap->max_recv_wr ?
                        roundup_pow_of_two(cap->max_recv_wr) : 0;
                qp->sq.max = cap->max_send_wr ?
                        roundup_pow_of_two(cap->max_send_wr) : 0;
        } else {
                qp->rq.max = cap->max_recv_wr;
                qp->sq.max = cap->max_send_wr;
        }

        qp->rq.max_gs = cap->max_recv_sge;
        qp->sq.max_gs = max_t(int, cap->max_send_sge,
                              ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
                                    MTHCA_INLINE_CHUNK_SIZE) /
                              sizeof (struct mthca_data_seg));

        return 0;
}
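/*
 * Example of the inline-data sizing above (illustrative): a request
 * for max_inline_data == 64 needs 64 + MTHCA_INLINE_HEADER_SIZE == 68
 * bytes, rounded up to whole 16-byte chunks: ALIGN(68, 16) == 80,
 * i.e. the equivalent of 5 gather entries, so sq.max_gs is at least 5
 * even if the caller asked for fewer SGEs.
 */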
int mthca_alloc_qp(struct mthca_dev *dev,
                   struct mthca_pd *pd,
                   struct mthca_cq *send_cq,
                   struct mthca_cq *recv_cq,
                   enum ib_qp_type type,
                   enum ib_sig_type send_policy,
                   struct ib_qp_cap *cap,
                   struct mthca_qp *qp)
{
        int err;

        switch (type) {
        case IB_QPT_RC: qp->transport = RC; break;
        case IB_QPT_UC: qp->transport = UC; break;
        case IB_QPT_UD: qp->transport = UD; break;
        default: return -EINVAL;
        }

        err = mthca_set_qp_size(dev, cap, pd, qp);
        if (err)
                return err;

        qp->qpn = mthca_alloc(&dev->qp_table.alloc);
        if (qp->qpn == -1)
                return -ENOMEM;

        /* initialize port to zero for error-catching. */
        qp->port = 0;

        err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
                                    send_policy, qp);
        if (err) {
                mthca_free(&dev->qp_table.alloc, qp->qpn);
                return err;
        }

        spin_lock_irq(&dev->qp_table.lock);
        mthca_array_set(&dev->qp_table.qp,
                        qp->qpn & (dev->limits.num_qps - 1), qp);
        spin_unlock_irq(&dev->qp_table.lock);

        return 0;
}
int mthca_alloc_sqp(struct mthca_dev *dev,
                    struct mthca_pd *pd,
                    struct mthca_cq *send_cq,
                    struct mthca_cq *recv_cq,
                    enum ib_sig_type send_policy,
                    struct ib_qp_cap *cap,
                    int qpn,
                    int port,
                    struct mthca_sqp *sqp)
{
        u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
        int err = 0;

        sqp->qp.transport = MLX;
        err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
        if (err)
                return err;

        sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
        sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
                                             &sqp->header_dma, GFP_KERNEL);
        if (!sqp->header_buf)
                return -ENOMEM;

        spin_lock_irq(&dev->qp_table.lock);
        if (mthca_array_get(&dev->qp_table.qp, mqpn))
                err = -EBUSY;
        else
                mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
        spin_unlock_irq(&dev->qp_table.lock);

        if (err)
                goto err_out;

        sqp->qp.port      = port;
        sqp->qp.qpn       = mqpn;
        sqp->qp.transport = MLX;

        err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
                                    send_policy, &sqp->qp);
        if (err)
                goto err_out_free;

        atomic_inc(&pd->sqp_count);

        return 0;

err_out_free:
        /*
         * Lock CQs here, so that CQ polling code can do QP lookup
         * without taking a lock.
         */
        spin_lock_irq(&send_cq->lock);
        if (send_cq != recv_cq)
                spin_lock(&recv_cq->lock);

        spin_lock(&dev->qp_table.lock);
        mthca_array_clear(&dev->qp_table.qp, mqpn);
        spin_unlock(&dev->qp_table.lock);

        if (send_cq != recv_cq)
                spin_unlock(&recv_cq->lock);
        spin_unlock_irq(&send_cq->lock);

err_out:
        dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
                          sqp->header_buf, sqp->header_dma);

        return err;
}
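/*
 * Example of the mqpn computation above (illustrative, assuming
 * sqp_start == 16): QP0 maps to 16 for port 1 and 17 for port 2, and
 * QP1 maps to 18 and 19, which is exactly the sqp_start .. sqp_start+3
 * window that is_sqp() and is_qp0() test against.
 */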
static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
{
        int c;

        spin_lock_irq(&dev->qp_table.lock);
        c = qp->refcount;
        spin_unlock_irq(&dev->qp_table.lock);

        return c;
}
void mthca_free_qp(struct mthca_dev *dev,
                   struct mthca_qp *qp)
{
        u8 status;
        struct mthca_cq *send_cq;
        struct mthca_cq *recv_cq;

        send_cq = to_mcq(qp->ibqp.send_cq);
        recv_cq = to_mcq(qp->ibqp.recv_cq);

        /*
         * Lock CQs here, so that CQ polling code can do QP lookup
         * without taking a lock.
         */
        spin_lock_irq(&send_cq->lock);
        if (send_cq != recv_cq)
                spin_lock(&recv_cq->lock);

        spin_lock(&dev->qp_table.lock);
        mthca_array_clear(&dev->qp_table.qp,
                          qp->qpn & (dev->limits.num_qps - 1));
        --qp->refcount;
        spin_unlock(&dev->qp_table.lock);

        if (send_cq != recv_cq)
                spin_unlock(&recv_cq->lock);
        spin_unlock_irq(&send_cq->lock);

        wait_event(qp->wait, !get_qp_refcount(dev, qp));

        if (qp->state != IB_QPS_RESET)
                mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
                                NULL, 0, &status);

        /*
         * If this is a userspace QP, the buffers, MR, CQs and so on
         * will be cleaned up in userspace, so all we have to do is
         * unref the mem-free tables and free the QPN in our table.
         */
        if (!qp->ibqp.uobject) {
                mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
                               qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
                        mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
                                       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

                mthca_free_memfree(dev, qp);
                mthca_free_wqe_buf(dev, qp);
        }

        mthca_unmap_memfree(dev, qp);

        if (is_sqp(dev, qp)) {
                atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
                dma_free_coherent(&dev->pdev->dev,
                                  to_msqp(qp)->header_buf_size,
                                  to_msqp(qp)->header_buf,
                                  to_msqp(qp)->header_dma);
        } else
                mthca_free(&dev->qp_table.alloc, qp->qpn);
}
/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
                            int ind, struct ib_send_wr *wr,
                            struct mthca_mlx_seg *mlx,
                            struct mthca_data_seg *data)
{
        int header_size;
        int err;
        u16 pkey;

        ib_ud_header_init(256, /* assume a MAD */
                          mthca_ah_grh_present(to_mah(wr->wr.ud.ah)),
                          &sqp->ud_header);

        err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
        if (err)
                return err;
        mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
        mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
                                  (sqp->ud_header.lrh.destination_lid ==
                                   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
                                  (sqp->ud_header.lrh.service_level << 8));
        mlx->rlid = sqp->ud_header.lrh.destination_lid;
        mlx->vcrc = 0;

        switch (wr->opcode) {
        case IB_WR_SEND:
                sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
                sqp->ud_header.immediate_present = 0;
                break;
        case IB_WR_SEND_WITH_IMM:
                sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
                sqp->ud_header.immediate_present = 1;
                sqp->ud_header.immediate_data = wr->imm_data;
                break;
        default:
                return -EINVAL;
        }

        sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
        if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
                sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
        sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
        if (!sqp->qp.ibqp.qp_num)
                ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
                                   sqp->pkey_index, &pkey);
        else
                ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
                                   wr->wr.ud.pkey_index, &pkey);
        sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
        sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
        sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
        sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
                                               sqp->qkey : wr->wr.ud.remote_qkey);
        sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

        header_size = ib_ud_header_pack(&sqp->ud_header,
                                        sqp->header_buf +
                                        ind * MTHCA_UD_HEADER_SIZE);

        data->byte_count = cpu_to_be32(header_size);
        data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
        data->addr       = cpu_to_be64(sqp->header_dma +
                                       ind * MTHCA_UD_HEADER_SIZE);

        return 0;
}
static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
                                    struct ib_cq *ib_cq)
{
        unsigned cur;
        struct mthca_cq *cq;

        cur = wq->head - wq->tail;
        if (likely(cur + nreq < wq->max))
                return 0;

        cq = to_mcq(ib_cq);
        spin_lock(&cq->lock);
        cur = wq->head - wq->tail;
        spin_unlock(&cq->lock);

        return cur + nreq >= wq->max;
}
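/*
 * head and tail are free-running unsigned counters, so the occupancy
 * computed above stays correct across 32-bit wraparound.  Illustrative
 * example: head == 3 (just wrapped) and tail == 0xfffffffd still give
 * cur == 3 - 0xfffffffd == 6 pending WQEs under modulo-2^32 arithmetic.
 */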
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                          struct ib_send_wr **bad_wr)
{
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
        __be32 doorbell[2];
        void *wqe;
        void *prev_wqe;
        unsigned long flags;
        int err = 0;
        int nreq;
        int i;
        int size;
        int size0 = 0;
        u32 f0 = 0;
        int ind;
        u8 op0 = 0;

        spin_lock_irqsave(&qp->sq.lock, flags);

        /* XXX check that state is OK to post send */

        ind = qp->sq.next_ind;

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
                        mthca_err(dev, "SQ %06x full (%u head, %u tail,"
                                  " %d max, %d nreq)\n", qp->qpn,
                                  qp->sq.head, qp->sq.tail,
                                  qp->sq.max, nreq);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                wqe = get_send_wqe(qp, ind);
                prev_wqe = qp->sq.last;
                qp->sq.last = wqe;

                ((struct mthca_next_seg *) wqe)->nda_op = 0;
                ((struct mthca_next_seg *) wqe)->ee_nds = 0;
                ((struct mthca_next_seg *) wqe)->flags =
                        ((wr->send_flags & IB_SEND_SIGNALED) ?
                         cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
                        ((wr->send_flags & IB_SEND_SOLICITED) ?
                         cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
                        cpu_to_be32(1);
                if (wr->opcode == IB_WR_SEND_WITH_IMM ||
                    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
                        ((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

                wqe += sizeof (struct mthca_next_seg);
                size = sizeof (struct mthca_next_seg) / 16;

                switch (qp->transport) {
                case RC:
                        switch (wr->opcode) {
                        case IB_WR_ATOMIC_CMP_AND_SWP:
                        case IB_WR_ATOMIC_FETCH_AND_ADD:
                                ((struct mthca_raddr_seg *) wqe)->raddr =
                                        cpu_to_be64(wr->wr.atomic.remote_addr);
                                ((struct mthca_raddr_seg *) wqe)->rkey =
                                        cpu_to_be32(wr->wr.atomic.rkey);
                                ((struct mthca_raddr_seg *) wqe)->reserved = 0;

                                wqe += sizeof (struct mthca_raddr_seg);

                                if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
                                        ((struct mthca_atomic_seg *) wqe)->swap_add =
                                                cpu_to_be64(wr->wr.atomic.swap);
                                        ((struct mthca_atomic_seg *) wqe)->compare =
                                                cpu_to_be64(wr->wr.atomic.compare_add);
                                } else {
                                        ((struct mthca_atomic_seg *) wqe)->swap_add =
                                                cpu_to_be64(wr->wr.atomic.compare_add);
                                        ((struct mthca_atomic_seg *) wqe)->compare = 0;
                                }

                                wqe += sizeof (struct mthca_atomic_seg);
                                size += (sizeof (struct mthca_raddr_seg) +
                                         sizeof (struct mthca_atomic_seg)) / 16;
                                break;

                        case IB_WR_RDMA_WRITE:
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                        case IB_WR_RDMA_READ:
                                ((struct mthca_raddr_seg *) wqe)->raddr =
                                        cpu_to_be64(wr->wr.rdma.remote_addr);
                                ((struct mthca_raddr_seg *) wqe)->rkey =
                                        cpu_to_be32(wr->wr.rdma.rkey);
                                ((struct mthca_raddr_seg *) wqe)->reserved = 0;
                                wqe += sizeof (struct mthca_raddr_seg);
                                size += sizeof (struct mthca_raddr_seg) / 16;
                                break;

                        default:
                                /* No extra segments required for sends */
                                break;
                        }

                        break;

                case UC:
                        switch (wr->opcode) {
                        case IB_WR_RDMA_WRITE:
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                                ((struct mthca_raddr_seg *) wqe)->raddr =
                                        cpu_to_be64(wr->wr.rdma.remote_addr);
                                ((struct mthca_raddr_seg *) wqe)->rkey =
                                        cpu_to_be32(wr->wr.rdma.rkey);
                                ((struct mthca_raddr_seg *) wqe)->reserved = 0;
                                wqe += sizeof (struct mthca_raddr_seg);
                                size += sizeof (struct mthca_raddr_seg) / 16;
                                break;

                        default:
                                /* No extra segments required for sends */
                                break;
                        }

                        break;

                case UD:
                        ((struct mthca_tavor_ud_seg *) wqe)->lkey =
                                cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
                        ((struct mthca_tavor_ud_seg *) wqe)->av_addr =
                                cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
                        ((struct mthca_tavor_ud_seg *) wqe)->dqpn =
                                cpu_to_be32(wr->wr.ud.remote_qpn);
                        ((struct mthca_tavor_ud_seg *) wqe)->qkey =
                                cpu_to_be32(wr->wr.ud.remote_qkey);

                        wqe += sizeof (struct mthca_tavor_ud_seg);
                        size += sizeof (struct mthca_tavor_ud_seg) / 16;
                        break;

                case MLX:
                        err = build_mlx_header(dev, to_msqp(qp), ind, wr,
                                               wqe - sizeof (struct mthca_next_seg),
                                               wqe);
                        if (err) {
                                *bad_wr = wr;
                                goto out;
                        }
                        wqe += sizeof (struct mthca_data_seg);
                        size += sizeof (struct mthca_data_seg) / 16;
                        break;
                }

                if (wr->num_sge > qp->sq.max_gs) {
                        mthca_err(dev, "too many gathers\n");
                        err = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                for (i = 0; i < wr->num_sge; ++i) {
                        ((struct mthca_data_seg *) wqe)->byte_count =
                                cpu_to_be32(wr->sg_list[i].length);
                        ((struct mthca_data_seg *) wqe)->lkey =
                                cpu_to_be32(wr->sg_list[i].lkey);
                        ((struct mthca_data_seg *) wqe)->addr =
                                cpu_to_be64(wr->sg_list[i].addr);
                        wqe += sizeof (struct mthca_data_seg);
                        size += sizeof (struct mthca_data_seg) / 16;
                }

                /* Add one more inline data segment for ICRC */
                if (qp->transport == MLX) {
                        ((struct mthca_data_seg *) wqe)->byte_count =
                                cpu_to_be32((1 << 31) | 4);
                        ((u32 *) wqe)[1] = 0;
                        wqe += sizeof (struct mthca_data_seg);
                        size += sizeof (struct mthca_data_seg) / 16;
                }

                qp->wrid[ind + qp->rq.max] = wr->wr_id;

                if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
                        mthca_err(dev, "opcode invalid\n");
                        err = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                ((struct mthca_next_seg *) prev_wqe)->nda_op =
                        cpu_to_be32(((ind << qp->sq.wqe_shift) +
                                     qp->send_wqe_offset) |
                                    mthca_opcode[wr->opcode]);
                wmb();
                ((struct mthca_next_seg *) prev_wqe)->ee_nds =
                        cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size |
                                    ((wr->send_flags & IB_SEND_FENCE) ?
                                     MTHCA_NEXT_FENCE : 0));

                if (!size0) {
                        size0 = size;
                        op0   = mthca_opcode[wr->opcode];
                }

                ++ind;
                if (unlikely(ind >= qp->sq.max))
                        ind -= qp->sq.max;
        }

out:
        if (likely(nreq)) {
                doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) +
                                           qp->send_wqe_offset) | f0 | op0);
                doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

                wmb();

                mthca_write64(doorbell,
                              dev->kar + MTHCA_SEND_DOORBELL,
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
        }

        qp->sq.next_ind = ind;
        qp->sq.head    += nreq;

        spin_unlock_irqrestore(&qp->sq.lock, flags);
        return err;
}
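/*
 * Tavor send doorbell layout, as built above (field placement per this
 * driver; values illustrative): because sq.wqe_shift >= 6, the low six
 * bits of the first WQE's offset are zero and are reused to carry the
 * fence flag and opcode, so
 *
 *   doorbell[0] = first_wqe_offset | f0 | op0;
 *   doorbell[1] = (qpn << 8) | size0;   /- size0 in 16-byte chunks -/
 *
 * and the wmb() guarantees the WQE contents are globally visible
 * before the MMIO write that hands them to the hardware.
 */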
int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                             struct ib_recv_wr **bad_wr)
{
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
        __be32 doorbell[2];
        unsigned long flags;
        int err = 0;
        int nreq;
        int i;
        int size;
        int size0 = 0;
        int ind;
        void *wqe;
        void *prev_wqe;

        spin_lock_irqsave(&qp->rq.lock, flags);

        /* XXX check that state is OK to post receive */

        ind = qp->rq.next_ind;

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
                        nreq = 0;

                        doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
                        doorbell[1] = cpu_to_be32(qp->qpn << 8);

                        wmb();

                        mthca_write64(doorbell,
                                      dev->kar + MTHCA_RECEIVE_DOORBELL,
                                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

                        qp->rq.next_ind = ind;
                        qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
                        size0 = 0;
                }

                if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
                        mthca_err(dev, "RQ %06x full (%u head, %u tail,"
                                  " %d max, %d nreq)\n", qp->qpn,
                                  qp->rq.head, qp->rq.tail,
                                  qp->rq.max, nreq);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                wqe = get_recv_wqe(qp, ind);
                prev_wqe = qp->rq.last;
                qp->rq.last = wqe;

                ((struct mthca_next_seg *) wqe)->nda_op = 0;
                ((struct mthca_next_seg *) wqe)->ee_nds =
                        cpu_to_be32(MTHCA_NEXT_DBD);
                ((struct mthca_next_seg *) wqe)->flags = 0;

                wqe += sizeof (struct mthca_next_seg);
                size = sizeof (struct mthca_next_seg) / 16;

                if (unlikely(wr->num_sge > qp->rq.max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                for (i = 0; i < wr->num_sge; ++i) {
                        ((struct mthca_data_seg *) wqe)->byte_count =
                                cpu_to_be32(wr->sg_list[i].length);
                        ((struct mthca_data_seg *) wqe)->lkey =
                                cpu_to_be32(wr->sg_list[i].lkey);
                        ((struct mthca_data_seg *) wqe)->addr =
                                cpu_to_be64(wr->sg_list[i].addr);
                        wqe += sizeof (struct mthca_data_seg);
                        size += sizeof (struct mthca_data_seg) / 16;
                }

                qp->wrid[ind] = wr->wr_id;

                ((struct mthca_next_seg *) prev_wqe)->nda_op =
                        cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
                wmb();
                ((struct mthca_next_seg *) prev_wqe)->ee_nds =
                        cpu_to_be32(MTHCA_NEXT_DBD | size);

                if (!size0)
                        size0 = size;

                ++ind;
                if (unlikely(ind >= qp->rq.max))
                        ind -= qp->rq.max;
        }

out:
        if (likely(nreq)) {
                doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
                doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);

                wmb();

                mthca_write64(doorbell,
                              dev->kar + MTHCA_RECEIVE_DOORBELL,
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
        }

        qp->rq.next_ind = ind;
        qp->rq.head    += nreq;

        spin_unlock_irqrestore(&qp->rq.lock, flags);
        return err;
}
int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                          struct ib_send_wr **bad_wr)
{
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
        __be32 doorbell[2];
        void *wqe;
        void *prev_wqe;
        unsigned long flags;
        int err = 0;
        int nreq;
        int i;
        int size;
        int size0 = 0;
        u32 f0 = 0;
        int ind;
        u8 op0 = 0;

        spin_lock_irqsave(&qp->sq.lock, flags);

        /* XXX check that state is OK to post send */

        ind = qp->sq.head & (qp->sq.max - 1);

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
                        nreq = 0;

                        doorbell[0] = cpu_to_be32((MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
                                                  ((qp->sq.head & 0xffff) << 8) |
                                                  f0 | op0);
                        doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

                        qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
                        size0 = 0;

                        /*
                         * Make sure that descriptors are written before
                         * doorbell record.
                         */
                        wmb();
                        *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

                        /*
                         * Make sure doorbell record is written before we
                         * write MMIO send doorbell.
                         */
                        wmb();
                        mthca_write64(doorbell,
                                      dev->kar + MTHCA_SEND_DOORBELL,
                                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
                }

                if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
                        mthca_err(dev, "SQ %06x full (%u head, %u tail,"
                                  " %d max, %d nreq)\n", qp->qpn,
                                  qp->sq.head, qp->sq.tail,
                                  qp->sq.max, nreq);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                wqe = get_send_wqe(qp, ind);
                prev_wqe = qp->sq.last;
                qp->sq.last = wqe;

                ((struct mthca_next_seg *) wqe)->flags =
                        ((wr->send_flags & IB_SEND_SIGNALED) ?
                         cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
                        ((wr->send_flags & IB_SEND_SOLICITED) ?
                         cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
                        cpu_to_be32(1);
                if (wr->opcode == IB_WR_SEND_WITH_IMM ||
                    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
                        ((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

                wqe += sizeof (struct mthca_next_seg);
                size = sizeof (struct mthca_next_seg) / 16;

                switch (qp->transport) {
                case RC:
                        switch (wr->opcode) {
                        case IB_WR_ATOMIC_CMP_AND_SWP:
                        case IB_WR_ATOMIC_FETCH_AND_ADD:
                                ((struct mthca_raddr_seg *) wqe)->raddr =
                                        cpu_to_be64(wr->wr.atomic.remote_addr);
                                ((struct mthca_raddr_seg *) wqe)->rkey =
                                        cpu_to_be32(wr->wr.atomic.rkey);
                                ((struct mthca_raddr_seg *) wqe)->reserved = 0;

                                wqe += sizeof (struct mthca_raddr_seg);

                                if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
                                        ((struct mthca_atomic_seg *) wqe)->swap_add =
                                                cpu_to_be64(wr->wr.atomic.swap);
                                        ((struct mthca_atomic_seg *) wqe)->compare =
                                                cpu_to_be64(wr->wr.atomic.compare_add);
                                } else {
                                        ((struct mthca_atomic_seg *) wqe)->swap_add =
                                                cpu_to_be64(wr->wr.atomic.compare_add);
                                        ((struct mthca_atomic_seg *) wqe)->compare = 0;
                                }

                                wqe += sizeof (struct mthca_atomic_seg);
                                size += (sizeof (struct mthca_raddr_seg) +
                                         sizeof (struct mthca_atomic_seg)) / 16;
                                break;

                        case IB_WR_RDMA_READ:
                        case IB_WR_RDMA_WRITE:
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                                ((struct mthca_raddr_seg *) wqe)->raddr =
                                        cpu_to_be64(wr->wr.rdma.remote_addr);
                                ((struct mthca_raddr_seg *) wqe)->rkey =
                                        cpu_to_be32(wr->wr.rdma.rkey);
                                ((struct mthca_raddr_seg *) wqe)->reserved = 0;
                                wqe += sizeof (struct mthca_raddr_seg);
                                size += sizeof (struct mthca_raddr_seg) / 16;
                                break;

                        default:
                                /* No extra segments required for sends */
                                break;
                        }

                        break;

                case UC:
                        switch (wr->opcode) {
                        case IB_WR_RDMA_WRITE:
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                                ((struct mthca_raddr_seg *) wqe)->raddr =
                                        cpu_to_be64(wr->wr.rdma.remote_addr);
                                ((struct mthca_raddr_seg *) wqe)->rkey =
                                        cpu_to_be32(wr->wr.rdma.rkey);
                                ((struct mthca_raddr_seg *) wqe)->reserved = 0;
                                wqe += sizeof (struct mthca_raddr_seg);
                                size += sizeof (struct mthca_raddr_seg) / 16;
                                break;

                        default:
                                /* No extra segments required for sends */
                                break;
                        }

                        break;

                case UD:
                        memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
                               to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
                        ((struct mthca_arbel_ud_seg *) wqe)->dqpn =
                                cpu_to_be32(wr->wr.ud.remote_qpn);
                        ((struct mthca_arbel_ud_seg *) wqe)->qkey =
                                cpu_to_be32(wr->wr.ud.remote_qkey);

                        wqe += sizeof (struct mthca_arbel_ud_seg);
                        size += sizeof (struct mthca_arbel_ud_seg) / 16;
                        break;

                case MLX:
                        err = build_mlx_header(dev, to_msqp(qp), ind, wr,
                                               wqe - sizeof (struct mthca_next_seg),
                                               wqe);
                        if (err) {
                                *bad_wr = wr;
                                goto out;
                        }
                        wqe += sizeof (struct mthca_data_seg);
                        size += sizeof (struct mthca_data_seg) / 16;
                        break;
                }

                if (wr->num_sge > qp->sq.max_gs) {
                        mthca_err(dev, "too many gathers\n");
                        err = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                for (i = 0; i < wr->num_sge; ++i) {
                        ((struct mthca_data_seg *) wqe)->byte_count =
                                cpu_to_be32(wr->sg_list[i].length);
                        ((struct mthca_data_seg *) wqe)->lkey =
                                cpu_to_be32(wr->sg_list[i].lkey);
                        ((struct mthca_data_seg *) wqe)->addr =
                                cpu_to_be64(wr->sg_list[i].addr);
                        wqe += sizeof (struct mthca_data_seg);
                        size += sizeof (struct mthca_data_seg) / 16;
                }

                /* Add one more inline data segment for ICRC */
                if (qp->transport == MLX) {
                        ((struct mthca_data_seg *) wqe)->byte_count =
                                cpu_to_be32((1 << 31) | 4);
                        ((u32 *) wqe)[1] = 0;
                        wqe += sizeof (struct mthca_data_seg);
                        size += sizeof (struct mthca_data_seg) / 16;
                }

                qp->wrid[ind + qp->rq.max] = wr->wr_id;

                if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
                        mthca_err(dev, "opcode invalid\n");
                        err = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                ((struct mthca_next_seg *) prev_wqe)->nda_op =
                        cpu_to_be32(((ind << qp->sq.wqe_shift) +
                                     qp->send_wqe_offset) |
                                    mthca_opcode[wr->opcode]);
                wmb();
                ((struct mthca_next_seg *) prev_wqe)->ee_nds =
                        cpu_to_be32(MTHCA_NEXT_DBD | size |
                                    ((wr->send_flags & IB_SEND_FENCE) ?
                                     MTHCA_NEXT_FENCE : 0));

                if (!size0) {
                        size0 = size;
                        op0   = mthca_opcode[wr->opcode];
                }

                ++ind;
                if (unlikely(ind >= qp->sq.max))
                        ind -= qp->sq.max;
        }

out:
        if (likely(nreq)) {
                doorbell[0] = cpu_to_be32((nreq << 24) |
                                          ((qp->sq.head & 0xffff) << 8) |
                                          f0 | op0);
                doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

                qp->sq.head += nreq;

                /*
                 * Make sure that descriptors are written before
                 * doorbell record.
                 */
                wmb();
                *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

                /*
                 * Make sure doorbell record is written before we
                 * write MMIO send doorbell.
                 */
                wmb();
                mthca_write64(doorbell,
                              dev->kar + MTHCA_SEND_DOORBELL,
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
        }

        spin_unlock_irqrestore(&qp->sq.lock, flags);
        return err;
}
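/*
 * On Arbel (mem-free mode) posting is a three-step dance, in contrast
 * to Tavor's single MMIO write: write the descriptors, wmb(), update
 * the doorbell record in memory (*qp->sq.db holds the low 16 bits of
 * the head counter -- e.g. head == 0x12345 is recorded as 0x2345),
 * wmb() again, then ring the MMIO doorbell.  The in-memory record lets
 * the hardware recover the ring position without depending on the
 * MMIO write alone.
 */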
int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                             struct ib_recv_wr **bad_wr)
{
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
        unsigned long flags;
        int err = 0;
        int nreq;
        int ind;
        int i;
        void *wqe;

        spin_lock_irqsave(&qp->rq.lock, flags);

        /* XXX check that state is OK to post receive */

        ind = qp->rq.head & (qp->rq.max - 1);

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
                        mthca_err(dev, "RQ %06x full (%u head, %u tail,"
                                  " %d max, %d nreq)\n", qp->qpn,
                                  qp->rq.head, qp->rq.tail,
                                  qp->rq.max, nreq);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                wqe = get_recv_wqe(qp, ind);

                ((struct mthca_next_seg *) wqe)->flags = 0;

                wqe += sizeof (struct mthca_next_seg);

                if (unlikely(wr->num_sge > qp->rq.max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                for (i = 0; i < wr->num_sge; ++i) {
                        ((struct mthca_data_seg *) wqe)->byte_count =
                                cpu_to_be32(wr->sg_list[i].length);
                        ((struct mthca_data_seg *) wqe)->lkey =
                                cpu_to_be32(wr->sg_list[i].lkey);
                        ((struct mthca_data_seg *) wqe)->addr =
                                cpu_to_be64(wr->sg_list[i].addr);
                        wqe += sizeof (struct mthca_data_seg);
                }

                if (i < qp->rq.max_gs) {
                        ((struct mthca_data_seg *) wqe)->byte_count = 0;
                        ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
                        ((struct mthca_data_seg *) wqe)->addr = 0;
                }

                qp->wrid[ind] = wr->wr_id;

                ++ind;
                if (unlikely(ind >= qp->rq.max))
                        ind -= qp->rq.max;
        }

out:
        if (likely(nreq)) {
                qp->rq.head += nreq;

                /*
                 * Make sure that descriptors are written before
                 * doorbell record.
                 */
                wmb();
                *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
        }

        spin_unlock_irqrestore(&qp->rq.lock, flags);
        return err;
}
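/*
 * No MMIO doorbell is needed on the Arbel receive path: updating the
 * doorbell record (*qp->rq.db) is enough.  The extra sentinel entry
 * written above, with byte_count 0 and MTHCA_INVAL_LKEY, terminates a
 * scatter list shorter than rq.max_gs, matching the invalid-lkey
 * stamping done at QP initialization.
 */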
void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
                        int index, int *dbd, __be32 *new_wqe)
{
        struct mthca_next_seg *next;

        /*
         * For SRQs, all WQEs generate a CQE, so we're always at the
         * end of the doorbell chain.
         */
        if (qp->ibqp.srq) {
                *new_wqe = 0;
                return;
        }

        if (is_send)
                next = get_send_wqe(qp, index);
        else
                next = get_recv_wqe(qp, index);

        *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
        if (next->ee_nds & cpu_to_be32(0x3f))
                *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
                        (next->ee_nds & cpu_to_be32(0x3f));
        else
                *new_wqe = 0;
}
int __devinit mthca_init_qp_table(struct mthca_dev *dev)
{
        int err;
        u8 status;
        int i;

        spin_lock_init(&dev->qp_table.lock);

        /*
         * We reserve 2 extra QPs per port for the special QPs.  The
         * special QP for port 1 has to be even, so round up.
         */
        dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
        err = mthca_alloc_init(&dev->qp_table.alloc,
                               dev->limits.num_qps,
                               (1 << 24) - 1,
                               dev->qp_table.sqp_start +
                               MTHCA_MAX_PORTS * 2);
        if (err)
                return err;

        err = mthca_array_init(&dev->qp_table.qp,
                               dev->limits.num_qps);
        if (err) {
                mthca_alloc_cleanup(&dev->qp_table.alloc);
                return err;
        }

        for (i = 0; i < 2; ++i) {
                err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
                                            dev->qp_table.sqp_start + i * 2,
                                            &status);
                if (err)
                        goto err_out;
                if (status) {
                        mthca_warn(dev, "CONF_SPECIAL_QP returned "
                                   "status %02x, aborting.\n",
                                   status);
                        err = -EINVAL;
                        goto err_out;
                }
        }
        return 0;

err_out:
        for (i = 0; i < 2; ++i)
                mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

        mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
        mthca_alloc_cleanup(&dev->qp_table.alloc);

        return err;
}
void mthca_cleanup_qp_table(struct mthca_dev *dev)
{
        int i;
        u8 status;

        for (i = 0; i < 2; ++i)
                mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

        mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
        mthca_alloc_cleanup(&dev->qp_table.alloc);
}