/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
 */
#include <linux/init.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
enum {
	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
	MTHCA_ACK_REQ_FREQ       = 10,
	MTHCA_FLIGHT_LIMIT       = 9,
	MTHCA_UD_HEADER_SIZE     = 72 /* largest UD header possible */
};
enum {
	MTHCA_QP_STATE_RST      = 0,
	MTHCA_QP_STATE_INIT     = 1,
	MTHCA_QP_STATE_RTR      = 2,
	MTHCA_QP_STATE_RTS      = 3,
	MTHCA_QP_STATE_SQE      = 4,
	MTHCA_QP_STATE_SQD      = 5,
	MTHCA_QP_STATE_ERR      = 6,
	MTHCA_QP_STATE_DRAINING = 7
};
enum {
	MTHCA_QP_PM_MIGRATED = 0x3,
	MTHCA_QP_PM_ARMED    = 0x0,
	MTHCA_QP_PM_REARM    = 0x1
};
enum {
	/* qp_context flags */
	MTHCA_QP_BIT_DE  = 1 << 8,
	/* params1 */
	MTHCA_QP_BIT_SRE = 1 << 15,
	MTHCA_QP_BIT_SWE = 1 << 14,
	MTHCA_QP_BIT_SAE = 1 << 13,
	MTHCA_QP_BIT_SIC = 1 <<  4,
	MTHCA_QP_BIT_SSC = 1 <<  3,
	/* params2 */
	MTHCA_QP_BIT_RRE = 1 << 15,
	MTHCA_QP_BIT_RWE = 1 << 14,
	MTHCA_QP_BIT_RAE = 1 << 13,
	MTHCA_QP_BIT_RIC = 1 <<  4,
	MTHCA_QP_BIT_RSC = 1 <<  3
};
struct mthca_qp_path {
	u32 port_pkey;
	u8  rnr_retry;
	u8  g_mylmc;
	u16 rlid;
	u8  ackto;
	u8  mgid_index;
	u8  static_rate;
	u8  hop_limit;
	u32 sl_tclass_flowlabel;
	u8  rgid[16];
} __attribute__((packed));
struct mthca_qp_context {
	u32 flags;
	u32 tavor_sched_queue;	/* Reserved on Arbel */
	u8  mtu_msgmax;
	u8  rq_size_stride;	/* Reserved on Tavor */
	u8  sq_size_stride;	/* Reserved on Tavor */
	u8  rlkey_arbel_sched_queue;	/* Reserved on Tavor */
	u32 usr_page;
	u32 local_qpn;
	u32 remote_qpn;
	u32 reserved1[2];
	struct mthca_qp_path pri_path;
	struct mthca_qp_path alt_path;
	u32 rdd;
	u32 pd;
	u32 wqe_base;
	u32 wqe_lkey;
	u32 params1;
	u32 reserved2;
	u32 next_send_psn;
	u32 cqn_snd;
	u32 snd_wqe_base_l;	/* Next send WQE on Tavor */
	u32 snd_db_index;	/* (debugging only entries) */
	u32 last_acked_psn;
	u32 ssn;
	u32 params2;
	u32 rnr_nextrecvpsn;
	u32 ra_buff_indx;
	u32 cqn_rcv;
	u32 rcv_wqe_base_l;	/* Next recv WQE on Tavor */
	u32 rcv_db_index;	/* (debugging only entries) */
	u32 qkey;
	u32 srqn;
	u32 rcv_psn;
	u16 rq_wqe_counter;	/* reserved on Tavor */
	u16 sq_wqe_counter;	/* reserved on Tavor */
	u32 reserved3[18];
} __attribute__((packed));
struct mthca_qp_param {
	u32 opt_param_mask;
	u32 reserved1;
	struct mthca_qp_context context;
	u32 reserved2[62];
} __attribute__((packed));
enum {
	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};
enum {
	MTHCA_NEXT_DBD       = 1 << 7,
	MTHCA_NEXT_FENCE     = 1 << 6,
	MTHCA_NEXT_CQ_UPDATE = 1 << 3,
	MTHCA_NEXT_EVENT_GEN = 1 << 2,
	MTHCA_NEXT_SOLICIT   = 1 << 1,

	MTHCA_MLX_VL15       = 1 << 17,
	MTHCA_MLX_SLR        = 1 << 16
};

enum {
	MTHCA_INVAL_LKEY = 0x100
};
struct mthca_next_seg {
	u32 nda_op;	/* [31:6] next WQE [4:0] next opcode */
	u32 ee_nds;	/* [31:8] next EE  [7] DBD [6] F [5:0] next WQE size */
	u32 flags;	/* [3] CQ [2] Event [1] Solicit */
	u32 imm;	/* immediate data */
} __attribute__((packed));
struct mthca_tavor_ud_seg {
	u32 reserved1;
	u32 lkey;
	u64 av_addr;
	u32 reserved2[4];
	u32 dqpn;
	u32 qkey;
	u32 reserved3[2];
} __attribute__((packed));

struct mthca_arbel_ud_seg {
	u32 av[8];
	u32 dqpn;
	u32 qkey;
	u32 reserved[2];
} __attribute__((packed));

struct mthca_bind_seg {
	u32 flags;	/* [31] Atomic [30] rem write [29] rem read */
	u32 reserved;
	u32 new_rkey;
	u32 lkey;
	u64 addr;
	u64 length;
} __attribute__((packed));

struct mthca_raddr_seg {
	u64 raddr;
	u32 rkey;
	u32 reserved;
} __attribute__((packed));

struct mthca_atomic_seg {
	u64 swap_add;
	u64 compare;
} __attribute__((packed));

struct mthca_data_seg {
	u32 byte_count;
	u32 lkey;
	u64 addr;
} __attribute__((packed));

struct mthca_mlx_seg {
	u32 nda_op;
	u32 nds;
	u32 flags;	/* [17] VL15 [16] SLR [14:12] static rate
			   [11:8] SL [3] C [2] E */
	u16 rlid;
	u16 vcrc;
} __attribute__((packed));
static const u8 mthca_opcode[] = {
	[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};
static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 3;
}
static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 1;
}
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
	else
		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}
static void *get_send_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + qp->send_wqe_offset +
			(n << qp->sq.wqe_shift);
	else
		return qp->queue.page_list[(qp->send_wqe_offset +
					    (n << qp->sq.wqe_shift)) >>
					   PAGE_SHIFT].buf +
			((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
			 (PAGE_SIZE - 1));
}
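
/*
 * Note: a worked example of the WQE addressing above, assuming a
 * direct buffer and sq.wqe_shift = 6 (64-byte stride): send WQE n
 * lives at send_wqe_offset + (n << 6).  With an indirect buffer the
 * same byte offset is split into a page index (offset >> PAGE_SHIFT)
 * and an offset within that page (offset & (PAGE_SIZE - 1)).
 */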
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		    enum ib_event_type event_type)
{
	struct mthca_qp *qp;
	struct ib_event event;

	spin_lock(&dev->qp_table.lock);
	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
	if (qp)
		atomic_inc(&qp->refcount);
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
		mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.qp  = &qp->ibqp;
	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
static int to_mthca_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
	case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
	case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
	case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
	case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
	case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
	case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
	default:           return -1;
	}
}
enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };
static int to_mthca_st(int transport)
{
	switch (transport) {
	case RC:  return MTHCA_QP_ST_RC;
	case UC:  return MTHCA_QP_ST_UC;
	case UD:  return MTHCA_QP_ST_UD;
	case RD:  return MTHCA_QP_ST_RD;
	case MLX: return MTHCA_QP_ST_MLX;
	default:  return -1;
	}
}
static const struct {
	int trans;
	u32 req_param[NUM_TRANS];
	u32 opt_param[NUM_TRANS];
} state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_INIT]  = {
			.trans = MTHCA_TRANS_RST2INIT,
			.req_param = {
				[UD]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY),
				[UC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[RC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[MLX] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			},
			/* bug-for-bug compatibility with VAPI: */
			.opt_param = {
				[MLX] = IB_QP_PORT
			},
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_INIT]  = {
			.trans = MTHCA_TRANS_INIT2INIT,
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY),
				[UC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[RC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[MLX] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.trans = MTHCA_TRANS_INIT2RTR,
			.req_param = {
				[UC]  = (IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
					 IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC),
				[RC]  = (IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
					 IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[UC]  = (IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX),
				[RC]  = (IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX),
				[MLX] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_RTR2RTS,
			.req_param = {
				[UD]  = IB_QP_SQ_PSN,
				[UC]  = (IB_QP_SQ_PSN | IB_QP_MAX_QP_RD_ATOMIC),
				[RC]  = (IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
					 IB_QP_SQ_PSN | IB_QP_MAX_QP_RD_ATOMIC),
				[MLX] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[UC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
					 IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
					 IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX |
					 IB_QP_MIN_RNR_TIMER | IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_RTS2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[UC]  = (IB_QP_ACCESS_FLAGS | IB_QP_ALT_PATH |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_ACCESS_FLAGS | IB_QP_ALT_PATH |
					 IB_QP_PATH_MIG_STATE | IB_QP_MIN_RNR_TIMER),
				[MLX] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.trans = MTHCA_TRANS_RTS2SQD,
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_SQD2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[UC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
					 IB_QP_ACCESS_FLAGS | IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
					 IB_QP_ACCESS_FLAGS | IB_QP_MIN_RNR_TIMER |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.trans = MTHCA_TRANS_SQD2SQD,
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[UC]  = (IB_QP_AV | IB_QP_MAX_QP_RD_ATOMIC |
					 IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_CUR_STATE |
					 IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS |
					 IB_QP_PKEY_INDEX | IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_AV | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
					 IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC |
					 IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_CUR_STATE |
					 IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS |
					 IB_QP_PKEY_INDEX | IB_QP_MIN_RNR_TIMER |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_SQERR2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[UC]  = (IB_QP_CUR_STATE),
				[RC]  = (IB_QP_CUR_STATE | IB_QP_MIN_RNR_TIMER),
				[MLX] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR }
	}
};
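
/*
 * Note: mthca_modify_qp() indexes this table as
 * state_table[cur_state][new_state] and, for the QP's transport,
 * requires every req_param bit to be present in attr_mask, while
 * opt_param bits may optionally appear; any other attribute bit is
 * rejected.  Entries whose .trans stays zero are MTHCA_TRANS_INVALID,
 * i.e. transitions the HCA does not support.
 */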
static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
			int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}
static void init_port(struct mthca_dev *dev, int port)
{
	int err;
	u8 status;
	struct mthca_init_ib_param param;

	memset(&param, 0, sizeof param);

	param.enable_1x = 1;
	param.enable_4x = 1;
	param.vl_cap    = dev->limits.vl_cap;
	param.mtu_cap   = dev->limits.mtu_cap;
	param.gid_cap   = dev->limits.gid_table_len;
	param.pkey_cap  = dev->limits.pkey_table_len;

	err = mthca_INIT_IB(dev, &param, port, &status);
	if (err)
		mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
	if (status)
		mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *qp_context;
	u32 req_param, opt_param;
	u8 status;
	int err;

	if (attr_mask & IB_QP_CUR_STATE) {
		if (attr->cur_qp_state != IB_QPS_RTR &&
		    attr->cur_qp_state != IB_QPS_RTS &&
		    attr->cur_qp_state != IB_QPS_SQD &&
		    attr->cur_qp_state != IB_QPS_SQE)
			return -EINVAL;
		else
			cur_state = attr->cur_qp_state;
	} else {
		spin_lock_irq(&qp->sq.lock);
		spin_lock(&qp->rq.lock);
		cur_state = qp->state;
		spin_unlock(&qp->rq.lock);
		spin_unlock_irq(&qp->sq.lock);
	}

	if (attr_mask & IB_QP_STATE) {
		if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR)
			return -EINVAL;
		new_state = attr->qp_state;
	} else
		new_state = cur_state;

	if (state_table[cur_state][new_state].trans == MTHCA_TRANS_INVALID) {
		mthca_dbg(dev, "Illegal QP transition "
			  "%d->%d\n", cur_state, new_state);
		return -EINVAL;
	}

	req_param = state_table[cur_state][new_state].req_param[qp->transport];
	opt_param = state_table[cur_state][new_state].opt_param[qp->transport];

	if ((req_param & attr_mask) != req_param) {
		mthca_dbg(dev, "QP transition "
			  "%d->%d missing req attr 0x%08x\n",
			  cur_state, new_state,
			  req_param & ~attr_mask);
		return -EINVAL;
	}

	if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) {
		mthca_dbg(dev, "QP transition (transport %d) "
			  "%d->%d has extra attr 0x%08x\n",
			  qp->transport,
			  cur_state, new_state,
			  attr_mask & ~(req_param | opt_param |
					IB_QP_STATE));
		return -EINVAL;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	qp_param = mailbox->buf;
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	qp_context->flags = cpu_to_be32((to_mthca_state(new_state) << 28) |
					(to_mthca_st(qp->transport) << 16));
	qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave tavor_sched_queue as 0 */

	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU)
		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;

	if (mthca_is_memfree(dev)) {
		qp_context->rq_size_stride =
			((ffs(qp->rq.max) - 1) << 3) | (qp->rq.wqe_shift - 4);
		qp_context->sq_size_stride =
			((ffs(qp->sq.max) - 1) << 3) | (qp->sq.wqe_shift - 4);
	}
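
	/*
	 * Note: the size_stride bytes above pack log2(number of WQEs)
	 * into bits [7:3] (ffs(max) - 1, with max a power of 2 on
	 * mem-free HCAs) and log2(stride / 16) into bits [2:0]
	 * (wqe_shift - 4).  E.g. 256 WQEs of 64 bytes each encode as
	 * (8 << 3) | 2 = 0x42.
	 */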
	/* leave arbel_sched_queue as 0 */

	qp_context->usr_page   = cpu_to_be32(dev->driver_uar.index);
	qp_context->local_qpn  = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN) {
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
	}

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(to_msqp(qp)->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->pri_path.rnr_retry = attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY);
	}

	if (attr_mask & IB_QP_AV) {
		qp_context->pri_path.g_mylmc     = attr->ah_attr.src_path_bits & 0x7f;
		qp_context->pri_path.rlid        = cpu_to_be16(attr->ah_attr.dlid);
		qp_context->pri_path.static_rate = !!attr->ah_attr.static_rate;
		if (attr->ah_attr.ah_flags & IB_AH_GRH) {
			qp_context->pri_path.g_mylmc |= 1 << 7;
			qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index;
			qp_context->pri_path.hop_limit = attr->ah_attr.grh.hop_limit;
			qp_context->pri_path.sl_tclass_flowlabel =
				cpu_to_be32((attr->ah_attr.sl << 28)                |
					    (attr->ah_attr.grh.traffic_class << 20) |
					    (attr->ah_attr.grh.flow_label));
			memcpy(qp_context->pri_path.rgid,
			       attr->ah_attr.grh.dgid.raw, 16);
		} else {
			qp_context->pri_path.sl_tclass_flowlabel =
				cpu_to_be32(attr->ah_attr.sl << 28);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	/* XXX alt_path */

	/* leave rdd as 0 */
	qp_context->pd         = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey   = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1    = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					     (MTHCA_FLIGHT_LIMIT << 24) |
					     MTHCA_QP_BIT_SRE           |
					     MTHCA_QP_BIT_SWE           |
					     MTHCA_QP_BIT_SAE);
	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		qp_context->params1 |= cpu_to_be32(min(attr->max_rd_atomic ?
						       ffs(attr->max_rd_atomic) - 1 : 0,
						       7) << 21);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (mthca_is_memfree(dev)) {
		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
		qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
	}
	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		/*
		 * Only enable RDMA/atomics if we have responder
		 * resources set to a non-zero value.
		 */
		if (qp->resp_depth) {
			qp_context->params2 |=
				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
					    MTHCA_QP_BIT_RWE : 0);
			qp_context->params2 |=
				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ?
					    MTHCA_QP_BIT_RRE : 0);
			qp_context->params2 |=
				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC ?
					    MTHCA_QP_BIT_RAE : 0);
		}

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);

		qp->atomic_rd_en = attr->qp_access_flags;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		u8 rra_max;

		if (qp->resp_depth && !attr->max_dest_rd_atomic) {
			/*
			 * Lowering our responder resources to zero.
			 * Turn off RDMA/atomics as responder.
			 * (RWE/RRE/RAE in params2 already zero)
			 */
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
								MTHCA_QP_OPTPAR_RRE |
								MTHCA_QP_OPTPAR_RAE);
		}

		if (!qp->resp_depth && attr->max_dest_rd_atomic) {
			/*
			 * Increasing our responder resources from
			 * zero.  Turn on RDMA/atomics as appropriate.
			 */
			qp_context->params2 |=
				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_WRITE ?
					    MTHCA_QP_BIT_RWE : 0);
			qp_context->params2 |=
				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ?
					    MTHCA_QP_BIT_RRE : 0);
			qp_context->params2 |=
				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ?
					    MTHCA_QP_BIT_RAE : 0);

			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
								MTHCA_QP_OPTPAR_RRE |
								MTHCA_QP_OPTPAR_RAE);
		}

		for (rra_max = 0;
		     1 << rra_max < attr->max_dest_rd_atomic &&
			     rra_max < dev->qp_table.rdb_shift;
		     ++rra_max)
			; /* nothing */

		qp_context->params2 |= cpu_to_be32(rra_max << 21);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);

		qp->resp_depth = attr->max_dest_rd_atomic;
	}

	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);
	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	qp_context->ra_buff_indx =
		cpu_to_be32(dev->qp_table.rdb_base +
			    ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
			     dev->qp_table.rdb_shift));
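
	/*
	 * Note: each QP owns 1 << rdb_shift RDB (responder resource)
	 * entries of MTHCA_RDB_ENTRY_SIZE bytes, so the offset added
	 * to rdb_base above is
	 * qpn_index * (MTHCA_RDB_ENTRY_SIZE << rdb_shift).
	 */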
	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (mthca_is_memfree(dev))
		qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}
	err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
			      qp->qpn, 0, mailbox, 0, &status);
	if (status) {
		mthca_warn(dev, "modify QP %d returned status %02x.\n",
			   state_table[cur_state][new_state].trans, status);
		err = -EINVAL;
	}

	if (!err)
		qp->state = new_state;

	mthca_free_mailbox(dev, mailbox);

	if (is_sqp(dev, qp))
		store_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we are moving QP0 to RTR, bring the IB link up; if we
	 * are moving QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, to_msqp(qp)->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
	}

	return err;
}
/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int err = -ENOMEM;
	int npages;
	int shift;
	int i;
	dma_addr_t t;
	u64 *dma_list = NULL;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = sizeof (struct mthca_next_seg) +
		qp->sq.max_gs * sizeof (struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof (struct mthca_data_seg);
		break;
	case UD:
		if (mthca_is_memfree(dev))
			size += sizeof (struct mthca_arbel_ud_seg);
		else
			size += sizeof (struct mthca_tavor_ud_seg);
		break;
	default:
		/* bind seg is as big as atomic + raddr segs */
		size += sizeof (struct mthca_bind_seg);
	}

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);
	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));
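
	/*
	 * Note: the queue buffer puts all receive WQEs at offset 0
	 * and all send WQEs at send_wqe_offset, i.e. the RQ area
	 * rounded up to the SQ WQE stride so every send WQE stays
	 * naturally aligned; the page-aligned total sizes the
	 * direct/indirect allocation below.
	 */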
	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		return -ENOMEM;

	if (size <= MTHCA_MAX_DIRECT_QP_SIZE) {
		qp->is_direct = 1;
		npages = 1;
		shift = get_order(size) + PAGE_SHIFT;

		if (0)
			mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n",
				  size, shift);

		qp->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, size,
							  &t, GFP_KERNEL);
		if (!qp->queue.direct.buf)
			goto err_out;

		pci_unmap_addr_set(&qp->queue.direct, mapping, t);

		memset(qp->queue.direct.buf, 0, size);

		while (t & ((1 << shift) - 1)) {
			--shift;
			npages *= 2;
		}

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_out_free;

		for (i = 0; i < npages; ++i)
			dma_list[i] = t + i * (1 << shift);
	} else {
		qp->is_direct = 0;
		npages = size / PAGE_SIZE;
		shift = PAGE_SHIFT;

		if (0)
			mthca_dbg(dev, "Creating indirect QP with %d pages\n", npages);

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_out;

		qp->queue.page_list = kmalloc(npages *
					      sizeof *qp->queue.page_list,
					      GFP_KERNEL);
		if (!qp->queue.page_list)
			goto err_out;

		for (i = 0; i < npages; ++i) {
			qp->queue.page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!qp->queue.page_list[i].buf)
				goto err_out_free;

			memset(qp->queue.page_list[i].buf, 0, PAGE_SIZE);

			pci_unmap_addr_set(&qp->queue.page_list[i], mapping, t);

			dma_list[i] = t;
		}
	}
	err = mthca_mr_alloc_phys(dev, pd->pd_num, dma_list, shift,
				  npages, 0, size,
				  MTHCA_MPT_FLAG_LOCAL_READ,
				  &qp->mr);
	if (err)
		goto err_out_free;

	kfree(dma_list);
	return 0;

 err_out_free:
	if (qp->is_direct) {
		dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
				  pci_unmap_addr(&qp->queue.direct, mapping));
	} else {
		for (i = 0; i < npages; ++i) {
			if (qp->queue.page_list[i].buf)
				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
						  qp->queue.page_list[i].buf,
						  pci_unmap_addr(&qp->queue.page_list[i],
								 mapping));
		}
	}

 err_out:
	kfree(qp->wrid);
	kfree(dma_list);
	return err;
}
static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	int ret = 0;

	if (mthca_is_memfree(dev)) {
		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
		if (ret)
			return ret;

		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
		if (ret)
			goto err_qpc;

		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
				      qp->qpn << dev->qp_table.rdb_shift);
		if (ret)
			goto err_eqpc;

		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
						 qp->qpn, &qp->rq.db);
		if (qp->rq.db_index < 0) {
			ret = qp->rq.db_index;
			goto err_rdb;
		}

		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
						 qp->qpn, &qp->sq.db);
		if (qp->sq.db_index < 0) {
			ret = qp->sq.db_index;
			goto err_rq_db;
		}
	}

	return 0;

err_rq_db:
	mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);

err_rdb:
	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);

err_eqpc:
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

	return ret;
}
static void mthca_free_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
	}

	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}
static void mthca_wq_init(struct mthca_wq *wq)
{
	spin_lock_init(&wq->lock);
	wq->next_ind  = 0;
	wq->last_comp = wq->max - 1;
	wq->head      = 0;
	wq->tail      = 0;
	wq->last      = NULL;
}
static int mthca_alloc_qp_common(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_cq *send_cq,
				 struct mthca_cq *recv_cq,
				 enum ib_sig_type send_policy,
				 struct mthca_qp *qp)
{
	int ret;
	int i;

	atomic_set(&qp->refcount, 1);
	qp->state    	 = IB_QPS_RESET;
	qp->atomic_rd_en = 0;
	qp->resp_depth   = 0;
	qp->sq_policy    = send_policy;
	mthca_wq_init(&qp->sq);
	mthca_wq_init(&qp->rq);

	ret = mthca_alloc_memfree(dev, qp);
	if (ret)
		return ret;

	ret = mthca_alloc_wqe_buf(dev, pd, qp);
	if (ret) {
		mthca_free_memfree(dev, qp);
		return ret;
	}

	if (mthca_is_memfree(dev)) {
		struct mthca_next_seg *next;
		struct mthca_data_seg *scatter;
		int size = (sizeof (struct mthca_next_seg) +
			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
						   qp->rq.wqe_shift);
			next->ee_nds = cpu_to_be32(size);

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
			     ++scatter)
				scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
		}

		for (i = 0; i < qp->sq.max; ++i) {
			next = get_send_wqe(qp, i);
			next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
						    qp->sq.wqe_shift) +
						   qp->send_wqe_offset);
		}
	}

	return 0;
}
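
/*
 * Note: on mem-free HCAs the loops above pre-link each WQE to its
 * successor through nda_op ((i + 1) & (max - 1) is a cheap wraparound
 * since max is a power of 2) and prime every receive scatter entry
 * with MTHCA_INVAL_LKEY so the HCA treats unused entries as
 * end-of-list.
 */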
static void mthca_align_qp_size(struct mthca_dev *dev, struct mthca_qp *qp)
{
	int i;

	if (!mthca_is_memfree(dev))
		return;

	for (i = 0; 1 << i < qp->rq.max; ++i)
		; /* nothing */

	qp->rq.max = 1 << i;

	for (i = 0; 1 << i < qp->sq.max; ++i)
		; /* nothing */

	qp->sq.max = 1 << i;
}
int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   struct mthca_qp *qp)
{
	int err;

	mthca_align_qp_size(dev, qp);

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}
int mthca_alloc_sqp(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct mthca_cq *send_cq,
		    struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy,
		    int qpn,
		    int port,
		    struct mthca_sqp *sqp)
{
	int err = 0;
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;

	mthca_align_qp_size(dev, &sqp->qp);

	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
					     &sqp->header_dma, GFP_KERNEL);
	if (!sqp->header_buf)
		return -ENOMEM;

	spin_lock_irq(&dev->qp_table.lock);
	if (mthca_array_get(&dev->qp_table.qp, mqpn))
		err = -EBUSY;
	else
		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
	spin_unlock_irq(&dev->qp_table.lock);

	if (err)
		goto err_out;

	sqp->port         = port;
	sqp->qp.qpn       = mqpn;
	sqp->qp.transport = MLX;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, &sqp->qp);
	if (err)
		goto err_out_free;

	atomic_inc(&pd->sqp_count);

	return 0;

 err_out_free:
	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

 err_out:
	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
			  sqp->header_buf, sqp->header_dma);

	return err;
}
void mthca_free_qp(struct mthca_dev *dev,
		   struct mthca_qp *qp)
{
	u8 status;
	int size;
	int i;
	struct mthca_cq *send_cq;
	struct mthca_cq *recv_cq;

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp,
			  qp->qpn & (dev->limits.num_qps - 1));
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

	atomic_dec(&qp->refcount);
	wait_event(qp->wait, !atomic_read(&qp->refcount));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status);

	mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn);
	if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
		mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn);

	mthca_free_mr(dev, &qp->mr);

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	if (qp->is_direct) {
		pci_free_consistent(dev->pdev, size,
				    qp->queue.direct.buf,
				    pci_unmap_addr(&qp->queue.direct, mapping));
	} else {
		for (i = 0; i < size / PAGE_SIZE; ++i) {
			pci_free_consistent(dev->pdev, PAGE_SIZE,
					    qp->queue.page_list[i].buf,
					    pci_unmap_addr(&qp->queue.page_list[i],
							   mapping));
		}
	}

	kfree(qp->wrid);

	mthca_free_memfree(dev, qp);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	}

	mthca_free(&dev->qp_table.alloc, qp->qpn);
}
/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_send_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	int header_size;
	int err;

	ib_ud_header_init(256, /* assume a MAD */
			  sqp->ud_header.grh_present,
			  &sqp->ud_header);

	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
	if (err)
		return err;
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid == 0xffff ?
				   MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;
	mlx->vcrc = 0;

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == 0xffff)
		sqp->ud_header.lrh.source_lid = 0xffff;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
				   sqp->pkey_index,
				   &sqp->ud_header.bth.pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
				   wr->wr.ud.pkey_index,
				   &sqp->ud_header.bth.pkey);
	cpu_to_be16s(&sqp->ud_header.bth.pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	data->byte_count = cpu_to_be32(header_size);
	data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr       = cpu_to_be64(sqp->header_dma +
				       ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}
static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
				    struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mthca_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max;
}
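
/*
 * Note: wq->tail is only advanced by the completion path, so the
 * first head - tail check above is cheap but racy; if it claims the
 * queue is full, the check is redone under the CQ lock to get a
 * stable answer before the post is actually failed.
 */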
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += sizeof (struct mthca_raddr_seg) / 16 +
					sizeof (struct mthca_atomic_seg) / 16;
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			((struct mthca_tavor_ud_seg *) wqe)->lkey =
				cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
			((struct mthca_tavor_ud_seg *) wqe)->av_addr =
				cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
			((struct mthca_tavor_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_tavor_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_tavor_ud_seg);
			size += sizeof (struct mthca_tavor_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}
		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		if (likely(prev_wqe)) {
			((struct mthca_next_seg *) prev_wqe)->nda_op =
				cpu_to_be32(((ind << qp->sq.wqe_shift) +
					     qp->send_wqe_offset) |
					    mthca_opcode[wr->opcode]);
			wmb();
			((struct mthca_next_seg *) prev_wqe)->ee_nds =
				cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
		}

		if (!size0) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		u32 doorbell[2];

		doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) +
					   qp->send_wqe_offset) | f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);
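
		/*
		 * Note: the Tavor send doorbell built above carries,
		 * in word 0, the byte offset of the first new WQE
		 * or'ed with fence/opcode bits (f0 | op0) and, in
		 * word 1, (qpn << 8) | size0, the first WQE's size in
		 * 16-byte chunks.
		 */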
		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->sq.next_ind = ind;
	qp->sq.head    += nreq;

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	int ind;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);
		prev_wqe = qp->rq.last;
		qp->rq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind] = wr->wr_id;

		if (likely(prev_wqe)) {
			((struct mthca_next_seg *) prev_wqe)->nda_op =
				cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
			wmb();
			((struct mthca_next_seg *) prev_wqe)->ee_nds =
				cpu_to_be32(MTHCA_NEXT_DBD | size);
		}

		if (!size0)
			size0 = size;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}

out:
	if (likely(nreq)) {
		u32 doorbell[2];

		doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->rq.next_ind = ind;
	qp->rq.head    += nreq;

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.head & (qp->sq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += sizeof (struct mthca_raddr_seg) / 16 +
					sizeof (struct mthca_atomic_seg) / 16;
				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
			       to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
			((struct mthca_arbel_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_arbel_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_arbel_ud_seg);
			size += sizeof (struct mthca_arbel_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		if (likely(prev_wqe)) {
			((struct mthca_next_seg *) prev_wqe)->nda_op =
				cpu_to_be32(((ind << qp->sq.wqe_shift) +
					     qp->send_wqe_offset) |
					    mthca_opcode[wr->opcode]);
			wmb();
			((struct mthca_next_seg *) prev_wqe)->ee_nds =
				cpu_to_be32(MTHCA_NEXT_DBD | size);
		}

		if (!size0) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		u32 doorbell[2];

		doorbell[0] = cpu_to_be32((nreq << 24)                  |
					  ((qp->sq.head & 0xffff) << 8) |
					  f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

		/*
		 * Make sure doorbell record is written before we
		 * write MMIO send doorbell.
		 */
		wmb();
		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;
	void *wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.head & (qp->rq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);

		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < qp->rq.max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}
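
		/*
		 * Note: Arbel receive WQEs carry no DBD/size chain,
		 * so a scatter list shorter than max_gs is terminated
		 * by this sentinel entry with byte_count 0 and the
		 * invalid lkey.
		 */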
		qp->wrid[ind] = wr->wr_id;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
		       int index, int *dbd, u32 *new_wqe)
{
	struct mthca_next_seg *next;

	if (is_send)
		next = get_send_wqe(qp, index);
	else
		next = get_recv_wqe(qp, index);

	if (mthca_is_memfree(dev))
		*dbd = 1;
	else
		*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
	if (next->ee_nds & cpu_to_be32(0x3f))
		*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
			(next->ee_nds & cpu_to_be32(0x3f));
	else
		*new_wqe = 0;

	return 0;
}
int __devinit mthca_init_qp_table(struct mthca_dev *dev)
{
	int err;
	u8 status;
	int i;

	spin_lock_init(&dev->qp_table.lock);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * special QP for port 1 has to be even, so round up.
	 */
	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
	err = mthca_alloc_init(&dev->qp_table.alloc,
			       dev->limits.num_qps,
			       (1 << 24) - 1,
			       dev->qp_table.sqp_start +
			       MTHCA_MAX_PORTS * 2);
	if (err)
		return err;
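
	/*
	 * Note: the four reserved special QPNs are sqp_start + 0/1
	 * for QP0 (SMI) on ports 1/2 and sqp_start + 2/3 for QP1
	 * (GSI), matching is_qp0()/is_sqp() above and the mqpn
	 * arithmetic in mthca_alloc_sqp().
	 */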
	err = mthca_array_init(&dev->qp_table.qp,
			       dev->limits.num_qps);
	if (err) {
		mthca_alloc_cleanup(&dev->qp_table.alloc);
		return err;
	}

	for (i = 0; i < 2; ++i) {
		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
					    dev->qp_table.sqp_start + i * 2,
					    &status);
		if (err)
			goto err_out;
		if (status) {
			mthca_warn(dev, "CONF_SPECIAL_QP returned "
				   "status %02x, aborting.\n",
				   status);
			err = -EINVAL;
			goto err_out;
		}
	}
	return 0;

 err_out:
	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);

	return err;
}
void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
{
	int i;
	u8 status;

	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_alloc_cleanup(&dev->qp_table.alloc);
}