2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include "ipath_verbs.h"
35 #include "ipath_kernel.h"
37 /* cut down ridiculously long IB macro names */
38 #define OP(x) IB_OPCODE_UC_##x
41 * ipath_make_uc_req - construct a request packet (SEND, RDMA write)
42 * @qp: a pointer to the QP
44 * Return 1 if constructed; otherwise, return 0.
/*
 * Builds the next outgoing UC packet for @qp by advancing the per-QP send
 * state machine (qp->s_state) through the UC opcode sequence
 * SEND_{FIRST,MIDDLE,LAST,ONLY}[_WITH_IMMEDIATE] / RDMA_WRITE_*, filling
 * in the transport header at *ohdr, then handing off to
 * ipath_make_ruc_header() to finish LRH/BTH construction.
 *
 * NOTE(review): this extract elides a number of original lines (closing
 * braces, bail paths, the pmtu comparisons that pick FIRST vs ONLY);
 * comments below describe only what the visible statements establish.
 */
46 int ipath_make_uc_req(struct ipath_qp *qp)
48 struct ipath_other_headers *ohdr;
49 struct ipath_swqe *wqe;
53 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
/* Refuse to build a packet unless the QP state allows sending. */
56 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK))
/*
 * Pick the transport-header location: when the address handle carries a
 * GRH, the BTH lives in the "long" (u.l) header variant instead.
 */
59 ohdr = &qp->s_hdr.u.oth;
60 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
61 ohdr = &qp->s_hdr.u.l.oth;
63 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
65 bth0 = 1 << 22; /* Set M bit */
67 /* Get the next send request. */
68 wqe = get_swqe_ptr(qp, qp->s_cur);
/* Dispatch on where we are in the current message. */
70 switch (qp->s_state) {
72 /* Check if send work queue is empty. */
73 if (qp->s_cur == qp->s_head)
76 * Start a new request.
/* Latch the PSN and set up the gather list for the new WQE. */
78 qp->s_psn = wqe->psn = qp->s_next_psn;
79 qp->s_sge.sge = wqe->sg_list[0];
80 qp->s_sge.sg_list = wqe->sg_list + 1;
81 qp->s_sge.num_sge = wqe->wr.num_sge;
82 qp->s_len = len = wqe->length;
83 switch (wqe->wr.opcode) {
85 case IB_WR_SEND_WITH_IMM:
/*
 * SEND_FIRST: message spans multiple packets (the pmtu test choosing
 * this branch is elided here — presumably len > pmtu; confirm against
 * the full source).
 */
87 qp->s_state = OP(SEND_FIRST);
91 if (wqe->wr.opcode == IB_WR_SEND)
92 qp->s_state = OP(SEND_ONLY);
95 OP(SEND_ONLY_WITH_IMMEDIATE);
96 /* Immediate data comes after the BTH */
97 ohdr->u.imm_data = wqe->wr.ex.imm_data;
100 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
/* Advance s_cur with wraparound at the end of the send queue. */
103 if (++qp->s_cur >= qp->s_size)
107 case IB_WR_RDMA_WRITE:
108 case IB_WR_RDMA_WRITE_WITH_IMM:
/* RDMA writes carry a RETH (remote addr/rkey/length) after the BTH. */
109 ohdr->u.rc.reth.vaddr =
110 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
111 ohdr->u.rc.reth.rkey =
112 cpu_to_be32(wqe->wr.wr.rdma.rkey);
113 ohdr->u.rc.reth.length = cpu_to_be32(len);
114 hwords += sizeof(struct ib_reth) / 4;
116 qp->s_state = OP(RDMA_WRITE_FIRST);
120 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
121 qp->s_state = OP(RDMA_WRITE_ONLY);
124 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
125 /* Immediate data comes after the RETH */
126 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
128 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
132 if (++qp->s_cur >= qp->s_size)
/* Continuation states: FIRST falls into MIDDLE, MIDDLE into LAST. */
142 qp->s_state = OP(SEND_MIDDLE);
144 case OP(SEND_MIDDLE):
150 if (wqe->wr.opcode == IB_WR_SEND)
151 qp->s_state = OP(SEND_LAST);
153 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
154 /* Immediate data comes after the BTH */
155 ohdr->u.imm_data = wqe->wr.ex.imm_data;
158 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
161 if (++qp->s_cur >= qp->s_size)
165 case OP(RDMA_WRITE_FIRST):
166 qp->s_state = OP(RDMA_WRITE_MIDDLE);
168 case OP(RDMA_WRITE_MIDDLE):
174 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
175 qp->s_state = OP(RDMA_WRITE_LAST);
178 OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
179 /* Immediate data comes after the BTH */
180 ohdr->u.imm_data = wqe->wr.ex.imm_data;
182 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
186 if (++qp->s_cur >= qp->s_size)
/*
 * Commit the header word count and payload descriptor, then let the
 * shared RUC helper build LRH/BTH.  The opcode is placed in BTH bits
 * 31:24 and the PSN is post-incremented and masked via IPATH_PSN_MASK.
 */
191 qp->s_hdrwords = hwords;
192 qp->s_cur_sge = &qp->s_sge;
193 qp->s_cur_size = len;
194 ipath_make_ruc_header(to_idev(qp->ibqp.device),
195 qp, ohdr, bth0 | (qp->s_state << 24),
196 qp->s_next_psn++ & IPATH_PSN_MASK);
204 * ipath_uc_rcv - handle an incoming UC packet
205 * @dev: the device the packet came in on
206 * @hdr: the header of the packet
207 * @has_grh: true if the packet has a GRH
208 * @data: the packet data
209 * @tlen: the length of the packet
210 * @qp: the QP for this packet.
212 * This is called from ipath_qp_rcv() to process an incoming UC packet.
214 * Called at interrupt level.
/*
 * Receive-side UC state machine: validates the source LID, locates the
 * BTH/PSN (which may spill into the data buffer when a GRH is present),
 * enforces PSN and opcode sequencing, then copies payload into the
 * receive SGE list and, for LAST/ONLY packets, posts a completion.
 *
 * NOTE(review): this extract elides many original lines (braces, gotos,
 * drop paths); comments below describe only the visible statements.
 */
216 void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
217 int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
219 struct ipath_other_headers *ohdr;
225 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
226 struct ib_reth *reth;
229 /* Validate the SLID. See Ch. 9.6.1.5 */
230 if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
236 hdrsize = 8 + 12; /* LRH + BTH */
237 psn = be32_to_cpu(ohdr->bth[2]);
/* GRH case: header is 20 words and the PSN may live in the data buffer. */
240 ohdr = &hdr->u.l.oth;
241 hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
243 * The header with GRH is 60 bytes and the
244 * core driver sets the eager header buffer
245 * size to 56 bytes so the last 4 bytes of
246 * the BTH header (PSN) is in the data buffer.
248 header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
249 if (header_in_data) {
250 psn = be32_to_cpu(((__be32 *) data)[0]);
251 data += sizeof(__be32);
253 psn = be32_to_cpu(ohdr->bth[2]);
256 * The opcode is in the low byte when it's in network order
257 * (top byte when in host order).
259 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
264 /* Compare the PSN versus the expected PSN. */
265 if (unlikely(ipath_cmp24(psn, qp->r_psn) != 0)) {
267 * Handle a sequence error.
268 * Silently drop any current message.
/* Reset to a neutral state so only a new FIRST/ONLY resyncs us. */
272 qp->r_state = OP(SEND_LAST);
276 case OP(SEND_ONLY_WITH_IMMEDIATE):
279 case OP(RDMA_WRITE_FIRST):
280 case OP(RDMA_WRITE_ONLY):
281 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
290 /* Check for opcode sequence errors. */
291 switch (qp->r_state) {
/* A MIDDLE may only be followed by MIDDLE or LAST of the same kind. */
293 case OP(SEND_MIDDLE):
294 if (opcode == OP(SEND_MIDDLE) ||
295 opcode == OP(SEND_LAST) ||
296 opcode == OP(SEND_LAST_WITH_IMMEDIATE))
300 case OP(RDMA_WRITE_FIRST):
301 case OP(RDMA_WRITE_MIDDLE):
302 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
303 opcode == OP(RDMA_WRITE_LAST) ||
304 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
/* Otherwise only a message-starting opcode (FIRST/ONLY) is legal. */
309 if (opcode == OP(SEND_FIRST) ||
310 opcode == OP(SEND_ONLY) ||
311 opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
312 opcode == OP(RDMA_WRITE_FIRST) ||
313 opcode == OP(RDMA_WRITE_ONLY) ||
314 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
319 /* OK, process the packet. */
323 case OP(SEND_ONLY_WITH_IMMEDIATE):
/* Reuse the saved RWQE after an error, else consume a fresh one. */
325 if (qp->r_reuse_sge) {
327 qp->r_sge = qp->s_rdma_read_sge;
328 } else if (!ipath_get_rwqe(qp, 0)) {
332 /* Save the WQE so we can reuse it in case of an error. */
333 qp->s_rdma_read_sge = qp->r_sge;
335 if (opcode == OP(SEND_ONLY))
337 else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
340 case OP(SEND_MIDDLE):
341 /* Check for invalid length PMTU or posted rwqe len. */
342 if (unlikely(tlen != (hdrsize + pmtu + 4))) {
347 qp->r_rcv_len += pmtu;
348 if (unlikely(qp->r_rcv_len > qp->r_len)) {
353 ipath_copy_sge(&qp->r_sge, data, pmtu);
356 case OP(SEND_LAST_WITH_IMMEDIATE):
358 if (header_in_data) {
359 wc.imm_data = *(__be32 *) data;
360 data += sizeof(__be32);
362 /* Immediate data comes after BTH */
363 wc.imm_data = ohdr->u.imm_data;
366 wc.wc_flags = IB_WC_WITH_IMM;
370 /* Get the number of bytes the message was padded by. */
371 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
372 /* Check for invalid length. */
373 /* XXX LAST len should be >= 1 */
374 if (unlikely(tlen < (hdrsize + pad + 4))) {
379 /* Don't count the CRC. */
380 tlen -= (hdrsize + pad + 4);
381 wc.byte_len = tlen + qp->r_rcv_len;
382 if (unlikely(wc.byte_len > qp->r_len)) {
387 /* XXX Need to free SGEs */
/* Message complete: copy final fragment and generate a receive CQE. */
389 ipath_copy_sge(&qp->r_sge, data, tlen);
390 wc.wr_id = qp->r_wr_id;
391 wc.status = IB_WC_SUCCESS;
392 wc.opcode = IB_WC_RECV;
395 wc.src_qp = qp->remote_qpn;
397 wc.slid = qp->remote_ah_attr.dlid;
398 wc.sl = qp->remote_ah_attr.sl;
399 wc.dlid_path_bits = 0;
401 /* Signal completion event if the solicited bit is set. */
402 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
404 __constant_cpu_to_be32(1 << 23)) != 0);
407 case OP(RDMA_WRITE_FIRST):
408 case OP(RDMA_WRITE_ONLY):
409 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
411 /* RETH comes after BTH */
413 reth = &ohdr->u.rc.reth;
415 reth = (struct ib_reth *)data;
416 data += sizeof(*reth);
418 hdrsize += sizeof(*reth);
419 qp->r_len = be32_to_cpu(reth->length);
/* Non-zero length: translate and permission-check the remote region. */
421 if (qp->r_len != 0) {
422 u32 rkey = be32_to_cpu(reth->rkey);
423 u64 vaddr = be64_to_cpu(reth->vaddr);
427 ok = ipath_rkey_ok(qp, &qp->r_sge, qp->r_len,
429 IB_ACCESS_REMOTE_WRITE);
/* Zero-length write: clear the SGE so nothing gets copied. */
435 qp->r_sge.sg_list = NULL;
436 qp->r_sge.sge.mr = NULL;
437 qp->r_sge.sge.vaddr = NULL;
438 qp->r_sge.sge.length = 0;
439 qp->r_sge.sge.sge_length = 0;
441 if (unlikely(!(qp->qp_access_flags &
442 IB_ACCESS_REMOTE_WRITE))) {
446 if (opcode == OP(RDMA_WRITE_ONLY))
448 else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
451 case OP(RDMA_WRITE_MIDDLE):
452 /* Check for invalid length PMTU or posted rwqe len. */
453 if (unlikely(tlen != (hdrsize + pmtu + 4))) {
457 qp->r_rcv_len += pmtu;
458 if (unlikely(qp->r_rcv_len > qp->r_len)) {
462 ipath_copy_sge(&qp->r_sge, data, pmtu);
465 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
467 if (header_in_data) {
468 wc.imm_data = *(__be32 *) data;
469 data += sizeof(__be32);
471 /* Immediate data comes after BTH */
472 wc.imm_data = ohdr->u.imm_data;
475 wc.wc_flags = IB_WC_WITH_IMM;
477 /* Get the number of bytes the message was padded by. */
478 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
479 /* Check for invalid length. */
480 /* XXX LAST len should be >= 1 */
481 if (unlikely(tlen < (hdrsize + pad + 4))) {
485 /* Don't count the CRC. */
486 tlen -= (hdrsize + pad + 4);
/* For RDMA writes the total received must exactly match the RETH length. */
487 if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
/* Write-with-immediate consumes an RWQE so a CQE can be reported. */
493 else if (!ipath_get_rwqe(qp, 1)) {
497 wc.byte_len = qp->r_len;
500 case OP(RDMA_WRITE_LAST):
502 /* Get the number of bytes the message was padded by. */
503 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
504 /* Check for invalid length. */
505 /* XXX LAST len should be >= 1 */
506 if (unlikely(tlen < (hdrsize + pad + 4))) {
510 /* Don't count the CRC. */
511 tlen -= (hdrsize + pad + 4);
512 if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
516 ipath_copy_sge(&qp->r_sge, data, tlen);
520 /* Drop packet for unknown opcodes. */
/* Remember the last opcode for the next packet's sequence check. */
525 qp->r_state = opcode;