2 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include "ipath_verbs.h"
35 #include "ipath_kernel.h"
37 /* cut down ridiculously long IB macro names */
38 #define OP(x) IB_OPCODE_RC_##x
41 * ipath_init_restart - initialize the qp->s_sge after a restart
42 * @qp: the QP whose SGE we're restarting
43 * @wqe: the work queue entry to initialize the QP's SGE from
45 * The QP s_lock should be held and interrupts disabled.
47 static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
49 struct ipath_ibdev *dev;
/* NOTE(review): this excerpt is missing source lines (braces, the
 * declaration of 'len', etc.); comments below cover only visible code. */
/* Bytes already acknowledged as sent: number of whole packets past the
 * WQE's first PSN, times the path MTU in bytes. */
52 len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) *
53 ib_mtu_enum_to_int(qp->path_mtu);
/* Rebuild the send SGE state from the (unmodified) WQE, then skip the
 * portion that was already transmitted before the restart. */
54 qp->s_sge.sge = wqe->sg_list[0];
55 qp->s_sge.sg_list = wqe->sg_list + 1;
56 qp->s_sge.num_sge = wqe->wr.num_sge;
57 ipath_skip_sge(&qp->s_sge, len);
58 qp->s_len = wqe->length - len;
/* Re-queue the QP on the device's retry-timeout list (if not already
 * queued) so another lost ACK will trigger another restart. */
59 dev = to_idev(qp->ibqp.device);
60 spin_lock(&dev->pending_lock);
61 if (list_empty(&qp->timerwait))
62 list_add_tail(&qp->timerwait,
63 &dev->pending[dev->pending_index]);
64 spin_unlock(&dev->pending_lock);
68 * ipath_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
69 * @qp: a pointer to the QP
70 * @ohdr: a pointer to the IB header being constructed
73 * Return bth0 if constructed; otherwise, return 0.
74 * Note the QP s_lock must be held.
76 u32 ipath_make_rc_ack(struct ipath_qp *qp,
77 struct ipath_other_headers *ohdr,
84 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
88 * Send a response.  Note that we are in the responder's
89 * side of the QP context.
/* NOTE(review): lines are missing from this excerpt (declarations of
 * hwords/len/bth0, braces, 'break's); comments cover only visible code. */
/* Dispatch on the responder's ACK-generation state machine. */
91 switch (qp->s_ack_state) {
92 case OP(RDMA_READ_REQUEST):
/* Begin an RDMA read response; data comes from the saved s_rdma_sge. */
93 qp->s_cur_sge = &qp->s_rdma_sge;
/* Multi-packet response starts with FIRST; a single packet is ONLY. */
97 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
99 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
100 qp->s_rdma_len -= len;
101 bth0 = qp->s_ack_state << 24;
102 ohdr->u.aeth = ipath_compute_aeth(qp);
106 case OP(RDMA_READ_RESPONSE_FIRST):
107 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
/* fallthrough (presumably) — continue emitting MIDDLE/LAST packets. */
109 case OP(RDMA_READ_RESPONSE_MIDDLE):
110 qp->s_cur_sge = &qp->s_rdma_sge;
111 len = qp->s_rdma_len;
/* Last packet of the read response carries an AETH. */
115 ohdr->u.aeth = ipath_compute_aeth(qp);
117 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
119 qp->s_rdma_len -= len;
120 bth0 = qp->s_ack_state << 24;
123 case OP(RDMA_READ_RESPONSE_LAST):
124 case OP(RDMA_READ_RESPONSE_ONLY):
126 * We have to prevent new requests from changing
127 * the r_sge state while an ipath_verbs_send()
/* Response fully generated; return to the idle ACK state. */
130 qp->s_ack_state = OP(ACKNOWLEDGE);
134 case OP(COMPARE_SWAP):
/* Atomic response: no payload SGE, result travels in the header. */
136 qp->s_cur_sge = NULL;
139 * Set the s_ack_state so the receive interrupt handler
140 * won't try to send an ACK (out of order) until this one
143 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
144 bth0 = OP(ATOMIC_ACKNOWLEDGE) << 24;
145 ohdr->u.at.aeth = ipath_compute_aeth(qp);
/* The original data at the remote address is returned to the requester. */
146 ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->r_atomic_data);
147 hwords += sizeof(ohdr->u.at) / 4;
151 /* Send a regular ACK. */
152 qp->s_cur_sge = NULL;
155 * Set the s_ack_state so the receive interrupt handler
156 * won't try to send an ACK (out of order) until this one
159 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
160 bth0 = OP(ACKNOWLEDGE) << 24;
/* NAK path (presumably): encode MSN plus the NAK code in the AETH;
 * otherwise compute a normal credit-carrying AETH. */
162 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
164 IPATH_AETH_CREDIT_SHIFT));
166 ohdr->u.aeth = ipath_compute_aeth(qp);
/* Record header length and payload size for the send path. */
169 qp->s_hdrwords = hwords;
170 qp->s_cur_size = len;
177 * ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
178 * @qp: a pointer to the QP
179 * @ohdr: a pointer to the IB header being constructed
180 * @pmtu: the path MTU
181 * @bth0p: pointer to the BTH opcode word
182 * @bth2p: pointer to the BTH PSN word
184 * Return 1 if constructed; otherwise, return 0.
185 * Note the QP s_lock must be held and interrupts disabled.
187 int ipath_make_rc_req(struct ipath_qp *qp,
188 struct ipath_other_headers *ohdr,
189 u32 pmtu, u32 *bth0p, u32 *bth2p)
191 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
192 struct ipath_sge_state *ss;
193 struct ipath_swqe *wqe;
/* NOTE(review): this excerpt is missing many source lines (declarations,
 * braces, 'break'/'goto' statements); comments cover only visible code. */
/* Bail out unless the QP state allows sending. */
200 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
204 /* Limit the number of packets sent without an ACK. */
205 if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) {
/* Stall until an ACK arrives; update_last_psn() clears s_wait_credit
 * and reschedules the send tasklet. */
206 qp->s_wait_credit = 1;
/* Make sure the retry timer is armed while we wait. */
208 spin_lock(&dev->pending_lock);
209 if (list_empty(&qp->timerwait))
210 list_add_tail(&qp->timerwait,
211 &dev->pending[dev->pending_index]);
212 spin_unlock(&dev->pending_lock);
216 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
220 /* Send a request. */
221 wqe = get_swqe_ptr(qp, qp->s_cur);
/* Dispatch on the requester's send state machine. */
222 switch (qp->s_state) {
225 * Resend an old request or start a new one.
227 * We keep track of the current SWQE so that
228 * we don't reset the "furthest progress" state
229 * if we need to back up.
232 if (qp->s_cur == qp->s_tail) {
233 /* Check if send work queue is empty. */
234 if (qp->s_tail == qp->s_head)
/* New request: stamp it with the next PSN. */
236 wqe->psn = qp->s_next_psn;
240 * Note that we have to be careful not to modify the
241 * original work request since we may need to resend
/* Copy the WQE's scatter/gather list into the QP's working SGE state. */
244 qp->s_sge.sge = wqe->sg_list[0];
245 qp->s_sge.sg_list = wqe->sg_list + 1;
246 qp->s_sge.num_sge = wqe->wr.num_sge;
247 qp->s_len = len = wqe->length;
/* Per-opcode first-packet construction. */
250 switch (wqe->wr.opcode) {
252 case IB_WR_SEND_WITH_IMM:
253 /* If no credit, return. */
254 if (qp->s_lsn != (u32) -1 &&
255 ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
/* lpsn = last PSN of this request (multi-packet sends span
 * (len - 1) / pmtu additional packets). */
257 wqe->lpsn = wqe->psn;
259 wqe->lpsn += (len - 1) / pmtu;
260 qp->s_state = OP(SEND_FIRST);
/* Single-packet send: ONLY or ONLY_WITH_IMMEDIATE. */
264 if (wqe->wr.opcode == IB_WR_SEND)
265 qp->s_state = OP(SEND_ONLY);
267 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
268 /* Immediate data comes after the BTH */
269 ohdr->u.imm_data = wqe->wr.imm_data;
272 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
274 bth2 = 1 << 31; /* Request ACK. */
/* Advance s_cur with wraparound over the ring of WQEs. */
275 if (++qp->s_cur == qp->s_size)
279 case IB_WR_RDMA_WRITE:
280 if (newreq && qp->s_lsn != (u32) -1)
/* fallthrough (presumably) into the shared RDMA write path. */
283 case IB_WR_RDMA_WRITE_WITH_IMM:
284 /* If no credit, return. */
285 if (qp->s_lsn != (u32) -1 &&
286 ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
/* Build the RETH describing the remote buffer. */
288 ohdr->u.rc.reth.vaddr =
289 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
290 ohdr->u.rc.reth.rkey =
291 cpu_to_be32(wqe->wr.wr.rdma.rkey);
292 ohdr->u.rc.reth.length = cpu_to_be32(len);
293 hwords += sizeof(struct ib_reth) / 4;
294 wqe->lpsn = wqe->psn;
296 wqe->lpsn += (len - 1) / pmtu;
297 qp->s_state = OP(RDMA_WRITE_FIRST);
301 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
302 qp->s_state = OP(RDMA_WRITE_ONLY);
305 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
306 /* Immediate data comes after RETH */
307 ohdr->u.rc.imm_data = wqe->wr.imm_data;
309 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
312 bth2 = 1 << 31; /* Request ACK. */
313 if (++qp->s_cur == qp->s_size)
317 case IB_WR_RDMA_READ:
/* RDMA read request: RETH describes the remote source buffer. */
318 ohdr->u.rc.reth.vaddr =
319 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
320 ohdr->u.rc.reth.rkey =
321 cpu_to_be32(wqe->wr.wr.rdma.rkey);
322 ohdr->u.rc.reth.length = cpu_to_be32(len);
323 qp->s_state = OP(RDMA_READ_REQUEST);
324 hwords += sizeof(ohdr->u.rc.reth) / 4;
326 if (qp->s_lsn != (u32) -1)
329 * Adjust s_next_psn to count the
330 * expected number of responses.
333 qp->s_next_psn += (len - 1) / pmtu;
334 wqe->lpsn = qp->s_next_psn++;
338 if (++qp->s_cur == qp->s_size)
342 case IB_WR_ATOMIC_CMP_AND_SWP:
343 case IB_WR_ATOMIC_FETCH_AND_ADD:
344 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP)
345 qp->s_state = OP(COMPARE_SWAP);
347 qp->s_state = OP(FETCH_ADD);
/* AtomicETH carries the target address, rkey, and operands. */
348 ohdr->u.atomic_eth.vaddr = cpu_to_be64(
349 wqe->wr.wr.atomic.remote_addr);
350 ohdr->u.atomic_eth.rkey = cpu_to_be32(
351 wqe->wr.wr.atomic.rkey);
352 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
353 wqe->wr.wr.atomic.swap);
354 ohdr->u.atomic_eth.compare_data = cpu_to_be64(
355 wqe->wr.wr.atomic.compare_add);
356 hwords += sizeof(struct ib_atomic_eth) / 4;
358 if (qp->s_lsn != (u32) -1)
360 wqe->lpsn = wqe->psn;
362 if (++qp->s_cur == qp->s_size)
/* Advance s_tail (with wrap) past the newly started request. */
373 if (qp->s_tail >= qp->s_size)
/* Consume a PSN for this packet and keep s_next_psn monotonic. */
376 bth2 |= qp->s_psn++ & IPATH_PSN_MASK;
377 if ((int)(qp->s_psn - qp->s_next_psn) > 0)
378 qp->s_next_psn = qp->s_psn;
380 * Put the QP on the pending list so lost ACKs will cause
381 * a retry. More than one request can be pending so the
382 * QP may already be on the dev->pending list.
384 spin_lock(&dev->pending_lock);
385 if (list_empty(&qp->timerwait))
386 list_add_tail(&qp->timerwait,
387 &dev->pending[dev->pending_index]);
388 spin_unlock(&dev->pending_lock);
391 case OP(RDMA_READ_RESPONSE_FIRST):
393 * This case can only happen if a send is restarted.
394 * See ipath_restart_rc().
396 ipath_init_restart(qp, wqe);
/* fallthrough (presumably) into normal middle-of-send processing. */
399 qp->s_state = OP(SEND_MIDDLE);
401 case OP(SEND_MIDDLE):
402 bth2 = qp->s_psn++ & IPATH_PSN_MASK;
403 if ((int)(qp->s_psn - qp->s_next_psn) > 0)
404 qp->s_next_psn = qp->s_psn;
/* Final packet of a multi-packet send. */
411 if (wqe->wr.opcode == IB_WR_SEND)
412 qp->s_state = OP(SEND_LAST);
414 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
415 /* Immediate data comes after the BTH */
416 ohdr->u.imm_data = wqe->wr.imm_data;
419 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
421 bth2 |= 1 << 31; /* Request ACK. */
423 if (qp->s_cur >= qp->s_size)
427 case OP(RDMA_READ_RESPONSE_LAST):
429 * This case can only happen if a RDMA write is restarted.
430 * See ipath_restart_rc().
432 ipath_init_restart(qp, wqe);
/* fallthrough (presumably) into normal RDMA write processing. */
434 case OP(RDMA_WRITE_FIRST):
435 qp->s_state = OP(RDMA_WRITE_MIDDLE);
437 case OP(RDMA_WRITE_MIDDLE):
438 bth2 = qp->s_psn++ & IPATH_PSN_MASK;
439 if ((int)(qp->s_psn - qp->s_next_psn) > 0)
440 qp->s_next_psn = qp->s_psn;
/* Final packet of a multi-packet RDMA write. */
447 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
448 qp->s_state = OP(RDMA_WRITE_LAST);
450 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
451 /* Immediate data comes after the BTH */
452 ohdr->u.imm_data = wqe->wr.imm_data;
454 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
457 bth2 |= 1 << 31; /* Request ACK. */
459 if (qp->s_cur >= qp->s_size)
463 case OP(RDMA_READ_RESPONSE_MIDDLE):
465 * This case can only happen if a RDMA read is restarted.
466 * See ipath_restart_rc().
468 ipath_init_restart(qp, wqe);
/* Re-issue the read from the point already responded to: offset the
 * remote address by the packets already received. */
469 len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
470 ohdr->u.rc.reth.vaddr =
471 cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
472 ohdr->u.rc.reth.rkey =
473 cpu_to_be32(wqe->wr.wr.rdma.rkey);
474 ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
475 qp->s_state = OP(RDMA_READ_REQUEST);
476 hwords += sizeof(ohdr->u.rc.reth) / 4;
477 bth2 = qp->s_psn++ & IPATH_PSN_MASK;
478 if ((int)(qp->s_psn - qp->s_next_psn) > 0)
479 qp->s_next_psn = qp->s_psn;
483 if (qp->s_cur == qp->s_size)
487 case OP(RDMA_READ_REQUEST):
488 case OP(COMPARE_SWAP):
491 * We shouldn't start anything new until this request is
492 * finished. The ACK will handle rescheduling us. XXX The
493 * number of outstanding ones is negotiated at connection
494 * setup time (see pg. 258,289)? XXX Also, if we support
495 * multiple outstanding requests, we need to check the WQE
496 * IB_SEND_FENCE flag and not send a new request if a RDMA
497 * read or atomic is pending.
/* Nearly out of unacked-PSN credit: ask for an ACK on this packet. */
501 if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0)
502 bth2 |= 1 << 31; /* Request ACK. */
/* Publish header size, payload length, and BTH words to the caller. */
504 qp->s_hdrwords = hwords;
506 qp->s_cur_size = len;
507 *bth0p = bth0 | (qp->s_state << 24);
516 * send_rc_ack - Construct an ACK packet and send it
517 * @qp: a pointer to the QP
519 * This is called from ipath_rc_rcv() and only uses the receive
521 * Note that RDMA reads are handled in the send side QP state and tasklet.
523 static void send_rc_ack(struct ipath_qp *qp)
525 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
529 struct ipath_ib_header hdr;
530 struct ipath_other_headers *ohdr;
/* NOTE(review): missing lines in this excerpt include the declarations of
 * lrh0/bth0/hwords and the ohdr assignment; comments cover visible code. */
532 /* Construct the header. */
534 lrh0 = IPATH_LRH_BTH;
535 /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
/* Prepend a GRH when the destination requires one. */
537 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
538 hwords += ipath_make_grh(dev, &hdr.u.l.grh,
539 &qp->remote_ah_attr.grh,
542 lrh0 = IPATH_LRH_GRH;
544 /* read pkey_index w/o lock (it's atomic) */
545 bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index);
/* NAK path (presumably): encode the MSN and NAK code in the AETH;
 * otherwise compute a normal credit-carrying AETH. */
547 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
549 IPATH_AETH_CREDIT_SHIFT));
551 ohdr->u.aeth = ipath_compute_aeth(qp);
/* Atomic ACKs additionally carry the original remote data. */
552 if (qp->r_ack_state >= OP(COMPARE_SWAP)) {
553 bth0 |= OP(ATOMIC_ACKNOWLEDGE) << 24;
554 ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->r_atomic_data);
555 hwords += sizeof(ohdr->u.at.atomic_ack_eth) / 4;
557 bth0 |= OP(ACKNOWLEDGE) << 24;
/* Fill in LRH (SL, DLID, length, SLID) and BTH (opcode, QPN, PSN). */
558 lrh0 |= qp->remote_ah_attr.sl << 4;
559 hdr.lrh[0] = cpu_to_be16(lrh0);
560 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
561 hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
562 hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
563 ohdr->bth[0] = cpu_to_be32(bth0);
564 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
565 ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
568 * If we can send the ACK, clear the ACK state.
570 if (ipath_verbs_send(dev->dd, hwords, (u32 *) &hdr, 0, NULL) == 0) {
571 qp->r_ack_state = OP(ACKNOWLEDGE);
572 dev->n_unicast_xmit++;
575 * We are out of PIO buffers at the moment.
576 * Pass responsibility for sending the ACK to the
577 * send tasklet so that when a PIO buffer becomes
578 * available, the ACK is sent ahead of other outgoing
582 spin_lock_irq(&qp->s_lock);
583 /* Don't coalesce if a RDMA read or atomic is pending. */
584 if (qp->s_ack_state == OP(ACKNOWLEDGE) ||
585 qp->s_ack_state < OP(RDMA_READ_REQUEST)) {
/* Hand the receive-side ACK state over to the send side. */
586 qp->s_ack_state = qp->r_ack_state;
587 qp->s_nak_state = qp->r_nak_state;
588 qp->s_ack_psn = qp->r_ack_psn;
589 qp->r_ack_state = OP(ACKNOWLEDGE);
591 spin_unlock_irq(&qp->s_lock);
593 /* Call ipath_do_rc_send() in another thread. */
594 tasklet_hi_schedule(&qp->s_task);
599 * reset_psn - reset the QP state to send starting from PSN
601 * @psn: the packet sequence number to restart at
603 * This is called from ipath_rc_rcv() to process an incoming RC ACK
605 * Called at interrupt level with the QP s_lock held.
607 static void reset_psn(struct ipath_qp *qp, u32 psn)
/* NOTE(review): missing lines include the declaration/initialization of
 * 'n', 'opcode', 'diff', braces and the scan loop; comments cover only
 * the visible code. */
610 struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
616 * If we are starting the request from the beginning,
617 * let the normal send code handle initialization.
619 if (ipath_cmp24(psn, wqe->psn) <= 0) {
620 qp->s_state = OP(SEND_LAST);
624 /* Find the work request opcode corresponding to the given PSN. */
625 opcode = wqe->wr.opcode;
/* Walk forward through the WQE ring (with wraparound) until we find
 * the request containing the given PSN. */
629 if (++n == qp->s_size)
633 wqe = get_swqe_ptr(qp, n);
634 diff = ipath_cmp24(psn, wqe->psn);
639 * If we are starting the request from the beginning,
640 * let the normal send code handle initialization.
643 qp->s_state = OP(SEND_LAST);
646 opcode = wqe->wr.opcode;
650 * Set the state to restart in the middle of a request.
651 * Don't change the s_sge, s_cur_sge, or s_cur_size.
652 * See ipath_do_rc_send().
/* The RDMA_READ_RESPONSE_* states are reused here as "restart" markers
 * for the requester; ipath_make_rc_req() recognizes them. */
656 case IB_WR_SEND_WITH_IMM:
657 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
660 case IB_WR_RDMA_WRITE:
661 case IB_WR_RDMA_WRITE_WITH_IMM:
662 qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
665 case IB_WR_RDMA_READ:
666 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
671 * This case shouldn't happen since it's only
674 qp->s_state = OP(SEND_LAST);
681 * ipath_restart_rc - back up requester to resend the last un-ACKed request
682 * @qp: the QP to restart
683 * @psn: packet sequence number for the request
684 * @wc: the work completion request
686 * The QP s_lock should be held and interrupts disabled.
688 void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
690 struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
691 struct ipath_ibdev *dev;
694 * If there are no requests pending, we are done.
696 if (ipath_cmp24(psn, qp->s_next_psn) >= 0 ||
697 qp->s_last == qp->s_tail)
/* Retries exhausted: complete the WQE with a retry-exceeded error and
 * move the QP to the send-queue-error state. */
700 if (qp->s_retry == 0) {
701 wc->wr_id = wqe->wr.wr_id;
702 wc->status = IB_WC_RETRY_EXC_ERR;
703 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
706 wc->qp_num = qp->ibqp.qp_num;
707 wc->src_qp = qp->remote_qpn;
709 wc->slid = qp->remote_ah_attr.dlid;
710 wc->sl = qp->remote_ah_attr.sl;
711 wc->dlid_path_bits = 0;
713 ipath_sqerror_qp(qp, wc);
719 * Remove the QP from the timeout queue.
720 * Note: it may already have been removed by ipath_ib_timer().
722 dev = to_idev(qp->ibqp.device);
723 spin_lock(&dev->pending_lock);
724 if (!list_empty(&qp->timerwait))
725 list_del_init(&qp->timerwait);
726 spin_unlock(&dev->pending_lock);
728 if (wqe->wr.opcode == IB_WR_RDMA_READ)
/* Count each backed-up PSN as a resend for the statistics. */
731 dev->n_rc_resends += (int)qp->s_psn - (int)psn;
/* Kick the send tasklet to actually retransmit. */
736 tasklet_hi_schedule(&qp->s_task);
/* Record the last ACKed PSN; if the sender was stalled waiting for
 * ACK credit (see ipath_make_rc_req()), clear the stall and reschedule
 * the send tasklet. */
742 static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
744 if (qp->s_wait_credit) {
745 qp->s_wait_credit = 0;
746 tasklet_hi_schedule(&qp->s_task);
748 qp->s_last_psn = psn;
752 * do_rc_ack - process an incoming RC ACK
753 * @qp: the QP the ACK came in on
754 * @psn: the packet sequence number of the ACK
755 * @opcode: the opcode of the request that resulted in the ACK
757 * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
759 * Called at interrupt level with the QP s_lock held and interrupts disabled.
760 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
762 static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
764 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
766 struct ipath_swqe *wqe;
/* NOTE(review): missing lines include the declarations of 'ret'/'wc',
 * braces, 'goto'/'break' statements; comments cover only visible code. */
770 * Remove the QP from the timeout queue (or RNR timeout queue).
771 * If ipath_ib_timer() has already removed it,
772 * it's OK since we hold the QP s_lock and ipath_restart_rc()
773 * just won't find anything to restart if we ACK everything.
775 spin_lock(&dev->pending_lock);
776 if (!list_empty(&qp->timerwait))
777 list_del_init(&qp->timerwait);
778 spin_unlock(&dev->pending_lock);
781 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
782 * requests and implicitly NAK RDMA read and atomic requests issued
783 * before the NAK'ed request. The MSN won't include the NAK'ed
784 * request but will include an ACK'ed request(s).
786 wqe = get_swqe_ptr(qp, qp->s_last);
788 /* Nothing is pending to ACK/NAK. */
789 if (qp->s_last == qp->s_tail)
793 * The MSN might be for a later WQE than the PSN indicates so
794 * only complete WQEs that the PSN finishes.
796 while (ipath_cmp24(psn, wqe->lpsn) >= 0) {
797 /* If we are ACKing a WQE, the MSN should be >= the SSN. */
798 if (ipath_cmp24(aeth, wqe->ssn) < 0)
801 * If this request is a RDMA read or atomic, and the ACK is
802 * for a later operation, this ACK NAKs the RDMA read or
803 * atomic. In other words, only a RDMA_READ_LAST or ONLY
804 * can ACK a RDMA read and likewise for atomic ops. Note
805 * that the NAK case can only happen if relaxed ordering is
806 * used and requests are sent after an RDMA read or atomic
807 * is sent but before the response is received.
809 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
810 opcode != OP(RDMA_READ_RESPONSE_LAST)) ||
811 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
812 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
813 (opcode != OP(ATOMIC_ACKNOWLEDGE) ||
814 ipath_cmp24(wqe->psn, psn) != 0))) {
816 * The last valid PSN seen is the previous
819 update_last_psn(qp, wqe->psn - 1);
820 /* Retry this request. */
821 ipath_restart_rc(qp, wqe->psn, &wc);
823 * No need to process the ACK/NAK since we are
824 * restarting an earlier request.
828 /* Post a send completion queue entry if requested. */
829 if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
830 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
831 wc.wr_id = wqe->wr.wr_id;
832 wc.status = IB_WC_SUCCESS;
833 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
835 wc.byte_len = wqe->length;
836 wc.qp_num = qp->ibqp.qp_num;
837 wc.src_qp = qp->remote_qpn;
839 wc.slid = qp->remote_ah_attr.dlid;
840 wc.sl = qp->remote_ah_attr.sl;
841 wc.dlid_path_bits = 0;
843 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
/* Completed requests reset the retry budget. */
845 qp->s_retry = qp->s_retry_cnt;
847 * If we are completing a request which is in the process of
848 * being resent, we can stop resending it since we know the
849 * responder has already seen it.
851 if (qp->s_last == qp->s_cur) {
852 if (++qp->s_cur >= qp->s_size)
854 wqe = get_swqe_ptr(qp, qp->s_cur);
855 qp->s_state = OP(SEND_LAST);
856 qp->s_psn = wqe->psn;
/* Retire the completed WQE and advance to the next (with wrap). */
858 if (++qp->s_last >= qp->s_size)
860 wqe = get_swqe_ptr(qp, qp->s_last);
861 if (qp->s_last == qp->s_tail)
/* The top 3 bits of the AETH distinguish ACK (0), RNR NAK (1),
 * reserved (2), and NAK (3). */
865 switch (aeth >> 29) {
868 /* If this is a partial ACK, reset the retransmit timer. */
869 if (qp->s_last != qp->s_tail) {
870 spin_lock(&dev->pending_lock);
871 list_add_tail(&qp->timerwait,
872 &dev->pending[dev->pending_index]);
873 spin_unlock(&dev->pending_lock);
875 ipath_get_credit(qp, aeth);
876 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
877 qp->s_retry = qp->s_retry_cnt;
878 update_last_psn(qp, psn);
882 case 1: /* RNR NAK */
884 if (qp->s_rnr_retry == 0) {
885 if (qp->s_last == qp->s_tail)
888 wc.status = IB_WC_RNR_RETRY_EXC_ERR;
/* RNR retry count 7 means retry forever; don't decrement it. */
891 if (qp->s_rnr_retry_cnt < 7)
893 if (qp->s_last == qp->s_tail)
896 /* The last valid PSN is the previous PSN. */
897 update_last_psn(qp, psn - 1);
899 dev->n_rc_resends += (int)qp->s_psn - (int)psn;
/* Sleep for the RNR timeout encoded in the AETH, then retry. */
904 ib_ipath_rnr_table[(aeth >> IPATH_AETH_CREDIT_SHIFT) &
905 IPATH_AETH_CREDIT_MASK];
906 ipath_insert_rnr_queue(qp);
910 /* The last valid PSN seen is the previous request's. */
911 if (qp->s_last != qp->s_tail)
912 update_last_psn(qp, wqe->psn - 1);
/* Dispatch on the 5-bit NAK code carried in the AETH credit field. */
913 switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) &
914 IPATH_AETH_CREDIT_MASK) {
915 case 0: /* PSN sequence error */
918 * Back up to the responder's expected PSN. XXX
919 * Note that we might get a NAK in the middle of an
920 * RDMA READ response which terminates the RDMA
923 if (qp->s_last == qp->s_tail)
926 if (ipath_cmp24(psn, wqe->psn) < 0)
929 /* Retry the request. */
930 ipath_restart_rc(qp, psn, &wc);
933 case 1: /* Invalid Request */
934 wc.status = IB_WC_REM_INV_REQ_ERR;
/* fallthrough (presumably) to the common error-completion code. */
938 case 2: /* Remote Access Error */
939 wc.status = IB_WC_REM_ACCESS_ERR;
943 case 3: /* Remote Operation Error */
944 wc.status = IB_WC_REM_OP_ERR;
/* Fatal NAK: complete the WQE with the error and fail the QP. */
947 wc.wr_id = wqe->wr.wr_id;
948 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
951 wc.qp_num = qp->ibqp.qp_num;
952 wc.src_qp = qp->remote_qpn;
954 wc.slid = qp->remote_ah_attr.dlid;
955 wc.sl = qp->remote_ah_attr.sl;
956 wc.dlid_path_bits = 0;
958 ipath_sqerror_qp(qp, &wc);
962 /* Ignore other reserved NAK error codes */
965 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
968 default: /* 2: reserved */
970 /* Ignore reserved NAK codes. */
979 * ipath_rc_rcv_resp - process an incoming RC response packet
980 * @dev: the device this packet came in on
981 * @ohdr: the other headers for this packet
982 * @data: the packet data
983 * @tlen: the packet length
984 * @qp: the QP for this packet
985 * @opcode: the opcode for this packet
986 * @psn: the packet sequence number for this packet
987 * @hdrsize: the header length
988 * @pmtu: the path MTU
989 * @header_in_data: true if part of the header data is in the data buffer
991 * This is called from ipath_rc_rcv() to process an incoming RC response
992 * packet for the given QP.
993 * Called at interrupt level.
995 static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
996 struct ipath_other_headers *ohdr,
997 void *data, u32 tlen,
1000 u32 psn, u32 hdrsize, u32 pmtu,
1003 unsigned long flags;
/* NOTE(review): missing lines include parameters (qp, opcode,
 * header_in_data), local declarations (aeth, diff, pad, wc), braces and
 * 'goto' statements; comments cover only visible code. */
1009 spin_lock_irqsave(&qp->s_lock, flags);
1011 /* Ignore invalid responses. */
1012 if (ipath_cmp24(psn, qp->s_next_psn) >= 0)
1015 /* Ignore duplicate responses. */
1016 diff = ipath_cmp24(psn, qp->s_last_psn);
1017 if (unlikely(diff <= 0)) {
1018 /* Update credits for "ghost" ACKs */
1019 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
/* AETH is in the header, unless the header spilled into the
 * data buffer (header_in_data). */
1020 if (!header_in_data)
1021 aeth = be32_to_cpu(ohdr->u.aeth);
1023 aeth = be32_to_cpu(((__be32 *) data)[0]);
1024 data += sizeof(__be32);
/* Top 3 bits zero means a plain ACK: harvest the credit. */
1026 if ((aeth >> 29) == 0)
1027 ipath_get_credit(qp, aeth);
/* Dispatch on the response opcode. */
1033 case OP(ACKNOWLEDGE):
1034 case OP(ATOMIC_ACKNOWLEDGE):
1035 case OP(RDMA_READ_RESPONSE_FIRST):
1036 if (!header_in_data)
1037 aeth = be32_to_cpu(ohdr->u.aeth);
1039 aeth = be32_to_cpu(((__be32 *) data)[0]);
1040 data += sizeof(__be32);
/* Atomic result is delivered directly into the requester's SGE. */
1042 if (opcode == OP(ATOMIC_ACKNOWLEDGE))
1043 *(u64 *) qp->s_sge.sge.vaddr = *(u64 *) data;
1044 if (!do_rc_ack(qp, aeth, psn, opcode) ||
1045 opcode != OP(RDMA_READ_RESPONSE_FIRST))
1049 * do_rc_ack() has already checked the PSN so skip
1050 * the sequence check.
1054 case OP(RDMA_READ_RESPONSE_MIDDLE):
1055 /* no AETH, no ACK */
1056 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
/* Out-of-sequence read response: restart the read. */
1058 ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
1062 if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
/* MIDDLE packets must carry exactly one pMTU of payload. */
1064 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1066 if (unlikely(pmtu >= qp->s_len))
1068 /* We got a response so update the timeout. */
1069 if (unlikely(qp->s_last == qp->s_tail ||
1070 get_swqe_ptr(qp, qp->s_last)->wr.opcode !=
1073 spin_lock(&dev->pending_lock);
1074 if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
1075 list_move_tail(&qp->timerwait,
1076 &dev->pending[dev->pending_index]);
1077 spin_unlock(&dev->pending_lock);
1079 * Update the RDMA receive state but do the copy w/o
1080 * holding the locks and blocking interrupts.
1081 * XXX Yet another place that affects relaxed RDMA order
1082 * since we don't want s_sge modified.
1085 update_last_psn(qp, psn);
1086 spin_unlock_irqrestore(&qp->s_lock, flags);
1087 ipath_copy_sge(&qp->s_sge, data, pmtu);
1090 case OP(RDMA_READ_RESPONSE_LAST):
1091 /* ACKs READ req. */
1092 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
1094 ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
/* fallthrough (presumably) — shares validation with ONLY. */
1098 case OP(RDMA_READ_RESPONSE_ONLY):
1099 if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
1102 * Get the number of bytes the message was padded by.
1104 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1106 * Check that the data size is >= 1 && <= pmtu.
1107 * Remember to account for the AETH header (4) and
1110 if (unlikely(tlen <= (hdrsize + pad + 8))) {
1111 /* XXX Need to generate an error CQ entry. */
1114 tlen -= hdrsize + pad + 8;
1115 if (unlikely(tlen != qp->s_len)) {
1116 /* XXX Need to generate an error CQ entry. */
1119 if (!header_in_data)
1120 aeth = be32_to_cpu(ohdr->u.aeth);
1122 aeth = be32_to_cpu(((__be32 *) data)[0]);
1123 data += sizeof(__be32);
1125 ipath_copy_sge(&qp->s_sge, data, tlen);
1126 if (do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST))) {
1128 * Change the state so we continue
1129 * processing new requests and wake up the
1130 * tasklet if there are posted sends.
1132 qp->s_state = OP(SEND_LAST);
1133 if (qp->s_tail != qp->s_head)
1134 tasklet_hi_schedule(&qp->s_task);
1140 spin_unlock_irqrestore(&qp->s_lock, flags);
1146 * ipath_rc_rcv_error - process an incoming duplicate or error RC packet
1147 * @dev: the device this packet came in on
1148 * @ohdr: the other headers for this packet
1149 * @data: the packet data
1150 * @qp: the QP for this packet
1151 * @opcode: the opcode for this packet
1152 * @psn: the packet sequence number for this packet
1153 * @diff: the difference between the PSN and the expected PSN
1154 * @header_in_data: true if part of the header data is in the data buffer
1156 * This is called from ipath_rc_rcv() to process an unexpected
1157 * incoming RC packet for the given QP.
1158 * Called at interrupt level.
1159 * Return 1 if no more processing is needed; otherwise return 0 to
1160 * schedule a response to be sent and the s_lock unlocked.
1162 static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
1163 struct ipath_other_headers *ohdr,
1165 struct ipath_qp *qp,
1171 struct ib_reth *reth;
/* NOTE(review): missing lines include parameters (data, opcode, psn,
 * diff, header_in_data), braces, 'goto'/'return' statements and the
 * 'diff > 0' / opcode switch scaffolding; comments cover visible code. */
1175 * Packet sequence error.
1176 * A NAK will ACK earlier sends and RDMA writes.
1177 * Don't queue the NAK if a RDMA read, atomic, or
1178 * NAK is pending though.
1180 if (qp->s_ack_state != OP(ACKNOWLEDGE) ||
1181 qp->r_nak_state != 0)
1183 if (qp->r_ack_state < OP(COMPARE_SWAP)) {
/* Queue a PSN-error NAK carrying the PSN we expected. */
1184 qp->r_ack_state = OP(SEND_ONLY);
1185 qp->r_nak_state = IB_NAK_PSN_ERROR;
1186 /* Use the expected PSN. */
1187 qp->r_ack_psn = qp->r_psn;
1193 * Handle a duplicate request. Don't re-execute SEND, RDMA
1194 * write or atomic op. Don't NAK errors, just silently drop
1195 * the duplicate request. Note that r_sge, r_len, and
1196 * r_rcv_len may be in use so don't modify them.
1198 * We are supposed to ACK the earliest duplicate PSN but we
1199 * can coalesce an outstanding duplicate ACK. We have to
1200 * send the earliest so that RDMA reads can be restarted at
1201 * the requester's expected PSN.
1203 if (opcode == OP(RDMA_READ_REQUEST)) {
1204 /* RETH comes after BTH */
1205 if (!header_in_data)
1206 reth = &ohdr->u.rc.reth;
1208 reth = (struct ib_reth *)data;
1209 data += sizeof(*reth);
1212 * If we receive a duplicate RDMA request, it means the
1213 * requester saw a sequence error and needs to restart
1214 * from an earlier point. We can abort the current
1215 * RDMA read send in that case.
1217 spin_lock_irq(&qp->s_lock);
1218 if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
1219 (qp->s_hdrwords || ipath_cmp24(psn, qp->s_ack_psn) >= 0)) {
1221 * We are already sending earlier requested data.
1222 * Don't abort it to send later out of sequence data.
1224 spin_unlock_irq(&qp->s_lock);
/* Re-validate the (possibly offset) address range of the re-issued
 * RDMA read against the protection domain. */
1227 qp->s_rdma_len = be32_to_cpu(reth->length);
1228 if (qp->s_rdma_len != 0) {
1229 u32 rkey = be32_to_cpu(reth->rkey);
1230 u64 vaddr = be64_to_cpu(reth->vaddr);
1234 * Address range must be a subset of the original
1235 * request and start on pmtu boundaries.
1237 ok = ipath_rkey_ok(qp, &qp->s_rdma_sge,
1238 qp->s_rdma_len, vaddr, rkey,
1239 IB_ACCESS_REMOTE_READ);
1240 if (unlikely(!ok)) {
1241 spin_unlock_irq(&qp->s_lock);
/* Zero-length read: clear out the RDMA SGE state entirely. */
1245 qp->s_rdma_sge.sg_list = NULL;
1246 qp->s_rdma_sge.num_sge = 0;
1247 qp->s_rdma_sge.sge.mr = NULL;
1248 qp->s_rdma_sge.sge.vaddr = NULL;
1249 qp->s_rdma_sge.sge.length = 0;
1250 qp->s_rdma_sge.sge.sge_length = 0;
/* Hand the duplicate read to the send tasklet to (re)answer. */
1252 qp->s_ack_state = opcode;
1253 qp->s_ack_psn = psn;
1254 spin_unlock_irq(&qp->s_lock);
1255 tasklet_hi_schedule(&qp->s_task);
1260 * A pending RDMA read will ACK anything before it so
1261 * ignore earlier duplicate requests.
1263 if (qp->s_ack_state != OP(ACKNOWLEDGE))
1267 * If an ACK is pending, don't replace the pending ACK
1268 * with an earlier one since the later one will ACK the earlier.
1269 * Also, if we already have a pending atomic, send it.
1271 if (qp->r_ack_state != OP(ACKNOWLEDGE) &&
1272 (ipath_cmp24(psn, qp->r_ack_psn) <= 0 ||
1273 qp->r_ack_state >= OP(COMPARE_SWAP)))
1276 case OP(COMPARE_SWAP):
1279 * Check for the PSN of the last atomic operation
1280 * performed and resend the result if found.
1282 if ((psn & IPATH_PSN_MASK) != qp->r_atomic_psn)
/* Queue the duplicate ACK for the send path. */
1286 qp->r_ack_state = opcode;
1287 qp->r_nak_state = 0;
1288 qp->r_ack_psn = psn;
/* Move the QP to the error state with the given completion status,
 * taking the s_lock around the transition. */
1296 static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
1298 spin_lock_irq(&qp->s_lock);
1299 qp->state = IB_QPS_ERR;
1300 ipath_error_qp(qp, err);
1301 spin_unlock_irq(&qp->s_lock);
1305 * ipath_rc_rcv - process an incoming RC packet
1306 * @dev: the device this packet came in on
1307 * @hdr: the header of this packet
1308 * @has_grh: true if the header has a GRH
1309 * @data: the packet data
1310 * @tlen: the packet length
1311 * @qp: the QP for this packet
1313 * This is called from ipath_qp_rcv() to process an incoming RC packet
1315 * Called at interrupt level.
1317 void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
1318 int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
1320 struct ipath_other_headers *ohdr;
1326 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
1328 struct ib_reth *reth;
1331 /* Validate the SLID. See Ch. 9.6.1.5 */
1332 if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
1338 hdrsize = 8 + 12; /* LRH + BTH */
1339 psn = be32_to_cpu(ohdr->bth[2]);
1342 ohdr = &hdr->u.l.oth;
1343 hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
1345 * The header with GRH is 60 bytes and the core driver sets
1346 * the eager header buffer size to 56 bytes so the last 4
1347 * bytes of the BTH header (PSN) is in the data buffer.
1349 header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
1350 if (header_in_data) {
1351 psn = be32_to_cpu(((__be32 *) data)[0]);
1352 data += sizeof(__be32);
1354 psn = be32_to_cpu(ohdr->bth[2]);
1358 * Process responses (ACKs) before anything else. Note that the
1359 * packet sequence number will be for something in the send work
1360 * queue rather than the expected receive packet sequence number.
1361 * In other words, this QP is the requester.
1363 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
1364 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1365 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
1366 ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
1367 hdrsize, pmtu, header_in_data);
1371 /* Compute 24 bits worth of difference. */
1372 diff = ipath_cmp24(psn, qp->r_psn);
1373 if (unlikely(diff)) {
1374 if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
1375 psn, diff, header_in_data))
1380 /* Check for opcode sequence errors. */
1381 switch (qp->r_state) {
1382 case OP(SEND_FIRST):
1383 case OP(SEND_MIDDLE):
1384 if (opcode == OP(SEND_MIDDLE) ||
1385 opcode == OP(SEND_LAST) ||
1386 opcode == OP(SEND_LAST_WITH_IMMEDIATE))
1390 * A NAK will ACK earlier sends and RDMA writes.
1391 * Don't queue the NAK if a RDMA read, atomic, or NAK
1392 * is pending though.
1394 if (qp->r_ack_state >= OP(COMPARE_SWAP))
1396 ipath_rc_error(qp, IB_WC_REM_INV_REQ_ERR);
1397 qp->r_ack_state = OP(SEND_ONLY);
1398 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
1399 qp->r_ack_psn = qp->r_psn;
1402 case OP(RDMA_WRITE_FIRST):
1403 case OP(RDMA_WRITE_MIDDLE):
1404 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
1405 opcode == OP(RDMA_WRITE_LAST) ||
1406 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1411 if (opcode == OP(SEND_MIDDLE) ||
1412 opcode == OP(SEND_LAST) ||
1413 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
1414 opcode == OP(RDMA_WRITE_MIDDLE) ||
1415 opcode == OP(RDMA_WRITE_LAST) ||
1416 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1419 * Note that it is up to the requester to not send a new
1420 * RDMA read or atomic operation before receiving an ACK
1421 * for the previous operation.
1429 /* OK, process the packet. */
1431 case OP(SEND_FIRST):
1432 if (!ipath_get_rwqe(qp, 0)) {
1435 * A RNR NAK will ACK earlier sends and RDMA writes.
1436 * Don't queue the NAK if a RDMA read or atomic
1437 * is pending though.
1439 if (qp->r_ack_state >= OP(COMPARE_SWAP))
1441 qp->r_ack_state = OP(SEND_ONLY);
1442 qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
1443 qp->r_ack_psn = qp->r_psn;
1448 case OP(SEND_MIDDLE):
1449 case OP(RDMA_WRITE_MIDDLE):
1451 /* Check for invalid length PMTU or posted rwqe len. */
1452 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1454 qp->r_rcv_len += pmtu;
1455 if (unlikely(qp->r_rcv_len > qp->r_len))
1457 ipath_copy_sge(&qp->r_sge, data, pmtu);
1460 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
1462 if (!ipath_get_rwqe(qp, 1))
1467 case OP(SEND_ONLY_WITH_IMMEDIATE):
1468 if (!ipath_get_rwqe(qp, 0))
1471 if (opcode == OP(SEND_ONLY))
1474 case OP(SEND_LAST_WITH_IMMEDIATE):
1476 if (header_in_data) {
1477 wc.imm_data = *(__be32 *) data;
1478 data += sizeof(__be32);
1480 /* Immediate data comes after BTH */
1481 wc.imm_data = ohdr->u.imm_data;
1484 wc.wc_flags = IB_WC_WITH_IMM;
1487 case OP(RDMA_WRITE_LAST):
1489 /* Get the number of bytes the message was padded by. */
1490 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1491 /* Check for invalid length. */
1492 /* XXX LAST len should be >= 1 */
1493 if (unlikely(tlen < (hdrsize + pad + 4)))
1495 /* Don't count the CRC. */
1496 tlen -= (hdrsize + pad + 4);
1497 wc.byte_len = tlen + qp->r_rcv_len;
1498 if (unlikely(wc.byte_len > qp->r_len))
1500 ipath_copy_sge(&qp->r_sge, data, tlen);
1502 if (!qp->r_wrid_valid)
1504 qp->r_wrid_valid = 0;
1505 wc.wr_id = qp->r_wr_id;
1506 wc.status = IB_WC_SUCCESS;
1507 wc.opcode = IB_WC_RECV;
1509 wc.qp_num = qp->ibqp.qp_num;
1510 wc.src_qp = qp->remote_qpn;
1512 wc.slid = qp->remote_ah_attr.dlid;
1513 wc.sl = qp->remote_ah_attr.sl;
1514 wc.dlid_path_bits = 0;
1516 /* Signal completion event if the solicited bit is set. */
1517 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
1519 __constant_cpu_to_be32(1 << 23)) != 0);
1522 case OP(RDMA_WRITE_FIRST):
1523 case OP(RDMA_WRITE_ONLY):
1524 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
1526 /* RETH comes after BTH */
1527 if (!header_in_data)
1528 reth = &ohdr->u.rc.reth;
1530 reth = (struct ib_reth *)data;
1531 data += sizeof(*reth);
1533 hdrsize += sizeof(*reth);
1534 qp->r_len = be32_to_cpu(reth->length);
1536 if (qp->r_len != 0) {
1537 u32 rkey = be32_to_cpu(reth->rkey);
1538 u64 vaddr = be64_to_cpu(reth->vaddr);
1541 /* Check rkey & NAK */
1542 ok = ipath_rkey_ok(qp, &qp->r_sge,
1543 qp->r_len, vaddr, rkey,
1544 IB_ACCESS_REMOTE_WRITE);
1548 qp->r_sge.sg_list = NULL;
1549 qp->r_sge.sge.mr = NULL;
1550 qp->r_sge.sge.vaddr = NULL;
1551 qp->r_sge.sge.length = 0;
1552 qp->r_sge.sge.sge_length = 0;
1554 if (unlikely(!(qp->qp_access_flags &
1555 IB_ACCESS_REMOTE_WRITE)))
1557 if (opcode == OP(RDMA_WRITE_FIRST))
1559 else if (opcode == OP(RDMA_WRITE_ONLY))
1561 if (!ipath_get_rwqe(qp, 1))
1565 case OP(RDMA_READ_REQUEST):
1566 /* RETH comes after BTH */
1567 if (!header_in_data)
1568 reth = &ohdr->u.rc.reth;
1570 reth = (struct ib_reth *)data;
1571 data += sizeof(*reth);
1573 if (unlikely(!(qp->qp_access_flags &
1574 IB_ACCESS_REMOTE_READ)))
1576 spin_lock_irq(&qp->s_lock);
1577 qp->s_rdma_len = be32_to_cpu(reth->length);
1578 if (qp->s_rdma_len != 0) {
1579 u32 rkey = be32_to_cpu(reth->rkey);
1580 u64 vaddr = be64_to_cpu(reth->vaddr);
1583 /* Check rkey & NAK */
1584 ok = ipath_rkey_ok(qp, &qp->s_rdma_sge,
1585 qp->s_rdma_len, vaddr, rkey,
1586 IB_ACCESS_REMOTE_READ);
1587 if (unlikely(!ok)) {
1588 spin_unlock_irq(&qp->s_lock);
1592 * Update the next expected PSN. We add 1 later
1593 * below, so only add the remainder here.
1595 if (qp->s_rdma_len > pmtu)
1596 qp->r_psn += (qp->s_rdma_len - 1) / pmtu;
1598 qp->s_rdma_sge.sg_list = NULL;
1599 qp->s_rdma_sge.num_sge = 0;
1600 qp->s_rdma_sge.sge.mr = NULL;
1601 qp->s_rdma_sge.sge.vaddr = NULL;
1602 qp->s_rdma_sge.sge.length = 0;
1603 qp->s_rdma_sge.sge.sge_length = 0;
1606 * We need to increment the MSN here instead of when we
1607 * finish sending the result since a duplicate request would
1608 * increment it more than once.
1612 qp->s_ack_state = opcode;
1613 qp->s_ack_psn = psn;
1614 spin_unlock_irq(&qp->s_lock);
1617 qp->r_state = opcode;
1618 qp->r_nak_state = 0;
1620 /* Call ipath_do_rc_send() in another thread. */
1621 tasklet_hi_schedule(&qp->s_task);
1625 case OP(COMPARE_SWAP):
1626 case OP(FETCH_ADD): {
1627 struct ib_atomic_eth *ateth;
1632 if (!header_in_data)
1633 ateth = &ohdr->u.atomic_eth;
1635 ateth = (struct ib_atomic_eth *)data;
1636 data += sizeof(*ateth);
1638 vaddr = be64_to_cpu(ateth->vaddr);
1639 if (unlikely(vaddr & (sizeof(u64) - 1)))
1641 rkey = be32_to_cpu(ateth->rkey);
1642 /* Check rkey & NAK */
1643 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge,
1644 sizeof(u64), vaddr, rkey,
1645 IB_ACCESS_REMOTE_ATOMIC)))
1647 if (unlikely(!(qp->qp_access_flags &
1648 IB_ACCESS_REMOTE_ATOMIC)))
1650 /* Perform atomic OP and save result. */
1651 sdata = be64_to_cpu(ateth->swap_data);
1652 spin_lock_irq(&dev->pending_lock);
1653 qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
1654 if (opcode == OP(FETCH_ADD))
1655 *(u64 *) qp->r_sge.sge.vaddr =
1656 qp->r_atomic_data + sdata;
1657 else if (qp->r_atomic_data ==
1658 be64_to_cpu(ateth->compare_data))
1659 *(u64 *) qp->r_sge.sge.vaddr = sdata;
1660 spin_unlock_irq(&dev->pending_lock);
1662 qp->r_atomic_psn = psn & IPATH_PSN_MASK;
1668 /* Drop packet for unknown opcodes. */
1672 qp->r_state = opcode;
1673 qp->r_nak_state = 0;
1674 /* Send an ACK if requested or required. */
1675 if (psn & (1 << 31)) {
1677 * Coalesce ACKs unless there is a RDMA READ or
1680 if (qp->r_ack_state < OP(COMPARE_SWAP)) {
1681 qp->r_ack_state = opcode;
1682 qp->r_ack_psn = psn;
1690 * A NAK will ACK earlier sends and RDMA writes.
1691 * Don't queue the NAK if a RDMA read, atomic, or NAK
1692 * is pending though.
1694 if (qp->r_ack_state < OP(COMPARE_SWAP)) {
1695 ipath_rc_error(qp, IB_WC_REM_ACCESS_ERR);
1696 qp->r_ack_state = OP(RDMA_WRITE_ONLY);
1697 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
1698 qp->r_ack_psn = qp->r_psn;
1701 /* Send ACK right away unless the send tasklet has a pending ACK. */
1702 if (qp->s_ack_state == OP(ACKNOWLEDGE))