/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ipath_verbs.h"
#include "ipath_kernel.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x
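
/*
 * Example (added note): OP(SEND_FIRST) token-pastes to
 * IB_OPCODE_RC_SEND_FIRST, one of the standard RC opcode constants
 * from the IB headers, so the switch statements below compare
 * directly against those values.
 */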
static u32 restart_sge(struct ipath_sge_state *ss, struct ipath_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	len = ((psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ipath_skip_sge(ss, len);
	return wqe->length - len;
}
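
/*
 * Illustrative note (added commentary, not in the original source):
 * each data packet consumes one PSN, so restarting a request that was
 * first sent at PSN 100 from PSN 103 with a 2048-byte path MTU skips
 * (103 - 100) * 2048 = 6144 bytes of the SGE list and returns the
 * number of bytes still left to send.
 */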
/**
 * ipath_init_restart - initialize the qp->s_sge after a restart
 * @qp: the QP whose SGE we're restarting
 * @wqe: the work queue entry to initialize the QP's SGE from
 *
 * The QP s_lock should be held and interrupts disabled.
 */
static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
{
	struct ipath_ibdev *dev;

	qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn,
				ib_mtu_enum_to_int(qp->path_mtu));
	dev = to_idev(qp->ibqp.device);
	spin_lock(&dev->pending_lock);
	if (list_empty(&qp->timerwait))
		list_add_tail(&qp->timerwait,
			      &dev->pending[dev->pending_index]);
	spin_unlock(&dev->pending_lock);
}
/**
 * ipath_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are in the responder's side of the QP context.
 * Note the QP s_lock must be held.
 */
static int ipath_make_rc_ack(struct ipath_ibdev *dev, struct ipath_qp *qp,
			     struct ipath_other_headers *ohdr, u32 pmtu)
{
	struct ipath_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		/* FALLTHROUGH */
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & IPATH_S_ACK_PENDING)
				goto normal;
			qp->s_ack_state = OP(ACKNOWLEDGE);
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/* Copy SGE state in case we need to resend */
			qp->s_ack_rdma_sge = e->rdma_sge;
			qp->s_cur_sge = &qp->s_ack_rdma_sge;
			len = e->rdma_sge.sge.sge_length;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = ipath_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			qp->s_cur_sge = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = ipath_compute_aeth(qp);
			ohdr->u.at.atomic_ack_eth[0] =
				cpu_to_be32(e->atomic_data >> 32);
			ohdr->u.at.atomic_ack_eth[1] =
				cpu_to_be32(e->atomic_data);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = e->psn;
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu)
			len = pmtu;
		else {
			ohdr->u.aeth = ipath_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			qp->s_ack_queue[qp->s_tail_ack_queue].sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
		break;

	default:
	normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~IPATH_S_ACK_PENDING;
		qp->s_cur_sge = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
					    (qp->s_nak_state <<
					     IPATH_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = ipath_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = qp->s_ack_psn & IPATH_PSN_MASK;
	}
	qp->s_hdrwords = hwords;
	qp->s_cur_size = len;
	ipath_make_ruc_header(dev, qp, ohdr, bth0, bth2);
	return 1;

bail:
	return 0;
}
/**
 * ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int ipath_make_rc_req(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_other_headers *ohdr;
	struct ipath_sge_state *ss;
	struct ipath_swqe *wqe;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	char newreq;
	unsigned long flags;
	int ret = 0;

	ohdr = &qp->s_hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr.u.l.oth;

	/*
	 * The lock is needed to synchronize between the sending tasklet,
	 * the receive interrupt handler, and timeout resends.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);

	/* Sending responses takes priority over sending requests. */
	if ((qp->r_head_ack_queue != qp->s_tail_ack_queue ||
	     (qp->s_flags & IPATH_S_ACK_PENDING) ||
	     qp->s_ack_state != OP(ACKNOWLEDGE)) &&
	    ipath_make_rc_ack(dev, qp, ohdr, pmtu))
		goto done;

	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
	    qp->s_rnr_timeout || qp->s_wait_credit)
		goto bail;

	/* Limit the number of packets sent without an ACK. */
	if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) {
		qp->s_wait_credit = 1;
		dev->n_rc_stalls++;
		goto bail;
	}

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 1 << 22; /* Set M bit */
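
	/*
	 * Added reference note: in the bth0 word assembled here, bits
	 * 31:24 hold the opcode (OR'ed in later as qp->s_state << 24),
	 * bit 23 is the Solicited Event bit, bit 22 is the MigReq (M)
	 * bit set above, bits 21:20 are the pad count, and bits 15:0
	 * carry the P_Key.
	 */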
	/* Send a request. */
	wqe = get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == qp->s_head)
				goto bail;
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= IPATH_S_FENCE_PENDING;
				goto bail;
			}
			wqe->psn = qp->s_next_psn;
			newreq = 1;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = 0;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			/* If no credit, return. */
			if (qp->s_lsn != (u32) -1 &&
			    ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
				goto bail;
			wqe->lpsn = wqe->psn;
			if (len > pmtu) {
				wqe->lpsn += (len - 1) / pmtu;
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= 1 << 23;
			bth2 = 1 << 31;	/* Request ACK. */
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;
		case IB_WR_RDMA_WRITE:
			if (newreq && qp->s_lsn != (u32) -1)
				qp->s_lsn++;
			/* FALLTHROUGH */
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (qp->s_lsn != (u32) -1 &&
			    ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
				goto bail;
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			wqe->lpsn = wqe->psn;
			if (len > pmtu) {
				wqe->lpsn += (len - 1) / pmtu;
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data = wqe->wr.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= 1 << 23;
			}
			bth2 = 1 << 31;	/* Request ACK. */
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;
		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= IPATH_S_RDMAR_PENDING;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (qp->s_lsn != (u32) -1)
					qp->s_lsn++;
				/*
				 * Adjust s_next_psn to count the
				 * expected number of responses.
				 */
				if (len > pmtu)
					qp->s_next_psn += (len - 1) / pmtu;
				wqe->lpsn = qp->s_next_psn++;
			}
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= IPATH_S_RDMAR_PENDING;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (qp->s_lsn != (u32) -1)
					qp->s_lsn++;
				wqe->lpsn = wqe->psn;
			}
			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				ohdr->u.atomic_eth.swap_data = cpu_to_be64(
					wqe->wr.wr.atomic.swap);
				ohdr->u.atomic_eth.compare_data = cpu_to_be64(
					wqe->wr.wr.atomic.compare_add);
			} else {
				qp->s_state = OP(FETCH_ADD);
				ohdr->u.atomic_eth.swap_data = cpu_to_be64(
					wqe->wr.wr.atomic.compare_add);
				ohdr->u.atomic_eth.compare_data = 0;
			}
			ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
				wqe->wr.wr.atomic.remote_addr >> 32);
			ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
				wqe->wr.wr.atomic.remote_addr);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->wr.wr.atomic.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		bth2 |= qp->s_psn & IPATH_PSN_MASK;
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else {
			qp->s_psn++;
			if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
				qp->s_next_psn = qp->s_psn;
		}
		/*
		 * Put the QP on the pending list so lost ACKs will cause
		 * a retry. More than one request can be pending so the
		 * QP may already be on the dev->pending list.
		 */
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->timerwait))
			list_add_tail(&qp->timerwait,
				      &dev->pending[dev->pending_index]);
		spin_unlock(&dev->pending_lock);
		break;
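
	/*
	 * Added note: for an RDMA read, s_psn jumps straight to
	 * wqe->lpsn + 1 above because the responder consumes one PSN
	 * for every read-response packet; lpsn was advanced accordingly
	 * when the read was first posted.
	 */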
	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * This case can only happen if a send is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
		if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= 1 << 23;
		bth2 |= 1 << 31;	/* Request ACK. */
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * This case can only happen if an RDMA write is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
		if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= 1 << 23;
		}
		bth2 |= 1 << 31;	/* Request ACK. */
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * This case can only happen if an RDMA read is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
		ohdr->u.rc.reth.vaddr =
			cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->wr.wr.rdma.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
		if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
	if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0)
		bth2 |= 1 << 31;	/* Request ACK. */
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = ss;
	qp->s_cur_size = len;
	ipath_make_ruc_header(dev, qp, ohdr, bth0 | (qp->s_state << 24), bth2);
done:
	ret = 1;
bail:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}
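
/*
 * Added note: IPATH_PSN_CREDIT bounds how far s_psn may run ahead of
 * the last ACKed PSN; ipath_make_rc_req() stalls (s_wait_credit) at
 * the limit and, one packet before it, sets the BTH AckReq bit so the
 * responder's ACK reopens the window before the sender blocks.
 */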
/**
 * send_rc_ack - construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from ipath_rc_rcv() and only uses the receive
 * side QP state.
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and tasklet.
 */
static void send_rc_ack(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	u16 lrh0;
	u32 bth0;
	u32 hwords;
	struct ipath_ib_header hdr;
	struct ipath_other_headers *ohdr;
	unsigned long flags;

	/* Don't send ACK or NAK if an RDMA read or atomic is pending. */
	if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
	    (qp->s_flags & IPATH_S_ACK_PENDING) ||
	    qp->s_ack_state != OP(ACKNOWLEDGE))
		goto queue_ack;

	/* Construct the header. */
	ohdr = &hdr.u.oth;
	lrh0 = IPATH_LRH_BTH;
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
	hwords = 6;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		hwords += ipath_make_grh(dev, &hdr.u.l.grh,
					 &qp->remote_ah_attr.grh,
					 hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = IPATH_LRH_GRH;
	}
	/* read pkey_index w/o lock (it's atomic) */
	bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index) |
		(OP(ACKNOWLEDGE) << 24) | (1 << 22);
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
					   (qp->r_nak_state <<
					    IPATH_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = ipath_compute_aeth(qp);
	lrh0 |= qp->remote_ah_attr.sl << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);

	/*
	 * If we can send the ACK, clear the ACK state.
	 */
	if (ipath_verbs_send(qp, &hdr, hwords, NULL, 0) == 0) {
		dev->n_unicast_xmit++;
		goto done;
	}

	/*
	 * We are out of PIO buffers at the moment.
	 * Pass responsibility for sending the ACK to the
	 * send tasklet so that when a PIO buffer becomes
	 * available, the ACK is sent ahead of other outgoing
	 * packets.
	 */
	dev->n_rc_qacks++;

queue_ack:
	spin_lock_irqsave(&qp->s_lock, flags);
	qp->s_flags |= IPATH_S_ACK_PENDING;
	qp->s_nak_state = qp->r_nak_state;
	qp->s_ack_psn = qp->r_ack_psn;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Call ipath_do_rc_send() in another thread. */
	tasklet_hi_schedule(&qp->s_task);

done:
	return;
}
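
/*
 * Added note: hdr.lrh[2] above is the LRH PktLen field, counted in
 * 32-bit words and including the trailing ICRC word (SIZE_OF_CRC),
 * which is why the 6-word LRH+BTH+AETH header is sent as
 * hwords + SIZE_OF_CRC.
 */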
/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from ipath_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct ipath_qp *qp, u32 psn)
{
	u32 n = qp->s_last;
	struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
	u32 opcode;

	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (ipath_cmp24(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = get_swqe_ptr(qp, n);
		diff = ipath_cmp24(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See ipath_do_rc_send().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since there is only
		 * one possible state left.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
}
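
/*
 * Added note: the RDMA_READ_RESPONSE_* values assigned above are never
 * sent as request opcodes; reset_psn() reuses them as sentinel send
 * states meaning "restarted mid-request", which ipath_make_rc_req()
 * answers by calling ipath_init_restart() before building the next
 * packet (see the matching cases there).
 */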
/**
 * ipath_restart_rc - back up requester to resend the last un-ACKed request
 * @qp: the QP to restart
 * @psn: packet sequence number for the request
 * @wc: the work completion request
 *
 * The QP s_lock should be held and interrupts disabled.
 */
void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
{
	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
	struct ipath_ibdev *dev;

	if (qp->s_retry == 0) {
		wc->wr_id = wqe->wr.wr_id;
		wc->status = IB_WC_RETRY_EXC_ERR;
		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		wc->vendor_err = 0;
		wc->byte_len = 0;
		wc->qp = &qp->ibqp;
		wc->imm_data = 0;
		wc->src_qp = qp->remote_qpn;
		wc->wc_flags = 0;
		wc->pkey_index = 0;
		wc->slid = qp->remote_ah_attr.dlid;
		wc->sl = qp->remote_ah_attr.sl;
		wc->dlid_path_bits = 0;
		wc->port_num = 0;
		ipath_sqerror_qp(qp, wc);
		goto bail;
	}
	qp->s_retry--;

	/*
	 * Remove the QP from the timeout queue.
	 * Note: it may already have been removed by ipath_ib_timer().
	 */
	dev = to_idev(qp->ibqp.device);
	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	spin_unlock(&dev->pending_lock);

	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		dev->n_rc_resends++;
	else
		dev->n_rc_resends += (qp->s_psn - psn) & IPATH_PSN_MASK;

	reset_psn(qp, psn);
	tasklet_hi_schedule(&qp->s_task);

bail:
	return;
}
static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
{
	if (qp->s_wait_credit) {
		qp->s_wait_credit = 0;
		tasklet_hi_schedule(&qp->s_task);
	}
	qp->s_last_psn = psn;
}
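
/*
 * Added reference note for the AETH handling below: bits 31:29 of an
 * AETH select the type (0 = ACK, 1 = RNR NAK, 3 = NAK, 2 is
 * reserved), bits 28:24 (IPATH_AETH_CREDIT_SHIFT and
 * IPATH_AETH_CREDIT_MASK) hold the credit count or the RNR/NAK code,
 * and bits 23:0 carry the 24-bit MSN.
 */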
/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @aeth: the AETH of the ACK
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 * @val: the atomic result value from the ACK, if any
 *
 * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held and interrupts disabled.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	struct ipath_swqe *wqe;
	int ret = 0;
	u32 ack_psn;
	int diff;

	/*
	 * Remove the QP from the timeout queue (or RNR timeout queue).
	 * If ipath_ib_timer() has already removed it,
	 * it's OK since we hold the QP s_lock and ipath_restart_rc()
	 * just won't find anything to restart if we ACK everything.
	 */
	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	spin_unlock(&dev->pending_lock);

	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request. The MSN won't include the NAK'ed
	 * request but will include the ACK'ed request(s).
	 */
	ack_psn = psn;
	if (aeth >> 29)
		ack_psn--;
	wqe = get_swqe_ptr(qp, qp->s_last);
	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = ipath_cmp24(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		    diff == 0) {
			ret = 1;
			goto bail;
		}
		/*
		 * If this request is an RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic. In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK an RDMA read and likewise for atomic ops. Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/*
			 * The last valid PSN seen is the previous
			 * request's.
			 */
			update_last_psn(qp, wqe->psn - 1);
			/* Retry this request. */
			ipath_restart_rc(qp, wqe->psn, &wc);
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail;
		}
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			*(u64 *) wqe->sg_list[0].vaddr = val;
		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & IPATH_S_FENCE_PENDING) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~IPATH_S_FENCE_PENDING;
				tasklet_hi_schedule(&qp->s_task);
			} else if (qp->s_flags & IPATH_S_RDMAR_PENDING) {
				qp->s_flags &= ~IPATH_S_RDMAR_PENDING;
				tasklet_hi_schedule(&qp->s_task);
			}
		}
		/* Post a send completion queue entry if requested. */
		if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
			wc.wr_id = wqe->wr.wr_id;
			wc.status = IB_WC_SUCCESS;
			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
			wc.vendor_err = 0;
			wc.byte_len = wqe->length;
			wc.imm_data = 0;
			wc.qp = &qp->ibqp;
			wc.src_qp = qp->remote_qpn;
			wc.wc_flags = 0;
			wc.pkey_index = 0;
			wc.slid = qp->remote_ah_attr.dlid;
			wc.sl = qp->remote_ah_attr.sl;
			wc.dlid_path_bits = 0;
			wc.port_num = 0;
			ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
		}
		qp->s_retry = qp->s_retry_cnt;
		/*
		 * If we are completing a request which is in the process of
		 * being resent, we can stop resending it since we know the
		 * responder has already seen it.
		 */
		if (qp->s_last == qp->s_cur) {
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			qp->s_last = qp->s_cur;
			if (qp->s_last == qp->s_tail)
				break;
			wqe = get_swqe_ptr(qp, qp->s_cur);
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		} else {
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
			if (qp->s_last == qp->s_tail)
				break;
			wqe = get_swqe_ptr(qp, qp->s_last);
		}
	}
	switch (aeth >> 29) {
	case 0:		/* ACK */
		dev->n_rc_acks++;
		/* If this is a partial ACK, reset the retransmit timer. */
		if (qp->s_last != qp->s_tail) {
			spin_lock(&dev->pending_lock);
			if (list_empty(&qp->timerwait))
				list_add_tail(&qp->timerwait,
					      &dev->pending[dev->pending_index]);
			spin_unlock(&dev->pending_lock);
			/*
			 * If we get a partial ACK for a resent operation,
			 * we can stop resending the earlier packets and
			 * continue with the next packet the receiver wants.
			 */
			if (ipath_cmp24(qp->s_psn, psn) <= 0) {
				reset_psn(qp, psn + 1);
				tasklet_hi_schedule(&qp->s_task);
			}
		} else if (ipath_cmp24(qp->s_psn, psn) <= 0) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = psn + 1;
		}
		ipath_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		update_last_psn(qp, psn);
		ret = 1;
		goto bail;
	case 1:		/* RNR NAK */
		dev->n_rnr_naks++;
		if (qp->s_last == qp->s_tail)
			goto bail;
		if (qp->s_rnr_retry == 0) {
			wc.status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
		if (qp->s_rnr_retry_cnt < 7)
			qp->s_rnr_retry--;

		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);

		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			dev->n_rc_resends++;
		else
			dev->n_rc_resends +=
				(qp->s_psn - psn) & IPATH_PSN_MASK;

		reset_psn(qp, psn);

		qp->s_rnr_timeout =
			ib_ipath_rnr_table[(aeth >> IPATH_AETH_CREDIT_SHIFT) &
					   IPATH_AETH_CREDIT_MASK];
		ipath_insert_rnr_queue(qp);
		goto bail;
	case 3:		/* NAK */
		if (qp->s_last == qp->s_tail)
			goto bail;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) &
			IPATH_AETH_CREDIT_MASK) {
		case 0:	/* PSN sequence error */
			dev->n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			ipath_restart_rc(qp, psn, &wc);
			break;

		case 1:	/* Invalid Request */
			wc.status = IB_WC_REM_INV_REQ_ERR;
			dev->n_other_naks++;
			goto class_b;

		case 2:	/* Remote Access Error */
			wc.status = IB_WC_REM_ACCESS_ERR;
			dev->n_other_naks++;
			goto class_b;

		case 3:	/* Remote Operation Error */
			wc.status = IB_WC_REM_OP_ERR;
			dev->n_other_naks++;
		class_b:
			wc.wr_id = wqe->wr.wr_id;
			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
			wc.vendor_err = 0;
			wc.byte_len = 0;
			wc.qp = &qp->ibqp;
			wc.imm_data = 0;
			wc.src_qp = qp->remote_qpn;
			wc.wc_flags = 0;
			wc.pkey_index = 0;
			wc.slid = qp->remote_ah_attr.dlid;
			wc.sl = qp->remote_ah_attr.sl;
			wc.dlid_path_bits = 0;
			wc.port_num = 0;
			ipath_sqerror_qp(qp, &wc);
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail;

	default:		/* 2: reserved */
	reserved:
		/* Ignore reserved NAK codes. */
		goto bail;
	}

bail:
	return ret;
}
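
/*
 * Added example: ipath_cmp24() compares PSNs modulo 2^24, returning
 * the sign of the circular distance, e.g. ipath_cmp24(0x000002,
 * 0xfffffe) > 0 because PSN 0x000002 comes four packets after
 * 0xfffffe once the 24-bit counter wraps.
 */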
/**
 * ipath_rc_rcv_resp - process an incoming RC response packet
 * @dev: the device this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 * @header_in_data: true if part of the header data is in the data buffer
 *
 * This is called from ipath_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
				     struct ipath_other_headers *ohdr,
				     void *data, u32 tlen,
				     struct ipath_qp *qp,
				     u32 opcode,
				     u32 psn, u32 hdrsize, u32 pmtu,
				     int header_in_data)
{
	struct ipath_swqe *wqe;
	unsigned long flags;
	struct ib_wc wc;
	int diff;
	u32 pad;
	u32 aeth;
	u64 val;

	spin_lock_irqsave(&qp->s_lock, flags);
	/* Ignore invalid responses. */
	if (ipath_cmp24(psn, qp->s_next_psn) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = ipath_cmp24(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			if (!header_in_data)
				aeth = be32_to_cpu(ohdr->u.aeth);
			else {
				aeth = be32_to_cpu(((__be32 *) data)[0]);
				data += sizeof(__be32);
			}
			if ((aeth >> 29) == 0)
				ipath_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	if (unlikely(qp->s_last == qp->s_tail))
		goto ack_done;
	wqe = get_swqe_ptr(qp, qp->s_last);
	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		if (!header_in_data)
			aeth = be32_to_cpu(ohdr->u.aeth);
		else {
			aeth = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		}
		if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
			if (!header_in_data) {
				__be32 *p = ohdr->u.at.atomic_ack_eth;

				val = ((u64) be32_to_cpu(p[0]) << 32) |
					be32_to_cpu(p[1]);
			} else
				val = be64_to_cpu(((__be64 *) data)[0]);
		} else
			val = 0;
		if (!do_rc_ack(qp, aeth, psn, opcode, val) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		hdrsize += 4;
		wqe = get_swqe_ptr(qp, qp->s_last);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
			dev->n_rdma_seq++;
			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
			goto ack_done;
		}
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
	read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/* We got a response so update the timeout. */
		spin_lock(&dev->pending_lock);
		if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
			list_move_tail(&qp->timerwait,
				       &dev->pending[dev->pending_index]);
		spin_unlock(&dev->pending_lock);
		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ipath_copy_sge(&qp->s_rdma_read_sge, data, pmtu);
		goto bail;
	case OP(RDMA_READ_RESPONSE_ONLY):
		if (!header_in_data)
			aeth = be32_to_cpu(ohdr->u.aeth);
		else
			aeth = be32_to_cpu(((__be32 *) data)[0]);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0))
			goto ack_done;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + pad + 8)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		wqe = get_swqe_ptr(qp, qp->s_last);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;
	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
			dev->n_rdma_seq++;
			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
			goto ack_done;
		}
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 8)))
			goto ack_len_err;
	read_last:
		tlen -= hdrsize + pad + 8;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		if (!header_in_data)
			aeth = be32_to_cpu(ohdr->u.aeth);
		else {
			aeth = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		}
		ipath_copy_sge(&qp->s_rdma_read_sge, data, tlen);
		(void) do_rc_ack(qp, aeth, psn,
				 OP(RDMA_READ_RESPONSE_LAST), 0);
		goto ack_done;
	}

ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	goto bail;

ack_op_err:
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_len_err:
	wc.status = IB_WC_LOC_LEN_ERR;
ack_err:
	wc.wr_id = wqe->wr.wr_id;
	wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
	wc.vendor_err = 0;
	wc.byte_len = 0;
	wc.imm_data = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.wc_flags = 0;
	wc.pkey_index = 0;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.dlid_path_bits = 0;
	wc.port_num = 0;
	ipath_sqerror_qp(qp, &wc);
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}
/**
 * ipath_rc_rcv_error - process an incoming duplicate or error RC packet
 * @dev: the device this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 * @header_in_data: true if part of the header data is in the data buffer
 *
 * This is called from ipath_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
				     struct ipath_other_headers *ohdr,
				     void *data,
				     struct ipath_qp *qp,
				     u32 opcode,
				     u32 psn,
				     int diff,
				     int header_in_data)
{
	struct ipath_ack_entry *e;
	u8 i, prev;
	int old_req;
	unsigned long flags;

	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			goto send_ack;
		}
		goto done;
	}
	/*
	 * Handle a duplicate request. Don't re-execute SEND, RDMA
	 * write or atomic op. Don't NAK errors, just silently drop
	 * the duplicate request. Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK. We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 */
	psn &= IPATH_PSN_MASK;
	e = NULL;
	old_req = 1;
	spin_lock_irqsave(&qp->s_lock, flags);
	for (i = qp->r_head_ack_queue; ; i = prev) {
		if (i == qp->s_tail_ack_queue)
			old_req = 0;
		if (i)
			prev = i - 1;
		else
			prev = IPATH_MAX_RDMA_ATOMIC;
		if (prev == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[prev];
		if (!e->opcode) {
			e = NULL;
			break;
		}
		if (ipath_cmp24(psn, e->psn) >= 0) {
			if (prev == qp->s_tail_ack_queue)
				old_req = 0;
			break;
		}
	}
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * or the send tasklet is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST) || old_req)
			goto unlock_done;
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = ((psn - e->psn) & IPATH_PSN_MASK) *
			ib_mtu_enum_to_int(qp->path_mtu);
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len > e->rdma_sge.sge.sge_length))
			goto unlock_done;
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			ok = ipath_rkey_ok(qp, &e->rdma_sge,
					   len, vaddr, rkey,
					   IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.sg_list = NULL;
			e->rdma_sge.num_sge = 0;
			e->rdma_sge.sge.mr = NULL;
			e->rdma_sge.sge.vaddr = NULL;
			e->rdma_sge.sge.length = 0;
			e->rdma_sge.sge.sge_length = 0;
		}
		e->psn = psn;
		qp->s_ack_state = OP(ACKNOWLEDGE);
		qp->s_tail_ack_queue = prev;
		break;
	}
	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send tasklet is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8) opcode || old_req)
			goto unlock_done;
		qp->s_ack_state = OP(ACKNOWLEDGE);
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		if (old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}
		/*
		 * Try to send a simple ACK to work around a Mellanox bug
		 * which doesn't accept an RDMA read response or atomic
		 * response as an ACK for earlier SENDs or RDMA writes.
		 */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue &&
		    !(qp->s_flags & IPATH_S_ACK_PENDING) &&
		    qp->s_ack_state == OP(ACKNOWLEDGE)) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
			goto send_ack;
		}
		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		qp->s_ack_state = OP(ACKNOWLEDGE);
		qp->s_tail_ack_queue = i;
		break;
	}
	qp->r_nak_state = 0;
	tasklet_hi_schedule(&qp->s_task);

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}
static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
{
	unsigned long flags;
	int lastwqe;

	spin_lock_irqsave(&qp->s_lock, flags);
	qp->state = IB_QPS_ERR;
	lastwqe = ipath_error_qp(qp, err);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
static inline void ipath_update_ack_queue(struct ipath_qp *qp, unsigned n)
{
	unsigned long flags;
	unsigned next;

	next = n + 1;
	if (next > IPATH_MAX_RDMA_ATOMIC)
		next = 0;
	spin_lock_irqsave(&qp->s_lock, flags);
	if (n == qp->s_tail_ack_queue) {
		qp->s_tail_ack_queue = next;
		qp->s_ack_state = OP(ACKNOWLEDGE);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
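
/*
 * Added note: s_ack_queue[] is a ring of IPATH_MAX_RDMA_ATOMIC + 1
 * entries; one slot is always kept free so a full ring (next ==
 * s_tail_ack_queue in ipath_rc_rcv()) can be told apart from an empty
 * one while IPATH_MAX_RDMA_ATOMIC reads/atomics are in flight.
 */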
/**
 * ipath_rc_rcv - process an incoming RC packet
 * @dev: the device this packet came in on
 * @hdr: the header of this packet
 * @has_grh: true if the header has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 *
 * This is called from ipath_qp_rcv() to process an incoming RC packet
 * for the given QP.
 * Called at interrupt level.
 */
void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
{
	struct ipath_other_headers *ohdr;
	u32 opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	int diff;
	struct ib_reth *reth;
	int header_in_data;

	/* Validate the SLID. See Ch. 9.6.1.5 */
	if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
		goto done;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;	/* LRH + BTH */
		psn = be32_to_cpu(ohdr->bth[2]);
		header_in_data = 0;
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
		/*
		 * The header with GRH is 60 bytes and the core driver sets
		 * the eager header buffer size to 56 bytes so the last 4
		 * bytes of the BTH header (PSN) are in the data buffer.
		 */
		header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
		if (header_in_data) {
			psn = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		} else
			psn = be32_to_cpu(ohdr->bth[2]);
	}
	/*
	 * Process responses (ACKs) before anything else. Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
				  hdrsize, pmtu, header_in_data);
		goto done;
	}

	/* Compute 24 bits worth of difference. */
	diff = ipath_cmp24(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
				       psn, diff, header_in_data))
			goto done;
		goto send_ack;
	}
	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
	nack_inv:
		ipath_rc_error(qp, IB_WC_REM_INV_REQ_ERR);
		qp->r_nak_state = IB_NAK_INVALID_REQUEST;
		qp->r_ack_psn = qp->r_psn;
		goto send_ack;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}

	memset(&wc, 0, sizeof wc);
	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		if (!ipath_get_rwqe(qp, 0)) {
		rnr_nak:
			/*
			 * An RNR NAK will ACK earlier sends and RDMA writes.
			 * Don't queue the NAK if an RDMA read or atomic
			 * is pending though.
			 */
			if (qp->r_nak_state)
				goto done;
			qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
			qp->r_ack_psn = qp->r_psn;
			goto send_ack;
		}
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
	send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		ipath_copy_sge(&qp->r_sge, data, pmtu);
		break;
	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
		if (!ipath_get_rwqe(qp, 0))
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto send_last;
		/* FALLTHROUGH */
	case OP(SEND_LAST_WITH_IMMEDIATE):
	send_last_imm:
		if (header_in_data) {
			wc.imm_data = *(__be32 *) data;
			data += sizeof(__be32);
		} else {
			/* Immediate data comes after BTH */
			wc.imm_data = ohdr->u.imm_data;
		}
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		/* FALLTHROUGH */
	case OP(SEND_LAST):
	case OP(RDMA_WRITE_LAST):
	send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto nack_inv;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		ipath_copy_sge(&qp->r_sge, data, tlen);
		qp->r_msn++;
		if (!qp->r_wrid_valid)
			break;
		qp->r_wrid_valid = 0;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.opcode = IB_WC_RECV;
		wc.vendor_err = 0;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.pkey_index = 0;
		wc.slid = qp->remote_ah_attr.dlid;
		wc.sl = qp->remote_ah_attr.sl;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
			       (ohdr->bth[0] &
				__constant_cpu_to_be32(1 << 23)) != 0);
		break;
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE)))
			goto nack_acc;

		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = ipath_rkey_ok(qp, &qp->r_sge,
					   qp->r_len, vaddr, rkey,
					   IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
		} else {
			qp->r_sge.sg_list = NULL;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto send_last;
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		goto send_last_imm;
	case OP(RDMA_READ_REQUEST): {
		struct ipath_ack_entry *e;
		u32 len;
		u8 next;

		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_READ)))
			goto nack_acc;
		next = qp->r_head_ack_queue + 1;
		if (next > IPATH_MAX_RDMA_ATOMIC)
			next = 0;
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv;
			ipath_update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		len = be32_to_cpu(reth->length);
		if (len) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = ipath_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					   rkey, IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto nack_acc;
			/*
			 * Update the next expected PSN. We add 1 later
			 * below, so only add the remainder here.
			 */
			if (len > pmtu)
				qp->r_psn += (len - 1) / pmtu;
		} else {
			e->rdma_sge.sg_list = NULL;
			e->rdma_sge.num_sge = 0;
			e->rdma_sge.sge.mr = NULL;
			e->rdma_sge.sge.vaddr = NULL;
			e->rdma_sge.sge.length = 0;
			e->rdma_sge.sge.sge_length = 0;
		}
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		barrier();
		qp->r_head_ack_queue = next;

		/* Call ipath_do_rc_send() in another thread. */
		tasklet_hi_schedule(&qp->s_task);

		goto done;
	}
	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		struct ipath_ack_entry *e;
		u64 vaddr;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc;
		next = qp->r_head_ack_queue + 1;
		if (next > IPATH_MAX_RDMA_ATOMIC)
			next = 0;
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv;
			ipath_update_ack_queue(qp, next);
		}
		if (!header_in_data)
			ateth = &ohdr->u.atomic_eth;
		else
			ateth = (struct ib_atomic_eth *)data;
		vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
			be32_to_cpu(ateth->vaddr[1]);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge,
					    sizeof(u64), vaddr, rkey,
					    IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = be64_to_cpu(ateth->swap_data);
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      be64_to_cpu(ateth->compare_data),
				      sdata);
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn & IPATH_PSN_MASK;
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		barrier();
		qp->r_head_ack_queue = next;

		/* Call ipath_do_rc_send() in another thread. */
		tasklet_hi_schedule(&qp->s_task);

		goto done;
	}
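
	/*
	 * Added note: the atomic is executed directly on responder
	 * memory with kernel primitives: FETCH_ADD uses
	 * atomic64_add_return() (minus sdata to recover the pre-add
	 * value) and COMPARE_SWAP uses cmpxchg(); either way
	 * e->atomic_data holds the original value that the
	 * ATOMIC_ACKNOWLEDGE returns to the requester.
	 */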
	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required. */
	if (psn & (1 << 31))
		goto send_ack;
	goto done;

nack_acc:
	ipath_rc_error(qp, IB_WC_REM_ACCESS_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;

send_ack:
	send_rc_ack(qp);

done:
	return;
}