drivers/infiniband/hw/ipath/ipath_rc.c
1 /*
2  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include "ipath_verbs.h"
34 #include "ips_common.h"
35
36 /* cut down ridiculously long IB macro names */
37 #define OP(x) IB_OPCODE_RC_##x
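/* e.g. OP(SEND_FIRST) expands to IB_OPCODE_RC_SEND_FIRST */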
38
39 /**
40  * ipath_init_restart - initialize the qp->s_sge after a restart
41  * @qp: the QP whose SGE we're restarting
42  * @wqe: the work queue entry to initialize the QP's SGE from
43  *
44  * The QP s_lock should be held.
45  */
46 static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
47 {
48         struct ipath_ibdev *dev;
49         u32 len;
50
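        /*
         * Work out how much of this WQE has already been sent: each PSN
         * consumed beyond wqe->psn corresponds to one path-MTU payload,
         * so skip that many bytes of the SGE before resending.
         */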
51         len = ((qp->s_psn - wqe->psn) & IPS_PSN_MASK) *
52                 ib_mtu_enum_to_int(qp->path_mtu);
53         qp->s_sge.sge = wqe->sg_list[0];
54         qp->s_sge.sg_list = wqe->sg_list + 1;
55         qp->s_sge.num_sge = wqe->wr.num_sge;
56         ipath_skip_sge(&qp->s_sge, len);
57         qp->s_len = wqe->length - len;
58         dev = to_idev(qp->ibqp.device);
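        /*
         * Put the QP back on the timeout queue so the resent request is
         * itself retried if it goes unacknowledged.
         */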
59         spin_lock(&dev->pending_lock);
60         if (list_empty(&qp->timerwait))
61                 list_add_tail(&qp->timerwait,
62                               &dev->pending[dev->pending_index]);
63         spin_unlock(&dev->pending_lock);
64 }
65
66 /**
67  * ipath_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
68  * @qp: a pointer to the QP
69  * @ohdr: a pointer to the IB header being constructed
70  * @pmtu: the path MTU
71  *
72  * Return bth0 if constructed; otherwise, return 0.
73  * Note the QP s_lock must be held.
74  */
75 static inline u32 ipath_make_rc_ack(struct ipath_qp *qp,
76                                     struct ipath_other_headers *ohdr,
77                                     u32 pmtu)
78 {
79         struct ipath_sge_state *ss;
80         u32 hwords;
81         u32 len;
82         u32 bth0;
83
84         /* header size in 32-bit words LRH+BTH = (8+12)/4. */
85         hwords = 5;
86
87         /*
88          * Send a response.  Note that we are in the responder's
89          * side of the QP context.
90          */
91         switch (qp->s_ack_state) {
92         case OP(RDMA_READ_REQUEST):
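                /*
                 * Start returning the requested read data.  If it all fits
                 * in a single packet send a RESPONSE_ONLY; otherwise begin
                 * a FIRST/MIDDLE/LAST sequence of path-MTU sized packets.
                 */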
93                 ss = &qp->s_rdma_sge;
94                 len = qp->s_rdma_len;
95                 if (len > pmtu) {
96                         len = pmtu;
97                         qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
98                 } else
100                         qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
101                 qp->s_rdma_len -= len;
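                /* The opcode occupies the most significant byte of BTH word 0. */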
102                 bth0 = qp->s_ack_state << 24;
103                 ohdr->u.aeth = ipath_compute_aeth(qp);
104                 hwords++;
105                 break;
106
107         case OP(RDMA_READ_RESPONSE_FIRST):
108                 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
109                 /* FALLTHROUGH */
110         case OP(RDMA_READ_RESPONSE_MIDDLE):
111                 ss = &qp->s_rdma_sge;
112                 len = qp->s_rdma_len;
113                 if (len > pmtu)
114                         len = pmtu;
115                 else {
116                         ohdr->u.aeth = ipath_compute_aeth(qp);
117                         hwords++;
118                         qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
119                 }
120                 qp->s_rdma_len -= len;
121                 bth0 = qp->s_ack_state << 24;
122                 break;
123
124         case OP(RDMA_READ_RESPONSE_LAST):
125         case OP(RDMA_READ_RESPONSE_ONLY):
126                 /*
127                  * We have to prevent new requests from changing
128          * the r_sge state while an ipath_verbs_send()
129                  * is in progress.
130                  * Changing r_state allows the receiver
131                  * to continue processing new packets.
132                  * We do it here now instead of above so
133                  * that we are sure the packet was sent before
134                  * changing the state.
135                  */
136                 qp->r_state = OP(RDMA_READ_RESPONSE_LAST);
137                 qp->s_ack_state = OP(ACKNOWLEDGE);
138                 return 0;
139
140         case OP(COMPARE_SWAP):
141         case OP(FETCH_ADD):
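                /*
                 * Atomic responses return the original value of the target
                 * memory in a 64-bit atomic ACK extended transport header.
                 */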
142                 ss = NULL;
143                 len = 0;
144                 qp->r_state = OP(SEND_LAST);
145                 qp->s_ack_state = OP(ACKNOWLEDGE);
146                 bth0 = IB_OPCODE_ATOMIC_ACKNOWLEDGE << 24;
147                 ohdr->u.at.aeth = ipath_compute_aeth(qp);
148                 ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->s_ack_atomic);
149                 hwords += sizeof(ohdr->u.at) / 4;
150                 break;
151
152         default:
153                 /* Send a regular ACK. */
154                 ss = NULL;
155                 len = 0;
156                 qp->s_ack_state = OP(ACKNOWLEDGE);
157                 bth0 = qp->s_ack_state << 24;
158                 ohdr->u.aeth = ipath_compute_aeth(qp);
159                 hwords++;
160         }
161         qp->s_hdrwords = hwords;
162         qp->s_cur_sge = ss;
163         qp->s_cur_size = len;
164
165         return bth0;
166 }
167
168 /**
169  * ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
170  * @qp: a pointer to the QP
171  * @ohdr: a pointer to the IB header being constructed
172  * @pmtu: the path MTU
173  * @bth0p: pointer to the BTH opcode word
174  * @bth2p: pointer to the BTH PSN word
175  *
176  * Return 1 if constructed; otherwise, return 0.
177  * Note the QP s_lock must be held.
178  */
179 static inline int ipath_make_rc_req(struct ipath_qp *qp,
180                                     struct ipath_other_headers *ohdr,
181                                     u32 pmtu, u32 *bth0p, u32 *bth2p)
182 {
183         struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
184         struct ipath_sge_state *ss;
185         struct ipath_swqe *wqe;
186         u32 hwords;
187         u32 len;
188         u32 bth0;
189         u32 bth2;
190         char newreq;
191
192         if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
193             qp->s_rnr_timeout)
194                 goto done;
195
196         /* header size in 32-bit words LRH+BTH = (8+12)/4. */
197         hwords = 5;
198         bth0 = 0;
199
200         /* Send a request. */
201         wqe = get_swqe_ptr(qp, qp->s_cur);
202         switch (qp->s_state) {
203         default:
204                 /*
205                  * Resend an old request or start a new one.
206                  *
207                  * We keep track of the current SWQE so that
208                  * we don't reset the "furthest progress" state
209                  * if we need to back up.
210                  */
211                 newreq = 0;
212                 if (qp->s_cur == qp->s_tail) {
213                         /* Check if send work queue is empty. */
214                         if (qp->s_tail == qp->s_head)
215                                 goto done;
216                         qp->s_psn = wqe->psn = qp->s_next_psn;
217                         newreq = 1;
218                 }
219                 /*
220                  * Note that we have to be careful not to modify the
221                  * original work request since we may need to resend
222                  * it.
223                  */
224                 qp->s_sge.sge = wqe->sg_list[0];
225                 qp->s_sge.sg_list = wqe->sg_list + 1;
226                 qp->s_sge.num_sge = wqe->wr.num_sge;
227                 qp->s_len = len = wqe->length;
228                 ss = &qp->s_sge;
229                 bth2 = 0;
230                 switch (wqe->wr.opcode) {
231                 case IB_WR_SEND:
232                 case IB_WR_SEND_WITH_IMM:
233                         /* If no credit, return. */
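                        /*
                         * A request may only be sent while its SSN is within
                         * the credit limit in s_lsn; (u32) -1 disables the
                         * check.
                         */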
234                         if (qp->s_lsn != (u32) -1 &&
235                             ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
236                                 goto done;
237                         wqe->lpsn = wqe->psn;
238                         if (len > pmtu) {
239                                 wqe->lpsn += (len - 1) / pmtu;
240                                 qp->s_state = OP(SEND_FIRST);
241                                 len = pmtu;
242                                 break;
243                         }
244                         if (wqe->wr.opcode == IB_WR_SEND)
245                                 qp->s_state = OP(SEND_ONLY);
246                         else {
247                                 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
248                                 /* Immediate data comes after the BTH */
249                                 ohdr->u.imm_data = wqe->wr.imm_data;
250                                 hwords += 1;
251                         }
252                         if (wqe->wr.send_flags & IB_SEND_SOLICITED)
253                                 bth0 |= 1 << 23;
254                         bth2 = 1 << 31; /* Request ACK. */
255                         if (++qp->s_cur == qp->s_size)
256                                 qp->s_cur = 0;
257                         break;
258
259                 case IB_WR_RDMA_WRITE:
260                         if (newreq)
261                                 qp->s_lsn++;
262                         /* FALLTHROUGH */
263                 case IB_WR_RDMA_WRITE_WITH_IMM:
264                         /* If no credit, return. */
265                         if (qp->s_lsn != (u32) -1 &&
266                             ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
267                                 goto done;
268                         ohdr->u.rc.reth.vaddr =
269                                 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
270                         ohdr->u.rc.reth.rkey =
271                                 cpu_to_be32(wqe->wr.wr.rdma.rkey);
272                         ohdr->u.rc.reth.length = cpu_to_be32(len);
273                         hwords += sizeof(struct ib_reth) / 4;
274                         wqe->lpsn = wqe->psn;
275                         if (len > pmtu) {
276                                 wqe->lpsn += (len - 1) / pmtu;
277                                 qp->s_state = OP(RDMA_WRITE_FIRST);
278                                 len = pmtu;
279                                 break;
280                         }
281                         if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
282                                 qp->s_state = OP(RDMA_WRITE_ONLY);
283                         else {
284                                 qp->s_state =
285                                         OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
286                                 /* Immediate data comes
287                                  * after RETH */
288                                 ohdr->u.rc.imm_data = wqe->wr.imm_data;
289                                 hwords += 1;
290                                 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
291                                         bth0 |= 1 << 23;
292                         }
293                         bth2 = 1 << 31; /* Request ACK. */
294                         if (++qp->s_cur == qp->s_size)
295                                 qp->s_cur = 0;
296                         break;
297
298                 case IB_WR_RDMA_READ:
299                         ohdr->u.rc.reth.vaddr =
300                                 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
301                         ohdr->u.rc.reth.rkey =
302                                 cpu_to_be32(wqe->wr.wr.rdma.rkey);
303                         ohdr->u.rc.reth.length = cpu_to_be32(len);
304                         qp->s_state = OP(RDMA_READ_REQUEST);
305                         hwords += sizeof(ohdr->u.rc.reth) / 4;
306                         if (newreq) {
307                                 qp->s_lsn++;
308                                 /*
309                                  * Adjust s_next_psn to count the
310                                  * expected number of responses.
311                                  */
312                                 if (len > pmtu)
313                                         qp->s_next_psn += (len - 1) / pmtu;
314                                 wqe->lpsn = qp->s_next_psn++;
315                         }
316                         ss = NULL;
317                         len = 0;
318                         if (++qp->s_cur == qp->s_size)
319                                 qp->s_cur = 0;
320                         break;
321
322                 case IB_WR_ATOMIC_CMP_AND_SWP:
323                 case IB_WR_ATOMIC_FETCH_AND_ADD:
324                         if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP)
325                                 qp->s_state = OP(COMPARE_SWAP);
326                         else
327                                 qp->s_state = OP(FETCH_ADD);
328                         ohdr->u.atomic_eth.vaddr = cpu_to_be64(
329                                 wqe->wr.wr.atomic.remote_addr);
330                         ohdr->u.atomic_eth.rkey = cpu_to_be32(
331                                 wqe->wr.wr.atomic.rkey);
332                         ohdr->u.atomic_eth.swap_data = cpu_to_be64(
333                                 wqe->wr.wr.atomic.swap);
334                         ohdr->u.atomic_eth.compare_data = cpu_to_be64(
335                                 wqe->wr.wr.atomic.compare_add);
336                         hwords += sizeof(struct ib_atomic_eth) / 4;
337                         if (newreq) {
338                                 qp->s_lsn++;
339                                 wqe->lpsn = wqe->psn;
340                         }
341                         if (++qp->s_cur == qp->s_size)
342                                 qp->s_cur = 0;
343                         ss = NULL;
344                         len = 0;
345                         break;
346
347                 default:
348                         goto done;
349                 }
350                 if (newreq) {
351                         qp->s_tail++;
352                         if (qp->s_tail >= qp->s_size)
353                                 qp->s_tail = 0;
354                 }
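                /*
                 * BTH word 2 carries the PSN in its low 24 bits; bit 31 is
                 * the AckReq bit set above for packets that request an ACK.
                 */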
355                 bth2 |= qp->s_psn++ & IPS_PSN_MASK;
356                 if ((int)(qp->s_psn - qp->s_next_psn) > 0)
357                         qp->s_next_psn = qp->s_psn;
358                 spin_lock(&dev->pending_lock);
359                 if (list_empty(&qp->timerwait))
360                         list_add_tail(&qp->timerwait,
361                                       &dev->pending[dev->pending_index]);
362                 spin_unlock(&dev->pending_lock);
363                 break;
364
365         case OP(RDMA_READ_RESPONSE_FIRST):
366                 /*
367                  * This case can only happen if a send is restarted.  See
368                  * ipath_restart_rc().
369                  */
370                 ipath_init_restart(qp, wqe);
371                 /* FALLTHROUGH */
372         case OP(SEND_FIRST):
373                 qp->s_state = OP(SEND_MIDDLE);
374                 /* FALLTHROUGH */
375         case OP(SEND_MIDDLE):
376                 bth2 = qp->s_psn++ & IPS_PSN_MASK;
377                 if ((int)(qp->s_psn - qp->s_next_psn) > 0)
378                         qp->s_next_psn = qp->s_psn;
379                 ss = &qp->s_sge;
380                 len = qp->s_len;
381                 if (len > pmtu) {
382                         /*
383                          * Request an ACK every 1/2 MB to avoid retransmit
384                          * timeouts.
385                          */
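                        /* (wqe->length - len) is how much of this WQE has already been sent. */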
386                         if (((wqe->length - len) % (512 * 1024)) == 0)
387                                 bth2 |= 1 << 31;
388                         len = pmtu;
389                         break;
390                 }
391                 if (wqe->wr.opcode == IB_WR_SEND)
392                         qp->s_state = OP(SEND_LAST);
393                 else {
394                         qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
395                         /* Immediate data comes after the BTH */
396                         ohdr->u.imm_data = wqe->wr.imm_data;
397                         hwords += 1;
398                 }
399                 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
400                         bth0 |= 1 << 23;
401                 bth2 |= 1 << 31;        /* Request ACK. */
402                 qp->s_cur++;
403                 if (qp->s_cur >= qp->s_size)
404                         qp->s_cur = 0;
405                 break;
406
407         case OP(RDMA_READ_RESPONSE_LAST):
408                 /*
409          * This case can only happen if an RDMA write is restarted.
410                  * See ipath_restart_rc().
411                  */
412                 ipath_init_restart(qp, wqe);
413                 /* FALLTHROUGH */
414         case OP(RDMA_WRITE_FIRST):
415                 qp->s_state = OP(RDMA_WRITE_MIDDLE);
416                 /* FALLTHROUGH */
417         case OP(RDMA_WRITE_MIDDLE):
418                 bth2 = qp->s_psn++ & IPS_PSN_MASK;
419                 if ((int)(qp->s_psn - qp->s_next_psn) > 0)
420                         qp->s_next_psn = qp->s_psn;
421                 ss = &qp->s_sge;
422                 len = qp->s_len;
423                 if (len > pmtu) {
424                         /*
425                          * Request an ACK every 1/2 MB to avoid retransmit
426                          * timeouts.
427                          */
428                         if (((wqe->length - len) % (512 * 1024)) == 0)
429                                 bth2 |= 1 << 31;
430                         len = pmtu;
431                         break;
432                 }
433                 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
434                         qp->s_state = OP(RDMA_WRITE_LAST);
435                 else {
436                         qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
437                         /* Immediate data comes after the BTH */
438                         ohdr->u.imm_data = wqe->wr.imm_data;
439                         hwords += 1;
440                         if (wqe->wr.send_flags & IB_SEND_SOLICITED)
441                                 bth0 |= 1 << 23;
442                 }
443                 bth2 |= 1 << 31;        /* Request ACK. */
444                 qp->s_cur++;
445                 if (qp->s_cur >= qp->s_size)
446                         qp->s_cur = 0;
447                 break;
448
449         case OP(RDMA_READ_RESPONSE_MIDDLE):
450                 /*
451          * This case can only happen if an RDMA read is restarted.
452                  * See ipath_restart_rc().
453                  */
454                 ipath_init_restart(qp, wqe);
455                 len = ((qp->s_psn - wqe->psn) & IPS_PSN_MASK) * pmtu;
456                 ohdr->u.rc.reth.vaddr =
457                         cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
458                 ohdr->u.rc.reth.rkey =
459                         cpu_to_be32(wqe->wr.wr.rdma.rkey);
460                 ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
461                 qp->s_state = OP(RDMA_READ_REQUEST);
462                 hwords += sizeof(ohdr->u.rc.reth) / 4;
463                 bth2 = qp->s_psn++ & IPS_PSN_MASK;
464                 if ((int)(qp->s_psn - qp->s_next_psn) > 0)
465                         qp->s_next_psn = qp->s_psn;
466                 ss = NULL;
467                 len = 0;
468                 qp->s_cur++;
469                 if (qp->s_cur == qp->s_size)
470                         qp->s_cur = 0;
471                 break;
472
473         case OP(RDMA_READ_REQUEST):
474         case OP(COMPARE_SWAP):
475         case OP(FETCH_ADD):
476                 /*
477                  * We shouldn't start anything new until this request is
478                  * finished.  The ACK will handle rescheduling us.  XXX The
479                  * number of outstanding ones is negotiated at connection
480                  * setup time (see pg. 258,289)?  XXX Also, if we support
481                  * multiple outstanding requests, we need to check the WQE
482          * IB_SEND_FENCE flag and not send a new request if an RDMA
483                  * read or atomic is pending.
484                  */
485                 goto done;
486         }
487         qp->s_len -= len;
488         qp->s_hdrwords = hwords;
489         qp->s_cur_sge = ss;
490         qp->s_cur_size = len;
491         *bth0p = bth0 | (qp->s_state << 24);
492         *bth2p = bth2;
493         return 1;
494
495 done:
496         return 0;
497 }
498
499 static inline void ipath_make_rc_grh(struct ipath_qp *qp,
500                                      struct ib_global_route *grh,
501                                      u32 nwords)
502 {
503         struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
504
505         /* GRH header size in 32-bit words. */
506         qp->s_hdrwords += 10;
507         qp->s_hdr.u.l.grh.version_tclass_flow =
508                 cpu_to_be32((6 << 28) |
509                             (grh->traffic_class << 20) |
510                             grh->flow_label);
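        /*
         * The GRH payload length covers everything after the GRH: the BTH
         * and any extension headers (s_hdrwords minus the 2-word LRH and
         * 10-word GRH), the payload, and the ICRC, expressed in bytes.
         */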
511         qp->s_hdr.u.l.grh.paylen =
512                 cpu_to_be16(((qp->s_hdrwords - 12) + nwords +
513                              SIZE_OF_CRC) << 2);
514         /* next_hdr is defined by C8-7 in ch. 8.4.1 */
515         qp->s_hdr.u.l.grh.next_hdr = 0x1B;
516         qp->s_hdr.u.l.grh.hop_limit = grh->hop_limit;
517         /* The SGID is 32-bit aligned. */
518         qp->s_hdr.u.l.grh.sgid.global.subnet_prefix = dev->gid_prefix;
519         qp->s_hdr.u.l.grh.sgid.global.interface_id =
520                 ipath_layer_get_guid(dev->dd);
521         qp->s_hdr.u.l.grh.dgid = grh->dgid;
522 }
523
524 /**
525  * ipath_do_rc_send - perform a send on an RC QP
526  * @data: contains a pointer to the QP
527  *
528  * Process entries in the send work queue until the queue or available
529  * credit is exhausted.  Only allow one CPU to send a packet per QP (tasklet).
530  * Otherwise, after we drop the QP s_lock, two threads could send
531  * packets out of order.
532  */
533 void ipath_do_rc_send(unsigned long data)
534 {
535         struct ipath_qp *qp = (struct ipath_qp *)data;
536         struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
537         unsigned long flags;
538         u16 lrh0;
539         u32 nwords;
540         u32 extra_bytes;
541         u32 bth0;
542         u32 bth2;
543         u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
544         struct ipath_other_headers *ohdr;
545
546         if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
547                 goto bail;
548
549         if (unlikely(qp->remote_ah_attr.dlid ==
550                      ipath_layer_get_lid(dev->dd))) {
551                 struct ib_wc wc;
552
553                 /*
554                  * Pass in an uninitialized ib_wc to be consistent with
555                  * other places where ipath_ruc_loopback() is called.
556                  */
557                 ipath_ruc_loopback(qp, &wc);
558                 goto clear;
559         }
560
561         ohdr = &qp->s_hdr.u.oth;
562         if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
563                 ohdr = &qp->s_hdr.u.l.oth;
564
565 again:
566         /* Check for a constructed packet to be sent. */
567         if (qp->s_hdrwords != 0) {
568                 /*
569                  * If no PIO bufs are available, return.  An interrupt will
570                  * call ipath_ib_piobufavail() when one is available.
571                  */
572                 _VERBS_INFO("h %u %p\n", qp->s_hdrwords, &qp->s_hdr);
573                 _VERBS_INFO("d %u %p %u %p %u %u %u %u\n", qp->s_cur_size,
574                             qp->s_cur_sge->sg_list,
575                             qp->s_cur_sge->num_sge,
576                             qp->s_cur_sge->sge.vaddr,
577                             qp->s_cur_sge->sge.sge_length,
578                             qp->s_cur_sge->sge.length,
579                             qp->s_cur_sge->sge.m,
580                             qp->s_cur_sge->sge.n);
581                 if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
582                                      (u32 *) &qp->s_hdr, qp->s_cur_size,
583                                      qp->s_cur_sge)) {
584                         ipath_no_bufs_available(qp, dev);
585                         goto bail;
586                 }
587                 dev->n_unicast_xmit++;
588                 /* Record that we sent the packet and s_hdr is empty. */
589                 qp->s_hdrwords = 0;
590         }
591
592         /*
593          * The lock is needed to synchronize between setting
594          * qp->s_ack_state, resend timer, and post_send().
595          */
596         spin_lock_irqsave(&qp->s_lock, flags);
597
598         /* Sending responses takes priority over sending requests. */
599         if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
600             (bth0 = ipath_make_rc_ack(qp, ohdr, pmtu)) != 0)
601                 bth2 = qp->s_ack_psn++ & IPS_PSN_MASK;
602         else if (!ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2))
603                 goto done;
604
605         spin_unlock_irqrestore(&qp->s_lock, flags);
606
607         /* Construct the header. */
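        /*
         * Pad the payload to a 4-byte boundary; nwords is the padded length
         * in 32-bit words and the pad count goes into the BTH PadCnt field
         * (bits 20-21 of word 0) below.
         */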
608         extra_bytes = (4 - qp->s_cur_size) & 3;
609         nwords = (qp->s_cur_size + extra_bytes) >> 2;
610         lrh0 = IPS_LRH_BTH;
611         if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
612                 ipath_make_rc_grh(qp, &qp->remote_ah_attr.grh, nwords);
613                 lrh0 = IPS_LRH_GRH;
614         }
615         lrh0 |= qp->remote_ah_attr.sl << 4;
616         qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
617         qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
618         qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
619                                        SIZE_OF_CRC);
620         qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
621         bth0 |= ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
622         bth0 |= extra_bytes << 20;
623         ohdr->bth[0] = cpu_to_be32(bth0);
624         ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
625         ohdr->bth[2] = cpu_to_be32(bth2);
626
627         /* Check for more work to do. */
628         goto again;
629
630 done:
631         spin_unlock_irqrestore(&qp->s_lock, flags);
632 clear:
633         clear_bit(IPATH_S_BUSY, &qp->s_flags);
634 bail:
635         return;
636 }
637
638 static void send_rc_ack(struct ipath_qp *qp)
639 {
640         struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
641         u16 lrh0;
642         u32 bth0;
643         struct ipath_other_headers *ohdr;
644
645         /* Construct the header. */
646         ohdr = &qp->s_hdr.u.oth;
647         lrh0 = IPS_LRH_BTH;
648         /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
649         qp->s_hdrwords = 6;
650         if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
651                 ipath_make_rc_grh(qp, &qp->remote_ah_attr.grh, 0);
652                 ohdr = &qp->s_hdr.u.l.oth;
653                 lrh0 = IPS_LRH_GRH;
654         }
655         bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
656         ohdr->u.aeth = ipath_compute_aeth(qp);
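        /*
         * COMPARE_SWAP and FETCH_ADD are the two highest RC opcode values,
         * so this test catches exactly the atomic cases, which need an
         * atomic ACK with the original data appended.
         */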
657         if (qp->s_ack_state >= OP(COMPARE_SWAP)) {
658                 bth0 |= IB_OPCODE_ATOMIC_ACKNOWLEDGE << 24;
659                 ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->s_ack_atomic);
660                 qp->s_hdrwords += sizeof(ohdr->u.at.atomic_ack_eth) / 4;
661         } else
663                 bth0 |= OP(ACKNOWLEDGE) << 24;
664         lrh0 |= qp->remote_ah_attr.sl << 4;
665         qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
666         qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
667         qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + SIZE_OF_CRC);
668         qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
669         ohdr->bth[0] = cpu_to_be32(bth0);
670         ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
671         ohdr->bth[2] = cpu_to_be32(qp->s_ack_psn & IPS_PSN_MASK);
672
673         /*
674          * If we can send the ACK, clear the ACK state.
675          */
676         if (ipath_verbs_send(dev->dd, qp->s_hdrwords, (u32 *) &qp->s_hdr,
677                              0, NULL) == 0) {
678                 qp->s_ack_state = OP(ACKNOWLEDGE);
679                 dev->n_rc_qacks++;
680                 dev->n_unicast_xmit++;
681         }
682 }
683
684 /**
685  * ipath_restart_rc - back up requester to resend the last un-ACKed request
686  * @qp: the QP to restart
687  * @psn: packet sequence number for the request
688  * @wc: the work completion request
689  *
690  * The QP s_lock should be held.
691  */
692 void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
693 {
694         struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
695         struct ipath_ibdev *dev;
696         u32 n;
697
698         /*
699          * If there are no requests pending, we are done.
700          */
701         if (ipath_cmp24(psn, qp->s_next_psn) >= 0 ||
702             qp->s_last == qp->s_tail)
703                 goto done;
704
705         if (qp->s_retry == 0) {
706                 wc->wr_id = wqe->wr.wr_id;
707                 wc->status = IB_WC_RETRY_EXC_ERR;
708                 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
709                 wc->vendor_err = 0;
710                 wc->byte_len = 0;
711                 wc->qp_num = qp->ibqp.qp_num;
712                 wc->src_qp = qp->remote_qpn;
713                 wc->pkey_index = 0;
714                 wc->slid = qp->remote_ah_attr.dlid;
715                 wc->sl = qp->remote_ah_attr.sl;
716                 wc->dlid_path_bits = 0;
717                 wc->port_num = 0;
718                 ipath_sqerror_qp(qp, wc);
719                 goto bail;
720         }
721         qp->s_retry--;
722
723         /*
724          * Remove the QP from the timeout queue.
725          * Note: it may already have been removed by ipath_ib_timer().
726          */
727         dev = to_idev(qp->ibqp.device);
728         spin_lock(&dev->pending_lock);
729         if (!list_empty(&qp->timerwait))
730                 list_del_init(&qp->timerwait);
731         spin_unlock(&dev->pending_lock);
732
733         if (wqe->wr.opcode == IB_WR_RDMA_READ)
734                 dev->n_rc_resends++;
735         else
736                 dev->n_rc_resends += (int)qp->s_psn - (int)psn;
737
738         /*
739          * If we are starting the request from the beginning, let the normal
740          * send code handle initialization.
741          */
742         qp->s_cur = qp->s_last;
743         if (ipath_cmp24(psn, wqe->psn) <= 0) {
744                 qp->s_state = OP(SEND_LAST);
745                 qp->s_psn = wqe->psn;
746         } else {
747                 n = qp->s_cur;
748                 for (;;) {
749                         if (++n == qp->s_size)
750                                 n = 0;
751                         if (n == qp->s_tail) {
752                                 if (ipath_cmp24(psn, qp->s_next_psn) >= 0) {
753                                         qp->s_cur = n;
754                                         wqe = get_swqe_ptr(qp, n);
755                                 }
756                                 break;
757                         }
758                         wqe = get_swqe_ptr(qp, n);
759                         if (ipath_cmp24(psn, wqe->psn) < 0)
760                                 break;
761                         qp->s_cur = n;
762                 }
763                 qp->s_psn = psn;
764
765                 /*
766                  * Reset the state to restart in the middle of a request.
767                  * Don't change the s_sge, s_cur_sge, or s_cur_size.
768                  * See ipath_do_rc_send().
769                  */
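                /*
                 * The RDMA_READ_RESPONSE_* opcodes are never used as real
                 * send states, so they double as markers telling
                 * ipath_make_rc_req() to call ipath_init_restart() before
                 * resuming in the middle of the request.
                 */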
770                 switch (wqe->wr.opcode) {
771                 case IB_WR_SEND:
772                 case IB_WR_SEND_WITH_IMM:
773                         qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
774                         break;
775
776                 case IB_WR_RDMA_WRITE:
777                 case IB_WR_RDMA_WRITE_WITH_IMM:
778                         qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
779                         break;
780
781                 case IB_WR_RDMA_READ:
782                         qp->s_state =
783                                 OP(RDMA_READ_RESPONSE_MIDDLE);
784                         break;
785
786                 default:
787                         /*
788                          * This case shouldn't happen since there is only
789                          * one PSN per request.
790                          */
791                         qp->s_state = OP(SEND_LAST);
792                 }
793         }
794
795 done:
796         tasklet_hi_schedule(&qp->s_task);
797
798 bail:
799         return;
800 }
801
802 /**
803  * reset_psn - reset the QP state to send starting from PSN
804  * @qp: the QP
805  * @psn: the packet sequence number to restart at
806  *
807  * This is called from ipath_rc_rcv() to process an incoming RC ACK
808  * for the given QP.
809  * Called at interrupt level with the QP s_lock held.
810  */
811 static void reset_psn(struct ipath_qp *qp, u32 psn)
812 {
813         struct ipath_swqe *wqe;
814         u32 n;
815
816         n = qp->s_cur;
817         wqe = get_swqe_ptr(qp, n);
818         for (;;) {
819                 if (++n == qp->s_size)
820                         n = 0;
821                 if (n == qp->s_tail) {
822                         if (ipath_cmp24(psn, qp->s_next_psn) >= 0) {
823                                 qp->s_cur = n;
824                                 wqe = get_swqe_ptr(qp, n);
825                         }
826                         break;
827                 }
828                 wqe = get_swqe_ptr(qp, n);
829                 if (ipath_cmp24(psn, wqe->psn) < 0)
830                         break;
831                 qp->s_cur = n;
832         }
833         qp->s_psn = psn;
834
835         /*
836          * Set the state to restart in the middle of a
837          * request.  Don't change the s_sge, s_cur_sge, or
838          * s_cur_size.  See ipath_do_rc_send().
839          */
840         switch (wqe->wr.opcode) {
841         case IB_WR_SEND:
842         case IB_WR_SEND_WITH_IMM:
843                 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
844                 break;
845
846         case IB_WR_RDMA_WRITE:
847         case IB_WR_RDMA_WRITE_WITH_IMM:
848                 qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
849                 break;
850
851         case IB_WR_RDMA_READ:
852                 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
853                 break;
854
855         default:
856                 /*
857                  * This case shouldn't happen since there is only
858                  * one PSN per request.
859                  */
860                 qp->s_state = OP(SEND_LAST);
861         }
862 }
863
864 /**
865  * do_rc_ack - process an incoming RC ACK
866  * @qp: the QP the ACK came in on
867  * @psn: the packet sequence number of the ACK
868  * @opcode: the opcode of the request that resulted in the ACK
869  *
870  * This is called from ipath_rc_rcv() to process an incoming RC ACK
871  * for the given QP.
872  * Called at interrupt level with the QP s_lock held.
873  * Returns 1 if OK, 0 if current operation should be aborted (NAK).
874  */
875 static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
876 {
877         struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
878         struct ib_wc wc;
879         struct ipath_swqe *wqe;
880         int ret = 0;
881
882         /*
883          * Remove the QP from the timeout queue (or RNR timeout queue).
884          * If ipath_ib_timer() has already removed it,
885          * it's OK since we hold the QP s_lock and ipath_restart_rc()
886          * just won't find anything to restart if we ACK everything.
887          */
888         spin_lock(&dev->pending_lock);
889         if (!list_empty(&qp->timerwait))
890                 list_del_init(&qp->timerwait);
891         spin_unlock(&dev->pending_lock);
892
893         /*
894          * Note that NAKs implicitly ACK outstanding SEND and RDMA write
895          * requests and implicitly NAK RDMA read and atomic requests issued
896          * before the NAK'ed request.  The MSN won't include the NAK'ed
897          * request but will include any ACK'ed requests.
898          */
899         wqe = get_swqe_ptr(qp, qp->s_last);
900
901         /* Nothing is pending to ACK/NAK. */
902         if (qp->s_last == qp->s_tail)
903                 goto bail;
904
905         /*
906          * The MSN might be for a later WQE than the PSN indicates so
907          * only complete WQEs that the PSN finishes.
908          */
909         while (ipath_cmp24(psn, wqe->lpsn) >= 0) {
910                 /* If we are ACKing a WQE, the MSN should be >= the SSN. */
911                 if (ipath_cmp24(aeth, wqe->ssn) < 0)
912                         break;
913                 /*
914          * If this request is an RDMA read or atomic, and the ACK is
915          * for a later operation, this ACK NAKs the RDMA read or
916          * atomic.  In other words, only an RDMA_READ_LAST or ONLY
917          * can ACK an RDMA read and likewise for atomic ops.  Note
918                  * that the NAK case can only happen if relaxed ordering is
919                  * used and requests are sent after an RDMA read or atomic
920                  * is sent but before the response is received.
921                  */
922                 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
923                      opcode != OP(RDMA_READ_RESPONSE_LAST)) ||
924                     ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
925                       wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
926                      (opcode != OP(ATOMIC_ACKNOWLEDGE) ||
927                       ipath_cmp24(wqe->psn, psn) != 0))) {
928                         /*
929                          * The last valid PSN seen is the previous
930                          * request's.
931                          */
932                         qp->s_last_psn = wqe->psn - 1;
933                         /* Retry this request. */
934                         ipath_restart_rc(qp, wqe->psn, &wc);
935                         /*
936                          * No need to process the ACK/NAK since we are
937                          * restarting an earlier request.
938                          */
939                         goto bail;
940                 }
941                 /* Post a send completion queue entry if requested. */
942                 if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
943                     (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
944                         wc.wr_id = wqe->wr.wr_id;
945                         wc.status = IB_WC_SUCCESS;
946                         wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
947                         wc.vendor_err = 0;
948                         wc.byte_len = wqe->length;
949                         wc.qp_num = qp->ibqp.qp_num;
950                         wc.src_qp = qp->remote_qpn;
951                         wc.pkey_index = 0;
952                         wc.slid = qp->remote_ah_attr.dlid;
953                         wc.sl = qp->remote_ah_attr.sl;
954                         wc.dlid_path_bits = 0;
955                         wc.port_num = 0;
956                         ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
957                 }
958                 qp->s_retry = qp->s_retry_cnt;
959                 /*
960                  * If we are completing a request which is in the process of
961                  * being resent, we can stop resending it since we know the
962                  * responder has already seen it.
963                  */
964                 if (qp->s_last == qp->s_cur) {
965                         if (++qp->s_cur >= qp->s_size)
966                                 qp->s_cur = 0;
967                         wqe = get_swqe_ptr(qp, qp->s_cur);
968                         qp->s_state = OP(SEND_LAST);
969                         qp->s_psn = wqe->psn;
970                 }
971                 if (++qp->s_last >= qp->s_size)
972                         qp->s_last = 0;
973                 wqe = get_swqe_ptr(qp, qp->s_last);
974                 if (qp->s_last == qp->s_tail)
975                         break;
976         }
977
978         switch (aeth >> 29) {
979         case 0:         /* ACK */
980                 dev->n_rc_acks++;
981                 /* If this is a partial ACK, reset the retransmit timer. */
982                 if (qp->s_last != qp->s_tail) {
983                         spin_lock(&dev->pending_lock);
984                         list_add_tail(&qp->timerwait,
985                                       &dev->pending[dev->pending_index]);
986                         spin_unlock(&dev->pending_lock);
987                 }
988                 ipath_get_credit(qp, aeth);
989                 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
990                 qp->s_retry = qp->s_retry_cnt;
991                 qp->s_last_psn = psn;
992                 ret = 1;
993                 goto bail;
994
995         case 1:         /* RNR NAK */
996                 dev->n_rnr_naks++;
997                 if (qp->s_rnr_retry == 0) {
998                         if (qp->s_last == qp->s_tail)
999                                 goto bail;
1000
1001                         wc.status = IB_WC_RNR_RETRY_EXC_ERR;
1002                         goto class_b;
1003                 }
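                /* An RNR retry count of 7 means retry indefinitely. */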
1004                 if (qp->s_rnr_retry_cnt < 7)
1005                         qp->s_rnr_retry--;
1006                 if (qp->s_last == qp->s_tail)
1007                         goto bail;
1008
1009                 /* The last valid PSN seen is the previous request's. */
1010                 qp->s_last_psn = wqe->psn - 1;
1011
1012                 dev->n_rc_resends += (int)qp->s_psn - (int)psn;
1013
1014                 /*
1015                  * If we are starting the request from the beginning, let
1016                  * the normal send code handle initialization.
1017                  */
1018                 qp->s_cur = qp->s_last;
1019                 wqe = get_swqe_ptr(qp, qp->s_cur);
1020                 if (ipath_cmp24(psn, wqe->psn) <= 0) {
1021                         qp->s_state = OP(SEND_LAST);
1022                         qp->s_psn = wqe->psn;
1023                 } else
1024                         reset_psn(qp, psn);
1025
1026                 qp->s_rnr_timeout =
1027                         ib_ipath_rnr_table[(aeth >> IPS_AETH_CREDIT_SHIFT) &
1028                                            IPS_AETH_CREDIT_MASK];
1029                 ipath_insert_rnr_queue(qp);
1030                 goto bail;
1031
1032         case 3:         /* NAK */
1033                 /* The last valid PSN seen is the previous request's. */
1034                 if (qp->s_last != qp->s_tail)
1035                         qp->s_last_psn = wqe->psn - 1;
1036                 switch ((aeth >> IPS_AETH_CREDIT_SHIFT) &
1037                         IPS_AETH_CREDIT_MASK) {
1038                 case 0: /* PSN sequence error */
1039                         dev->n_seq_naks++;
1040                         /*
1041                          * Back up to the responder's expected PSN.  XXX
1042                          * Note that we might get a NAK in the middle of an
1043                          * RDMA READ response which terminates the RDMA
1044                          * READ.
1045                          */
1046                         if (qp->s_last == qp->s_tail)
1047                                 break;
1048
1049                         if (ipath_cmp24(psn, wqe->psn) < 0)
1050                                 break;
1051
1052                         /* Retry the request. */
1053                         ipath_restart_rc(qp, psn, &wc);
1054                         break;
1055
1056                 case 1: /* Invalid Request */
1057                         wc.status = IB_WC_REM_INV_REQ_ERR;
1058                         dev->n_other_naks++;
1059                         goto class_b;
1060
1061                 case 2: /* Remote Access Error */
1062                         wc.status = IB_WC_REM_ACCESS_ERR;
1063                         dev->n_other_naks++;
1064                         goto class_b;
1065
1066                 case 3: /* Remote Operation Error */
1067                         wc.status = IB_WC_REM_OP_ERR;
1068                         dev->n_other_naks++;
1069                 class_b:
1070                         wc.wr_id = wqe->wr.wr_id;
1071                         wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
1072                         wc.vendor_err = 0;
1073                         wc.byte_len = 0;
1074                         wc.qp_num = qp->ibqp.qp_num;
1075                         wc.src_qp = qp->remote_qpn;
1076                         wc.pkey_index = 0;
1077                         wc.slid = qp->remote_ah_attr.dlid;
1078                         wc.sl = qp->remote_ah_attr.sl;
1079                         wc.dlid_path_bits = 0;
1080                         wc.port_num = 0;
1081                         ipath_sqerror_qp(qp, &wc);
1082                         break;
1083
1084                 default:
1085                         /* Ignore other reserved NAK error codes */
1086                         goto reserved;
1087                 }
1088                 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1089                 goto bail;
1090
1091         default:                /* 2: reserved */
1092         reserved:
1093                 /* Ignore reserved NAK codes. */
1094                 goto bail;
1095         }
1096
1097 bail:
1098         return ret;
1099 }
1100
1101 /**
1102  * ipath_rc_rcv_resp - process an incoming RC response packet
1103  * @dev: the device this packet came in on
1104  * @ohdr: the other headers for this packet
1105  * @data: the packet data
1106  * @tlen: the packet length
1107  * @qp: the QP for this packet
1108  * @opcode: the opcode for this packet
1109  * @psn: the packet sequence number for this packet
1110  * @hdrsize: the header length
1111  * @pmtu: the path MTU
1112  * @header_in_data: true if part of the header data is in the data buffer
1113  *
1114  * This is called from ipath_rc_rcv() to process an incoming RC response
1115  * packet for the given QP.
1116  * Called at interrupt level.
1117  */
1118 static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
1119                                      struct ipath_other_headers *ohdr,
1120                                      void *data, u32 tlen,
1121                                      struct ipath_qp *qp,
1122                                      u32 opcode,
1123                                      u32 psn, u32 hdrsize, u32 pmtu,
1124                                      int header_in_data)
1125 {
1126         unsigned long flags;
1127         struct ib_wc wc;
1128         int diff;
1129         u32 pad;
1130         u32 aeth;
1131
1132         spin_lock_irqsave(&qp->s_lock, flags);
1133
1134         /* Ignore invalid responses. */
1135         if (ipath_cmp24(psn, qp->s_next_psn) >= 0)
1136                 goto ack_done;
1137
1138         /* Ignore duplicate responses. */
1139         diff = ipath_cmp24(psn, qp->s_last_psn);
1140         if (unlikely(diff <= 0)) {
1141                 /* Update credits for "ghost" ACKs */
1142                 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
1143                         if (!header_in_data)
1144                                 aeth = be32_to_cpu(ohdr->u.aeth);
1145                         else {
1146                                 aeth = be32_to_cpu(((__be32 *) data)[0]);
1147                                 data += sizeof(__be32);
1148                         }
1149                         if ((aeth >> 29) == 0)
1150                                 ipath_get_credit(qp, aeth);
1151                 }
1152                 goto ack_done;
1153         }
1154
1155         switch (opcode) {
1156         case OP(ACKNOWLEDGE):
1157         case OP(ATOMIC_ACKNOWLEDGE):
1158         case OP(RDMA_READ_RESPONSE_FIRST):
1159                 if (!header_in_data)
1160                         aeth = be32_to_cpu(ohdr->u.aeth);
1161                 else {
1162                         aeth = be32_to_cpu(((__be32 *) data)[0]);
1163                         data += sizeof(__be32);
1164                 }
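                /*
                 * For atomic responses, the 64-bit result returned by the
                 * responder is stored directly into the requester's buffer.
                 */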
1165                 if (opcode == OP(ATOMIC_ACKNOWLEDGE))
1166                         *(u64 *) qp->s_sge.sge.vaddr = *(u64 *) data;
1167                 if (!do_rc_ack(qp, aeth, psn, opcode) ||
1168                     opcode != OP(RDMA_READ_RESPONSE_FIRST))
1169                         goto ack_done;
1170                 hdrsize += 4;
1171                 /*
1172                  * do_rc_ack() has already checked the PSN so skip
1173                  * the sequence check.
1174                  */
1175                 goto rdma_read;
1176
1177         case OP(RDMA_READ_RESPONSE_MIDDLE):
1178                 /* no AETH, no ACK */
1179                 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
1180                         dev->n_rdma_seq++;
1181                         ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
1182                         goto ack_done;
1183                 }
1184         rdma_read:
1185         if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
1186                 goto ack_done;
1187         if (unlikely(tlen != (hdrsize + pmtu + 4)))
1188                 goto ack_done;
1189         if (unlikely(pmtu >= qp->s_len))
1190                 goto ack_done;
1191         /* We got a response so update the timeout. */
1192         if (unlikely(qp->s_last == qp->s_tail ||
1193                      get_swqe_ptr(qp, qp->s_last)->wr.opcode !=
1194                      IB_WR_RDMA_READ))
1195                 goto ack_done;
1196         spin_lock(&dev->pending_lock);
1197         if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
1198                 list_move_tail(&qp->timerwait,
1199                                &dev->pending[dev->pending_index]);
1200         spin_unlock(&dev->pending_lock);
1201         /*
1202          * Update the RDMA receive state but do the copy w/o holding the
1203          * locks and blocking interrupts.  XXX Yet another place that
1204          * affects relaxed RDMA order since we don't want s_sge modified.
1205          */
1206         qp->s_len -= pmtu;
1207         qp->s_last_psn = psn;
1208         spin_unlock_irqrestore(&qp->s_lock, flags);
1209         ipath_copy_sge(&qp->s_sge, data, pmtu);
1210         goto bail;
1211
1212         case OP(RDMA_READ_RESPONSE_LAST):
1213                 /* ACKs READ req. */
1214                 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
1215                         dev->n_rdma_seq++;
1216                         ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
1217                         goto ack_done;
1218                 }
1219                 /* FALLTHROUGH */
1220         case OP(RDMA_READ_RESPONSE_ONLY):
1221                 if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
1222                         goto ack_done;
1223                 /*
1224                  * Get the number of bytes the message was padded by.
1225                  */
1226                 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1227                 /*
1228                  * Check that the data size is >= 1 && <= pmtu.
1229                  * Remember to account for the AETH header (4) and
1230                  * ICRC (4).
1231                  */
1232                 if (unlikely(tlen <= (hdrsize + pad + 8))) {
1233                         /*
1234                          * XXX Need to generate an error CQ
1235                          * entry.
1236                          */
1237                         goto ack_done;
1238                 }
1239                 tlen -= hdrsize + pad + 8;
1240                 if (unlikely(tlen != qp->s_len)) {
1241                         /*
1242                          * XXX Need to generate an error CQ
1243                          * entry.
1244                          */
1245                         goto ack_done;
1246                 }
1247                 if (!header_in_data)
1248                         aeth = be32_to_cpu(ohdr->u.aeth);
1249                 else {
1250                         aeth = be32_to_cpu(((__be32 *) data)[0]);
1251                         data += sizeof(__be32);
1252                 }
1253                 ipath_copy_sge(&qp->s_sge, data, tlen);
1254                 if (do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST))) {
1255                         /*
1256                          * Change the state so we continue
1257                          * processing new requests.
1258                          */
1259                         qp->s_state = OP(SEND_LAST);
1260                 }
1261                 goto ack_done;
1262         }
1263
1264 ack_done:
1265         spin_unlock_irqrestore(&qp->s_lock, flags);
1266 bail:
1267         return;
1268 }
1269
1270 /**
1271  * ipath_rc_rcv_error - process an incoming duplicate or error RC packet
1272  * @dev: the device this packet came in on
1273  * @ohdr: the other headers for this packet
1274  * @data: the packet data
1275  * @qp: the QP for this packet
1276  * @opcode: the opcode for this packet
1277  * @psn: the packet sequence number for this packet
1278  * @diff: the difference between the PSN and the expected PSN
1279  * @header_in_data: true if part of the header data is in the data buffer
1280  *
1281  * This is called from ipath_rc_rcv() to process an unexpected
1282  * incoming RC packet for the given QP.
1283  * Called at interrupt level.
1284  * Return 1 if no more processing is needed; otherwise return 0 to
1285  * schedule a response to be sent and the s_lock unlocked.
1286  */
1287 static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
1288                                      struct ipath_other_headers *ohdr,
1289                                      void *data,
1290                                      struct ipath_qp *qp,
1291                                      u32 opcode,
1292                                      u32 psn,
1293                                      int diff,
1294                                      int header_in_data)
1295 {
1296         struct ib_reth *reth;
1297
1298         if (diff > 0) {
1299                 /*
1300                  * Packet sequence error.
1301                  * A NAK will ACK earlier sends and RDMA writes.
1302          * Don't queue the NAK if an RDMA read, atomic, or
1303                  * NAK is pending though.
1304                  */
1305                 spin_lock(&qp->s_lock);
1306                 if ((qp->s_ack_state >= OP(RDMA_READ_REQUEST) &&
1307                      qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) ||
1308                     qp->s_nak_state != 0) {
1309                         spin_unlock(&qp->s_lock);
1310                         goto done;
1311                 }
1312                 qp->s_ack_state = OP(SEND_ONLY);
1313                 qp->s_nak_state = IB_NAK_PSN_ERROR;
1314                 /* Use the expected PSN. */
1315                 qp->s_ack_psn = qp->r_psn;
1316                 goto resched;
1317         }
1318
1319         /*
1320          * Handle a duplicate request.  Don't re-execute SEND, RDMA
1321          * write or atomic op.  Don't NAK errors, just silently drop
1322          * the duplicate request.  Note that r_sge, r_len, and
1323          * r_rcv_len may be in use so don't modify them.
1324          *
1325          * We are supposed to ACK the earliest duplicate PSN but we
1326          * can coalesce an outstanding duplicate ACK.  We have to
1327          * send the earliest so that RDMA reads can be restarted at
1328          * the requester's expected PSN.
1329          */
1330         spin_lock(&qp->s_lock);
1331         if (qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE &&
1332             ipath_cmp24(psn, qp->s_ack_psn) >= 0) {
1333                 if (qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST)
1334                         qp->s_ack_psn = psn;
1335                 spin_unlock(&qp->s_lock);
1336                 goto done;
1337         }
1338         switch (opcode) {
1339         case OP(RDMA_READ_REQUEST):
1340                 /*
1341                  * We have to be careful to not change s_rdma_sge
1342                  * while ipath_do_rc_send() is using it and not
1343                  * holding the s_lock.
1344                  */
1345                 if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
1346                     qp->s_ack_state >= IB_OPCODE_RDMA_READ_REQUEST) {
1347                         spin_unlock(&qp->s_lock);
1348                         dev->n_rdma_dup_busy++;
1349                         goto done;
1350                 }
1351                 /* RETH comes after BTH */
1352                 if (!header_in_data)
1353                         reth = &ohdr->u.rc.reth;
1354                 else {
1355                         reth = (struct ib_reth *)data;
1356                         data += sizeof(*reth);
1357                 }
1358                 qp->s_rdma_len = be32_to_cpu(reth->length);
1359                 if (qp->s_rdma_len != 0) {
1360                         u32 rkey = be32_to_cpu(reth->rkey);
1361                         u64 vaddr = be64_to_cpu(reth->vaddr);
1362                         int ok;
1363
1364                         /*
1365                          * Address range must be a subset of the original
1366                          * request and start on pmtu boundaries.
1367                          */
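                             /*
                              * Illustration: a requester that received the first
                              * k response packets may retry the read starting at
                              * the original vaddr plus k * pmtu, which is why
                              * only pmtu-aligned, in-range restarts are accepted.
                              */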
1368                         ok = ipath_rkey_ok(dev, &qp->s_rdma_sge,
1369                                            qp->s_rdma_len, vaddr, rkey,
1370                                            IB_ACCESS_REMOTE_READ);
1371                         if (unlikely(!ok)) {
                                     spin_unlock(&qp->s_lock);
1372                                 goto done;
                             }
1373                 } else {
1374                         qp->s_rdma_sge.sg_list = NULL;
1375                         qp->s_rdma_sge.num_sge = 0;
1376                         qp->s_rdma_sge.sge.mr = NULL;
1377                         qp->s_rdma_sge.sge.vaddr = NULL;
1378                         qp->s_rdma_sge.sge.length = 0;
1379                         qp->s_rdma_sge.sge.sge_length = 0;
1380                 }
1381                 break;
1382
1383         case OP(COMPARE_SWAP):
1384         case OP(FETCH_ADD):
1385                 /*
1386                  * Check whether this matches the PSN of the last atomic
1387                  * operation performed and, if so, resend the saved result.
1388                  */
1389                 if ((psn & IPS_PSN_MASK) != qp->r_atomic_psn) {
1390                         spin_unlock(&qp->s_lock);
1391                         goto done;
1392                 }
1393                 qp->s_ack_atomic = qp->r_atomic_data;
1394                 break;
1395         }
1396         qp->s_ack_state = opcode;
1397         qp->s_nak_state = 0;
1398         qp->s_ack_psn = psn;
1399 resched:
1400         return 0;
1401
1402 done:
1403         return 1;
1404 }
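
     /*
      * Illustrative sketch (not part of the driver): one way a 24-bit
      * circular PSN comparison such as ipath_cmp24(), used in this file,
      * can behave.  It returns < 0, 0 or > 0 depending on whether "a" is
      * behind, equal to or ahead of "b" in the 24-bit PSN space.  The
      * helper below is an assumption for illustration, not the driver's
      * implementation.
      */
     static inline int example_psn_cmp24(u32 a, u32 b)
     {
             s32 d = (a - b) & 0xffffff;     /* 24-bit circular difference */

             /* sign-extend so wrapped PSNs still compare correctly */
             return d >= 0x800000 ? d - 0x1000000 : d;
     }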
1405
1406 /**
1407  * ipath_rc_rcv - process an incoming RC packet
1408  * @dev: the device this packet came in on
1409  * @hdr: the header of this packet
1410  * @has_grh: true if the header has a GRH
1411  * @data: the packet data
1412  * @tlen: the packet length
1413  * @qp: the QP for this packet
1414  *
1415  * This is called from ipath_qp_rcv() to process an incoming RC packet
1416  * for the given QP.
1417  * Called at interrupt level.
1418  */
1419 void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
1420                   int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
1421 {
1422         struct ipath_other_headers *ohdr;
1423         u32 opcode;
1424         u32 hdrsize;
1425         u32 psn;
1426         u32 pad;
1427         unsigned long flags;
1428         struct ib_wc wc;
1429         u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
1430         int diff;
1431         struct ib_reth *reth;
1432         int header_in_data;
1433
1434         /* Check for GRH */
1435         if (!has_grh) {
1436                 ohdr = &hdr->u.oth;
1437                 hdrsize = 8 + 12;       /* LRH + BTH */
1438                 psn = be32_to_cpu(ohdr->bth[2]);
1439                 header_in_data = 0;
1440         } else {
1441                 ohdr = &hdr->u.l.oth;
1442                 hdrsize = 8 + 40 + 12;  /* LRH + GRH + BTH */
1443                 /*
1444                  * The header with GRH is 60 bytes and the core driver sets
1445                  * the eager header buffer size to 56 bytes so the last 4
1446                  * bytes of the BTH header (the PSN) are in the data buffer.
1447                  */
1448                 header_in_data =
1449                         ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
1450                 if (header_in_data) {
1451                         psn = be32_to_cpu(((__be32 *) data)[0]);
1452                         data += sizeof(__be32);
1453                 } else
1454                         psn = be32_to_cpu(ohdr->bth[2]);
1455         }
1456         /*
1457          * The opcode is in the low byte when it's in network order
1458          * (top byte when in host order).
1459          */
1460         opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
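             /*
              * Worked example (illustration only): if be32_to_cpu() yields
              * 0x04xxxxxx in host order, the shift by 24 leaves the opcode
              * byte 0x04 (RC SEND ONLY) in "opcode".
              */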
1461
1462         /*
1463          * Process responses (ACKs) before anything else.  Note that the
1464          * packet sequence number will be for something in the send work
1465          * queue rather than the expected receive packet sequence number.
1466          * In other words, this QP is the requester.
1467          */
1468         if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1469             opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
1470                 ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
1471                                   hdrsize, pmtu, header_in_data);
1472                 goto bail;
1473         }
1474
1475         spin_lock_irqsave(&qp->r_rq.lock, flags);
1476
1477         /* Compute 24 bits worth of difference. */
1478         diff = ipath_cmp24(psn, qp->r_psn);
1479         if (unlikely(diff)) {
1480                 if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
1481                                        psn, diff, header_in_data))
1482                         goto done;
1483                 goto resched;
1484         }
1485
1486         /* Check for opcode sequence errors. */
1487         switch (qp->r_state) {
1488         case OP(SEND_FIRST):
1489         case OP(SEND_MIDDLE):
1490                 if (opcode == OP(SEND_MIDDLE) ||
1491                     opcode == OP(SEND_LAST) ||
1492                     opcode == OP(SEND_LAST_WITH_IMMEDIATE))
1493                         break;
1494         nack_inv:
1495         /*
1496          * A NAK will ACK earlier sends and RDMA writes.  Don't queue the
1497          * NAK if an RDMA read, atomic, or NAK is pending though.
1498          */
1499         spin_lock(&qp->s_lock);
1500         if (qp->s_ack_state >= OP(RDMA_READ_REQUEST) &&
1501             qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) {
1502                 spin_unlock(&qp->s_lock);
1503                 goto done;
1504         }
1505         /* XXX Flush WQEs */
1506         qp->state = IB_QPS_ERR;
1507         qp->s_ack_state = OP(SEND_ONLY);
1508         qp->s_nak_state = IB_NAK_INVALID_REQUEST;
1509         qp->s_ack_psn = qp->r_psn;
1510         goto resched;
1511
1512         case OP(RDMA_WRITE_FIRST):
1513         case OP(RDMA_WRITE_MIDDLE):
1514                 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
1515                     opcode == OP(RDMA_WRITE_LAST) ||
1516                     opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1517                         break;
1518                 goto nack_inv;
1519
1520         case OP(RDMA_READ_REQUEST):
1521         case OP(COMPARE_SWAP):
1522         case OP(FETCH_ADD):
1523                 /*
1524                  * Drop all new requests until a response has been sent.  A
1525                  * new request then ACKs the RDMA response we sent.  Relaxed
1526                  * ordering would allow new requests to be processed but we
1527                  * would need to keep a queue of rwqe's for all that are in
1528                  * progress.  Note that we can't RNR NAK this request since
1529                  * the RDMA READ or atomic response is already queued to be
1530                  * sent (unless we implement a response send queue).
1531                  */
1532                 goto done;
1533
1534         default:
1535                 if (opcode == OP(SEND_MIDDLE) ||
1536                     opcode == OP(SEND_LAST) ||
1537                     opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
1538                     opcode == OP(RDMA_WRITE_MIDDLE) ||
1539                     opcode == OP(RDMA_WRITE_LAST) ||
1540                     opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1541                         goto nack_inv;
1542                 break;
1543         }
1544
1545         wc.imm_data = 0;
1546         wc.wc_flags = 0;
1547
1548         /* OK, process the packet. */
1549         switch (opcode) {
1550         case OP(SEND_FIRST):
1551                 if (!ipath_get_rwqe(qp, 0)) {
1552                 rnr_nak:
1553                         /*
1554                          * An RNR NAK will ACK earlier sends and RDMA writes.
1555                          * Don't queue the NAK if an RDMA read or atomic
1556                          * is pending though.
1557                          */
1558                         spin_lock(&qp->s_lock);
1559                         if (qp->s_ack_state >=
1560                             OP(RDMA_READ_REQUEST) &&
1561                             qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) {
1562                                 spin_unlock(&qp->s_lock);
1563                                 goto done;
1564                         }
1565                         qp->s_ack_state = OP(SEND_ONLY);
1566                         qp->s_nak_state = IB_RNR_NAK | qp->s_min_rnr_timer;
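                             /*
                              * Note: per the IB AETH syndrome layout, the RNR NAK
                              * code occupies the upper bits and the 5-bit minimum
                              * RNR timer value is OR'd into the low bits, so both
                              * travel in the single syndrome byte sent back.
                              */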
1567                         qp->s_ack_psn = qp->r_psn;
1568                         goto resched;
1569                 }
1570                 qp->r_rcv_len = 0;
1571                 /* FALLTHROUGH */
1572         case OP(SEND_MIDDLE):
1573         case OP(RDMA_WRITE_MIDDLE):
1574         send_middle:
1575                 /* Check the length: must be a full PMTU and fit the posted rwqe. */
1576                 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1577                         goto nack_inv;
1578                 qp->r_rcv_len += pmtu;
1579                 if (unlikely(qp->r_rcv_len > qp->r_len))
1580                         goto nack_inv;
1581                 ipath_copy_sge(&qp->r_sge, data, pmtu);
1582                 break;
1583
1584         case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
1585                 /* consume RWQE */
1586                 if (!ipath_get_rwqe(qp, 1))
1587                         goto rnr_nak;
1588                 goto send_last_imm;
1589
1590         case OP(SEND_ONLY):
1591         case OP(SEND_ONLY_WITH_IMMEDIATE):
1592                 if (!ipath_get_rwqe(qp, 0))
1593                         goto rnr_nak;
1594                 qp->r_rcv_len = 0;
1595                 if (opcode == OP(SEND_ONLY))
1596                         goto send_last;
1597                 /* FALLTHROUGH */
1598         case OP(SEND_LAST_WITH_IMMEDIATE):
1599         send_last_imm:
1600                 if (header_in_data) {
1601                         wc.imm_data = *(__be32 *) data;
1602                         data += sizeof(__be32);
1603                 } else {
1604                         /* Immediate data comes after BTH */
1605                         wc.imm_data = ohdr->u.imm_data;
1606                 }
1607                 hdrsize += 4;
1608                 wc.wc_flags = IB_WC_WITH_IMM;
1609                 /* FALLTHROUGH */
1610         case OP(SEND_LAST):
1611         case OP(RDMA_WRITE_LAST):
1612         send_last:
1613                 /* Get the number of bytes the message was padded by. */
1614                 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
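                     /*
                      * Illustration: the value extracted above is the 2-bit
                      * PadCnt field at bits 21:20 of the first BTH word; e.g.
                      * a 5-byte payload is carried as 8 bytes with pad = 3.
                      */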
1615                 /* Check for invalid length. */
1616                 /* XXX LAST len should be >= 1 */
1617                 if (unlikely(tlen < (hdrsize + pad + 4)))
1618                         goto nack_inv;
1619                 /* Don't count the CRC. */
1620                 tlen -= (hdrsize + pad + 4);
1621                 wc.byte_len = tlen + qp->r_rcv_len;
1622                 if (unlikely(wc.byte_len > qp->r_len))
1623                         goto nack_inv;
1624                 ipath_copy_sge(&qp->r_sge, data, tlen);
1625                 atomic_inc(&qp->msn);
1626                 if (opcode == OP(RDMA_WRITE_LAST) ||
1627                     opcode == OP(RDMA_WRITE_ONLY))
1628                         break;
1629                 wc.wr_id = qp->r_wr_id;
1630                 wc.status = IB_WC_SUCCESS;
1631                 wc.opcode = IB_WC_RECV;
1632                 wc.vendor_err = 0;
1633                 wc.qp_num = qp->ibqp.qp_num;
1634                 wc.src_qp = qp->remote_qpn;
1635                 wc.pkey_index = 0;
1636                 wc.slid = qp->remote_ah_attr.dlid;
1637                 wc.sl = qp->remote_ah_attr.sl;
1638                 wc.dlid_path_bits = 0;
1639                 wc.port_num = 0;
1640                 /* Signal completion event if the solicited bit is set. */
1641                 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
1642                                (ohdr->bth[0] &
1643                                 __constant_cpu_to_be32(1 << 23)) != 0);
1644                 break;
1645
1646         case OP(RDMA_WRITE_FIRST):
1647         case OP(RDMA_WRITE_ONLY):
1648         case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
1649                 /* consume RWQE */
1650                 /* RETH comes after BTH */
1651                 if (!header_in_data)
1652                         reth = &ohdr->u.rc.reth;
1653                 else {
1654                         reth = (struct ib_reth *)data;
1655                         data += sizeof(*reth);
1656                 }
1657                 hdrsize += sizeof(*reth);
1658                 qp->r_len = be32_to_cpu(reth->length);
1659                 qp->r_rcv_len = 0;
1660                 if (qp->r_len != 0) {
1661                         u32 rkey = be32_to_cpu(reth->rkey);
1662                         u64 vaddr = be64_to_cpu(reth->vaddr);
1663                         int ok;
1664
1665                         /* Check rkey & NAK */
1666                         ok = ipath_rkey_ok(dev, &qp->r_sge,
1667                                            qp->r_len, vaddr, rkey,
1668                                            IB_ACCESS_REMOTE_WRITE);
1669                         if (unlikely(!ok)) {
1670                         nack_acc:
1671                                 /*
1672                                  * A NAK will ACK earlier sends and RDMA
1673                                  * writes.  Don't queue the NAK if an RDMA
1674                                  * read, atomic, or NAK is pending though.
1675                                  */
1676                                 spin_lock(&qp->s_lock);
1677                                 if (qp->s_ack_state >=
1678                                     OP(RDMA_READ_REQUEST) &&
1679                                     qp->s_ack_state !=
1680                                     IB_OPCODE_ACKNOWLEDGE) {
1681                                         spin_unlock(&qp->s_lock);
1682                                         goto done;
1683                                 }
1684                                 /* XXX Flush WQEs */
1685                                 qp->state = IB_QPS_ERR;
1686                                 qp->s_ack_state = OP(RDMA_WRITE_ONLY);
1687                                 qp->s_nak_state =
1688                                         IB_NAK_REMOTE_ACCESS_ERROR;
1689                                 qp->s_ack_psn = qp->r_psn;
1690                                 goto resched;
1691                         }
1692                 } else {
1693                         qp->r_sge.sg_list = NULL;
1694                         qp->r_sge.sge.mr = NULL;
1695                         qp->r_sge.sge.vaddr = NULL;
1696                         qp->r_sge.sge.length = 0;
1697                         qp->r_sge.sge.sge_length = 0;
1698                 }
1699                 if (unlikely(!(qp->qp_access_flags &
1700                                IB_ACCESS_REMOTE_WRITE)))
1701                         goto nack_acc;
1702                 if (opcode == OP(RDMA_WRITE_FIRST))
1703                         goto send_middle;
1704                 else if (opcode == OP(RDMA_WRITE_ONLY))
1705                         goto send_last;
1706                 if (!ipath_get_rwqe(qp, 1))
1707                         goto rnr_nak;
1708                 goto send_last_imm;
1709
1710         case OP(RDMA_READ_REQUEST):
1711                 /* RETH comes after BTH */
1712                 if (!header_in_data)
1713                         reth = &ohdr->u.rc.reth;
1714                 else {
1715                         reth = (struct ib_reth *)data;
1716                         data += sizeof(*reth);
1717                 }
1718                 spin_lock(&qp->s_lock);
1719                 if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
1720                     qp->s_ack_state >= IB_OPCODE_RDMA_READ_REQUEST) {
1721                         spin_unlock(&qp->s_lock);
1722                         goto done;
1723                 }
1724                 qp->s_rdma_len = be32_to_cpu(reth->length);
1725                 if (qp->s_rdma_len != 0) {
1726                         u32 rkey = be32_to_cpu(reth->rkey);
1727                         u64 vaddr = be64_to_cpu(reth->vaddr);
1728                         int ok;
1729
1730                         /* Check rkey & NAK */
1731                         ok = ipath_rkey_ok(dev, &qp->s_rdma_sge,
1732                                            qp->s_rdma_len, vaddr, rkey,
1733                                            IB_ACCESS_REMOTE_READ);
1734                         if (unlikely(!ok)) {
1735                                 spin_unlock(&qp->s_lock);
1736                                 goto nack_acc;
1737                         }
1738                         /*
1739                          * Update the next expected PSN.  We add 1 later
1740                          * below, so only add the remainder here.
1741                          */
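                             /*
                              * Worked example (illustration only): a read of
                              * 3 * pmtu adds (3 * pmtu - 1) / pmtu = 2 here and
                              * the r_psn++ below makes it 3, one PSN for each
                              * response packet.
                              */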
1742                         if (qp->s_rdma_len > pmtu)
1743                                 qp->r_psn += (qp->s_rdma_len - 1) / pmtu;
1744                 } else {
1745                         qp->s_rdma_sge.sg_list = NULL;
1746                         qp->s_rdma_sge.num_sge = 0;
1747                         qp->s_rdma_sge.sge.mr = NULL;
1748                         qp->s_rdma_sge.sge.vaddr = NULL;
1749                         qp->s_rdma_sge.sge.length = 0;
1750                         qp->s_rdma_sge.sge.sge_length = 0;
1751                 }
1752                 if (unlikely(!(qp->qp_access_flags &
1753                                IB_ACCESS_REMOTE_READ)))
1754                         goto nack_acc;
1755                 /*
1756                  * We need to increment the MSN here instead of when we
1757                  * finish sending the result since a duplicate request would
1758                  * increment it more than once.
1759                  */
1760                 atomic_inc(&qp->msn);
1761                 qp->s_ack_state = opcode;
1762                 qp->s_nak_state = 0;
1763                 qp->s_ack_psn = psn;
1764                 qp->r_psn++;
1765                 qp->r_state = opcode;
1766                 goto rdmadone;
1767
1768         case OP(COMPARE_SWAP):
1769         case OP(FETCH_ADD): {
1770                 struct ib_atomic_eth *ateth;
1771                 u64 vaddr;
1772                 u64 sdata;
1773                 u32 rkey;
1774
1775                 if (!header_in_data)
1776                         ateth = &ohdr->u.atomic_eth;
1777                 else {
1778                         ateth = (struct ib_atomic_eth *)data;
1779                         data += sizeof(*ateth);
1780                 }
1781                 vaddr = be64_to_cpu(ateth->vaddr);
1782                 if (unlikely(vaddr & (sizeof(u64) - 1)))
1783                         goto nack_inv;
1784                 rkey = be32_to_cpu(ateth->rkey);
1785                 /* Check rkey & NAK */
1786                 if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge,
1787                                             sizeof(u64), vaddr, rkey,
1788                                             IB_ACCESS_REMOTE_ATOMIC)))
1789                         goto nack_acc;
1790                 if (unlikely(!(qp->qp_access_flags &
1791                                IB_ACCESS_REMOTE_ATOMIC)))
1792                         goto nack_acc;
1793                 /* Perform atomic OP and save result. */
1794                 sdata = be64_to_cpu(ateth->swap_data);
1795                 spin_lock(&dev->pending_lock);
1796                 qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
1797                 if (opcode == OP(FETCH_ADD))
1798                         *(u64 *) qp->r_sge.sge.vaddr =
1799                                 qp->r_atomic_data + sdata;
1800                 else if (qp->r_atomic_data ==
1801                          be64_to_cpu(ateth->compare_data))
1802                         *(u64 *) qp->r_sge.sge.vaddr = sdata;
1803                 spin_unlock(&dev->pending_lock);
1804                 atomic_inc(&qp->msn);
1805                 qp->r_atomic_psn = psn & IPS_PSN_MASK;
1806                 psn |= 1 << 31;
1807                 break;
1808         }
1809
1810         default:
1811                 /* Drop packet for unknown opcodes. */
1812                 goto done;
1813         }
1814         qp->r_psn++;
1815         qp->r_state = opcode;
1816         /* Send an ACK if requested or required. */
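             /*
              * Note: bit 31 here is the BTH AckReq bit, carried along with
              * the 24-bit PSN from bth[2] by be32_to_cpu() above and set
              * explicitly in the atomic case to force a response.
              */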
1817         if (psn & (1 << 31)) {
1818                 /*
1819                  * Coalesce ACKs unless there is a RDMA READ or
1820                  * ATOMIC pending.
1821                  */
1822                 spin_lock(&qp->s_lock);
1823                 if (qp->s_ack_state == OP(ACKNOWLEDGE) ||
1824                     qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST) {
1825                         qp->s_ack_state = opcode;
1826                         qp->s_nak_state = 0;
1827                         qp->s_ack_psn = psn;
1828                         qp->s_ack_atomic = qp->r_atomic_data;
1829                         goto resched;
1830                 }
1831                 spin_unlock(&qp->s_lock);
1832         }
1833 done:
1834         spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1835         goto bail;
1836
1837 resched:
1838         /*
1839          * Try to send ACK right away but not if ipath_do_rc_send() is
1840          * active.
1841          */
1842         if (qp->s_hdrwords == 0 &&
1843             (qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST ||
1844              qp->s_ack_state >= IB_OPCODE_COMPARE_SWAP))
1845                 send_rc_ack(qp);
1846
1847 rdmadone:
1848         spin_unlock(&qp->s_lock);
1849         spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1850
1851         /* Call ipath_do_rc_send() in another thread. */
1852         tasklet_hi_schedule(&qp->s_task);
1853
1854 bail:
1855         return;
1856 }
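
     /*
      * Illustrative sketch (not part of the driver): the semantics of the
      * COMPARE SWAP and FETCH ADD handling above.  The driver performs the
      * operation on the target buffer under dev->pending_lock and returns
      * the original value in an ATOMIC ACKNOWLEDGE; this stand-alone
      * helper, whose name is invented for the example, shows only the
      * arithmetic.
      */
     static inline u64 example_rc_atomic(u64 *target, int is_fetch_add,
                                         u64 swap_or_add_data, u64 compare_data)
     {
             u64 orig = *target;

             if (is_fetch_add)
                     *target = orig + swap_or_add_data;      /* FETCH ADD */
             else if (orig == compare_data)
                     *target = swap_or_add_data;             /* COMPARE SWAP */

             return orig;    /* value returned to the requester */
     }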