/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"

#define BITS_PER_PAGE           (PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK      (BITS_PER_PAGE-1)
#define mk_qpn(qpt, map, off)   (((map) - (qpt)->map) * BITS_PER_PAGE + \
                                 (off))
#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
                                                      BITS_PER_PAGE, off)

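/*
 * QP numbers are allocated from a bitmap spread across one or more
 * pages, each page covering BITS_PER_PAGE numbers; the scheme follows
 * the kernel's PID allocator (see the alloc_pidmap() note below).
 */
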
/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
        0,                      /* 0 */
        1,                      /* 1 */
        2,                      /* 2 */
        3,                      /* 3 */
        4,                      /* 4 */
        6,                      /* 5 */
        8,                      /* 6 */
        12,                     /* 7 */
        16,                     /* 8 */
        24,                     /* 9 */
        32,                     /* A */
        48,                     /* B */
        64,                     /* C */
        96,                     /* D */
        128,                    /* E */
        192,                    /* F */
        256,                    /* 10 */
        384,                    /* 11 */
        512,                    /* 12 */
        768,                    /* 13 */
        1024,                   /* 14 */
        1536,                   /* 15 */
        2048,                   /* 16 */
        3072,                   /* 17 */
        4096,                   /* 18 */
        6144,                   /* 19 */
        8192,                   /* 1A */
        12288,                  /* 1B */
        16384,                  /* 1C */
        24576,                  /* 1D */
        32768                   /* 1E */
};

static void get_map_page(struct ipath_qp_table *qpt, struct qpn_map *map)
{
        unsigned long page = get_zeroed_page(GFP_KERNEL);
        unsigned long flags;

        /*
         * Free the page if someone raced with us installing it.
         */
        spin_lock_irqsave(&qpt->lock, flags);
        if (map->page)
                free_page(page);
        else
                map->page = (void *)page;
        spin_unlock_irqrestore(&qpt->lock, flags);
}

static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
{
        u32 i, offset, max_scan, qpn;
        struct qpn_map *map;
        int ret = -1;

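        /* QPN 0 and QPN 1 are reserved for the SMI and GSI QPs. */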
        if (type == IB_QPT_SMI)
                ret = 0;
        else if (type == IB_QPT_GSI)
                ret = 1;

        if (ret != -1) {
                map = &qpt->map[0];
                if (unlikely(!map->page)) {
                        get_map_page(qpt, map);
                        if (unlikely(!map->page)) {
                                ret = -ENOMEM;
                                goto bail;
                        }
                }
                if (!test_and_set_bit(ret, map->page))
                        atomic_dec(&map->n_free);
                else
                        ret = -EBUSY;
                goto bail;
        }

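        /*
         * Scan for a free QPN starting just past the last one allocated;
         * QPNs 0 and 1 are reserved, so wrap back to 2.
         */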
        qpn = qpt->last + 1;
        if (qpn >= QPN_MAX)
                qpn = 2;
        offset = qpn & BITS_PER_PAGE_MASK;
        map = &qpt->map[qpn / BITS_PER_PAGE];
        max_scan = qpt->nmaps - !offset;
        for (i = 0;;) {
                if (unlikely(!map->page)) {
                        get_map_page(qpt, map);
                        if (unlikely(!map->page))
                                break;
                }
                if (likely(atomic_read(&map->n_free))) {
                        do {
                                if (!test_and_set_bit(offset, map->page)) {
                                        atomic_dec(&map->n_free);
                                        qpt->last = qpn;
                                        ret = qpn;
                                        goto bail;
                                }
                                offset = find_next_offset(map, offset);
                                qpn = mk_qpn(qpt, map, offset);
                                /*
                                 * This test differs from alloc_pidmap().
                                 * If find_next_offset() does find a zero
                                 * bit, we don't need to check for QPN
                                 * wrapping around past our starting QPN.
                                 * We just need to be sure we don't loop
                                 * forever.
                                 */
                        } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
                }
                /*
                 * In order to keep the number of pages allocated to a
                 * minimum, we scan all the existing pages before
                 * increasing the size of the bitmap table.
                 */
                if (++i > max_scan) {
                        if (qpt->nmaps == QPNMAP_ENTRIES)
                                break;
                        map = &qpt->map[qpt->nmaps++];
                        offset = 0;
                } else if (map < &qpt->map[qpt->nmaps]) {
                        ++map;
                        offset = 0;
                } else {
                        map = &qpt->map[0];
                        offset = 2;
                }
                qpn = mk_qpn(qpt, map, offset);
        }

        ret = -ENOMEM;

bail:
        return ret;
}

static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
        struct qpn_map *map;

        map = qpt->map + qpn / BITS_PER_PAGE;
        if (map->page)
                clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
        atomic_inc(&map->n_free);
}

/**
 * ipath_alloc_qpn - allocate a QP number
 * @qpt: the QP table
 * @qp: the QP
 * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
 *
 * Allocate the next available QPN and put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
                           enum ib_qp_type type)
{
        unsigned long flags;
        int ret;

        ret = alloc_qpn(qpt, type);
        if (ret < 0)
                goto bail;
        qp->ibqp.qp_num = ret;

        /* Add the QP to the hash table. */
        spin_lock_irqsave(&qpt->lock, flags);

        ret %= qpt->max;
        qp->next = qpt->table[ret];
        qpt->table[ret] = qp;
        atomic_inc(&qp->refcount);

        spin_unlock_irqrestore(&qpt->lock, flags);
        ret = 0;

bail:
        return ret;
}

/**
 * ipath_free_qp - remove a QP from the QP table
 * @qpt: the QP table
 * @qp: the QP to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
{
        struct ipath_qp *q, **qpp;
        unsigned long flags;

        spin_lock_irqsave(&qpt->lock, flags);

        /* Remove QP from the hash table. */
        qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
        for (; (q = *qpp) != NULL; qpp = &q->next) {
                if (q == qp) {
                        *qpp = qp->next;
                        qp->next = NULL;
                        atomic_dec(&qp->refcount);
                        break;
                }
        }

        spin_unlock_irqrestore(&qpt->lock, flags);
}

/**
 * ipath_free_all_qps - check for QPs still in use
 * @qpt: the QP table to empty
 *
 * There should not be any QPs still in use.
 * Free the QPN bitmap pages.
 */
unsigned ipath_free_all_qps(struct ipath_qp_table *qpt)
{
        unsigned long flags;
        struct ipath_qp *qp;
        u32 n, qp_inuse = 0;

        spin_lock_irqsave(&qpt->lock, flags);
        for (n = 0; n < qpt->max; n++) {
                qp = qpt->table[n];
                qpt->table[n] = NULL;

                for (; qp; qp = qp->next)
                        qp_inuse++;
        }
        spin_unlock_irqrestore(&qpt->lock, flags);

        for (n = 0; n < ARRAY_SIZE(qpt->map); n++)
                if (qpt->map[n].page)
                        free_page((unsigned long) qpt->map[n].page);
        return qp_inuse;
}

/**
 * ipath_lookup_qpn - return the QP with the given QPN
 * @qpt: the QP table
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
        unsigned long flags;
        struct ipath_qp *qp;

        spin_lock_irqsave(&qpt->lock, flags);

        for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
                if (qp->ibqp.qp_num == qpn) {
                        atomic_inc(&qp->refcount);
                        break;
                }
        }

        spin_unlock_irqrestore(&qpt->lock, flags);
        return qp;
}

/**
 * ipath_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
{
        qp->remote_qpn = 0;
        qp->qkey = 0;
        qp->qp_access_flags = 0;
        atomic_set(&qp->s_dma_busy, 0);
        qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
        qp->s_hdrwords = 0;
        qp->s_wqe = NULL;
        qp->s_pkt_delay = 0;
        qp->s_draining = 0;
        qp->s_psn = 0;
        qp->r_psn = 0;
        qp->r_msn = 0;
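        /*
         * Set the previous opcode state to "SEND last" so the first
         * packet sent or received is treated as the start of a new
         * message.
         */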
        if (type == IB_QPT_RC) {
                qp->s_state = IB_OPCODE_RC_SEND_LAST;
                qp->r_state = IB_OPCODE_RC_SEND_LAST;
        } else {
                qp->s_state = IB_OPCODE_UC_SEND_LAST;
                qp->r_state = IB_OPCODE_UC_SEND_LAST;
        }
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
        qp->r_nak_state = 0;
        qp->r_aflags = 0;
        qp->r_flags = 0;
        qp->s_rnr_timeout = 0;
        qp->s_head = 0;
        qp->s_tail = 0;
        qp->s_cur = 0;
        qp->s_last = 0;
        qp->s_ssn = 1;
        qp->s_lsn = 0;
        memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
        qp->r_head_ack_queue = 0;
        qp->s_tail_ack_queue = 0;
        qp->s_num_rd_atomic = 0;
        if (qp->r_rq.wq) {
                qp->r_rq.wq->head = 0;
                qp->r_rq.wq->tail = 0;
        }
}

/**
 * ipath_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
{
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ib_wc wc;
        int ret = 0;

        if (qp->state == IB_QPS_ERR)
                goto bail;

        qp->state = IB_QPS_ERR;

        spin_lock(&dev->pending_lock);
        if (!list_empty(&qp->timerwait))
                list_del_init(&qp->timerwait);
        if (!list_empty(&qp->piowait))
                list_del_init(&qp->piowait);
        spin_unlock(&dev->pending_lock);

        /* Schedule the sending tasklet to drain the send work queue. */
        if (qp->s_last != qp->s_head)
                ipath_schedule_send(qp);

        memset(&wc, 0, sizeof(wc));
        wc.qp = &qp->ibqp;
        wc.opcode = IB_WC_RECV;

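        /*
         * Complete the receive WQE currently being processed (if any)
         * with the real error; the rest of the receive queue is
         * flushed below.
         */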
        if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
                wc.wr_id = qp->r_wr_id;
                wc.status = err;
                ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
        }
        wc.status = IB_WC_WR_FLUSH_ERR;

        if (qp->r_rq.wq) {
                struct ipath_rwq *wq;
                u32 head;
                u32 tail;

                spin_lock(&qp->r_rq.lock);

                /* sanity check pointers before trusting them */
                wq = qp->r_rq.wq;
                head = wq->head;
                if (head >= qp->r_rq.size)
                        head = 0;
                tail = wq->tail;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                while (tail != head) {
                        wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
                        if (++tail >= qp->r_rq.size)
                                tail = 0;
                        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
                }
                wq->tail = tail;

                spin_unlock(&qp->r_rq.lock);
        } else if (qp->ibqp.event_handler)
                ret = 1;

bail:
        return ret;
}

/**
 * ipath_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for ipathverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                    int attr_mask, struct ib_udata *udata)
{
        struct ipath_ibdev *dev = to_idev(ibqp->device);
        struct ipath_qp *qp = to_iqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        int lastwqe = 0;
        int ret;

        spin_lock_irq(&qp->s_lock);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                attr->cur_qp_state : qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

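        /* Have the core verbs layer check the transition and mask. */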
        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask))
                goto inval;

        if (attr_mask & IB_QP_AV) {
                if (attr->ah_attr.dlid == 0 ||
                    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
                        goto inval;

                if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
                    (attr->ah_attr.grh.sgid_index > 1))
                        goto inval;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
                        goto inval;

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                if (attr->min_rnr_timer > 31)
                        goto inval;

        if (attr_mask & IB_QP_PORT)
                if (attr->port_num == 0 ||
                    attr->port_num > ibqp->device->phys_port_cnt)
                        goto inval;

        /*
         * Don't allow an invalid path MTU value, or one greater than
         * 2048 unless we are configured for a 4KB MTU.
         */
        if ((attr_mask & IB_QP_PATH_MTU) &&
                (ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
                (attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
                goto inval;

        if (attr_mask & IB_QP_PATH_MIG_STATE)
                if (attr->path_mig_state != IB_MIG_MIGRATED &&
                    attr->path_mig_state != IB_MIG_REARM)
                        goto inval;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
                        goto inval;

        switch (new_state) {
        case IB_QPS_RESET:
                if (qp->state != IB_QPS_RESET) {
                        qp->state = IB_QPS_RESET;
                        spin_lock(&dev->pending_lock);
                        if (!list_empty(&qp->timerwait))
                                list_del_init(&qp->timerwait);
                        if (!list_empty(&qp->piowait))
                                list_del_init(&qp->piowait);
                        spin_unlock(&dev->pending_lock);
                        qp->s_flags &= ~IPATH_S_ANY_WAIT;
                        spin_unlock_irq(&qp->s_lock);
                        /* Stop the sending tasklet */
                        tasklet_kill(&qp->s_task);
                        wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
                        spin_lock_irq(&qp->s_lock);
                }
                ipath_reset_qp(qp, ibqp->qp_type);
                break;

        case IB_QPS_SQD:
                qp->s_draining = qp->s_last != qp->s_cur;
                qp->state = new_state;
                break;

        case IB_QPS_SQE:
                if (qp->ibqp.qp_type == IB_QPT_RC)
                        goto inval;
                qp->state = new_state;
                break;

        case IB_QPS_ERR:
                lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                break;

        default:
                qp->state = new_state;
                break;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                qp->s_pkey_index = attr->pkey_index;

        if (attr_mask & IB_QP_DEST_QPN)
                qp->remote_qpn = attr->dest_qp_num;

        if (attr_mask & IB_QP_SQ_PSN) {
                qp->s_psn = qp->s_next_psn = attr->sq_psn;
                qp->s_last_psn = qp->s_next_psn - 1;
        }

        if (attr_mask & IB_QP_RQ_PSN)
                qp->r_psn = attr->rq_psn;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->qp_access_flags = attr->qp_access_flags;

        if (attr_mask & IB_QP_AV) {
                qp->remote_ah_attr = attr->ah_attr;
                qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate);
        }

        if (attr_mask & IB_QP_PATH_MTU)
                qp->path_mtu = attr->path_mtu;

        if (attr_mask & IB_QP_RETRY_CNT)
                qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp->s_rnr_retry = attr->rnr_retry;
                if (qp->s_rnr_retry > 7)
                        qp->s_rnr_retry = 7;
                qp->s_rnr_retry_cnt = qp->s_rnr_retry;
        }

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                qp->r_min_rnr_timer = attr->min_rnr_timer;

        if (attr_mask & IB_QP_TIMEOUT)
                qp->timeout = attr->timeout;

        if (attr_mask & IB_QP_QKEY)
                qp->qkey = attr->qkey;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
                qp->s_max_rd_atomic = attr->max_rd_atomic;

        spin_unlock_irq(&qp->s_lock);

        if (lastwqe) {
                struct ib_event ev;

                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
        ret = 0;
        goto bail;

inval:
        spin_unlock_irq(&qp->s_lock);
        ret = -EINVAL;

bail:
        return ret;
}

int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                   int attr_mask, struct ib_qp_init_attr *init_attr)
{
        struct ipath_qp *qp = to_iqp(ibqp);

        attr->qp_state = qp->state;
        attr->cur_qp_state = attr->qp_state;
        attr->path_mtu = qp->path_mtu;
        attr->path_mig_state = 0;
        attr->qkey = qp->qkey;
        attr->rq_psn = qp->r_psn;
        attr->sq_psn = qp->s_next_psn;
        attr->dest_qp_num = qp->remote_qpn;
        attr->qp_access_flags = qp->qp_access_flags;
        attr->cap.max_send_wr = qp->s_size - 1;
        attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
        attr->cap.max_send_sge = qp->s_max_sge;
        attr->cap.max_recv_sge = qp->r_rq.max_sge;
        attr->cap.max_inline_data = 0;
        attr->ah_attr = qp->remote_ah_attr;
        memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
        attr->pkey_index = qp->s_pkey_index;
        attr->alt_pkey_index = 0;
        attr->en_sqd_async_notify = 0;
        attr->sq_draining = qp->s_draining;
        attr->max_rd_atomic = qp->s_max_rd_atomic;
        attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
        attr->min_rnr_timer = qp->r_min_rnr_timer;
        attr->port_num = 1;
        attr->timeout = qp->timeout;
        attr->retry_cnt = qp->s_retry_cnt;
        attr->rnr_retry = qp->s_rnr_retry_cnt;
        attr->alt_port_num = 0;
        attr->alt_timeout = 0;

        init_attr->event_handler = qp->ibqp.event_handler;
        init_attr->qp_context = qp->ibqp.qp_context;
        init_attr->send_cq = qp->ibqp.send_cq;
        init_attr->recv_cq = qp->ibqp.recv_cq;
        init_attr->srq = qp->ibqp.srq;
        init_attr->cap = attr->cap;
        if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
                init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
        else
                init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
        init_attr->qp_type = qp->ibqp.qp_type;
        init_attr->port_num = 1;
        return 0;
}

/**
 * ipath_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 ipath_compute_aeth(struct ipath_qp *qp)
{
        u32 aeth = qp->r_msn & IPATH_MSN_MASK;

        if (qp->ibqp.srq) {
                /*
                 * Shared receive queues don't generate credits.
                 * Set the credit field to the invalid value.
                 */
                aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
        } else {
                u32 min, max, x;
                u32 credits;
                struct ipath_rwq *wq = qp->r_rq.wq;
                u32 head;
                u32 tail;

                /* sanity check pointers before trusting them */
                head = wq->head;
                if (head >= qp->r_rq.size)
                        head = 0;
                tail = wq->tail;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                /*
                 * Compute the number of credits available (RWQEs).
                 * XXX Not holding the r_rq.lock here so there is a small
                 * chance that the pair of reads are not atomic.
                 */
                credits = head - tail;
                if ((int)credits < 0)
                        credits += qp->r_rq.size;
                /*
                 * Binary search the credit table to find the code to
                 * use.
                 */
                min = 0;
                max = 31;
                for (;;) {
                        x = (min + max) / 2;
                        if (credit_table[x] == credits)
                                break;
                        if (credit_table[x] > credits)
                                max = x;
                        else if (min == x)
                                break;
                        else
                                min = x;
                }
                aeth |= x << IPATH_AETH_CREDIT_SHIFT;
        }
        return cpu_to_be32(aeth);
}

/**
 * ipath_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: unused by InfiniPath
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
                              struct ib_qp_init_attr *init_attr,
                              struct ib_udata *udata)
{
        struct ipath_qp *qp;
        int err;
        struct ipath_swqe *swq = NULL;
        struct ipath_ibdev *dev;
        size_t sz;
        struct ib_qp *ret;

        if (init_attr->create_flags) {
                ret = ERR_PTR(-EINVAL);
                goto bail;
        }

        if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
            init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs) {
                ret = ERR_PTR(-EINVAL);
                goto bail;
        }

        /* Check receive queue parameters if no SRQ is specified. */
        if (!init_attr->srq) {
                if (init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
                    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
                        ret = ERR_PTR(-EINVAL);
                        goto bail;
                }
                if (init_attr->cap.max_send_sge +
                    init_attr->cap.max_send_wr +
                    init_attr->cap.max_recv_sge +
                    init_attr->cap.max_recv_wr == 0) {
                        ret = ERR_PTR(-EINVAL);
                        goto bail;
                }
        }

        switch (init_attr->qp_type) {
        case IB_QPT_UC:
        case IB_QPT_RC:
        case IB_QPT_UD:
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                sz = sizeof(struct ipath_sge) *
                        init_attr->cap.max_send_sge +
                        sizeof(struct ipath_swqe);
                swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
                if (swq == NULL) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail;
                }
                sz = sizeof(*qp);
                if (init_attr->srq) {
                        struct ipath_srq *srq = to_isrq(init_attr->srq);

                        sz += sizeof(*qp->r_sg_list) *
                                srq->rq.max_sge;
                } else
                        sz += sizeof(*qp->r_sg_list) *
                                init_attr->cap.max_recv_sge;
                qp = kmalloc(sz, GFP_KERNEL);
                if (!qp) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail_swq;
                }
                if (init_attr->srq) {
                        sz = 0;
                        qp->r_rq.size = 0;
                        qp->r_rq.max_sge = 0;
                        qp->r_rq.wq = NULL;
                        init_attr->cap.max_recv_wr = 0;
                        init_attr->cap.max_recv_sge = 0;
                } else {
                        qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
                        qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
                        sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
                                sizeof(struct ipath_rwqe);
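                        /*
                         * Allocate the RWQ with vmalloc_user() so it can
                         * be mmap()ed into user space (see ipath_mmap()).
                         */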
                        qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
                                              qp->r_rq.size * sz);
                        if (!qp->r_rq.wq) {
                                ret = ERR_PTR(-ENOMEM);
                                goto bail_qp;
                        }
                }

                /*
                 * ib_create_qp() will initialize qp->ibqp
                 * except for qp->ibqp.qp_num.
                 */
                spin_lock_init(&qp->s_lock);
                spin_lock_init(&qp->r_rq.lock);
                atomic_set(&qp->refcount, 0);
                init_waitqueue_head(&qp->wait);
                init_waitqueue_head(&qp->wait_dma);
                tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
                INIT_LIST_HEAD(&qp->piowait);
                INIT_LIST_HEAD(&qp->timerwait);
                qp->state = IB_QPS_RESET;
                qp->s_wq = swq;
                qp->s_size = init_attr->cap.max_send_wr + 1;
                qp->s_max_sge = init_attr->cap.max_send_sge;
                if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
                        qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
                else
                        qp->s_flags = 0;
                dev = to_idev(ibpd->device);
                err = ipath_alloc_qpn(&dev->qp_table, qp,
                                      init_attr->qp_type);
                if (err) {
                        ret = ERR_PTR(err);
                        vfree(qp->r_rq.wq);
                        goto bail_qp;
                }
                qp->ip = NULL;
                qp->s_tx = NULL;
                ipath_reset_qp(qp, init_attr->qp_type);
                break;

        default:
                /* Don't support raw QPs */
                ret = ERR_PTR(-ENOSYS);
                goto bail;
        }

        init_attr->cap.max_inline_data = 0;

        /*
         * Return the address of the RWQ as the offset to mmap.
         * See ipath_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                if (!qp->r_rq.wq) {
                        __u64 offset = 0;

                        err = ib_copy_to_udata(udata, &offset,
                                               sizeof(offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_ip;
                        }
                } else {
                        u32 s = sizeof(struct ipath_rwq) +
                                qp->r_rq.size * sz;

                        qp->ip =
                            ipath_create_mmap_info(dev, s,
                                                   ibpd->uobject->context,
                                                   qp->r_rq.wq);
                        if (!qp->ip) {
                                ret = ERR_PTR(-ENOMEM);
                                goto bail_ip;
                        }

                        err = ib_copy_to_udata(udata, &(qp->ip->offset),
                                               sizeof(qp->ip->offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_ip;
                        }
                }
        }

        spin_lock(&dev->n_qps_lock);
        if (dev->n_qps_allocated == ib_ipath_max_qps) {
                spin_unlock(&dev->n_qps_lock);
                ret = ERR_PTR(-ENOMEM);
                goto bail_ip;
        }

        dev->n_qps_allocated++;
        spin_unlock(&dev->n_qps_lock);

        if (qp->ip) {
                spin_lock_irq(&dev->pending_lock);
                list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
                spin_unlock_irq(&dev->pending_lock);
        }

        ret = &qp->ibqp;
        goto bail;

bail_ip:
        if (qp->ip)
                kref_put(&qp->ip->ref, ipath_release_mmap_info);
        else
                vfree(qp->r_rq.wq);
        ipath_free_qp(&dev->qp_table, qp);
        free_qpn(&dev->qp_table, qp->ibqp.qp_num);
bail_qp:
        kfree(qp);
bail_swq:
        vfree(swq);
bail:
        return ret;
}

/**
 * ipath_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int ipath_destroy_qp(struct ib_qp *ibqp)
{
        struct ipath_qp *qp = to_iqp(ibqp);
        struct ipath_ibdev *dev = to_idev(ibqp->device);

        /* Make sure HW and driver activity is stopped. */
        spin_lock_irq(&qp->s_lock);
        if (qp->state != IB_QPS_RESET) {
                qp->state = IB_QPS_RESET;
                spin_lock(&dev->pending_lock);
                if (!list_empty(&qp->timerwait))
                        list_del_init(&qp->timerwait);
                if (!list_empty(&qp->piowait))
                        list_del_init(&qp->piowait);
                spin_unlock(&dev->pending_lock);
                qp->s_flags &= ~IPATH_S_ANY_WAIT;
                spin_unlock_irq(&qp->s_lock);
                /* Stop the sending tasklet */
                tasklet_kill(&qp->s_task);
                wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
        } else
                spin_unlock_irq(&qp->s_lock);

        ipath_free_qp(&dev->qp_table, qp);

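        /*
         * If a send DMA request is still outstanding, drop the QP's
         * reference to it and return the txreq to the device free list.
         */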
        if (qp->s_tx) {
                atomic_dec(&qp->refcount);
                if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
                        kfree(qp->s_tx->txreq.map_addr);
                spin_lock_irq(&dev->pending_lock);
                list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
                spin_unlock_irq(&dev->pending_lock);
                qp->s_tx = NULL;
        }

        wait_event(qp->wait, !atomic_read(&qp->refcount));

        /* all users cleaned up, mark the QPN available */
        free_qpn(&dev->qp_table, qp->ibqp.qp_num);
        spin_lock(&dev->n_qps_lock);
        dev->n_qps_allocated--;
        spin_unlock(&dev->n_qps_lock);

        if (qp->ip)
                kref_put(&qp->ip->ref, ipath_release_mmap_info);
        else
                vfree(qp->r_rq.wq);
        vfree(qp->s_wq);
        kfree(qp);
        return 0;
}

/**
 * ipath_init_qp_table - initialize the QP table for a device
 * @idev: the device whose QP table we're initializing
 * @size: the size of the QP table
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
{
        int i;
        int ret;

        idev->qp_table.last = 1;        /* QPN 0 and 1 are special. */
        idev->qp_table.max = size;
        idev->qp_table.nmaps = 1;
        idev->qp_table.table = kzalloc(size * sizeof(*idev->qp_table.table),
                                       GFP_KERNEL);
        if (idev->qp_table.table == NULL) {
                ret = -ENOMEM;
                goto bail;
        }

        for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
                atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
                idev->qp_table.map[i].page = NULL;
        }

        ret = 0;

bail:
        return ret;
}

/**
 * ipath_get_credit - update the send credit state of a QP
 * @qp: the QP to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
{
        u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;

        /*
         * If the credit is invalid, we can send
         * as many packets as we like.  Otherwise, we have to
         * honor the credit field.
         */
        if (credit == IPATH_AETH_CREDIT_INVAL)
                qp->s_lsn = (u32) -1;
        else if (qp->s_lsn != (u32) -1) {
                /* Compute new LSN (i.e., MSN + credit) */
                credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
                if (ipath_cmp24(credit, qp->s_lsn) > 0)
                        qp->s_lsn = credit;
        }

        /* Restart sending if it was blocked due to lack of credits. */
        if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) &&
            qp->s_cur != qp->s_head &&
            (qp->s_lsn == (u32) -1 ||
             ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
                         qp->s_lsn + 1) <= 0))
                ipath_schedule_send(qp);
}