/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IPATH_VERBS_H
#define IPATH_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>

#include "ipath_kernel.h"

#define IPATH_MAX_RDMA_ATOMIC	4

#define QPN_MAX			(1 << 24)
#define QPNMAP_ENTRIES		(QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)

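/*
 * Illustrative sanity check (not in the original header): each QPN map
 * page is a bitmap covering PAGE_SIZE * BITS_PER_BYTE QPNs, so with
 * 4 KiB pages the map table needs 2^24 / (4096 * 8) = 512 entries.
 * The helper name below is hypothetical.
 */
static inline void qpnmap_sizing_check_example(void)
{
	/* always false for any page size that divides 2^24 evenly */
	BUILD_BUG_ON(QPNMAP_ENTRIES * PAGE_SIZE * BITS_PER_BYTE != QPN_MAX);
}
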
/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define IPATH_UVERBS_ABI_VERSION	2

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE	(IB_CQ_NEXT_COMP + 1)

/* AETH NAK opcode values */
#define IB_RNR_NAK			0x20
#define IB_NAK_PSN_ERROR		0x60
#define IB_NAK_INVALID_REQUEST		0x61
#define IB_NAK_REMOTE_ACCESS_ERROR	0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST	0x64

/* Flags for checking QP state (see ib_ipath_state_ops[]) */
#define IPATH_POST_SEND_OK		0x01
#define IPATH_POST_RECV_OK		0x02
#define IPATH_PROCESS_RECV_OK		0x04
#define IPATH_PROCESS_SEND_OK		0x08
#define IPATH_PROCESS_NEXT_SEND_OK	0x10
#define IPATH_FLUSH_SEND		0x20
#define IPATH_FLUSH_RECV		0x40
#define IPATH_PROCESS_OR_FLUSH_SEND \
	(IPATH_PROCESS_SEND_OK | IPATH_FLUSH_SEND)

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE	0x00
#define IB_PMA_SAMPLE_STATUS_STARTED	0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING	0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA	__constant_htons(0x0001)
#define IB_PMA_PORT_RCV_DATA	__constant_htons(0x0002)
#define IB_PMA_PORT_XMIT_PKTS	__constant_htons(0x0003)
#define IB_PMA_PORT_RCV_PKTS	__constant_htons(0x0004)
#define IB_PMA_PORT_XMIT_WAIT	__constant_htons(0x0005)

struct ib_reth {
	__be64 vaddr;
	__be32 rkey;
	__be32 length;
} __attribute__ ((packed));

struct ib_atomic_eth {
	__be32 vaddr[2];	/* unaligned so access as 2 32-bit words */
	__be32 rkey;
	__be64 swap_data;
	__be64 compare_data;
} __attribute__ ((packed));

struct ipath_other_headers {
	__be32 bth[3];
	union {
		struct {
			__be32 deth[2];
			__be32 imm_data;
		} ud;
		struct {
			struct ib_reth reth;
			__be32 imm_data;
		} rc;
		struct {
			__be32 aeth;
			__be32 atomic_ack_eth[2];
		} at;
		__be32 imm_data;
		__be32 aeth;
		struct ib_atomic_eth atomic_eth;
	} u;
} __attribute__ ((packed));

/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
 * will be in the eager header buffer.  The remaining 12 or 16 bytes
 * are in the data buffer.
 */
struct ipath_ib_header {
	__be16 lrh[4];
	union {
		struct {
			struct ib_grh grh;
			struct ipath_other_headers oth;
		} l;
		struct ipath_other_headers oth;
	} u;
} __attribute__ ((packed));

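/*
 * Worked arithmetic for the comment above (illustrative only; these
 * enum names are not part of the driver): LRH 8 + GRH 40 + BTH 12 +
 * DETH 8 = 68 bytes, or 72 with a 4-byte immediate.
 */
enum {
	IPATH_EX_LRH_BYTES	= 8,	/* 4 * sizeof(__be16) */
	IPATH_EX_GRH_BYTES	= 40,	/* sizeof(struct ib_grh) */
	IPATH_EX_BTH_BYTES	= 12,	/* 3 * sizeof(__be32) */
	IPATH_EX_DETH_BYTES	= 8,	/* 2 * sizeof(__be32) */
	IPATH_EX_UD_GRH_BYTES	= IPATH_EX_LRH_BYTES + IPATH_EX_GRH_BYTES +
				  IPATH_EX_BTH_BYTES + IPATH_EX_DETH_BYTES,
};
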
struct ipath_pio_header {
	__le32 pbc[2];
	struct ipath_ib_header hdr;
} __attribute__ ((packed));

/*
 * There is one struct ipath_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct ipath_mcast_qp.
 */
struct ipath_mcast_qp {
	struct list_head list;
	struct ipath_qp *qp;
};

struct ipath_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/* Protection domain */
struct ipath_pd {
	struct ib_pd ibpd;
	int user;		/* non-zero if created from user space */
};

/* Address Handle */
struct ipath_ah {
	struct ib_ah ibah;
	struct ib_ah_attr attr;
};

/*
 * This structure is used by ipath_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct ipath_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct ipath_cq_wc {
	u32 head;		/* index of next entry to fill */
	u32 tail;		/* index of next ib_poll_cq() entry */
	union {
		/* these are actually size ibcq.cqe + 1 */
		struct ib_uverbs_wc uqueue[0];
		struct ib_wc kqueue[0];
	};
};

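/*
 * Illustrative consumer check (the real poll logic lives in
 * ipath_poll_cq(); this helper name is hypothetical).  head is where
 * the driver fills the next completion and tail chases it through a
 * ring of ibcq.cqe + 1 entries, so equal indices mean the ring is empty.
 */
static inline int ipath_cq_ring_empty_example(const struct ipath_cq_wc *wc)
{
	return wc->head == wc->tail;
}
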
/*
 * The completion queue structure.
 */
struct ipath_cq {
	struct ib_cq ibcq;
	struct tasklet_struct comptask;
	spinlock_t lock;
	u8 notify;
	u8 triggered;
	struct ipath_cq_wc *queue;
	struct ipath_mmap_info *ip;
};

/*
 * A segment is a linear region of low physical memory.
 * XXX Maybe we should use phys addr here and kmap()/kunmap().
 * Used by the verbs layer.
 */
struct ipath_seg {
	void *vaddr;
	size_t length;
};

/* The number of ipath_segs that fit in a page. */
#define IPATH_SEGSZ	(PAGE_SIZE / sizeof(struct ipath_seg))

struct ipath_segarray {
	struct ipath_seg segs[IPATH_SEGSZ];
};

struct ipath_mregion {
	struct ib_pd *pd;	/* shares refcnt of ibmr.pd */
	u64 user_base;		/* User's address for this region */
	u64 iova;		/* IB start address of this region */
	size_t length;
	u32 lkey;
	u32 offset;		/* offset (bytes) to start of region */
	int access_flags;
	u32 max_segs;		/* number of ipath_segs in all the arrays */
	u32 mapsz;		/* size of the map array */
	struct ipath_segarray *map[0];	/* the segments */
};

/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct ipath_sge {
	struct ipath_mregion *mr;
	void *vaddr;		/* kernel virtual address of segment */
	u32 sge_length;		/* length of the SGE */
	u32 length;		/* remaining length of the segment */
	u16 m;			/* current index: mr->map[m] */
	u16 n;			/* current index: mr->map[m]->segs[n] */
};

/* Memory region */
struct ipath_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct ipath_mregion mr;	/* must be last */
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct ipath_swqe {
	struct ib_send_wr wr;	/* don't use wr.sg_list */
	u32 psn;		/* first packet sequence number */
	u32 lpsn;		/* last packet sequence number */
	u32 ssn;		/* send sequence number */
	u32 length;		/* total length of data in sg_list */
	struct ipath_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct ipath_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct ipath_rwq {
	u32 head;		/* new work requests posted to the head */
	u32 tail;		/* receives pull requests from here. */
	struct ipath_rwqe wq[0];
};

struct ipath_rq {
	struct ipath_rwq *wq;
	spinlock_t lock;
	u32 size;		/* size of RWQE array */
	u8 max_sge;
};

struct ipath_srq {
	struct ib_srq ibsrq;
	struct ipath_rq rq;
	struct ipath_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

struct ipath_sge_state {
	struct ipath_sge *sg_list;	/* next SGE to be used if any */
	struct ipath_sge sge;	/* progress state for the current SGE */
	u8 num_sge;
	u8 static_rate;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send a RDMA read response or atomic operation.
 */
struct ipath_ack_entry {
	u8 opcode;
	u8 sent;
	u32 psn;
	union {
		struct ipath_sge_state rdma_sge;
		u64 atomic_data;
	};
};

/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock in that order
 * which only happens in modify_qp() or changing the QP 'state'.
 */
struct ipath_qp {
	struct ib_qp ibqp;
	struct ipath_qp *next;		/* link list for QPN hash table */
	struct ipath_qp *timer_next;	/* link list for ipath_ib_timer() */
	struct ipath_qp *pio_next;	/* link for ipath_ib_piobufavail() */
	struct list_head piowait;	/* link for wait PIO buf */
	struct list_head timerwait;	/* link for waiting for timeouts */
	struct ib_ah_attr remote_ah_attr;
	struct ipath_ib_header s_hdr;	/* next packet header to send */
	atomic_t refcount;
	wait_queue_head_t wait;
	wait_queue_head_t wait_dma;
	struct tasklet_struct s_task;
	struct ipath_mmap_info *ip;
	struct ipath_sge_state *s_cur_sge;
	struct ipath_verbs_txreq *s_tx;
	struct ipath_sge_state s_sge;	/* current send request data */
	struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1];
	struct ipath_sge_state s_ack_rdma_sge;
	struct ipath_sge_state s_rdma_read_sge;
	struct ipath_sge_state r_sge;	/* current receive data */
	spinlock_t s_lock;
	atomic_t s_dma_busy;
	u16 s_pkt_delay;
	u16 s_hdrwords;		/* size of s_hdr in 32 bit words */
	u32 s_cur_size;		/* size of send packet in bytes */
	u32 s_len;		/* total length of s_sge */
	u32 s_rdma_read_len;	/* total length of s_rdma_read_sge */
	u32 s_next_psn;		/* PSN for next request */
	u32 s_last_psn;		/* last response PSN processed */
	u32 s_psn;		/* current packet sequence number */
	u32 s_ack_rdma_psn;	/* PSN for sending RDMA read responses */
	u32 s_ack_psn;		/* PSN for acking sends and RDMA writes */
	u32 s_rnr_timeout;	/* number of milliseconds for RNR timeout */
	u32 r_ack_psn;		/* PSN for next ACK or atomic ACK */
	u64 r_wr_id;		/* ID for current receive WQE */
	unsigned long r_aflags;
	u32 r_len;		/* total length of r_sge */
	u32 r_rcv_len;		/* receive data len processed */
	u32 r_psn;		/* expected rcv packet sequence number */
	u32 r_msn;		/* message sequence number */
	u8 state;		/* QP state */
	u8 s_state;		/* opcode of last packet sent */
	u8 s_ack_state;		/* opcode of packet to ACK */
	u8 s_nak_state;		/* non-zero if NAK is pending */
	u8 r_state;		/* opcode of last packet received */
	u8 r_nak_state;		/* non-zero if NAK is pending */
	u8 r_min_rnr_timer;	/* retry timeout value for RNR NAKs */
	u8 r_flags;
	u8 r_max_rd_atomic;	/* max number of RDMA read/atomic to receive */
	u8 r_head_ack_queue;	/* index into s_ack_queue[] */
	u8 qp_access_flags;
	u8 s_max_sge;		/* size of s_wq->sg_list */
	u8 s_retry_cnt;		/* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 s_retry;		/* requester retry counter */
	u8 s_rnr_retry;		/* requester RNR retry counter */
	u8 s_pkey_index;	/* PKEY index to use */
	u8 s_max_rd_atomic;	/* max number of RDMA read/atomic to send */
	u8 s_num_rd_atomic;	/* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;	/* index into s_ack_queue[] */
	u8 s_flags;
	u8 s_dmult;
	u8 s_draining;
	u8 timeout;		/* Timeout for this QP */
	enum ib_mtu path_mtu;
	u32 remote_qpn;
	u32 qkey;		/* QKEY for this QP (for UD or RD) */
	u32 s_size;		/* send work queue size */
	u32 s_head;		/* new entries added here */
	u32 s_tail;		/* next entry to process */
	u32 s_cur;		/* current work queue entry */
	u32 s_last;		/* last un-ACK'ed entry */
	u32 s_ssn;		/* SSN of tail entry */
	u32 s_lsn;		/* limit sequence number (credit) */
	struct ipath_swqe *s_wq;	/* send work queue */
	struct ipath_swqe *s_wqe;
	struct ipath_rq r_rq;	/* receive work queue */
	struct ipath_sge r_sg_list[0];	/* verified SGEs */
};

/*
 * Atomic bit definitions for r_aflags.
 */
#define IPATH_R_WRID_VALID	0

/*
 * Bit definitions for r_flags.
 */
#define IPATH_R_REUSE_SGE	0x01
#define IPATH_R_RDMAR_SEQ	0x02

/*
 * Bit definitions for s_flags.
 *
 * IPATH_S_FENCE_PENDING - waiting for all prior RDMA read or atomic SWQEs
 *			   before processing the next SWQE
 * IPATH_S_RDMAR_PENDING - waiting for any RDMA read or atomic SWQEs
 *			   before processing the next SWQE
 * IPATH_S_WAITING - waiting for RNR timeout or send buffer available.
 * IPATH_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * IPATH_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *		      next send completion entry not via send DMA.
 */
#define IPATH_S_SIGNAL_REQ_WR	0x01
#define IPATH_S_FENCE_PENDING	0x02
#define IPATH_S_RDMAR_PENDING	0x04
#define IPATH_S_ACK_PENDING	0x08
#define IPATH_S_BUSY		0x10
#define IPATH_S_WAITING		0x20
#define IPATH_S_WAIT_SSN_CREDIT	0x40
#define IPATH_S_WAIT_DMA	0x80

#define IPATH_S_ANY_WAIT (IPATH_S_FENCE_PENDING | IPATH_S_RDMAR_PENDING | \
	IPATH_S_WAITING | IPATH_S_WAIT_SSN_CREDIT | IPATH_S_WAIT_DMA)

#define IPATH_PSN_CREDIT	512

/*
 * Since struct ipath_swqe is not a fixed size, we can't simply index into
 * struct ipath_qp.s_wq.  This function does the array index computation.
 */
static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp,
					      unsigned n)
{
	return (struct ipath_swqe *)((char *)qp->s_wq +
				     (sizeof(struct ipath_swqe) +
				      qp->s_max_sge *
				      sizeof(struct ipath_sge)) * n);
}

/*
 * Since struct ipath_rwqe is not a fixed size, we can't simply index into
 * struct ipath_rwq.wq.  This function does the array index computation.
 */
static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,
					      unsigned n)
{
	return (struct ipath_rwqe *)
		((char *) rq->wq->wq +
		 (sizeof(struct ipath_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}

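/*
 * Illustrative sketch (not part of the driver): counting the RWQEs
 * currently posted to a receive queue by stepping tail toward head the
 * same way the receive path does.  The caller is assumed to hold
 * rq->lock; the helper name is hypothetical.
 */
static inline u32 ipath_count_rwqes_example(struct ipath_rq *rq)
{
	u32 n = rq->wq->tail;
	u32 count = 0;

	/* entries live in [tail, head); indices wrap at rq->size */
	while (n != rq->wq->head) {
		count++;
		if (++n >= rq->size)
			n = 0;
	}
	return count;
}
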
/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct qpn_map {
	atomic_t n_free;
	void *page;
};

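/*
 * Illustrative sketch of how a QPN maps into the bitmap pages above
 * (the real allocator lives in ipath_qp.c; this helper and its
 * byte-level bit ordering are assumptions for illustration).  Each
 * page covers PAGE_SIZE * BITS_PER_BYTE consecutive QPNs.
 */
static inline int qpn_is_used_example(struct qpn_map *maps, u32 qpn)
{
	struct qpn_map *map = maps + qpn / (PAGE_SIZE * BITS_PER_BYTE);
	u32 off = qpn & (PAGE_SIZE * BITS_PER_BYTE - 1);
	const u8 *page = map->page;

	if (!page)		/* never allocated: no QPN in it is in use */
		return 0;
	return (page[off / BITS_PER_BYTE] >> (off % BITS_PER_BYTE)) & 1;
}
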
struct ipath_qp_table {
	spinlock_t lock;
	u32 last;		/* last QP number allocated */
	u32 max;		/* size of the hash table */
	u32 nmaps;		/* size of the map table */
	struct ipath_qp **table;
	/* bit map of free numbers */
	struct qpn_map map[QPNMAP_ENTRIES];
};

struct ipath_lkey_table {
	spinlock_t lock;
	u32 next;		/* next unused index (speeds search) */
	u32 gen;		/* generation count */
	u32 max;		/* size of the table */
	struct ipath_mregion **table;
};

struct ipath_opcode_stats {
	u64 n_packets;		/* number of packets */
	u64 n_bytes;		/* total number of bytes */
};

struct ipath_ibdev {
	struct ib_device ibdev;
	struct ipath_devdata *dd;
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock;
	u32 mmap_offset;
	int ib_unit;		/* This is the device number */
	u16 sm_lid;		/* in host order */
	u8 sm_sl;
	u8 mkeyprot;
	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;

	/* The following fields are really per port. */
	struct ipath_qp_table qp_table;
	struct ipath_lkey_table lk_table;
	struct list_head pending[3];	/* FIFO of QPs waiting for ACKs */
	struct list_head piowait;	/* list for wait PIO buf */
	struct list_head txreq_free;
	void *txreq_bufs;
	/* list of QPs waiting for RNR timer */
	struct list_head rnrwait;
	spinlock_t pending_lock;
	__be64 sys_image_guid;	/* in network order */
	__be64 gid_prefix;	/* in network order */
	__be64 mkey;

	u32 n_pds_allocated;	/* number of PDs allocated for device */
	spinlock_t n_pds_lock;
	u32 n_ahs_allocated;	/* number of AHs allocated for device */
	spinlock_t n_ahs_lock;
	u32 n_cqs_allocated;	/* number of CQs allocated for device */
	spinlock_t n_cqs_lock;
	u32 n_qps_allocated;	/* number of QPs allocated for device */
	spinlock_t n_qps_lock;
	u32 n_srqs_allocated;	/* number of SRQs allocated for device */
	spinlock_t n_srqs_lock;
	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;

	u64 ipath_sword;	/* total dwords sent (sample result) */
	u64 ipath_rword;	/* total dwords received (sample result) */
	u64 ipath_spkts;	/* total packets sent (sample result) */
	u64 ipath_rpkts;	/* total packets received (sample result) */
	/* # of ticks no data sent (sample result) */
	u64 ipath_xmit_wait;
	u64 rcv_errors;		/* # of packets with SW detected rcv errs */
	u64 n_unicast_xmit;	/* total unicast packets sent */
	u64 n_unicast_rcv;	/* total unicast packets received */
	u64 n_multicast_xmit;	/* total multicast packets sent */
	u64 n_multicast_rcv;	/* total multicast packets received */
	u64 z_symbol_error_counter;		/* starting count for PMA */
	u64 z_link_error_recovery_counter;	/* starting count for PMA */
	u64 z_link_downed_counter;		/* starting count for PMA */
	u64 z_port_rcv_errors;			/* starting count for PMA */
	u64 z_port_rcv_remphys_errors;		/* starting count for PMA */
	u64 z_port_xmit_discards;		/* starting count for PMA */
	u64 z_port_xmit_data;			/* starting count for PMA */
	u64 z_port_rcv_data;			/* starting count for PMA */
	u64 z_port_xmit_packets;		/* starting count for PMA */
	u64 z_port_rcv_packets;			/* starting count for PMA */
	u32 z_pkey_violations;			/* starting count for PMA */
	u32 z_local_link_integrity_errors;	/* starting count for PMA */
	u32 z_excessive_buffer_overrun_errors;	/* starting count for PMA */
	u32 z_vl15_dropped;			/* starting count for PMA */
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 qkey_violations;
	u16 mkey_violations;
	u16 mkey_lease_period;
	u16 pending_index;	/* which pending queue is active */
	u8 pma_sample_status;
	u8 subnet_timeout;
	u8 vl_high_limit;
	struct ipath_opcode_stats opstats[128];
};

struct ipath_verbs_counters {
	u64 symbol_error_counter;
	u64 link_error_recovery_counter;
	u64 link_downed_counter;
	u64 port_rcv_errors;
	u64 port_rcv_remphys_errors;
	u64 port_xmit_discards;
	u64 port_xmit_data;
	u64 port_rcv_data;
	u64 port_xmit_packets;
	u64 port_rcv_packets;
	u32 local_link_integrity_errors;
	u32 excessive_buffer_overrun_errors;
	u32 vl15_dropped;
};

struct ipath_verbs_txreq {
	struct ipath_qp		*qp;
	struct ipath_swqe	*wqe;
	u32			 map_len;
	u32			 len;
	struct ipath_sge_state	*ss;
	struct ipath_pio_header	 hdr;
	struct ipath_sdma_txreq	 txreq;
};

static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct ipath_mr, ibmr);
}

static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct ipath_pd, ibpd);
}

static inline struct ipath_ah *to_iah(struct ib_ah *ibah)
{
	return container_of(ibah, struct ipath_ah, ibah);
}

static inline struct ipath_cq *to_icq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct ipath_cq, ibcq);
}

static inline struct ipath_srq *to_isrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct ipath_srq, ibsrq);
}

static inline struct ipath_qp *to_iqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct ipath_qp, ibqp);
}

static inline struct ipath_ibdev *to_idev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct ipath_ibdev, ibdev);
}

/*
 * This must be called with s_lock held.
 */
static inline void ipath_schedule_send(struct ipath_qp *qp)
{
	if (qp->s_flags & IPATH_S_ANY_WAIT)
		qp->s_flags &= ~IPATH_S_ANY_WAIT;
	if (!(qp->s_flags & IPATH_S_BUSY))
		tasklet_hi_schedule(&qp->s_task);
}

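/*
 * Typical call pattern (illustrative only; the helper name is
 * hypothetical): take s_lock, update whatever queue state made the QP
 * schedulable, then kick the send tasklet.
 */
static inline void ipath_kick_sender_example(struct ipath_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	ipath_schedule_send(qp);	/* requires s_lock, per the comment above */
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
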
int ipath_process_mad(struct ib_device *ibdev,
		      int mad_flags,
		      u8 port_num,
		      struct ib_wc *in_wc,
		      struct ib_grh *in_grh,
		      struct ib_mad *in_mad, struct ib_mad *out_mad);

/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer <, ==, or > than zero.
 */
static inline int ipath_cmp24(u32 a, u32 b)
{
	return (((int) a) - ((int) b)) << 8;
}

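/*
 * Illustrative use (not part of the driver API): PSNs are 24-bit values
 * that wrap, so ordering is decided by the sign of the difference once
 * the high byte is shifted out.  For example, ipath_cmp24(0x000001,
 * 0xffffff) > 0, i.e. PSN 1 is "after" 0xffffff.  The helper below is
 * hypothetical.
 */
static inline int ipath_psn_in_window_example(u32 psn, u32 first, u32 last)
{
	/* true if first <= psn <= last, modulo 2^24 */
	return ipath_cmp24(psn, first) >= 0 && ipath_cmp24(psn, last) <= 0;
}
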
struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid);

int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
			    u64 *rwords, u64 *spkts, u64 *rpkts,
			    u64 *xmit_wait);

int ipath_get_counters(struct ipath_devdata *dd,
		       struct ipath_verbs_counters *cntrs);

int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int ipath_mcast_tree_empty(void);

__be32 ipath_compute_aeth(struct ipath_qp *qp);

struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn);

struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata);

int ipath_destroy_qp(struct ib_qp *ibqp);

int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err);

int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata);

int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_qp_init_attr *init_attr);

unsigned ipath_free_all_qps(struct ipath_qp_table *qpt);

int ipath_init_qp_table(struct ipath_ibdev *idev, int size);

void ipath_get_credit(struct ipath_qp *qp, u32 aeth);

unsigned ipath_ib_rate_to_mult(enum ib_rate rate);

int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
		     u32 hdrwords, struct ipath_sge_state *ss, u32 len);

void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);

void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);

void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

void ipath_restart_rc(struct ipath_qp *qp, u32 psn);

void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err);

int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);

void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

int ipath_alloc_lkey(struct ipath_lkey_table *rkt,
		     struct ipath_mregion *mr);

void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey);

int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
		  struct ib_sge *sge, int acc);

int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
		  u32 len, u64 vaddr, u32 rkey, int acc);

int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			   struct ib_recv_wr **bad_wr);

struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
				struct ib_srq_init_attr *srq_init_attr,
				struct ib_udata *udata);

int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask,
		     struct ib_udata *udata);

int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int ipath_destroy_srq(struct ib_srq *ibsrq);

void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);

int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
			      int comp_vector,
			      struct ib_ucontext *context,
			      struct ib_udata *udata);

int ipath_destroy_cq(struct ib_cq *ibcq);

int ipath_req_notify_cq(struct ib_cq *ibcq,
			enum ib_cq_notify_flags notify_flags);

int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc);

struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
				struct ib_phys_buf *buffer_list,
				int num_phys_buf, int acc, u64 *iova_start);

struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				u64 virt_addr, int mr_access_flags,
				struct ib_udata *udata);

int ipath_dereg_mr(struct ib_mr *ibmr);

struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			       struct ib_fmr_attr *fmr_attr);

int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		       int list_len, u64 iova);

int ipath_unmap_fmr(struct list_head *fmr_list);

int ipath_dealloc_fmr(struct ib_fmr *ibfmr);

void ipath_release_mmap_info(struct kref *ref);

struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
					       u32 size,
					       struct ib_ucontext *context,
					       void *obj);

void ipath_update_mmap_info(struct ipath_ibdev *dev,
			    struct ipath_mmap_info *ip,
			    u32 size, void *obj);

int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

void ipath_insert_rnr_queue(struct ipath_qp *qp);

int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
		   u32 *lengthp, struct ipath_sge_state *ss);

int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);

u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
		   struct ib_global_route *grh, u32 hwords, u32 nwords);

void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
			   struct ipath_other_headers *ohdr,
			   u32 bth0, u32 bth2);

void ipath_do_send(unsigned long data);

void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
			 enum ib_wc_status status);

int ipath_make_rc_req(struct ipath_qp *qp);

int ipath_make_uc_req(struct ipath_qp *qp);

int ipath_make_ud_req(struct ipath_qp *qp);

int ipath_register_ib_device(struct ipath_devdata *);

void ipath_unregister_ib_device(struct ipath_ibdev *);

void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32);

int ipath_ib_piobufavail(struct ipath_ibdev *);

unsigned ipath_get_npkeys(struct ipath_devdata *);

u32 ipath_get_cr_errpkey(struct ipath_devdata *);

unsigned ipath_get_pkey(struct ipath_devdata *, unsigned);

extern const enum ib_wc_opcode ib_ipath_wc_opcode[];

/*
 * Below converts HCA-specific LinkTrainingState to IB PhysPortState
 * values.
 */
extern const u8 ipath_cvt_physportstate[];
#define IB_PHYSPORTSTATE_SLEEP 1
#define IB_PHYSPORTSTATE_POLL 2
#define IB_PHYSPORTSTATE_DISABLED 3
#define IB_PHYSPORTSTATE_CFG_TRAIN 4
#define IB_PHYSPORTSTATE_LINKUP 5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6

extern const int ib_ipath_state_ops[];

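/*
 * Illustrative use of the state table (the real checks are scattered
 * through ipath_verbs.c and ipath_qp.c; this helper is hypothetical):
 * the QP state indexes a flag word whose bits, defined near the top of
 * this header, gate posting and processing of work requests.
 */
static inline int ipath_post_send_allowed_example(const struct ipath_qp *qp)
{
	return !!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK);
}
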
extern unsigned int ib_ipath_lkey_table_size;

extern unsigned int ib_ipath_max_cqes;

extern unsigned int ib_ipath_max_cqs;

extern unsigned int ib_ipath_max_qp_wrs;

extern unsigned int ib_ipath_max_qps;

extern unsigned int ib_ipath_max_sges;

extern unsigned int ib_ipath_max_mcast_grps;

extern unsigned int ib_ipath_max_mcast_qp_attached;

extern unsigned int ib_ipath_max_srqs;

extern unsigned int ib_ipath_max_srq_sges;

extern unsigned int ib_ipath_max_srq_wrs;

extern const u32 ib_ipath_rnr_table[];

extern struct ib_dma_mapping_ops ipath_dma_mapping_ops;

#endif				/* IPATH_VERBS_H */