/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
32 #ifndef __IWCH_PROVIDER_H__
33 #define __IWCH_PROVIDER_H__
35 #include <linux/list.h>
36 #include <linux/spinlock.h>
37 #include <rdma/ib_verbs.h>
38 #include <asm/types.h>
50 static inline struct iwch_pd *to_iwch_pd(struct ib_pd *ibpd)
52 return container_of(ibpd, struct iwch_pd, ibpd);
55 struct tpt_attributes {
60 enum tpt_mem_perm perms;
61 u32 remote_invaliate_disable:1;
78 struct tpt_attributes attr;
81 typedef struct iwch_mw iwch_mw_handle;
83 static inline struct iwch_mr *to_iwch_mr(struct ib_mr *ibmr)
85 return container_of(ibmr, struct iwch_mr, ibmr);
92 struct tpt_attributes attr;
95 static inline struct iwch_mw *to_iwch_mw(struct ib_mw *ibmw)
97 return container_of(ibmw, struct iwch_mw, ibmw);
102 struct iwch_dev *rhp;
106 wait_queue_head_t wait;
107 u32 __user *user_rptr_addr;
110 static inline struct iwch_cq *to_iwch_cq(struct ib_cq *ibcq)
112 return container_of(ibcq, struct iwch_cq, ibcq);
119 struct iwch_mpa_attributes {
120 u8 recv_marker_enabled;
121 u8 xmit_marker_enabled; /* iWARP: enable inbound Read Resp. */
123 u8 version; /* 0 or 1 */
126 struct iwch_qp_attributes {
132 u32 sq_max_sges_rdma_write;
136 u8 enable_rdma_write; /* enable inbound Read Resp. */
138 u8 enable_mmid0_fastreg; /* Enable STAG0 + Fast-register */
140 * Next QP state. If specify the current state, only the
141 * QP attributes will be modified.
147 char terminate_buffer[52];
148 u32 terminate_msg_len;
149 u8 is_terminate_local;
150 struct iwch_mpa_attributes mpa_attr; /* IN-OUT */
151 struct iwch_ep *llp_stream_handle;
152 char *stream_msg_buf; /* Last stream msg. before Idle -> RTS */
153 u32 stream_msg_buf_len; /* Only on Idle -> RTS */
158 struct iwch_dev *rhp;
160 struct iwch_qp_attributes attr;
164 wait_queue_head_t wait;
165 enum IWCH_QP_FLAGS flags;
166 struct timer_list timer;
169 static inline int qp_quiesced(struct iwch_qp *qhp)
171 return qhp->flags & QP_QUIESCED;
174 static inline struct iwch_qp *to_iwch_qp(struct ib_qp *ibqp)
176 return container_of(ibqp, struct iwch_qp, ibqp);
/* Reference counting on the underlying QP object (defined in the .c file). */
void iwch_qp_add_ref(struct ib_qp *qp);
void iwch_qp_rem_ref(struct ib_qp *qp);
182 struct iwch_ucontext {
183 struct ib_ucontext ibucontext;
184 struct cxio_ucontext uctx;
186 spinlock_t mmap_lock;
187 struct list_head mmaps;
190 static inline struct iwch_ucontext *to_iwch_ucontext(struct ib_ucontext *c)
192 return container_of(c, struct iwch_ucontext, ibucontext);
195 struct iwch_mm_entry {
196 struct list_head entry;
202 static inline struct iwch_mm_entry *remove_mmap(struct iwch_ucontext *ucontext,
203 u32 key, unsigned len)
205 struct list_head *pos, *nxt;
206 struct iwch_mm_entry *mm;
208 spin_lock(&ucontext->mmap_lock);
209 list_for_each_safe(pos, nxt, &ucontext->mmaps) {
211 mm = list_entry(pos, struct iwch_mm_entry, entry);
212 if (mm->key == key && mm->len == len) {
213 list_del_init(&mm->entry);
214 spin_unlock(&ucontext->mmap_lock);
215 PDBG("%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
216 key, (unsigned long long) mm->addr, mm->len);
220 spin_unlock(&ucontext->mmap_lock);
224 static inline void insert_mmap(struct iwch_ucontext *ucontext,
225 struct iwch_mm_entry *mm)
227 spin_lock(&ucontext->mmap_lock);
228 PDBG("%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
229 mm->key, (unsigned long long) mm->addr, mm->len);
230 list_add_tail(&mm->entry, &ucontext->mmaps);
231 spin_unlock(&ucontext->mmap_lock);
/*
 * Bit mask selecting which fields of iwch_qp_attributes a modify-QP
 * operation should apply.  IWCH_QP_ATTR_VALID_MODIFY is the set of
 * attributes that may legally change on an already-created QP.
 */
enum iwch_qp_attr_mask {
	IWCH_QP_ATTR_NEXT_STATE = 1 << 0,
	IWCH_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
	IWCH_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
	IWCH_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
	IWCH_QP_ATTR_MAX_ORD = 1 << 11,
	IWCH_QP_ATTR_MAX_IRD = 1 << 12,
	IWCH_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
	IWCH_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
	IWCH_QP_ATTR_MPA_ATTR = 1 << 24,
	IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
	IWCH_QP_ATTR_VALID_MODIFY = (IWCH_QP_ATTR_ENABLE_RDMA_READ |
				     IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
				     IWCH_QP_ATTR_MAX_ORD |
				     IWCH_QP_ATTR_MAX_IRD |
				     IWCH_QP_ATTR_LLP_STREAM_HANDLE |
				     IWCH_QP_ATTR_STREAM_MSG_BUFFER |
				     IWCH_QP_ATTR_MPA_ATTR |
				     IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE)
};
255 int iwch_modify_qp(struct iwch_dev *rhp,
257 enum iwch_qp_attr_mask mask,
258 struct iwch_qp_attributes *attrs,
265 IWCH_QP_STATE_TERMINATE,
266 IWCH_QP_STATE_CLOSING,
270 static inline int iwch_convert_state(enum ib_qp_state ib_state)
275 return IWCH_QP_STATE_IDLE;
277 return IWCH_QP_STATE_RTS;
279 return IWCH_QP_STATE_CLOSING;
281 return IWCH_QP_STATE_TERMINATE;
283 return IWCH_QP_STATE_ERROR;
/*
 * TPT memory access permissions.  Single-bit flags that can be OR'd
 * together; the LOCAL/REMOTE values are convenience combinations.
 */
enum iwch_mem_perms {
	IWCH_MEM_ACCESS_LOCAL_READ = 1 << 0,
	IWCH_MEM_ACCESS_LOCAL_WRITE = 1 << 1,
	IWCH_MEM_ACCESS_REMOTE_READ = 1 << 2,
	IWCH_MEM_ACCESS_REMOTE_WRITE = 1 << 3,
	IWCH_MEM_ACCESS_ATOMICS = 1 << 4,
	IWCH_MEM_ACCESS_BINDING = 1 << 5,
	IWCH_MEM_ACCESS_LOCAL =
	    (IWCH_MEM_ACCESS_LOCAL_READ | IWCH_MEM_ACCESS_LOCAL_WRITE),
	IWCH_MEM_ACCESS_REMOTE =
	    (IWCH_MEM_ACCESS_REMOTE_WRITE | IWCH_MEM_ACCESS_REMOTE_READ)
	    /* cannot go beyond 1 << 31 */
} __attribute__ ((packed));
303 static inline u32 iwch_convert_access(int acc)
305 return (acc & IB_ACCESS_REMOTE_WRITE ? IWCH_MEM_ACCESS_REMOTE_WRITE : 0)
306 | (acc & IB_ACCESS_REMOTE_READ ? IWCH_MEM_ACCESS_REMOTE_READ : 0) |
307 (acc & IB_ACCESS_LOCAL_WRITE ? IWCH_MEM_ACCESS_LOCAL_WRITE : 0) |
308 (acc & IB_ACCESS_MW_BIND ? IWCH_MEM_ACCESS_BINDING : 0) |
309 IWCH_MEM_ACCESS_LOCAL_READ;
/* Validity state of a memory region's STag. */
enum iwch_mmid_state {
	IWCH_STAG_STATE_VALID,
	IWCH_STAG_STATE_INVALID
};
/* Flags controlling how much state a QP query returns or alters. */
enum iwch_qp_query_flags {
	IWCH_QP_QUERY_CONTEXT_NONE = 0x0,	/* No ctx; Only attrs */
	IWCH_QP_QUERY_CONTEXT_GET = 0x1,	/* Get ctx + attrs */
	IWCH_QP_QUERY_CONTEXT_SUSPEND = 0x2,	/* Not Supported */

	/*
	 * Quiesce QP context; Consumer
	 * will NOT replay outstanding WR
	 */
	IWCH_QP_QUERY_CONTEXT_QUIESCE = 0x4,
	IWCH_QP_QUERY_CONTEXT_REMOVE = 0x8,
	IWCH_QP_QUERY_TEST_USERWRITE = 0x32	/* Test special */
};
/* Verbs entry points and driver-internal helpers (defined in .c files). */
int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr);
int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
int iwch_bind_mw(struct ib_qp *qp,
		 struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind);
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
int iwch_register_device(struct iwch_dev *dev);
void iwch_unregister_device(struct iwch_dev *dev);
int iwch_quiesce_qps(struct iwch_cq *chp);
int iwch_resume_qps(struct iwch_cq *chp);
void stop_read_rep_timer(struct iwch_qp *qhp);
345 int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
349 int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
354 int build_phys_page_list(struct ib_phys_buf *buffer_list,
/* Node description string advertised via the verbs device attributes. */
#define IWCH_NODE_DESC "cxgb3 Chelsio Communications"