/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_provider.c 4859 2006-01-09 21:55:10Z roland $
 */
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <linux/mm.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_user.h"
#include "mthca_memfree.h"
static void init_query_mad(struct ib_smp *mad)
{
        mad->base_version  = 1;
        mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        mad->class_version = 1;
        mad->method        = IB_MGMT_METHOD_GET;
}
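
/*
 * Device attributes are assembled from two sources: identifiers such
 * as the vendor/part/hw IDs and the system image GUID come from a
 * NodeInfo MAD, while the resource limits come from the limits the
 * driver computed when it brought the HCA up.
 */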
static int mthca_query_device(struct ib_device *ibdev,
                              struct ib_device_attr *props)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        struct mthca_dev *mdev = to_mdev(ibdev);
        u8 status;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof *props);

        props->fw_ver = mdev->fw_ver;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mthca_MAD_IFC(mdev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        props->device_cap_flags    = mdev->device_cap_flags;
        props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
                0xffffff;
        props->vendor_part_id      = be16_to_cpup((__be16 *) (out_mad->data + 30));
        props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

        props->max_mr_size         = ~0ull;
        props->page_size_cap       = mdev->limits.page_size_cap;
        props->max_qp              = mdev->limits.num_qps - mdev->limits.reserved_qps;
        props->max_qp_wr           = mdev->limits.max_wqes;
        props->max_sge             = mdev->limits.max_sg;
        props->max_cq              = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
        props->max_cqe             = mdev->limits.max_cqes;
        props->max_mr              = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
        props->max_pd              = mdev->limits.num_pds - mdev->limits.reserved_pds;
        props->max_qp_rd_atom      = 1 << mdev->qp_table.rdb_shift;
        props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma;
        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
        props->max_srq             = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
        props->max_srq_wr          = mdev->limits.max_srq_wqes;
        props->max_srq_sge         = mdev->limits.max_sg;
        props->local_ca_ack_delay  = mdev->limits.local_ca_ack_delay;
        props->atomic_cap          = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
                                        IB_ATOMIC_HCA : IB_ATOMIC_NONE;
        props->max_pkeys           = mdev->limits.pkey_table_len;
        props->max_mcast_grp       = mdev->limits.num_mgms + mdev->limits.num_amgms;
        props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;

        err = 0;
out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}
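
/*
 * Decode a PortInfo MAD into ib_port_attr; the numeric offsets into
 * out_mad->data below are byte positions of the corresponding fields
 * within the PortInfo attribute.
 */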
static int mthca_query_port(struct ib_device *ibdev,
                            u8 port, struct ib_port_attr *props)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof *props);

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        props->lid               = be16_to_cpup((__be16 *) (out_mad->data + 16));
        props->lmc               = out_mad->data[34] & 0x7;
        props->sm_lid            = be16_to_cpup((__be16 *) (out_mad->data + 18));
        props->sm_sl             = out_mad->data[36] & 0xf;
        props->state             = out_mad->data[32] & 0xf;
        props->phys_state        = out_mad->data[33] >> 4;
        props->port_cap_flags    = be32_to_cpup((__be32 *) (out_mad->data + 20));
        props->gid_tbl_len       = to_mdev(ibdev)->limits.gid_table_len;
        props->max_msg_sz        = 0x80000000;
        props->pkey_tbl_len      = to_mdev(ibdev)->limits.pkey_table_len;
        props->bad_pkey_cntr     = be16_to_cpup((__be16 *) (out_mad->data + 46));
        props->qkey_viol_cntr    = be16_to_cpup((__be16 *) (out_mad->data + 48));
        props->active_width      = out_mad->data[31] & 0xf;
        props->active_speed      = out_mad->data[35] >> 4;
        props->max_mtu           = out_mad->data[41] & 0xf;
        props->active_mtu        = out_mad->data[36] >> 4;
        props->subnet_timeout    = out_mad->data[51] & 0x1f;

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}
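
/*
 * Changing the port capability mask is a read-modify-write cycle
 * (query the current mask, merge in the set/clr masks, then SET_IB),
 * so the whole sequence is serialized with cap_mask_mutex.
 */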
static int mthca_modify_port(struct ib_device *ibdev,
                             u8 port, int port_modify_mask,
                             struct ib_port_modify *props)
{
        struct mthca_set_ib_param set_ib;
        struct ib_port_attr attr;
        int err;
        u8 status;

        if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
                return -ERESTARTSYS;

        err = mthca_query_port(ibdev, port, &attr);
        if (err)
                goto out;

        set_ib.set_si_guid     = 0;
        set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);

        set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
                ~props->clr_port_cap_mask;

        err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

out:
        mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
        return err;
}
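
/*
 * Each PKeyTable MAD carries a block of 32 P_Keys: attr_mod selects
 * the block and index % 32 picks the entry within it.
 */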
static int mthca_query_pkey(struct ib_device *ibdev,
                            u8 port, u16 index, u16 *pkey)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
        in_mad->attr_mod = cpu_to_be32(index / 32);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}
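
/*
 * A GID is assembled from two MADs: the upper 8 bytes are the subnet
 * prefix from PortInfo, and the lower 8 bytes are a port GUID from
 * the GuidInfo table, which holds eight 8-byte GUIDs per MAD.
 */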
static int mthca_query_gid(struct ib_device *ibdev, u8 port,
                           int index, union ib_gid *gid)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        memcpy(gid->raw, out_mad->data + 8, 8);

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
        in_mad->attr_mod = cpu_to_be32(index / 8);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}
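
/*
 * A user context owns a UAR (user access region, the page used for
 * ringing doorbells) and, on memfree HCAs, a table of doorbell
 * records; the sizes userspace needs to know are returned through
 * mthca_alloc_ucontext_resp.
 */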
static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
                                                struct ib_udata *udata)
{
        struct mthca_alloc_ucontext_resp uresp;
        struct mthca_ucontext *context;
        int err;

        memset(&uresp, 0, sizeof uresp);

        uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
        if (mthca_is_memfree(to_mdev(ibdev)))
                uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
        else
                uresp.uarc_size = 0;

        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);

        err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
        if (err) {
                kfree(context);
                return ERR_PTR(err);
        }

        context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
        if (IS_ERR(context->db_tab)) {
                err = PTR_ERR(context->db_tab);
                mthca_uar_free(to_mdev(ibdev), &context->uar);
                kfree(context);
                return ERR_PTR(err);
        }

        if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
                mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
                mthca_uar_free(to_mdev(ibdev), &context->uar);
                kfree(context);
                return ERR_PTR(-EFAULT);
        }

        return &context->ibucontext;
}
static int mthca_dealloc_ucontext(struct ib_ucontext *context)
{
        mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
                                  to_mucontext(context)->db_tab);
        mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
        kfree(to_mucontext(context));

        return 0;
}
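
/*
 * Map the context's UAR into userspace.  The mapping must cover
 * exactly one page and is made uncached, since the UAR is MMIO space.
 */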
static int mthca_mmap_uar(struct ib_ucontext *context,
                          struct vm_area_struct *vma)
{
        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (io_remap_pfn_range(vma, vma->vm_start,
                               to_mucontext(context)->uar.pfn,
                               PAGE_SIZE, vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}
static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
                                    struct ib_ucontext *context,
                                    struct ib_udata *udata)
{
        struct mthca_pd *pd;
        int err;

        pd = kmalloc(sizeof *pd, GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
        if (err) {
                kfree(pd);
                return ERR_PTR(err);
        }

        if (context) {
                if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
                        mthca_pd_free(to_mdev(ibdev), pd);
                        kfree(pd);
                        return ERR_PTR(-EFAULT);
                }
        }

        return &pd->ibpd;
}
static int mthca_dealloc_pd(struct ib_pd *pd)
{
        mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
        kfree(pd);

        return 0;
}
static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
                                     struct ib_ah_attr *ah_attr)
{
        int err;
        struct mthca_ah *ah;

        ah = kmalloc(sizeof *ah, GFP_ATOMIC);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
        if (err) {
                kfree(ah);
                return ERR_PTR(err);
        }

        return &ah->ibah;
}
static int mthca_ah_destroy(struct ib_ah *ah)
{
        mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
        kfree(ah);

        return 0;
}
static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
                                       struct ib_srq_init_attr *init_attr,
                                       struct ib_udata *udata)
{
        struct mthca_create_srq ucmd;
        struct mthca_ucontext *context = NULL;
        struct mthca_srq *srq;
        int err;

        srq = kmalloc(sizeof *srq, GFP_KERNEL);
        if (!srq)
                return ERR_PTR(-ENOMEM);

        if (pd->uobject) {
                context = to_mucontext(pd->uobject->context);

                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        err = -EFAULT;
                        goto err_free;
                }

                err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                        context->db_tab, ucmd.db_index,
                                        ucmd.db_page);
                if (err)
                        goto err_free;

                srq->mr.ibmr.lkey = ucmd.lkey;
                srq->db_index     = ucmd.db_index;
        }

        err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
                              &init_attr->attr, srq);

        if (err && pd->uobject)
                mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
                                    context->db_tab, ucmd.db_index);

        if (err)
                goto err_free;

        if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
                mthca_free_srq(to_mdev(pd->device), srq);
                err = -EFAULT;
                goto err_free;
        }

        return &srq->ibsrq;

err_free:
        kfree(srq);

        return ERR_PTR(err);
}
static int mthca_destroy_srq(struct ib_srq *srq)
{
        struct mthca_ucontext *context;

        if (srq->uobject) {
                context = to_mucontext(srq->uobject->context);

                mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
                                    context->db_tab, to_msrq(srq)->db_index);
        }

        mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
        kfree(srq);

        return 0;
}
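
/*
 * For a userspace QP the consumer passes in the lkey of its queue
 * buffer and the doorbell record indices for the send and receive
 * queues; both doorbells must be mapped before the QP is allocated
 * and unmapped again on any failure.
 */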
static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
                                     struct ib_qp_init_attr *init_attr,
                                     struct ib_udata *udata)
{
        struct mthca_create_qp ucmd;
        struct mthca_qp *qp;
        int err;

        switch (init_attr->qp_type) {
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
        {
                struct mthca_ucontext *context;

                qp = kmalloc(sizeof *qp, GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                if (pd->uobject) {
                        context = to_mucontext(pd->uobject->context);

                        if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                                kfree(qp);
                                return ERR_PTR(-EFAULT);
                        }

                        err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                                context->db_tab,
                                                ucmd.sq_db_index, ucmd.sq_db_page);
                        if (err) {
                                kfree(qp);
                                return ERR_PTR(err);
                        }

                        err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                                context->db_tab,
                                                ucmd.rq_db_index, ucmd.rq_db_page);
                        if (err) {
                                mthca_unmap_user_db(to_mdev(pd->device),
                                                    &context->uar,
                                                    context->db_tab,
                                                    ucmd.sq_db_index);
                                kfree(qp);
                                return ERR_PTR(err);
                        }

                        qp->mr.ibmr.lkey = ucmd.lkey;
                        qp->sq.db_index  = ucmd.sq_db_index;
                        qp->rq.db_index  = ucmd.rq_db_index;
                }

                err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
                                     to_mcq(init_attr->send_cq),
                                     to_mcq(init_attr->recv_cq),
                                     init_attr->qp_type, init_attr->sq_sig_type,
                                     &init_attr->cap, qp);

                if (err && pd->uobject) {
                        context = to_mucontext(pd->uobject->context);

                        mthca_unmap_user_db(to_mdev(pd->device),
                                            &context->uar,
                                            context->db_tab,
                                            ucmd.sq_db_index);
                        mthca_unmap_user_db(to_mdev(pd->device),
                                            &context->uar,
                                            context->db_tab,
                                            ucmd.rq_db_index);
                }

                qp->ibqp.qp_num = qp->qpn;
                break;
        }
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        {
                /* Don't allow userspace to create special QPs */
                if (pd->uobject)
                        return ERR_PTR(-EINVAL);

                qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

                err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
                                      to_mcq(init_attr->send_cq),
                                      to_mcq(init_attr->recv_cq),
                                      init_attr->sq_sig_type, &init_attr->cap,
                                      qp->ibqp.qp_num, init_attr->port_num,
                                      to_msqp(qp));
                break;
        }
        default:
                /* Don't support raw QPs */
                return ERR_PTR(-ENOSYS);
        }

        if (err) {
                kfree(qp);
                return ERR_PTR(err);
        }

        init_attr->cap.max_send_wr     = qp->sq.max;
        init_attr->cap.max_recv_wr     = qp->rq.max;
        init_attr->cap.max_send_sge    = qp->sq.max_gs;
        init_attr->cap.max_recv_sge    = qp->rq.max_gs;
        init_attr->cap.max_inline_data = qp->max_inline_data;

        return &qp->ibqp;
}
static int mthca_destroy_qp(struct ib_qp *qp)
{
        if (qp->uobject) {
                mthca_unmap_user_db(to_mdev(qp->device),
                                    &to_mucontext(qp->uobject->context)->uar,
                                    to_mucontext(qp->uobject->context)->db_tab,
                                    to_mqp(qp)->sq.db_index);
                mthca_unmap_user_db(to_mdev(qp->device),
                                    &to_mucontext(qp->uobject->context)->uar,
                                    to_mucontext(qp->uobject->context)->db_tab,
                                    to_mqp(qp)->rq.db_index);
        }
        mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
        kfree(qp);

        return 0;
}
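
/*
 * The hardware wants a power-of-two number of CQ entries; the loop
 * below rounds the request up to the next power of two strictly
 * greater than "entries" (presumably leaving room so the CQ can
 * never be completely full).
 */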
static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
                                     struct ib_ucontext *context,
                                     struct ib_udata *udata)
{
        struct mthca_create_cq ucmd;
        struct mthca_cq *cq;
        int nent;
        int err;

        if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
                return ERR_PTR(-EINVAL);

        if (context) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                        return ERR_PTR(-EFAULT);

                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                        to_mucontext(context)->db_tab,
                                        ucmd.set_db_index, ucmd.set_db_page);
                if (err)
                        return ERR_PTR(err);

                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                        to_mucontext(context)->db_tab,
                                        ucmd.arm_db_index, ucmd.arm_db_page);
                if (err)
                        goto err_unmap_set;
        }

        cq = kmalloc(sizeof *cq, GFP_KERNEL);
        if (!cq) {
                err = -ENOMEM;
                goto err_unmap_arm;
        }

        if (context) {
                cq->mr.ibmr.lkey    = ucmd.lkey;
                cq->set_ci_db_index = ucmd.set_db_index;
                cq->arm_db_index    = ucmd.arm_db_index;
        }

        for (nent = 1; nent <= entries; nent <<= 1)
                ; /* nothing */

        err = mthca_init_cq(to_mdev(ibdev), nent,
                            context ? to_mucontext(context) : NULL,
                            context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
                            cq);
        if (err)
                goto err_free;

        if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
                mthca_free_cq(to_mdev(ibdev), cq);
                err = -EFAULT;
                goto err_free;
        }

        return &cq->ibcq;

err_free:
        kfree(cq);

err_unmap_arm:
        if (context)
                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                    to_mucontext(context)->db_tab, ucmd.arm_db_index);

err_unmap_set:
        if (context)
                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                    to_mucontext(context)->db_tab, ucmd.set_db_index);

        return ERR_PTR(err);
}
static int mthca_destroy_cq(struct ib_cq *cq)
{
        if (cq->uobject) {
                mthca_unmap_user_db(to_mdev(cq->device),
                                    &to_mucontext(cq->uobject->context)->uar,
                                    to_mucontext(cq->uobject->context)->db_tab,
                                    to_mcq(cq)->arm_db_index);
                mthca_unmap_user_db(to_mdev(cq->device),
                                    &to_mucontext(cq->uobject->context)->uar,
                                    to_mucontext(cq->uobject->context)->db_tab,
                                    to_mcq(cq)->set_ci_db_index);
        }
        mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
        kfree(cq);

        return 0;
}
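
/*
 * Translate IB access flags into the MPT flags the hardware expects.
 * Local read access is always granted.
 */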
static inline u32 convert_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
               (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
               (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
               MTHCA_MPT_FLAG_LOCAL_READ;
}
static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mthca_mr *mr;
        int err;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mthca_mr_alloc_notrans(to_mdev(pd->device),
                                     to_mpd(pd)->pd_num,
                                     convert_access(acc), mr);

        if (err) {
                kfree(mr);
                return ERR_PTR(err);
        }

        return &mr->ibmr;
}
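
/*
 * Registering a physical buffer list: every interior buffer boundary
 * must be page aligned, and the largest page shift that still covers
 * all boundaries is chosen so the region needs as few MTT entries as
 * possible.
 */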
static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
                                       struct ib_phys_buf *buffer_list,
                                       int num_phys_buf,
                                       int acc,
                                       u64 *iova_start)
{
        struct mthca_mr *mr;
        u64 *page_list;
        u64 total_size;
        u64 mask;
        int shift;
        int npages;
        int err;
        int i, j, n;

        /* First check that we have enough alignment */
        if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK))
                return ERR_PTR(-EINVAL);

        mask = 0;
        total_size = 0;
        for (i = 0; i < num_phys_buf; ++i) {
                if (i != 0)
                        mask |= buffer_list[i].addr;
                if (i != num_phys_buf - 1)
                        mask |= buffer_list[i].addr + buffer_list[i].size;

                total_size += buffer_list[i].size;
        }

        if (mask & ~PAGE_MASK)
                return ERR_PTR(-EINVAL);

        /* Find largest page shift we can use to cover buffers */
        for (shift = PAGE_SHIFT; shift < 31; ++shift)
                if (num_phys_buf > 1) {
                        if ((1ULL << shift) & mask)
                                break;
                } else {
                        if (1ULL << shift >=
                            buffer_list[0].size +
                            (buffer_list[0].addr & ((1ULL << shift) - 1)))
                                break;
                }

        buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
        buffer_list[0].addr &= ~0ull << shift;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        npages = 0;
        for (i = 0; i < num_phys_buf; ++i)
                npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift;

        if (!npages)
                return &mr->ibmr;

        page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL);
        if (!page_list) {
                kfree(mr);
                return ERR_PTR(-ENOMEM);
        }

        n = 0;
        for (i = 0; i < num_phys_buf; ++i)
                for (j = 0;
                     j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
                     ++j)
                        page_list[n++] = buffer_list[i].addr + ((u64) j << shift);

        mthca_dbg(to_mdev(pd->device), "Registering memory at %llx (iova %llx) "
                  "in PD %x; shift %d, npages %d.\n",
                  (unsigned long long) buffer_list[0].addr,
                  (unsigned long long) *iova_start,
                  to_mpd(pd)->pd_num,
                  shift, npages);

        err = mthca_mr_alloc_phys(to_mdev(pd->device),
                                  to_mpd(pd)->pd_num,
                                  page_list, shift, npages,
                                  *iova_start, total_size,
                                  convert_access(acc), mr);

        if (err) {
                kfree(page_list);
                kfree(mr);
                return ERR_PTR(err);
        }

        kfree(page_list);
        return &mr->ibmr;
}
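
/*
 * Register a userspace region: walk the umem chunk list, translate
 * the DMA-mapped scatterlist into MTT entries, and write them out in
 * batches small enough to fit in a single mailbox page.
 */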
static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
                                       int acc, struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(pd->device);
        struct ib_umem_chunk *chunk;
        struct mthca_mr *mr;
        u64 *pages;
        int shift, n, len;
        int i, j, k;
        int err = 0;

        shift = ffs(region->page_size) - 1;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        n = 0;
        list_for_each_entry(chunk, &region->chunk_list, list)
                n += chunk->nents;

        mr->mtt = mthca_alloc_mtt(dev, n);
        if (IS_ERR(mr->mtt)) {
                err = PTR_ERR(mr->mtt);
                goto err;
        }

        pages = (u64 *) __get_free_page(GFP_KERNEL);
        if (!pages) {
                err = -ENOMEM;
                goto err_mtt;
        }

        i = n = 0;

        list_for_each_entry(chunk, &region->chunk_list, list)
                for (j = 0; j < chunk->nmap; ++j) {
                        len = sg_dma_len(&chunk->page_list[j]) >> shift;
                        for (k = 0; k < len; ++k) {
                                pages[i++] = sg_dma_address(&chunk->page_list[j]) +
                                        region->page_size * k;
                                /*
                                 * Be friendly to WRITE_MTT command
                                 * and leave two empty slots for the
                                 * index and reserved fields of the
                                 * mailbox.
                                 */
                                if (i == PAGE_SIZE / sizeof (u64) - 2) {
                                        err = mthca_write_mtt(dev, mr->mtt,
                                                              n, pages, i);
                                        if (err)
                                                goto mtt_done;
                                        n += i;
                                        i = 0;
                                }
                        }
                }

        if (i)
                err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
mtt_done:
        free_page((unsigned long) pages);
        if (err)
                goto err_mtt;

        err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, region->virt_base,
                             region->length, convert_access(acc), mr);

        if (err)
                goto err_mtt;

        return &mr->ibmr;

err_mtt:
        mthca_free_mtt(dev, mr->mtt);

err:
        kfree(mr);

        return ERR_PTR(err);
}
static int mthca_dereg_mr(struct ib_mr *mr)
{
        struct mthca_mr *mmr = to_mmr(mr);
        mthca_free_mr(to_mdev(mr->device), mmr);
        kfree(mmr);
        return 0;
}
static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
                                      struct ib_fmr_attr *fmr_attr)
{
        struct mthca_fmr *fmr;
        int err;

        fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
        if (!fmr)
                return ERR_PTR(-ENOMEM);

        memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
        err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
                              convert_access(mr_access_flags), fmr);

        if (err) {
                kfree(fmr);
                return ERR_PTR(err);
        }

        return &fmr->ibmr;
}
static int mthca_dealloc_fmr(struct ib_fmr *fmr)
{
        struct mthca_fmr *mfmr = to_mfmr(fmr);
        int err;

        err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
        if (err)
                return err;

        kfree(mfmr);
        return 0;
}
static int mthca_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *fmr;
        int err;
        u8 status;
        struct mthca_dev *mdev = NULL;

        list_for_each_entry(fmr, fmr_list, list) {
                if (mdev && to_mdev(fmr->device) != mdev)
                        return -EINVAL;
                mdev = to_mdev(fmr->device);
        }

        if (!mdev)
                return 0;

        if (mthca_is_memfree(mdev)) {
                list_for_each_entry(fmr, fmr_list, list)
                        mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));

                wmb();
        } else
                list_for_each_entry(fmr, fmr_list, list)
                        mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));

        err = mthca_SYNC_TPT(mdev, &status);
        if (err)
                return err;
        if (status)
                return -EINVAL;
        return 0;
}
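
/*
 * sysfs attributes: hardware revision, firmware version, HCA type
 * and board ID.
 */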
static ssize_t show_rev(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        return sprintf(buf, "%x\n", dev->rev_id);
}

static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32),
                       (int) (dev->fw_ver >> 16) & 0xffff,
                       (int) dev->fw_ver & 0xffff);
}

static ssize_t show_hca(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        switch (dev->pdev->device) {
        case PCI_DEVICE_ID_MELLANOX_TAVOR:
                return sprintf(buf, "MT23108\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
                return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL:
                return sprintf(buf, "MT25208\n");
        case PCI_DEVICE_ID_MELLANOX_SINAI:
        case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
                return sprintf(buf, "MT25204\n");
        default:
                return sprintf(buf, "unknown\n");
        }
}

static ssize_t show_board(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}
static CLASS_DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static CLASS_DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);

static struct class_device_attribute *mthca_class_attributes[] = {
        &class_device_attr_hw_rev,
        &class_device_attr_fw_ver,
        &class_device_attr_hca_type,
        &class_device_attr_board_id
};
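
/* Read the node GUID out of the NodeInfo attribute at init time. */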
static int mthca_init_node_data(struct mthca_dev *dev)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mthca_MAD_IFC(dev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}
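
/*
 * Fill in the ib_device method table and register with the IB core.
 * SRQ and FMR methods are only wired up if the HCA supports them, and
 * memfree (Arbel-mode) and Tavor-mode HCAs get different doorbell and
 * work request posting implementations.
 */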
int mthca_register_device(struct mthca_dev *dev)
{
        int ret;
        int i;

        ret = mthca_init_node_data(dev);
        if (ret)
                return ret;

        strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
        dev->ib_dev.owner                = THIS_MODULE;

        dev->ib_dev.uverbs_abi_ver       = MTHCA_UVERBS_ABI_VERSION;
        dev->ib_dev.uverbs_cmd_mask      =
                (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
                (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
                (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
                (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
                (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
                (1ull << IB_USER_VERBS_CMD_REG_MR)              |
                (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
                (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
                (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
                (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
                (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
                (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
                (1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
                (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
                (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
                (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
        dev->ib_dev.node_type            = IB_NODE_CA;
        dev->ib_dev.phys_port_cnt        = dev->limits.num_ports;
        dev->ib_dev.dma_device           = &dev->pdev->dev;
        dev->ib_dev.class_dev.dev        = &dev->pdev->dev;
        dev->ib_dev.query_device         = mthca_query_device;
        dev->ib_dev.query_port           = mthca_query_port;
        dev->ib_dev.modify_port          = mthca_modify_port;
        dev->ib_dev.query_pkey           = mthca_query_pkey;
        dev->ib_dev.query_gid            = mthca_query_gid;
        dev->ib_dev.alloc_ucontext       = mthca_alloc_ucontext;
        dev->ib_dev.dealloc_ucontext     = mthca_dealloc_ucontext;
        dev->ib_dev.mmap                 = mthca_mmap_uar;
        dev->ib_dev.alloc_pd             = mthca_alloc_pd;
        dev->ib_dev.dealloc_pd           = mthca_dealloc_pd;
        dev->ib_dev.create_ah            = mthca_ah_create;
        dev->ib_dev.destroy_ah           = mthca_ah_destroy;

        if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
                dev->ib_dev.create_srq   = mthca_create_srq;
                dev->ib_dev.modify_srq   = mthca_modify_srq;
                dev->ib_dev.destroy_srq  = mthca_destroy_srq;

                if (mthca_is_memfree(dev))
                        dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
                else
                        dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
        }

        dev->ib_dev.create_qp            = mthca_create_qp;
        dev->ib_dev.modify_qp            = mthca_modify_qp;
        dev->ib_dev.destroy_qp           = mthca_destroy_qp;
        dev->ib_dev.create_cq            = mthca_create_cq;
        dev->ib_dev.destroy_cq           = mthca_destroy_cq;
        dev->ib_dev.poll_cq              = mthca_poll_cq;
        dev->ib_dev.get_dma_mr           = mthca_get_dma_mr;
        dev->ib_dev.reg_phys_mr          = mthca_reg_phys_mr;
        dev->ib_dev.reg_user_mr          = mthca_reg_user_mr;
        dev->ib_dev.dereg_mr             = mthca_dereg_mr;

        if (dev->mthca_flags & MTHCA_FLAG_FMR) {
                dev->ib_dev.alloc_fmr    = mthca_alloc_fmr;
                dev->ib_dev.unmap_fmr    = mthca_unmap_fmr;
                dev->ib_dev.dealloc_fmr  = mthca_dealloc_fmr;
                if (mthca_is_memfree(dev))
                        dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
                else
                        dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
        }

        dev->ib_dev.attach_mcast         = mthca_multicast_attach;
        dev->ib_dev.detach_mcast         = mthca_multicast_detach;
        dev->ib_dev.process_mad          = mthca_process_mad;

        if (mthca_is_memfree(dev)) {
                dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
                dev->ib_dev.post_send     = mthca_arbel_post_send;
                dev->ib_dev.post_recv     = mthca_arbel_post_receive;
        } else {
                dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
                dev->ib_dev.post_send     = mthca_tavor_post_send;
                dev->ib_dev.post_recv     = mthca_tavor_post_receive;
        }

        mutex_init(&dev->cap_mask_mutex);

        ret = ib_register_device(&dev->ib_dev);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(mthca_class_attributes); ++i) {
                ret = class_device_create_file(&dev->ib_dev.class_dev,
                                               mthca_class_attributes[i]);
                if (ret) {
                        ib_unregister_device(&dev->ib_dev);
                        return ret;
                }
        }

        mthca_start_catas_poll(dev);

        return 0;
}
void mthca_unregister_device(struct mthca_dev *dev)
{
        mthca_stop_catas_poll(dev);
        ib_unregister_device(&dev->ib_dev);
}