2 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
3 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
4 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
5 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
7 * This Software is licensed under one of the following licenses:
9 * 1) under the terms of the "Common Public License 1.0" a copy of which is
10 * available from the Open Source Initiative, see
11 * http://www.opensource.org/licenses/cpl.php.
13 * 2) under the terms of the "The BSD License" a copy of which is
14 * available from the Open Source Initiative, see
15 * http://www.opensource.org/licenses/bsd-license.php.
17 * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
18 * copy of which is available from the Open Source Initiative, see
19 * http://www.opensource.org/licenses/gpl-license.php.
21 * Licensee has the right to choose one of the above licenses.
23 * Redistributions of source code must retain the above copyright
24 * notice and one of the license notices.
26 * Redistributions in binary form must reproduce both the above copyright
27 * notice, one of the license notices in the documentation
28 * and/or other materials provided with the distribution.
32 #include <linux/completion.h>
34 #include <linux/in6.h>
35 #include <linux/mutex.h>
36 #include <linux/random.h>
37 #include <linux/idr.h>
38 #include <linux/inetdevice.h>
42 #include <rdma/rdma_cm.h>
43 #include <rdma/rdma_cm_ib.h>
44 #include <rdma/ib_cache.h>
45 #include <rdma/ib_cm.h>
46 #include <rdma/ib_sa.h>
47 #include <rdma/iw_cm.h>
49 MODULE_AUTHOR("Sean Hefty");
50 MODULE_DESCRIPTION("Generic RDMA CM Agent");
51 MODULE_LICENSE("Dual BSD/GPL");
53 #define CMA_CM_RESPONSE_TIMEOUT 20
54 #define CMA_MAX_CM_RETRIES 15
55 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
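/*
 * Note: these CM constants are IB CM timeout exponents, not milliseconds.
 * The encoded timeout is roughly 4.096 us * 2^n, so a response timeout of
 * 20 works out to about 4.3 seconds per CM exchange, retried up to
 * CMA_MAX_CM_RETRIES times.  CMA_CM_MRA_SETTING ORs IB_CM_MRA_FLAG_DELAY
 * with a service timeout of 24 (roughly 69 seconds); the flag appears to
 * ask the CM to send the MRA only when a duplicate request is seen.
 */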
57 static void cma_add_one(struct ib_device *device);
58 static void cma_remove_one(struct ib_device *device);
60 static struct ib_client cma_client = {
63 .remove = cma_remove_one
66 static struct ib_sa_client sa_client;
67 static struct rdma_addr_client addr_client;
68 static LIST_HEAD(dev_list);
69 static LIST_HEAD(listen_any_list);
70 static DEFINE_MUTEX(lock);
71 static struct workqueue_struct *cma_wq;
72 static DEFINE_IDR(sdp_ps);
73 static DEFINE_IDR(tcp_ps);
74 static DEFINE_IDR(udp_ps);
75 static DEFINE_IDR(ipoib_ps);
79 struct list_head list;
80 struct ib_device *device;
81 struct completion comp;
83 struct list_head id_list;
100 struct rdma_bind_list {
102 struct hlist_head owners;
107 * Device removal can occur at any time, so we need extra handling to
108 * serialize notifying the user of device removal with other callbacks.
109 * We do this by disabling removal notification while a callback is in progress,
110 * and reporting it after the callback completes.
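/*
 * In practice each callback brackets its work with
 * cma_disable_remove(id_priv, expected_state), which takes a reference on
 * id_priv->dev_remove only while the id is still in the expected state,
 * and cma_enable_remove(), which drops it and wakes wait_remove.  The
 * device-removal path (elided here) presumably waits on wait_remove until
 * dev_remove reaches zero before reporting RDMA_CM_EVENT_DEVICE_REMOVAL.
 */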
112 struct rdma_id_private {
113 struct rdma_cm_id id;
115 struct rdma_bind_list *bind_list;
116 struct hlist_node node;
117 struct list_head list;
118 struct list_head listen_list;
119 struct cma_device *cma_dev;
120 struct list_head mc_list;
122 enum cma_state state;
124 struct mutex qp_mutex;
126 struct completion comp;
128 wait_queue_head_t wait_remove;
133 struct ib_sa_query *query;
147 struct cma_multicast {
148 struct rdma_id_private *id_priv;
150 struct ib_sa_multicast *ib;
152 struct list_head list;
154 struct sockaddr addr;
155 u8 pad[sizeof(struct sockaddr_in6) -
156 sizeof(struct sockaddr)];
160 struct work_struct work;
161 struct rdma_id_private *id;
162 enum cma_state old_state;
163 enum cma_state new_state;
164 struct rdma_cm_event event;
177 u8 ip_version; /* IP version: 7:4 */
179 union cma_ip_addr src_addr;
180 union cma_ip_addr dst_addr;
185 u8 sdp_version; /* Major version: 7:4 */
186 u8 ip_version; /* IP version: 7:4 */
187 u8 sdp_specific1[10];
190 union cma_ip_addr src_addr;
191 union cma_ip_addr dst_addr;
199 #define CMA_VERSION 0x00
200 #define SDP_MAJ_VERSION 0x2
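/*
 * Both private-data headers pack a version into the high nibble of a byte:
 * struct cma_hdr carries the IP version (4 or 6) in ip_version bits 7:4,
 * so cma_set_ip_ver(hdr, 4) stores 0x40; struct sdp_hh carries the SDP
 * major version in the high nibble of sdp_version, which sdp_get_majv()
 * recovers with a right shift of four and validates against
 * SDP_MAJ_VERSION.
 */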
202 static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
207 spin_lock_irqsave(&id_priv->lock, flags);
208 ret = (id_priv->state == comp);
209 spin_unlock_irqrestore(&id_priv->lock, flags);
213 static int cma_comp_exch(struct rdma_id_private *id_priv,
214 enum cma_state comp, enum cma_state exch)
219 spin_lock_irqsave(&id_priv->lock, flags);
220 if ((ret = (id_priv->state == comp)))
221 id_priv->state = exch;
222 spin_unlock_irqrestore(&id_priv->lock, flags);
226 static enum cma_state cma_exch(struct rdma_id_private *id_priv,
232 spin_lock_irqsave(&id_priv->lock, flags);
233 old = id_priv->state;
234 id_priv->state = exch;
235 spin_unlock_irqrestore(&id_priv->lock, flags);
239 static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
241 return hdr->ip_version >> 4;
244 static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
246 hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
249 static inline u8 sdp_get_majv(u8 sdp_version)
251 return sdp_version >> 4;
254 static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
256 return hh->ip_version >> 4;
259 static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
261 hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
264 static inline int cma_is_ud_ps(enum rdma_port_space ps)
266 return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
269 static void cma_attach_to_dev(struct rdma_id_private *id_priv,
270 struct cma_device *cma_dev)
272 atomic_inc(&cma_dev->refcount);
273 id_priv->cma_dev = cma_dev;
274 id_priv->id.device = cma_dev->device;
275 list_add_tail(&id_priv->list, &cma_dev->id_list);
278 static inline void cma_deref_dev(struct cma_device *cma_dev)
280 if (atomic_dec_and_test(&cma_dev->refcount))
281 complete(&cma_dev->comp);
284 static void cma_detach_from_dev(struct rdma_id_private *id_priv)
286 list_del(&id_priv->list);
287 cma_deref_dev(id_priv->cma_dev);
288 id_priv->cma_dev = NULL;
291 static int cma_set_qkey(struct ib_device *device, u8 port_num,
292 enum rdma_port_space ps,
293 struct rdma_dev_addr *dev_addr, u32 *qkey)
295 struct ib_sa_mcmember_rec rec;
300 *qkey = RDMA_UDP_QKEY;
303 ib_addr_get_mgid(dev_addr, &rec.mgid);
304 ret = ib_sa_get_mcmember_rec(device, port_num, &rec.mgid, &rec);
305 *qkey = be32_to_cpu(rec.qkey);
313 static int cma_acquire_dev(struct rdma_id_private *id_priv)
315 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
316 struct cma_device *cma_dev;
320 switch (rdma_node_get_transport(dev_addr->dev_type)) {
321 case RDMA_TRANSPORT_IB:
322 ib_addr_get_sgid(dev_addr, &gid);
324 case RDMA_TRANSPORT_IWARP:
325 iw_addr_get_sgid(dev_addr, &gid);
331 list_for_each_entry(cma_dev, &dev_list, list) {
332 ret = ib_find_cached_gid(cma_dev->device, &gid,
333 &id_priv->id.port_num, NULL);
335 ret = cma_set_qkey(cma_dev->device,
336 id_priv->id.port_num,
337 id_priv->id.ps, dev_addr,
340 cma_attach_to_dev(id_priv, cma_dev);
347 static void cma_deref_id(struct rdma_id_private *id_priv)
349 if (atomic_dec_and_test(&id_priv->refcount))
350 complete(&id_priv->comp);
353 static int cma_disable_remove(struct rdma_id_private *id_priv,
354 enum cma_state state)
359 spin_lock_irqsave(&id_priv->lock, flags);
360 if (id_priv->state == state) {
361 atomic_inc(&id_priv->dev_remove);
365 spin_unlock_irqrestore(&id_priv->lock, flags);
369 static void cma_enable_remove(struct rdma_id_private *id_priv)
371 if (atomic_dec_and_test(&id_priv->dev_remove))
372 wake_up(&id_priv->wait_remove);
375 static int cma_has_cm_dev(struct rdma_id_private *id_priv)
377 return (id_priv->id.device && id_priv->cm_id.ib);
380 struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
381 void *context, enum rdma_port_space ps)
383 struct rdma_id_private *id_priv;
385 id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
387 return ERR_PTR(-ENOMEM);
389 id_priv->state = CMA_IDLE;
390 id_priv->id.context = context;
391 id_priv->id.event_handler = event_handler;
393 spin_lock_init(&id_priv->lock);
394 mutex_init(&id_priv->qp_mutex);
395 init_completion(&id_priv->comp);
396 atomic_set(&id_priv->refcount, 1);
397 init_waitqueue_head(&id_priv->wait_remove);
398 atomic_set(&id_priv->dev_remove, 0);
399 INIT_LIST_HEAD(&id_priv->listen_list);
400 INIT_LIST_HEAD(&id_priv->mc_list);
401 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
405 EXPORT_SYMBOL(rdma_create_id);
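/*
 * A minimal sketch of how a kernel consumer might drive this API for an
 * active TCP-style connection (error handling and the event callback body
 * omitted; my_handler, my_ctx, pd, init_attr, dst and conn_param are
 * placeholders):
 *
 *	id = rdma_create_id(my_handler, my_ctx, RDMA_PS_TCP);
 *	rdma_resolve_addr(id, NULL, (struct sockaddr *) &dst, 2000);
 *	... wait for RDMA_CM_EVENT_ADDR_RESOLVED in my_handler ...
 *	rdma_resolve_route(id, 2000);
 *	... wait for RDMA_CM_EVENT_ROUTE_RESOLVED ...
 *	rdma_create_qp(id, pd, &init_attr);
 *	rdma_connect(id, &conn_param);
 *	... RDMA_CM_EVENT_ESTABLISHED completes setup; rdma_destroy_id()
 *	    tears everything down ...
 */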
407 static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
409 struct ib_qp_attr qp_attr;
410 int qp_attr_mask, ret;
412 qp_attr.qp_state = IB_QPS_INIT;
413 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
417 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
421 qp_attr.qp_state = IB_QPS_RTR;
422 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
426 qp_attr.qp_state = IB_QPS_RTS;
428 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
433 static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
435 struct ib_qp_attr qp_attr;
436 int qp_attr_mask, ret;
438 qp_attr.qp_state = IB_QPS_INIT;
439 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
443 return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
446 int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
447 struct ib_qp_init_attr *qp_init_attr)
449 struct rdma_id_private *id_priv;
453 id_priv = container_of(id, struct rdma_id_private, id);
454 if (id->device != pd->device)
457 qp = ib_create_qp(pd, qp_init_attr);
461 if (cma_is_ud_ps(id_priv->id.ps))
462 ret = cma_init_ud_qp(id_priv, qp);
464 ret = cma_init_conn_qp(id_priv, qp);
469 id_priv->qp_num = qp->qp_num;
470 id_priv->srq = (qp->srq != NULL);
476 EXPORT_SYMBOL(rdma_create_qp);
478 void rdma_destroy_qp(struct rdma_cm_id *id)
480 struct rdma_id_private *id_priv;
482 id_priv = container_of(id, struct rdma_id_private, id);
483 mutex_lock(&id_priv->qp_mutex);
484 ib_destroy_qp(id_priv->id.qp);
485 id_priv->id.qp = NULL;
486 mutex_unlock(&id_priv->qp_mutex);
488 EXPORT_SYMBOL(rdma_destroy_qp);
490 static int cma_modify_qp_rtr(struct rdma_id_private *id_priv)
492 struct ib_qp_attr qp_attr;
493 int qp_attr_mask, ret;
495 mutex_lock(&id_priv->qp_mutex);
496 if (!id_priv->id.qp) {
501 /* Need to update QP attributes from default values. */
502 qp_attr.qp_state = IB_QPS_INIT;
503 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
507 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
511 qp_attr.qp_state = IB_QPS_RTR;
512 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
516 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
518 mutex_unlock(&id_priv->qp_mutex);
522 static int cma_modify_qp_rts(struct rdma_id_private *id_priv)
524 struct ib_qp_attr qp_attr;
525 int qp_attr_mask, ret;
527 mutex_lock(&id_priv->qp_mutex);
528 if (!id_priv->id.qp) {
533 qp_attr.qp_state = IB_QPS_RTS;
534 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
538 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
540 mutex_unlock(&id_priv->qp_mutex);
544 static int cma_modify_qp_err(struct rdma_id_private *id_priv)
546 struct ib_qp_attr qp_attr;
549 mutex_lock(&id_priv->qp_mutex);
550 if (!id_priv->id.qp) {
555 qp_attr.qp_state = IB_QPS_ERR;
556 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
558 mutex_unlock(&id_priv->qp_mutex);
562 static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
563 struct ib_qp_attr *qp_attr, int *qp_attr_mask)
565 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
568 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
569 ib_addr_get_pkey(dev_addr),
570 &qp_attr->pkey_index);
574 qp_attr->port_num = id_priv->id.port_num;
575 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
577 if (cma_is_ud_ps(id_priv->id.ps)) {
578 qp_attr->qkey = id_priv->qkey;
579 *qp_attr_mask |= IB_QP_QKEY;
581 qp_attr->qp_access_flags = 0;
582 *qp_attr_mask |= IB_QP_ACCESS_FLAGS;
587 int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
590 struct rdma_id_private *id_priv;
593 id_priv = container_of(id, struct rdma_id_private, id);
594 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
595 case RDMA_TRANSPORT_IB:
596 if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
597 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
599 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
601 if (qp_attr->qp_state == IB_QPS_RTR)
602 qp_attr->rq_psn = id_priv->seq_num;
604 case RDMA_TRANSPORT_IWARP:
605 if (!id_priv->cm_id.iw) {
606 qp_attr->qp_access_flags = 0;
607 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
609 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
619 EXPORT_SYMBOL(rdma_init_qp_attr);
621 static inline int cma_zero_addr(struct sockaddr *addr)
623 struct in6_addr *ip6;
625 if (addr->sa_family == AF_INET)
626 return ZERONET(((struct sockaddr_in *) addr)->sin_addr.s_addr);
628 ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
629 return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
630 ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
634 static inline int cma_loopback_addr(struct sockaddr *addr)
636 return LOOPBACK(((struct sockaddr_in *) addr)->sin_addr.s_addr);
639 static inline int cma_any_addr(struct sockaddr *addr)
641 return cma_zero_addr(addr) || cma_loopback_addr(addr);
644 static inline __be16 cma_port(struct sockaddr *addr)
646 if (addr->sa_family == AF_INET)
647 return ((struct sockaddr_in *) addr)->sin_port;
649 return ((struct sockaddr_in6 *) addr)->sin6_port;
652 static inline int cma_any_port(struct sockaddr *addr)
654 return !cma_port(addr);
657 static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
658 u8 *ip_ver, __u16 *port,
659 union cma_ip_addr **src, union cma_ip_addr **dst)
663 if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
667 *ip_ver = sdp_get_ip_ver(hdr);
668 *port = ((struct sdp_hh *) hdr)->port;
669 *src = &((struct sdp_hh *) hdr)->src_addr;
670 *dst = &((struct sdp_hh *) hdr)->dst_addr;
673 if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
676 *ip_ver = cma_get_ip_ver(hdr);
677 *port = ((struct cma_hdr *) hdr)->port;
678 *src = &((struct cma_hdr *) hdr)->src_addr;
679 *dst = &((struct cma_hdr *) hdr)->dst_addr;
683 if (*ip_ver != 4 && *ip_ver != 6)
688 static void cma_save_net_info(struct rdma_addr *addr,
689 struct rdma_addr *listen_addr,
690 u8 ip_ver, __u16 port,
691 union cma_ip_addr *src, union cma_ip_addr *dst)
693 struct sockaddr_in *listen4, *ip4;
694 struct sockaddr_in6 *listen6, *ip6;
698 listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
699 ip4 = (struct sockaddr_in *) &addr->src_addr;
700 ip4->sin_family = listen4->sin_family;
701 ip4->sin_addr.s_addr = dst->ip4.addr;
702 ip4->sin_port = listen4->sin_port;
704 ip4 = (struct sockaddr_in *) &addr->dst_addr;
705 ip4->sin_family = listen4->sin_family;
706 ip4->sin_addr.s_addr = src->ip4.addr;
707 ip4->sin_port = port;
710 listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
711 ip6 = (struct sockaddr_in6 *) &addr->src_addr;
712 ip6->sin6_family = listen6->sin6_family;
713 ip6->sin6_addr = dst->ip6;
714 ip6->sin6_port = listen6->sin6_port;
716 ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
717 ip6->sin6_family = listen6->sin6_family;
718 ip6->sin6_addr = src->ip6;
719 ip6->sin6_port = port;
726 static inline int cma_user_data_offset(enum rdma_port_space ps)
732 return sizeof(struct cma_hdr);
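/*
 * The branch elided above presumably returns 0 for RDMA_PS_SDP: the SDP
 * hello header supplies its own addressing, so consumer private data
 * starts at offset 0 there, and after struct cma_hdr for everything else.
 */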
736 static void cma_cancel_route(struct rdma_id_private *id_priv)
738 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
739 case RDMA_TRANSPORT_IB:
741 ib_sa_cancel_query(id_priv->query_id, id_priv->query);
748 static inline int cma_internal_listen(struct rdma_id_private *id_priv)
750 return (id_priv->state == CMA_LISTEN) && id_priv->cma_dev &&
751 cma_any_addr(&id_priv->id.route.addr.src_addr);
754 static void cma_destroy_listen(struct rdma_id_private *id_priv)
756 cma_exch(id_priv, CMA_DESTROYING);
758 if (id_priv->cma_dev) {
759 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
760 case RDMA_TRANSPORT_IB:
761 if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
762 ib_destroy_cm_id(id_priv->cm_id.ib);
764 case RDMA_TRANSPORT_IWARP:
765 if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
766 iw_destroy_cm_id(id_priv->cm_id.iw);
771 cma_detach_from_dev(id_priv);
773 list_del(&id_priv->listen_list);
775 cma_deref_id(id_priv);
776 wait_for_completion(&id_priv->comp);
781 static void cma_cancel_listens(struct rdma_id_private *id_priv)
783 struct rdma_id_private *dev_id_priv;
786 list_del(&id_priv->list);
788 while (!list_empty(&id_priv->listen_list)) {
789 dev_id_priv = list_entry(id_priv->listen_list.next,
790 struct rdma_id_private, listen_list);
791 cma_destroy_listen(dev_id_priv);
796 static void cma_cancel_operation(struct rdma_id_private *id_priv,
797 enum cma_state state)
801 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
803 case CMA_ROUTE_QUERY:
804 cma_cancel_route(id_priv);
807 if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
809 cma_cancel_listens(id_priv);
816 static void cma_release_port(struct rdma_id_private *id_priv)
818 struct rdma_bind_list *bind_list = id_priv->bind_list;
824 hlist_del(&id_priv->node);
825 if (hlist_empty(&bind_list->owners)) {
826 idr_remove(bind_list->ps, bind_list->port);
832 static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
834 struct cma_multicast *mc;
836 while (!list_empty(&id_priv->mc_list)) {
837 mc = container_of(id_priv->mc_list.next,
838 struct cma_multicast, list);
840 ib_sa_free_multicast(mc->multicast.ib);
845 void rdma_destroy_id(struct rdma_cm_id *id)
847 struct rdma_id_private *id_priv;
848 enum cma_state state;
850 id_priv = container_of(id, struct rdma_id_private, id);
851 state = cma_exch(id_priv, CMA_DESTROYING);
852 cma_cancel_operation(id_priv, state);
855 if (id_priv->cma_dev) {
857 switch (rdma_node_get_transport(id->device->node_type)) {
858 case RDMA_TRANSPORT_IB:
859 if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
860 ib_destroy_cm_id(id_priv->cm_id.ib);
862 case RDMA_TRANSPORT_IWARP:
863 if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
864 iw_destroy_cm_id(id_priv->cm_id.iw);
869 cma_leave_mc_groups(id_priv);
871 cma_detach_from_dev(id_priv);
875 cma_release_port(id_priv);
876 cma_deref_id(id_priv);
877 wait_for_completion(&id_priv->comp);
879 kfree(id_priv->id.route.path_rec);
882 EXPORT_SYMBOL(rdma_destroy_id);
884 static int cma_rep_recv(struct rdma_id_private *id_priv)
888 ret = cma_modify_qp_rtr(id_priv);
892 ret = cma_modify_qp_rts(id_priv);
896 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
902 cma_modify_qp_err(id_priv);
903 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
908 static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
910 if (id_priv->id.ps == RDMA_PS_SDP &&
911 sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
918 static void cma_set_rep_event_data(struct rdma_cm_event *event,
919 struct ib_cm_rep_event_param *rep_data,
922 event->param.conn.private_data = private_data;
923 event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
924 event->param.conn.responder_resources = rep_data->responder_resources;
925 event->param.conn.initiator_depth = rep_data->initiator_depth;
926 event->param.conn.flow_control = rep_data->flow_control;
927 event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
928 event->param.conn.srq = rep_data->srq;
929 event->param.conn.qp_num = rep_data->remote_qpn;
932 static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
934 struct rdma_id_private *id_priv = cm_id->context;
935 struct rdma_cm_event event;
938 if (cma_disable_remove(id_priv, CMA_CONNECT))
941 memset(&event, 0, sizeof event);
942 switch (ib_event->event) {
943 case IB_CM_REQ_ERROR:
944 case IB_CM_REP_ERROR:
945 event.event = RDMA_CM_EVENT_UNREACHABLE;
946 event.status = -ETIMEDOUT;
948 case IB_CM_REP_RECEIVED:
949 event.status = cma_verify_rep(id_priv, ib_event->private_data);
951 event.event = RDMA_CM_EVENT_CONNECT_ERROR;
952 else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
953 event.status = cma_rep_recv(id_priv);
954 event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
955 RDMA_CM_EVENT_ESTABLISHED;
957 event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
958 cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
959 ib_event->private_data);
961 case IB_CM_RTU_RECEIVED:
962 case IB_CM_USER_ESTABLISHED:
963 event.event = RDMA_CM_EVENT_ESTABLISHED;
965 case IB_CM_DREQ_ERROR:
966 event.status = -ETIMEDOUT; /* fall through */
967 case IB_CM_DREQ_RECEIVED:
968 case IB_CM_DREP_RECEIVED:
969 if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
971 event.event = RDMA_CM_EVENT_DISCONNECTED;
973 case IB_CM_TIMEWAIT_EXIT:
974 case IB_CM_MRA_RECEIVED:
977 case IB_CM_REJ_RECEIVED:
978 cma_modify_qp_err(id_priv);
979 event.status = ib_event->param.rej_rcvd.reason;
980 event.event = RDMA_CM_EVENT_REJECTED;
981 event.param.conn.private_data = ib_event->private_data;
982 event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
985 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
990 ret = id_priv->id.event_handler(&id_priv->id, &event);
992 /* Destroy the CM ID by returning a non-zero value. */
993 id_priv->cm_id.ib = NULL;
994 cma_exch(id_priv, CMA_DESTROYING);
995 cma_enable_remove(id_priv);
996 rdma_destroy_id(&id_priv->id);
1000 cma_enable_remove(id_priv);
1004 static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
1005 struct ib_cm_event *ib_event)
1007 struct rdma_id_private *id_priv;
1008 struct rdma_cm_id *id;
1009 struct rdma_route *rt;
1010 union cma_ip_addr *src, *dst;
1014 if (cma_get_net_info(ib_event->private_data, listen_id->ps,
1015 &ip_ver, &port, &src, &dst))
1018 id = rdma_create_id(listen_id->event_handler, listen_id->context,
1023 cma_save_net_info(&id->route.addr, &listen_id->route.addr,
1024 ip_ver, port, src, dst);
1027 rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
1028 rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
1033 rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
1034 if (rt->num_paths == 2)
1035 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
1037 ib_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
1038 ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
1039 ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
1040 rt->addr.dev_addr.dev_type = RDMA_NODE_IB_CA;
1042 id_priv = container_of(id, struct rdma_id_private, id);
1043 id_priv->state = CMA_CONNECT;
1047 rdma_destroy_id(id);
1052 static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
1053 struct ib_cm_event *ib_event)
1055 struct rdma_id_private *id_priv;
1056 struct rdma_cm_id *id;
1057 union cma_ip_addr *src, *dst;
1062 id = rdma_create_id(listen_id->event_handler, listen_id->context,
1068 if (cma_get_net_info(ib_event->private_data, listen_id->ps,
1069 &ip_ver, &port, &src, &dst))
1072 cma_save_net_info(&id->route.addr, &listen_id->route.addr,
1073 ip_ver, port, src, dst);
1075 ret = rdma_translate_ip(&id->route.addr.src_addr,
1076 &id->route.addr.dev_addr);
1080 id_priv = container_of(id, struct rdma_id_private, id);
1081 id_priv->state = CMA_CONNECT;
1084 rdma_destroy_id(id);
1088 static void cma_set_req_event_data(struct rdma_cm_event *event,
1089 struct ib_cm_req_event_param *req_data,
1090 void *private_data, int offset)
1092 event->param.conn.private_data = private_data + offset;
1093 event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
1094 event->param.conn.responder_resources = req_data->responder_resources;
1095 event->param.conn.initiator_depth = req_data->initiator_depth;
1096 event->param.conn.flow_control = req_data->flow_control;
1097 event->param.conn.retry_count = req_data->retry_count;
1098 event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
1099 event->param.conn.srq = req_data->srq;
1100 event->param.conn.qp_num = req_data->remote_qpn;
1103 static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1105 struct rdma_id_private *listen_id, *conn_id;
1106 struct rdma_cm_event event;
1109 listen_id = cm_id->context;
1110 if (cma_disable_remove(listen_id, CMA_LISTEN))
1111 return -ECONNABORTED;
1113 memset(&event, 0, sizeof event);
1114 offset = cma_user_data_offset(listen_id->id.ps);
1115 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
1116 if (cma_is_ud_ps(listen_id->id.ps)) {
1117 conn_id = cma_new_udp_id(&listen_id->id, ib_event);
1118 event.param.ud.private_data = ib_event->private_data + offset;
1119 event.param.ud.private_data_len =
1120 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
1122 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
1123 conn_id = cma_new_conn_id(&listen_id->id, ib_event);
1124 cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
1125 ib_event->private_data, offset);
1132 atomic_inc(&conn_id->dev_remove);
1134 ret = cma_acquire_dev(conn_id);
1135 mutex_unlock(&lock);
1137 goto release_conn_id;
1139 conn_id->cm_id.ib = cm_id;
1140 cm_id->context = conn_id;
1141 cm_id->cm_handler = cma_ib_handler;
1143 ret = conn_id->id.event_handler(&conn_id->id, &event);
1147 /* Destroy the CM ID by returning a non-zero value. */
1148 conn_id->cm_id.ib = NULL;
1151 cma_exch(conn_id, CMA_DESTROYING);
1152 cma_enable_remove(conn_id);
1153 rdma_destroy_id(&conn_id->id);
1156 cma_enable_remove(listen_id);
1160 static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
1162 return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
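/*
 * The service ID is simply the port space in the upper bits and the port
 * number in the low 16 bits.  For example, assuming RDMA_PS_TCP is 0x0106
 * as defined in rdma_cm.h, port 5000 (0x1388) yields the host value
 * 0x0000000001061388, returned here in network byte order.
 */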
1165 static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
1166 struct ib_cm_compare_data *compare)
1168 struct cma_hdr *cma_data, *cma_mask;
1169 struct sdp_hh *sdp_data, *sdp_mask;
1171 struct in6_addr ip6_addr;
1173 memset(compare, 0, sizeof *compare);
1174 cma_data = (void *) compare->data;
1175 cma_mask = (void *) compare->mask;
1176 sdp_data = (void *) compare->data;
1177 sdp_mask = (void *) compare->mask;
1179 switch (addr->sa_family) {
1181 ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
1182 if (ps == RDMA_PS_SDP) {
1183 sdp_set_ip_ver(sdp_data, 4);
1184 sdp_set_ip_ver(sdp_mask, 0xF);
1185 sdp_data->dst_addr.ip4.addr = ip4_addr;
1186 sdp_mask->dst_addr.ip4.addr = ~0;
1188 cma_set_ip_ver(cma_data, 4);
1189 cma_set_ip_ver(cma_mask, 0xF);
1190 cma_data->dst_addr.ip4.addr = ip4_addr;
1191 cma_mask->dst_addr.ip4.addr = ~0;
1195 ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
1196 if (ps == RDMA_PS_SDP) {
1197 sdp_set_ip_ver(sdp_data, 6);
1198 sdp_set_ip_ver(sdp_mask, 0xF);
1199 sdp_data->dst_addr.ip6 = ip6_addr;
1200 memset(&sdp_mask->dst_addr.ip6, 0xFF,
1201 sizeof sdp_mask->dst_addr.ip6);
1203 cma_set_ip_ver(cma_data, 6);
1204 cma_set_ip_ver(cma_mask, 0xF);
1205 cma_data->dst_addr.ip6 = ip6_addr;
1206 memset(&cma_mask->dst_addr.ip6, 0xFF,
1207 sizeof cma_mask->dst_addr.ip6);
1215 static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
1217 struct rdma_id_private *id_priv = iw_id->context;
1218 struct rdma_cm_event event;
1219 struct sockaddr_in *sin;
1222 if (cma_disable_remove(id_priv, CMA_CONNECT))
1225 memset(&event, 0, sizeof event);
1226 switch (iw_event->event) {
1227 case IW_CM_EVENT_CLOSE:
1228 event.event = RDMA_CM_EVENT_DISCONNECTED;
1230 case IW_CM_EVENT_CONNECT_REPLY:
1231 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1232 *sin = iw_event->local_addr;
1233 sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
1234 *sin = iw_event->remote_addr;
1235 switch (iw_event->status) {
1237 event.event = RDMA_CM_EVENT_ESTABLISHED;
1241 event.event = RDMA_CM_EVENT_REJECTED;
1244 event.event = RDMA_CM_EVENT_UNREACHABLE;
1247 event.event = RDMA_CM_EVENT_CONNECT_ERROR;
1251 case IW_CM_EVENT_ESTABLISHED:
1252 event.event = RDMA_CM_EVENT_ESTABLISHED;
1258 event.status = iw_event->status;
1259 event.param.conn.private_data = iw_event->private_data;
1260 event.param.conn.private_data_len = iw_event->private_data_len;
1261 ret = id_priv->id.event_handler(&id_priv->id, &event);
1263 /* Destroy the CM ID by returning a non-zero value. */
1264 id_priv->cm_id.iw = NULL;
1265 cma_exch(id_priv, CMA_DESTROYING);
1266 cma_enable_remove(id_priv);
1267 rdma_destroy_id(&id_priv->id);
1271 cma_enable_remove(id_priv);
1275 static int iw_conn_req_handler(struct iw_cm_id *cm_id,
1276 struct iw_cm_event *iw_event)
1278 struct rdma_cm_id *new_cm_id;
1279 struct rdma_id_private *listen_id, *conn_id;
1280 struct sockaddr_in *sin;
1281 struct net_device *dev = NULL;
1282 struct rdma_cm_event event;
1285 listen_id = cm_id->context;
1286 if (cma_disable_remove(listen_id, CMA_LISTEN))
1287 return -ECONNABORTED;
1289 /* Create a new RDMA ID for the new IW CM ID */
1290 new_cm_id = rdma_create_id(listen_id->id.event_handler,
1291 listen_id->id.context,
1297 conn_id = container_of(new_cm_id, struct rdma_id_private, id);
1298 atomic_inc(&conn_id->dev_remove);
1299 conn_id->state = CMA_CONNECT;
1301 dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
1303 ret = -EADDRNOTAVAIL;
1304 cma_enable_remove(conn_id);
1305 rdma_destroy_id(new_cm_id);
1308 ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
1310 cma_enable_remove(conn_id);
1311 rdma_destroy_id(new_cm_id);
1316 ret = cma_acquire_dev(conn_id);
1317 mutex_unlock(&lock);
1319 cma_enable_remove(conn_id);
1320 rdma_destroy_id(new_cm_id);
1324 conn_id->cm_id.iw = cm_id;
1325 cm_id->context = conn_id;
1326 cm_id->cm_handler = cma_iw_handler;
1328 sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
1329 *sin = iw_event->local_addr;
1330 sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
1331 *sin = iw_event->remote_addr;
1333 memset(&event, 0, sizeof event);
1334 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
1335 event.param.conn.private_data = iw_event->private_data;
1336 event.param.conn.private_data_len = iw_event->private_data_len;
1337 ret = conn_id->id.event_handler(&conn_id->id, &event);
1339 /* User wants to destroy the CM ID */
1340 conn_id->cm_id.iw = NULL;
1341 cma_exch(conn_id, CMA_DESTROYING);
1342 cma_enable_remove(conn_id);
1343 rdma_destroy_id(&conn_id->id);
1349 cma_enable_remove(listen_id);
1353 static int cma_ib_listen(struct rdma_id_private *id_priv)
1355 struct ib_cm_compare_data compare_data;
1356 struct sockaddr *addr;
1360 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
1362 if (IS_ERR(id_priv->cm_id.ib))
1363 return PTR_ERR(id_priv->cm_id.ib);
1365 addr = &id_priv->id.route.addr.src_addr;
1366 svc_id = cma_get_service_id(id_priv->id.ps, addr);
1367 if (cma_any_addr(addr))
1368 ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
1370 cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
1371 ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
1375 ib_destroy_cm_id(id_priv->cm_id.ib);
1376 id_priv->cm_id.ib = NULL;
1382 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
1385 struct sockaddr_in *sin;
1387 id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
1388 iw_conn_req_handler,
1390 if (IS_ERR(id_priv->cm_id.iw))
1391 return PTR_ERR(id_priv->cm_id.iw);
1393 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1394 id_priv->cm_id.iw->local_addr = *sin;
1396 ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
1399 iw_destroy_cm_id(id_priv->cm_id.iw);
1400 id_priv->cm_id.iw = NULL;
1406 static int cma_listen_handler(struct rdma_cm_id *id,
1407 struct rdma_cm_event *event)
1409 struct rdma_id_private *id_priv = id->context;
1411 id->context = id_priv->id.context;
1412 id->event_handler = id_priv->id.event_handler;
1413 return id_priv->id.event_handler(id, event);
1416 static void cma_listen_on_dev(struct rdma_id_private *id_priv,
1417 struct cma_device *cma_dev)
1419 struct rdma_id_private *dev_id_priv;
1420 struct rdma_cm_id *id;
1423 id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
1427 dev_id_priv = container_of(id, struct rdma_id_private, id);
1429 dev_id_priv->state = CMA_ADDR_BOUND;
1430 memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
1431 ip_addr_size(&id_priv->id.route.addr.src_addr));
1433 cma_attach_to_dev(dev_id_priv, cma_dev);
1434 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
1436 ret = rdma_listen(id, id_priv->backlog);
1442 cma_destroy_listen(dev_id_priv);
1445 static void cma_listen_on_all(struct rdma_id_private *id_priv)
1447 struct cma_device *cma_dev;
1450 list_add_tail(&id_priv->list, &listen_any_list);
1451 list_for_each_entry(cma_dev, &dev_list, list)
1452 cma_listen_on_dev(id_priv, cma_dev);
1453 mutex_unlock(&lock);
1456 static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
1458 struct sockaddr_in addr_in;
1460 memset(&addr_in, 0, sizeof addr_in);
1461 addr_in.sin_family = af;
1462 return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
1465 int rdma_listen(struct rdma_cm_id *id, int backlog)
1467 struct rdma_id_private *id_priv;
1470 id_priv = container_of(id, struct rdma_id_private, id);
1471 if (id_priv->state == CMA_IDLE) {
1472 ret = cma_bind_any(id, AF_INET);
1477 if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
1480 id_priv->backlog = backlog;
1482 switch (rdma_node_get_transport(id->device->node_type)) {
1483 case RDMA_TRANSPORT_IB:
1484 ret = cma_ib_listen(id_priv);
1488 case RDMA_TRANSPORT_IWARP:
1489 ret = cma_iw_listen(id_priv, backlog);
1498 cma_listen_on_all(id_priv);
1502 id_priv->backlog = 0;
1503 cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
1506 EXPORT_SYMBOL(rdma_listen);
1508 void rdma_set_service_type(struct rdma_cm_id *id, int tos)
1510 struct rdma_id_private *id_priv;
1512 id_priv = container_of(id, struct rdma_id_private, id);
1513 id_priv->tos = (u8) tos;
1515 EXPORT_SYMBOL(rdma_set_service_type);
1517 static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
1520 struct cma_work *work = context;
1521 struct rdma_route *route;
1523 route = &work->id->id.route;
1526 route->num_paths = 1;
1527 *route->path_rec = *path_rec;
1529 work->old_state = CMA_ROUTE_QUERY;
1530 work->new_state = CMA_ADDR_RESOLVED;
1531 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
1532 work->event.status = status;
1535 queue_work(cma_wq, &work->work);
1538 static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
1539 struct cma_work *work)
1541 struct rdma_addr *addr = &id_priv->id.route.addr;
1542 struct ib_sa_path_rec path_rec;
1543 ib_sa_comp_mask comp_mask;
1544 struct sockaddr_in6 *sin6;
1546 memset(&path_rec, 0, sizeof path_rec);
1547 ib_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
1548 ib_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
1549 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
1550 path_rec.numb_path = 1;
1551 path_rec.reversible = 1;
1552 path_rec.service_id = cma_get_service_id(id_priv->id.ps, &addr->dst_addr);
1554 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
1555 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
1556 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
1558 if (addr->src_addr.sa_family == AF_INET) {
1559 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
1560 comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
1562 sin6 = (struct sockaddr_in6 *) &addr->src_addr;
1563 path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
1564 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
1567 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
1568 id_priv->id.port_num, &path_rec,
1569 comp_mask, timeout_ms,
1570 GFP_KERNEL, cma_query_handler,
1571 work, &id_priv->query);
1573 return (id_priv->query_id < 0) ? id_priv->query_id : 0;
1576 static void cma_work_handler(struct work_struct *_work)
1578 struct cma_work *work = container_of(_work, struct cma_work, work);
1579 struct rdma_id_private *id_priv = work->id;
1582 atomic_inc(&id_priv->dev_remove);
1583 if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
1586 if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
1587 cma_exch(id_priv, CMA_DESTROYING);
1591 cma_enable_remove(id_priv);
1592 cma_deref_id(id_priv);
1594 rdma_destroy_id(&id_priv->id);
1598 static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
1600 struct rdma_route *route = &id_priv->id.route;
1601 struct cma_work *work;
1604 work = kzalloc(sizeof *work, GFP_KERNEL);
1609 INIT_WORK(&work->work, cma_work_handler);
1610 work->old_state = CMA_ROUTE_QUERY;
1611 work->new_state = CMA_ROUTE_RESOLVED;
1612 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1614 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
1615 if (!route->path_rec) {
1620 ret = cma_query_ib_route(id_priv, timeout_ms, work);
1626 kfree(route->path_rec);
1627 route->path_rec = NULL;
1633 int rdma_set_ib_paths(struct rdma_cm_id *id,
1634 struct ib_sa_path_rec *path_rec, int num_paths)
1636 struct rdma_id_private *id_priv;
1639 id_priv = container_of(id, struct rdma_id_private, id);
1640 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
1643 id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
1644 if (!id->route.path_rec) {
1649 memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
1652 cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
1655 EXPORT_SYMBOL(rdma_set_ib_paths);
1657 static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
1659 struct cma_work *work;
1661 work = kzalloc(sizeof *work, GFP_KERNEL);
1666 INIT_WORK(&work->work, cma_work_handler);
1667 work->old_state = CMA_ROUTE_QUERY;
1668 work->new_state = CMA_ROUTE_RESOLVED;
1669 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1670 queue_work(cma_wq, &work->work);
1674 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
1676 struct rdma_id_private *id_priv;
1679 id_priv = container_of(id, struct rdma_id_private, id);
1680 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
1683 atomic_inc(&id_priv->refcount);
1684 switch (rdma_node_get_transport(id->device->node_type)) {
1685 case RDMA_TRANSPORT_IB:
1686 ret = cma_resolve_ib_route(id_priv, timeout_ms);
1688 case RDMA_TRANSPORT_IWARP:
1689 ret = cma_resolve_iw_route(id_priv, timeout_ms);
1700 cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
1701 cma_deref_id(id_priv);
1704 EXPORT_SYMBOL(rdma_resolve_route);
1706 static int cma_bind_loopback(struct rdma_id_private *id_priv)
1708 struct cma_device *cma_dev;
1709 struct ib_port_attr port_attr;
1716 if (list_empty(&dev_list)) {
1720 list_for_each_entry(cma_dev, &dev_list, list)
1721 for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
1722 if (!ib_query_port(cma_dev->device, p, &port_attr) &&
1723 port_attr.state == IB_PORT_ACTIVE)
1727 cma_dev = list_entry(dev_list.next, struct cma_device, list);
1730 ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
1734 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
1738 ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1739 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
1740 id_priv->id.port_num = p;
1741 cma_attach_to_dev(id_priv, cma_dev);
1743 mutex_unlock(&lock);
1747 static void addr_handler(int status, struct sockaddr *src_addr,
1748 struct rdma_dev_addr *dev_addr, void *context)
1750 struct rdma_id_private *id_priv = context;
1751 struct rdma_cm_event event;
1753 memset(&event, 0, sizeof event);
1754 atomic_inc(&id_priv->dev_remove);
1757 * Grab mutex to block rdma_destroy_id() from removing the device while
1758 * we're trying to acquire it.
1761 if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
1762 mutex_unlock(&lock);
1766 if (!status && !id_priv->cma_dev)
1767 status = cma_acquire_dev(id_priv);
1768 mutex_unlock(&lock);
1771 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
1773 event.event = RDMA_CM_EVENT_ADDR_ERROR;
1774 event.status = status;
1776 memcpy(&id_priv->id.route.addr.src_addr, src_addr,
1777 ip_addr_size(src_addr));
1778 event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
1781 if (id_priv->id.event_handler(&id_priv->id, &event)) {
1782 cma_exch(id_priv, CMA_DESTROYING);
1783 cma_enable_remove(id_priv);
1784 cma_deref_id(id_priv);
1785 rdma_destroy_id(&id_priv->id);
1789 cma_enable_remove(id_priv);
1790 cma_deref_id(id_priv);
1793 static int cma_resolve_loopback(struct rdma_id_private *id_priv)
1795 struct cma_work *work;
1796 struct sockaddr_in *src_in, *dst_in;
1800 work = kzalloc(sizeof *work, GFP_KERNEL);
1804 if (!id_priv->cma_dev) {
1805 ret = cma_bind_loopback(id_priv);
1810 ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1811 ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
1813 if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
1814 src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
1815 dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
1816 src_in->sin_family = dst_in->sin_family;
1817 src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
1821 INIT_WORK(&work->work, cma_work_handler);
1822 work->old_state = CMA_ADDR_QUERY;
1823 work->new_state = CMA_ADDR_RESOLVED;
1824 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
1825 queue_work(cma_wq, &work->work);
1832 static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
1833 struct sockaddr *dst_addr)
1835 if (src_addr && src_addr->sa_family)
1836 return rdma_bind_addr(id, src_addr);
1838 return cma_bind_any(id, dst_addr->sa_family);
1841 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
1842 struct sockaddr *dst_addr, int timeout_ms)
1844 struct rdma_id_private *id_priv;
1847 id_priv = container_of(id, struct rdma_id_private, id);
1848 if (id_priv->state == CMA_IDLE) {
1849 ret = cma_bind_addr(id, src_addr, dst_addr);
1854 if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
1857 atomic_inc(&id_priv->refcount);
1858 memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
1859 if (cma_any_addr(dst_addr))
1860 ret = cma_resolve_loopback(id_priv);
1862 ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
1863 dst_addr, &id->route.addr.dev_addr,
1864 timeout_ms, addr_handler, id_priv);
1870 cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
1871 cma_deref_id(id_priv);
1874 EXPORT_SYMBOL(rdma_resolve_addr);
1876 static void cma_bind_port(struct rdma_bind_list *bind_list,
1877 struct rdma_id_private *id_priv)
1879 struct sockaddr_in *sin;
1881 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1882 sin->sin_port = htons(bind_list->port);
1883 id_priv->bind_list = bind_list;
1884 hlist_add_head(&id_priv->node, &bind_list->owners);
1887 static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
1888 unsigned short snum)
1890 struct rdma_bind_list *bind_list;
1893 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
1898 ret = idr_get_new_above(ps, bind_list, snum, &port);
1899 } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
1905 ret = -EADDRNOTAVAIL;
1910 bind_list->port = (unsigned short) port;
1911 cma_bind_port(bind_list, id_priv);
1914 idr_remove(ps, port);
1920 static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
1922 struct rdma_bind_list *bind_list;
1923 int port, ret, low, high;
1925 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
1930 /* FIXME: add proper port randomization, similar to inet_csk_get_port() */
1932 ret = idr_get_new_above(ps, bind_list, next_port, &port);
1933 } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
1938 inet_get_local_port_range(&low, &high);
1940 if (next_port != low) {
1941 idr_remove(ps, port);
1945 ret = -EADDRNOTAVAIL;
1952 next_port = port + 1;
1955 bind_list->port = (unsigned short) port;
1956 cma_bind_port(bind_list, id_priv);
1959 idr_remove(ps, port);
1965 static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
1967 struct rdma_id_private *cur_id;
1968 struct sockaddr_in *sin, *cur_sin;
1969 struct rdma_bind_list *bind_list;
1970 struct hlist_node *node;
1971 unsigned short snum;
1973 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1974 snum = ntohs(sin->sin_port);
1975 if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
1978 bind_list = idr_find(ps, snum);
1980 return cma_alloc_port(ps, id_priv, snum);
1983 * We don't support binding to any address if anyone is bound to
1984 * a specific address on the same port.
1986 if (cma_any_addr(&id_priv->id.route.addr.src_addr))
1987 return -EADDRNOTAVAIL;
1989 hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
1990 if (cma_any_addr(&cur_id->id.route.addr.src_addr))
1991 return -EADDRNOTAVAIL;
1993 cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
1994 if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
1998 cma_bind_port(bind_list, id_priv);
2002 static int cma_get_port(struct rdma_id_private *id_priv)
2007 switch (id_priv->id.ps) {
2021 return -EPROTONOSUPPORT;
2025 if (cma_any_port(&id_priv->id.route.addr.src_addr))
2026 ret = cma_alloc_any_port(ps, id_priv);
2028 ret = cma_use_port(ps, id_priv);
2029 mutex_unlock(&lock);
2034 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
2036 struct rdma_id_private *id_priv;
2039 if (addr->sa_family != AF_INET)
2040 return -EAFNOSUPPORT;
2042 id_priv = container_of(id, struct rdma_id_private, id);
2043 if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
2046 if (!cma_any_addr(addr)) {
2047 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
2052 ret = cma_acquire_dev(id_priv);
2053 mutex_unlock(&lock);
2058 memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
2059 ret = cma_get_port(id_priv);
2065 if (!cma_any_addr(addr)) {
2067 cma_detach_from_dev(id_priv);
2068 mutex_unlock(&lock);
2071 cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
2074 EXPORT_SYMBOL(rdma_bind_addr);
2076 static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
2077 struct rdma_route *route)
2079 struct sockaddr_in *src4, *dst4;
2080 struct cma_hdr *cma_hdr;
2081 struct sdp_hh *sdp_hdr;
2083 src4 = (struct sockaddr_in *) &route->addr.src_addr;
2084 dst4 = (struct sockaddr_in *) &route->addr.dst_addr;
2089 if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
2091 sdp_set_ip_ver(sdp_hdr, 4);
2092 sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
2093 sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
2094 sdp_hdr->port = src4->sin_port;
2098 cma_hdr->cma_version = CMA_VERSION;
2099 cma_set_ip_ver(cma_hdr, 4);
2100 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
2101 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
2102 cma_hdr->port = src4->sin_port;
2108 static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
2109 struct ib_cm_event *ib_event)
2111 struct rdma_id_private *id_priv = cm_id->context;
2112 struct rdma_cm_event event;
2113 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
2116 if (cma_disable_remove(id_priv, CMA_CONNECT))
2119 memset(&event, 0, sizeof event);
2120 switch (ib_event->event) {
2121 case IB_CM_SIDR_REQ_ERROR:
2122 event.event = RDMA_CM_EVENT_UNREACHABLE;
2123 event.status = -ETIMEDOUT;
2125 case IB_CM_SIDR_REP_RECEIVED:
2126 event.param.ud.private_data = ib_event->private_data;
2127 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
2128 if (rep->status != IB_SIDR_SUCCESS) {
2129 event.event = RDMA_CM_EVENT_UNREACHABLE;
2130 event.status = ib_event->param.sidr_rep_rcvd.status;
2133 if (id_priv->qkey != rep->qkey) {
2134 event.event = RDMA_CM_EVENT_UNREACHABLE;
2135 event.status = -EINVAL;
2138 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
2139 id_priv->id.route.path_rec,
2140 &event.param.ud.ah_attr);
2141 event.param.ud.qp_num = rep->qpn;
2142 event.param.ud.qkey = rep->qkey;
2143 event.event = RDMA_CM_EVENT_ESTABLISHED;
2147 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
2152 ret = id_priv->id.event_handler(&id_priv->id, &event);
2154 /* Destroy the CM ID by returning a non-zero value. */
2155 id_priv->cm_id.ib = NULL;
2156 cma_exch(id_priv, CMA_DESTROYING);
2157 cma_enable_remove(id_priv);
2158 rdma_destroy_id(&id_priv->id);
2162 cma_enable_remove(id_priv);
2166 static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
2167 struct rdma_conn_param *conn_param)
2169 struct ib_cm_sidr_req_param req;
2170 struct rdma_route *route;
2173 req.private_data_len = sizeof(struct cma_hdr) +
2174 conn_param->private_data_len;
2175 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
2176 if (!req.private_data)
2179 if (conn_param->private_data && conn_param->private_data_len)
2180 memcpy((void *) req.private_data + sizeof(struct cma_hdr),
2181 conn_param->private_data, conn_param->private_data_len);
2183 route = &id_priv->id.route;
2184 ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
2188 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
2189 cma_sidr_rep_handler, id_priv);
2190 if (IS_ERR(id_priv->cm_id.ib)) {
2191 ret = PTR_ERR(id_priv->cm_id.ib);
2195 req.path = route->path_rec;
2196 req.service_id = cma_get_service_id(id_priv->id.ps,
2197 &route->addr.dst_addr);
2198 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
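	/*
	 * CMA_CM_RESPONSE_TIMEOUT is an IB CM exponent (~4.096 us * 2^20,
	 * about 4.3 s); since 4.096 us is roughly 2^-8 ms, 1 << (20 - 8) =
	 * 4096 ms approximates the same interval for the SIDR request's
	 * millisecond-based timeout.
	 */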
2199 req.max_cm_retries = CMA_MAX_CM_RETRIES;
2201 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
2203 ib_destroy_cm_id(id_priv->cm_id.ib);
2204 id_priv->cm_id.ib = NULL;
2207 kfree(req.private_data);
2211 static int cma_connect_ib(struct rdma_id_private *id_priv,
2212 struct rdma_conn_param *conn_param)
2214 struct ib_cm_req_param req;
2215 struct rdma_route *route;
2219 memset(&req, 0, sizeof req);
2220 offset = cma_user_data_offset(id_priv->id.ps);
2221 req.private_data_len = offset + conn_param->private_data_len;
2222 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
2226 if (conn_param->private_data && conn_param->private_data_len)
2227 memcpy(private_data + offset, conn_param->private_data,
2228 conn_param->private_data_len);
2230 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
2232 if (IS_ERR(id_priv->cm_id.ib)) {
2233 ret = PTR_ERR(id_priv->cm_id.ib);
2237 route = &id_priv->id.route;
2238 ret = cma_format_hdr(private_data, id_priv->id.ps, route);
2241 req.private_data = private_data;
2243 req.primary_path = &route->path_rec[0];
2244 if (route->num_paths == 2)
2245 req.alternate_path = &route->path_rec[1];
2247 req.service_id = cma_get_service_id(id_priv->id.ps,
2248 &route->addr.dst_addr);
2249 req.qp_num = id_priv->qp_num;
2250 req.qp_type = IB_QPT_RC;
2251 req.starting_psn = id_priv->seq_num;
2252 req.responder_resources = conn_param->responder_resources;
2253 req.initiator_depth = conn_param->initiator_depth;
2254 req.flow_control = conn_param->flow_control;
2255 req.retry_count = conn_param->retry_count;
2256 req.rnr_retry_count = conn_param->rnr_retry_count;
2257 req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
2258 req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
2259 req.max_cm_retries = CMA_MAX_CM_RETRIES;
2260 req.srq = id_priv->srq ? 1 : 0;
2262 ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
2264 if (ret && !IS_ERR(id_priv->cm_id.ib)) {
2265 ib_destroy_cm_id(id_priv->cm_id.ib);
2266 id_priv->cm_id.ib = NULL;
2269 kfree(private_data);
2273 static int cma_connect_iw(struct rdma_id_private *id_priv,
2274 struct rdma_conn_param *conn_param)
2276 struct iw_cm_id *cm_id;
2277 struct sockaddr_in* sin;
2279 struct iw_cm_conn_param iw_param;
2281 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
2282 if (IS_ERR(cm_id)) {
2283 ret = PTR_ERR(cm_id);
2287 id_priv->cm_id.iw = cm_id;
2289 sin = (struct sockaddr_in*) &id_priv->id.route.addr.src_addr;
2290 cm_id->local_addr = *sin;
2292 sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr;
2293 cm_id->remote_addr = *sin;
2295 ret = cma_modify_qp_rtr(id_priv);
2299 iw_param.ord = conn_param->initiator_depth;
2300 iw_param.ird = conn_param->responder_resources;
2301 iw_param.private_data = conn_param->private_data;
2302 iw_param.private_data_len = conn_param->private_data_len;
2304 iw_param.qpn = id_priv->qp_num;
2306 iw_param.qpn = conn_param->qp_num;
2307 ret = iw_cm_connect(cm_id, &iw_param);
2309 if (ret && !IS_ERR(cm_id)) {
2310 iw_destroy_cm_id(cm_id);
2311 id_priv->cm_id.iw = NULL;
2316 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2318 struct rdma_id_private *id_priv;
2321 id_priv = container_of(id, struct rdma_id_private, id);
2322 if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
2326 id_priv->qp_num = conn_param->qp_num;
2327 id_priv->srq = conn_param->srq;
2330 switch (rdma_node_get_transport(id->device->node_type)) {
2331 case RDMA_TRANSPORT_IB:
2332 if (cma_is_ud_ps(id->ps))
2333 ret = cma_resolve_ib_udp(id_priv, conn_param);
2335 ret = cma_connect_ib(id_priv, conn_param);
2337 case RDMA_TRANSPORT_IWARP:
2338 ret = cma_connect_iw(id_priv, conn_param);
2349 cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
2352 EXPORT_SYMBOL(rdma_connect);
2354 static int cma_accept_ib(struct rdma_id_private *id_priv,
2355 struct rdma_conn_param *conn_param)
2357 struct ib_cm_rep_param rep;
2358 struct ib_qp_attr qp_attr;
2359 int qp_attr_mask, ret;
2361 if (id_priv->id.qp) {
2362 ret = cma_modify_qp_rtr(id_priv);
2366 qp_attr.qp_state = IB_QPS_RTS;
2367 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr,
2372 qp_attr.max_rd_atomic = conn_param->initiator_depth;
2373 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
2378 memset(&rep, 0, sizeof rep);
2379 rep.qp_num = id_priv->qp_num;
2380 rep.starting_psn = id_priv->seq_num;
2381 rep.private_data = conn_param->private_data;
2382 rep.private_data_len = conn_param->private_data_len;
2383 rep.responder_resources = conn_param->responder_resources;
2384 rep.initiator_depth = conn_param->initiator_depth;
2385 rep.failover_accepted = 0;
2386 rep.flow_control = conn_param->flow_control;
2387 rep.rnr_retry_count = conn_param->rnr_retry_count;
2388 rep.srq = id_priv->srq ? 1 : 0;
2390 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
2395 static int cma_accept_iw(struct rdma_id_private *id_priv,
2396 struct rdma_conn_param *conn_param)
2398 struct iw_cm_conn_param iw_param;
2401 ret = cma_modify_qp_rtr(id_priv);
2405 iw_param.ord = conn_param->initiator_depth;
2406 iw_param.ird = conn_param->responder_resources;
2407 iw_param.private_data = conn_param->private_data;
2408 iw_param.private_data_len = conn_param->private_data_len;
2409 if (id_priv->id.qp) {
2410 iw_param.qpn = id_priv->qp_num;
2412 iw_param.qpn = conn_param->qp_num;
2414 return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
2417 static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
2418 enum ib_cm_sidr_status status,
2419 const void *private_data, int private_data_len)
2421 struct ib_cm_sidr_rep_param rep;
2423 memset(&rep, 0, sizeof rep);
2424 rep.status = status;
2425 if (status == IB_SIDR_SUCCESS) {
2426 rep.qp_num = id_priv->qp_num;
2427 rep.qkey = id_priv->qkey;
2429 rep.private_data = private_data;
2430 rep.private_data_len = private_data_len;
2432 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
2435 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2437 struct rdma_id_private *id_priv;
2440 id_priv = container_of(id, struct rdma_id_private, id);
2441 if (!cma_comp(id_priv, CMA_CONNECT))
2444 if (!id->qp && conn_param) {
2445 id_priv->qp_num = conn_param->qp_num;
2446 id_priv->srq = conn_param->srq;
2449 switch (rdma_node_get_transport(id->device->node_type)) {
2450 case RDMA_TRANSPORT_IB:
2451 if (cma_is_ud_ps(id->ps))
2452 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
2453 conn_param->private_data,
2454 conn_param->private_data_len);
2455 else if (conn_param)
2456 ret = cma_accept_ib(id_priv, conn_param);
2458 ret = cma_rep_recv(id_priv);
2460 case RDMA_TRANSPORT_IWARP:
2461 ret = cma_accept_iw(id_priv, conn_param);
2473 cma_modify_qp_err(id_priv);
2474 rdma_reject(id, NULL, 0);
2477 EXPORT_SYMBOL(rdma_accept);
2479 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
2481 struct rdma_id_private *id_priv;
2484 id_priv = container_of(id, struct rdma_id_private, id);
2485 if (!cma_has_cm_dev(id_priv))
2488 switch (id->device->node_type) {
2489 case RDMA_NODE_IB_CA:
2490 ret = ib_cm_notify(id_priv->cm_id.ib, event);
2498 EXPORT_SYMBOL(rdma_notify);
2500 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
2501 u8 private_data_len)
2503 struct rdma_id_private *id_priv;
2506 id_priv = container_of(id, struct rdma_id_private, id);
2507 if (!cma_has_cm_dev(id_priv))
2510 switch (rdma_node_get_transport(id->device->node_type)) {
2511 case RDMA_TRANSPORT_IB:
2512 if (cma_is_ud_ps(id->ps))
2513 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
2514 private_data, private_data_len);
2516 ret = ib_send_cm_rej(id_priv->cm_id.ib,
2517 IB_CM_REJ_CONSUMER_DEFINED, NULL,
2518 0, private_data, private_data_len);
2520 case RDMA_TRANSPORT_IWARP:
2521 ret = iw_cm_reject(id_priv->cm_id.iw,
2522 private_data, private_data_len);
2530 EXPORT_SYMBOL(rdma_reject);
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
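
/*
 * Completion handler for an SA multicast join.  On success the group is
 * attached to any bound QP and a MULTICAST_JOIN event, carrying the AH
 * attributes, the multicast QPN (0xFFFFFF) and Q_Key, is reported to the
 * user; otherwise a MULTICAST_ERROR event is generated.
 */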
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret;

	id_priv = mc->id_priv;
	if (cma_disable_remove(id_priv, CMA_ADDR_BOUND) &&
	    cma_disable_remove(id_priv, CMA_ADDR_RESOLVED))
		return 0;

	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 multicast->rec.mlid);
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, CMA_DESTROYING);
		cma_enable_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

	cma_enable_remove(id_priv);
	return 0;
}
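
/*
 * Derive the multicast GID for a socket address: the wildcard address maps
 * to the all-zero MGID, an IPv6 address in the SA-assigned range is used
 * verbatim, and an IPv4 address goes through the standard IP-over-IB
 * multicast mapping (with the RDMA CM signature byte for the UDP port
 * space).
 */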
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFF10A01B) ==
								 0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		mc_map[8] = ib_addr_get_pkey(dev_addr) >> 8;
		mc_map[9] = (unsigned char) ib_addr_get_pkey(dev_addr);
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}
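
/*
 * Build an SA MCMember record from the resolved device address and the
 * requested multicast address, then ask the SA to join (or create) the
 * group.  cma_ib_mc_handler() is invoked when the join completes.
 */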
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, &mc->addr, &rec.mgid);
	if (id_priv->id.ps == RDMA_PS_UDP)
		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
	ib_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = 1;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	if (IS_ERR(mc->multicast.ib))
		return PTR_ERR(mc->multicast.ib);

	return 0;
}
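
/*
 * Join a multicast group on behalf of the user.  The rdma_cm_id must have
 * a bound or resolved address; the result of the join is delivered through
 * the id's event handler.
 */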
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, ip_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_join_ib_multicast(id_priv, mc);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
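
/*
 * Leave a previously joined multicast group: detach the group from any
 * bound QP, drop the SA membership, and free the tracking structure.
 */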
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						mc->multicast.ib->rec.mlid);
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
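
/*
 * ib_client add callback: allocate per-device state and replay any
 * wildcard listens on the newly registered device.
 */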
static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
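
/*
 * Notify a single rdma_cm_id that its underlying device is being removed.
 * A non-zero return from the user's event handler tells the caller to
 * destroy the id.
 */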
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum cma_state state;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
	if (state == CMA_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	wait_event(id_priv->wait_remove, !atomic_read(&id_priv->dev_remove));

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
		return 0;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	return id_priv->id.event_handler(&id_priv->id, &event);
}
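
/*
 * Walk every id bound to a departing device: internal listens are simply
 * destroyed, while user-owned ids receive a DEVICE_REMOVAL event and are
 * destroyed if the handler requests it.  The device reference is dropped
 * last, and the function waits until all ids have released theirs.
 */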
static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		if (cma_internal_listen(id_priv)) {
			cma_destroy_listen(id_priv);
			continue;
		}

		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}
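
/*
 * ib_client remove callback: unlink the per-device state, tear down all
 * ids that were using the device, and free it.
 */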
static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}
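
/*
 * Module init: seed the port allocator with a random starting port within
 * the local port range, create the workqueue used for deferred CM work,
 * and register with the SA, address resolution, and ib_client frameworks.
 */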
static int cma_init(void)
{
	int ret, low, high;

	get_random_bytes(&next_port, sizeof next_port);
	inet_get_local_port_range(&low, &high);
	next_port = ((unsigned int) next_port % (high - low)) + low;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;
	return 0;

err:
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}
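
/*
 * Module exit: unregister in the reverse order of cma_init() and release
 * the port-space IDRs.
 */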
static void cma_cleanup(void)
{
	ib_unregister_client(&cma_client);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&sdp_ps);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);