2 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
3 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
4 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
5 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
7 * This Software is licensed under one of the following licenses:
9 * 1) under the terms of the "Common Public License 1.0" a copy of which is
10 * available from the Open Source Initiative, see
11 * http://www.opensource.org/licenses/cpl.php.
13 * 2) under the terms of the "The BSD License" a copy of which is
14 * available from the Open Source Initiative, see
15 * http://www.opensource.org/licenses/bsd-license.php.
17 * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
18 * copy of which is available from the Open Source Initiative, see
19 * http://www.opensource.org/licenses/gpl-license.php.
21 * Licensee has the right to choose one of the above licenses.
23 * Redistributions of source code must retain the above copyright
24 * notice and one of the license notices.
26 * Redistributions in binary form must reproduce both the above copyright
27 * notice, one of the license notices in the documentation
28 * and/or other materials provided with the distribution.
32 #include <linux/completion.h>
34 #include <linux/in6.h>
35 #include <linux/mutex.h>
36 #include <linux/random.h>
37 #include <linux/idr.h>
38 #include <linux/inetdevice.h>
42 #include <rdma/rdma_cm.h>
43 #include <rdma/rdma_cm_ib.h>
44 #include <rdma/ib_cache.h>
45 #include <rdma/ib_cm.h>
46 #include <rdma/ib_sa.h>
47 #include <rdma/iw_cm.h>
49 MODULE_AUTHOR("Sean Hefty");
50 MODULE_DESCRIPTION("Generic RDMA CM Agent");
51 MODULE_LICENSE("Dual BSD/GPL");
53 #define CMA_CM_RESPONSE_TIMEOUT 20
54 #define CMA_MAX_CM_RETRIES 15
55 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
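/*
 * Note on the constants above (editor's summary): CMA_CM_RESPONSE_TIMEOUT is
 * the IB CM timeout exponent (roughly 4.096 usec << 20, about 4 seconds per
 * CM message; compare the "1 << (CMA_CM_RESPONSE_TIMEOUT - 8)" millisecond
 * conversion used for the SIDR request further down), and CMA_MAX_CM_RETRIES
 * bounds CM retransmissions.  CMA_CM_MRA_SETTING packs a service timeout of
 * 24 with IB_CM_MRA_FLAG_DELAY, which asks the IB CM to send the MRA only if
 * a duplicate REQ is received.
 */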
57 static void cma_add_one(struct ib_device *device);
58 static void cma_remove_one(struct ib_device *device);
60 static struct ib_client cma_client = {
63 .remove = cma_remove_one
66 static struct ib_sa_client sa_client;
67 static struct rdma_addr_client addr_client;
68 static LIST_HEAD(dev_list);
69 static LIST_HEAD(listen_any_list);
70 static DEFINE_MUTEX(lock);
71 static struct workqueue_struct *cma_wq;
72 static DEFINE_IDR(sdp_ps);
73 static DEFINE_IDR(tcp_ps);
74 static DEFINE_IDR(udp_ps);
75 static DEFINE_IDR(ipoib_ps);
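/*
 * Global state: cma_wq runs the deferred state-transition work items
 * (cma_work_handler), and each RDMA port space (SDP/TCP/UDP/IPOIB) has its
 * own IDR mapping a port number to its rdma_bind_list.  dev_list,
 * listen_any_list and the IDRs are protected by the single "lock" mutex.
 */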
79 struct list_head list;
80 struct ib_device *device;
81 struct completion comp;
83 struct list_head id_list;
100 struct rdma_bind_list {
102 struct hlist_head owners;
107  * Device removal can occur at any time, so we need extra handling to
108 * serialize notifying the user of device removal with other callbacks.
109  * We do this by disabling removal notification while a callback is in progress,
110 * and reporting it after the callback completes.
112 struct rdma_id_private {
113 struct rdma_cm_id id;
115 struct rdma_bind_list *bind_list;
116 struct hlist_node node;
117 struct list_head list; /* listen_any_list or cma_device.list */
118 struct list_head listen_list; /* per device listens */
119 struct cma_device *cma_dev;
120 struct list_head mc_list;
123 enum cma_state state;
125 struct mutex qp_mutex;
127 struct completion comp;
129 wait_queue_head_t wait_remove;
134 struct ib_sa_query *query;
148 struct cma_multicast {
149 struct rdma_id_private *id_priv;
151 struct ib_sa_multicast *ib;
153 struct list_head list;
155 struct sockaddr addr;
156 u8 pad[sizeof(struct sockaddr_in6) -
157 sizeof(struct sockaddr)];
161 struct work_struct work;
162 struct rdma_id_private *id;
163 enum cma_state old_state;
164 enum cma_state new_state;
165 struct rdma_cm_event event;
178 u8 ip_version; /* IP version: 7:4 */
180 union cma_ip_addr src_addr;
181 union cma_ip_addr dst_addr;
186 u8 sdp_version; /* Major version: 7:4 */
187 u8 ip_version; /* IP version: 7:4 */
188 u8 sdp_specific1[10];
191 union cma_ip_addr src_addr;
192 union cma_ip_addr dst_addr;
200 #define CMA_VERSION 0x00
201 #define SDP_MAJ_VERSION 0x2
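/*
 * The helpers below are the id state machine primitives: cma_comp() tests
 * the current state, cma_comp_exch() moves comp -> exch only if the id is
 * currently in state comp, and cma_exch() switches to a new state
 * unconditionally, returning the old one.  All of them take id_priv->lock,
 * so state checks and transitions are atomic with respect to the various
 * event handlers.
 */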
203 static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
208 spin_lock_irqsave(&id_priv->lock, flags);
209 ret = (id_priv->state == comp);
210 spin_unlock_irqrestore(&id_priv->lock, flags);
214 static int cma_comp_exch(struct rdma_id_private *id_priv,
215 enum cma_state comp, enum cma_state exch)
220 spin_lock_irqsave(&id_priv->lock, flags);
221 if ((ret = (id_priv->state == comp)))
222 id_priv->state = exch;
223 spin_unlock_irqrestore(&id_priv->lock, flags);
227 static enum cma_state cma_exch(struct rdma_id_private *id_priv,
233 spin_lock_irqsave(&id_priv->lock, flags);
234 old = id_priv->state;
235 id_priv->state = exch;
236 spin_unlock_irqrestore(&id_priv->lock, flags);
240 static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
242 return hdr->ip_version >> 4;
245 static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
247 hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
250 static inline u8 sdp_get_majv(u8 sdp_version)
252 return sdp_version >> 4;
255 static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
257 return hh->ip_version >> 4;
260 static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
262 hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
265 static inline int cma_is_ud_ps(enum rdma_port_space ps)
267 return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
270 static void cma_attach_to_dev(struct rdma_id_private *id_priv,
271 struct cma_device *cma_dev)
273 atomic_inc(&cma_dev->refcount);
274 id_priv->cma_dev = cma_dev;
275 id_priv->id.device = cma_dev->device;
276 list_add_tail(&id_priv->list, &cma_dev->id_list);
279 static inline void cma_deref_dev(struct cma_device *cma_dev)
281 if (atomic_dec_and_test(&cma_dev->refcount))
282 complete(&cma_dev->comp);
285 static void cma_detach_from_dev(struct rdma_id_private *id_priv)
287 list_del(&id_priv->list);
288 cma_deref_dev(id_priv->cma_dev);
289 id_priv->cma_dev = NULL;
292 static int cma_set_qkey(struct ib_device *device, u8 port_num,
293 enum rdma_port_space ps,
294 struct rdma_dev_addr *dev_addr, u32 *qkey)
296 struct ib_sa_mcmember_rec rec;
301 *qkey = RDMA_UDP_QKEY;
304 ib_addr_get_mgid(dev_addr, &rec.mgid);
305 ret = ib_sa_get_mcmember_rec(device, port_num, &rec.mgid, &rec);
306 *qkey = be32_to_cpu(rec.qkey);
314 static int cma_acquire_dev(struct rdma_id_private *id_priv)
316 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
317 struct cma_device *cma_dev;
321 switch (rdma_node_get_transport(dev_addr->dev_type)) {
322 case RDMA_TRANSPORT_IB:
323 ib_addr_get_sgid(dev_addr, &gid);
325 case RDMA_TRANSPORT_IWARP:
326 iw_addr_get_sgid(dev_addr, &gid);
332 list_for_each_entry(cma_dev, &dev_list, list) {
333 ret = ib_find_cached_gid(cma_dev->device, &gid,
334 &id_priv->id.port_num, NULL);
336 ret = cma_set_qkey(cma_dev->device,
337 id_priv->id.port_num,
338 id_priv->id.ps, dev_addr,
341 cma_attach_to_dev(id_priv, cma_dev);
348 static void cma_deref_id(struct rdma_id_private *id_priv)
350 if (atomic_dec_and_test(&id_priv->refcount))
351 complete(&id_priv->comp);
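/*
 * cma_disable_remove()/cma_enable_remove() implement the callback vs.
 * device-removal serialization described above struct rdma_id_private:
 * dev_remove counts callbacks in flight, and the removal path (elsewhere in
 * this file) waits on wait_remove until the count drops to zero.
 * cma_disable_remove() only succeeds if the id is still in the expected
 * state.
 */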
354 static int cma_disable_remove(struct rdma_id_private *id_priv,
355 enum cma_state state)
360 spin_lock_irqsave(&id_priv->lock, flags);
361 if (id_priv->state == state) {
362 atomic_inc(&id_priv->dev_remove);
366 spin_unlock_irqrestore(&id_priv->lock, flags);
370 static void cma_enable_remove(struct rdma_id_private *id_priv)
372 if (atomic_dec_and_test(&id_priv->dev_remove))
373 wake_up(&id_priv->wait_remove);
376 static int cma_has_cm_dev(struct rdma_id_private *id_priv)
378 return (id_priv->id.device && id_priv->cm_id.ib);
381 struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
382 void *context, enum rdma_port_space ps)
384 struct rdma_id_private *id_priv;
386 id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
388 return ERR_PTR(-ENOMEM);
390 id_priv->state = CMA_IDLE;
391 id_priv->id.context = context;
392 id_priv->id.event_handler = event_handler;
394 spin_lock_init(&id_priv->lock);
395 mutex_init(&id_priv->qp_mutex);
396 init_completion(&id_priv->comp);
397 atomic_set(&id_priv->refcount, 1);
398 init_waitqueue_head(&id_priv->wait_remove);
399 atomic_set(&id_priv->dev_remove, 0);
400 INIT_LIST_HEAD(&id_priv->listen_list);
401 INIT_LIST_HEAD(&id_priv->mc_list);
402 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
406 EXPORT_SYMBOL(rdma_create_id);
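/*
 * For the unreliable datagram port spaces (RDMA_PS_UDP/RDMA_PS_IPOIB) there
 * is no connection to establish, so cma_init_ud_qp() walks the QP straight
 * through INIT -> RTR -> RTS at rdma_create_qp() time.  Connected QPs only
 * get the INIT transition here and move to RTR/RTS during connection setup.
 */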
408 static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
410 struct ib_qp_attr qp_attr;
411 int qp_attr_mask, ret;
413 qp_attr.qp_state = IB_QPS_INIT;
414 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
418 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
422 qp_attr.qp_state = IB_QPS_RTR;
423 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
427 qp_attr.qp_state = IB_QPS_RTS;
429 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
434 static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
436 struct ib_qp_attr qp_attr;
437 int qp_attr_mask, ret;
439 qp_attr.qp_state = IB_QPS_INIT;
440 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
444 return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
447 int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
448 struct ib_qp_init_attr *qp_init_attr)
450 struct rdma_id_private *id_priv;
454 id_priv = container_of(id, struct rdma_id_private, id);
455 if (id->device != pd->device)
458 qp = ib_create_qp(pd, qp_init_attr);
462 if (cma_is_ud_ps(id_priv->id.ps))
463 ret = cma_init_ud_qp(id_priv, qp);
465 ret = cma_init_conn_qp(id_priv, qp);
470 id_priv->qp_num = qp->qp_num;
471 id_priv->srq = (qp->srq != NULL);
477 EXPORT_SYMBOL(rdma_create_qp);
479 void rdma_destroy_qp(struct rdma_cm_id *id)
481 struct rdma_id_private *id_priv;
483 id_priv = container_of(id, struct rdma_id_private, id);
484 mutex_lock(&id_priv->qp_mutex);
485 ib_destroy_qp(id_priv->id.qp);
486 id_priv->id.qp = NULL;
487 mutex_unlock(&id_priv->qp_mutex);
489 EXPORT_SYMBOL(rdma_destroy_qp);
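/*
 * cma_modify_qp_rtr()/_rts()/_err() drive a consumer-created QP through the
 * connection state transitions using attributes filled in by
 * rdma_init_qp_attr().  They return early if the user did not associate a
 * QP with the id, and qp_mutex keeps them from racing with
 * rdma_destroy_qp().
 */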
491 static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
492 struct rdma_conn_param *conn_param)
494 struct ib_qp_attr qp_attr;
495 int qp_attr_mask, ret;
497 mutex_lock(&id_priv->qp_mutex);
498 if (!id_priv->id.qp) {
503 /* Need to update QP attributes from default values. */
504 qp_attr.qp_state = IB_QPS_INIT;
505 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
509 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
513 qp_attr.qp_state = IB_QPS_RTR;
514 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
519 qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
520 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
522 mutex_unlock(&id_priv->qp_mutex);
526 static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
527 struct rdma_conn_param *conn_param)
529 struct ib_qp_attr qp_attr;
530 int qp_attr_mask, ret;
532 mutex_lock(&id_priv->qp_mutex);
533 if (!id_priv->id.qp) {
538 qp_attr.qp_state = IB_QPS_RTS;
539 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
544 qp_attr.max_rd_atomic = conn_param->initiator_depth;
545 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
547 mutex_unlock(&id_priv->qp_mutex);
551 static int cma_modify_qp_err(struct rdma_id_private *id_priv)
553 struct ib_qp_attr qp_attr;
556 mutex_lock(&id_priv->qp_mutex);
557 if (!id_priv->id.qp) {
562 qp_attr.qp_state = IB_QPS_ERR;
563 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
565 mutex_unlock(&id_priv->qp_mutex);
569 static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
570 struct ib_qp_attr *qp_attr, int *qp_attr_mask)
572 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
575 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
576 ib_addr_get_pkey(dev_addr),
577 &qp_attr->pkey_index);
581 qp_attr->port_num = id_priv->id.port_num;
582 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
584 if (cma_is_ud_ps(id_priv->id.ps)) {
585 qp_attr->qkey = id_priv->qkey;
586 *qp_attr_mask |= IB_QP_QKEY;
588 qp_attr->qp_access_flags = 0;
589 *qp_attr_mask |= IB_QP_ACCESS_FLAGS;
594 int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
597 struct rdma_id_private *id_priv;
600 id_priv = container_of(id, struct rdma_id_private, id);
601 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
602 case RDMA_TRANSPORT_IB:
603 if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
604 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
606 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
608 if (qp_attr->qp_state == IB_QPS_RTR)
609 qp_attr->rq_psn = id_priv->seq_num;
611 case RDMA_TRANSPORT_IWARP:
612 if (!id_priv->cm_id.iw) {
613 qp_attr->qp_access_flags = 0;
614 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
616 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
626 EXPORT_SYMBOL(rdma_init_qp_attr);
628 static inline int cma_zero_addr(struct sockaddr *addr)
630 struct in6_addr *ip6;
632 if (addr->sa_family == AF_INET)
633 return ipv4_is_zeronet(
634 ((struct sockaddr_in *)addr)->sin_addr.s_addr);
636 ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
637 return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
638 ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
642 static inline int cma_loopback_addr(struct sockaddr *addr)
644 return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
647 static inline int cma_any_addr(struct sockaddr *addr)
649 return cma_zero_addr(addr) || cma_loopback_addr(addr);
652 static inline __be16 cma_port(struct sockaddr *addr)
654 if (addr->sa_family == AF_INET)
655 return ((struct sockaddr_in *) addr)->sin_port;
657 return ((struct sockaddr_in6 *) addr)->sin6_port;
660 static inline int cma_any_port(struct sockaddr *addr)
662 return !cma_port(addr);
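/*
 * Connection requests carry the IP addressing information in the CM private
 * data, formatted as struct cma_hdr (or struct sdp_hh for RDMA_PS_SDP).
 * cma_get_net_info() validates the header version and extracts the IP
 * version, port and addresses; cma_save_net_info() copies them into the
 * rdma_addr of the newly created child id.
 */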
665 static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
666 u8 *ip_ver, __u16 *port,
667 union cma_ip_addr **src, union cma_ip_addr **dst)
671 if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
675 *ip_ver = sdp_get_ip_ver(hdr);
676 *port = ((struct sdp_hh *) hdr)->port;
677 *src = &((struct sdp_hh *) hdr)->src_addr;
678 *dst = &((struct sdp_hh *) hdr)->dst_addr;
681 if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
684 *ip_ver = cma_get_ip_ver(hdr);
685 *port = ((struct cma_hdr *) hdr)->port;
686 *src = &((struct cma_hdr *) hdr)->src_addr;
687 *dst = &((struct cma_hdr *) hdr)->dst_addr;
691 if (*ip_ver != 4 && *ip_ver != 6)
696 static void cma_save_net_info(struct rdma_addr *addr,
697 struct rdma_addr *listen_addr,
698 u8 ip_ver, __u16 port,
699 union cma_ip_addr *src, union cma_ip_addr *dst)
701 struct sockaddr_in *listen4, *ip4;
702 struct sockaddr_in6 *listen6, *ip6;
706 listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
707 ip4 = (struct sockaddr_in *) &addr->src_addr;
708 ip4->sin_family = listen4->sin_family;
709 ip4->sin_addr.s_addr = dst->ip4.addr;
710 ip4->sin_port = listen4->sin_port;
712 ip4 = (struct sockaddr_in *) &addr->dst_addr;
713 ip4->sin_family = listen4->sin_family;
714 ip4->sin_addr.s_addr = src->ip4.addr;
715 ip4->sin_port = port;
718 listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
719 ip6 = (struct sockaddr_in6 *) &addr->src_addr;
720 ip6->sin6_family = listen6->sin6_family;
721 ip6->sin6_addr = dst->ip6;
722 ip6->sin6_port = listen6->sin6_port;
724 ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
725 ip6->sin6_family = listen6->sin6_family;
726 ip6->sin6_addr = src->ip6;
727 ip6->sin6_port = port;
734 static inline int cma_user_data_offset(enum rdma_port_space ps)
740 return sizeof(struct cma_hdr);
744 static void cma_cancel_route(struct rdma_id_private *id_priv)
746 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
747 case RDMA_TRANSPORT_IB:
749 ib_sa_cancel_query(id_priv->query_id, id_priv->query);
756 static void cma_cancel_listens(struct rdma_id_private *id_priv)
758 struct rdma_id_private *dev_id_priv;
761 * Remove from listen_any_list to prevent added devices from spawning
762 * additional listen requests.
765 list_del(&id_priv->list);
767 while (!list_empty(&id_priv->listen_list)) {
768 dev_id_priv = list_entry(id_priv->listen_list.next,
769 struct rdma_id_private, listen_list);
770 /* sync with device removal to avoid duplicate destruction */
771 list_del_init(&dev_id_priv->list);
772 list_del(&dev_id_priv->listen_list);
775 rdma_destroy_id(&dev_id_priv->id);
781 static void cma_cancel_operation(struct rdma_id_private *id_priv,
782 enum cma_state state)
786 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
788 case CMA_ROUTE_QUERY:
789 cma_cancel_route(id_priv);
792 if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
794 cma_cancel_listens(id_priv);
801 static void cma_release_port(struct rdma_id_private *id_priv)
803 struct rdma_bind_list *bind_list = id_priv->bind_list;
809 hlist_del(&id_priv->node);
810 if (hlist_empty(&bind_list->owners)) {
811 idr_remove(bind_list->ps, bind_list->port);
817 static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
819 struct cma_multicast *mc;
821 while (!list_empty(&id_priv->mc_list)) {
822 mc = container_of(id_priv->mc_list.next,
823 struct cma_multicast, list);
825 ib_sa_free_multicast(mc->multicast.ib);
830 void rdma_destroy_id(struct rdma_cm_id *id)
832 struct rdma_id_private *id_priv;
833 enum cma_state state;
835 id_priv = container_of(id, struct rdma_id_private, id);
836 state = cma_exch(id_priv, CMA_DESTROYING);
837 cma_cancel_operation(id_priv, state);
840 if (id_priv->cma_dev) {
842 switch (rdma_node_get_transport(id->device->node_type)) {
843 case RDMA_TRANSPORT_IB:
844 if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
845 ib_destroy_cm_id(id_priv->cm_id.ib);
847 case RDMA_TRANSPORT_IWARP:
848 if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
849 iw_destroy_cm_id(id_priv->cm_id.iw);
854 cma_leave_mc_groups(id_priv);
856 cma_detach_from_dev(id_priv);
860 cma_release_port(id_priv);
861 cma_deref_id(id_priv);
862 wait_for_completion(&id_priv->comp);
864 if (id_priv->internal_id)
865 cma_deref_id(id_priv->id.context);
867 kfree(id_priv->id.route.path_rec);
870 EXPORT_SYMBOL(rdma_destroy_id);
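/*
 * cma_rep_recv() completes IB connection establishment once a REP has been
 * received: it transitions the bound QP to RTR and RTS and sends the RTU.
 * On failure the QP is moved to the error state and a consumer-defined REJ
 * is returned to the peer.
 */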
872 static int cma_rep_recv(struct rdma_id_private *id_priv)
876 ret = cma_modify_qp_rtr(id_priv, NULL);
880 ret = cma_modify_qp_rts(id_priv, NULL);
884 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
890 cma_modify_qp_err(id_priv);
891 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
896 static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
898 if (id_priv->id.ps == RDMA_PS_SDP &&
899 sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
906 static void cma_set_rep_event_data(struct rdma_cm_event *event,
907 struct ib_cm_rep_event_param *rep_data,
910 event->param.conn.private_data = private_data;
911 event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
912 event->param.conn.responder_resources = rep_data->responder_resources;
913 event->param.conn.initiator_depth = rep_data->initiator_depth;
914 event->param.conn.flow_control = rep_data->flow_control;
915 event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
916 event->param.conn.srq = rep_data->srq;
917 event->param.conn.qp_num = rep_data->remote_qpn;
920 static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
922 struct rdma_id_private *id_priv = cm_id->context;
923 struct rdma_cm_event event;
926 if (cma_disable_remove(id_priv, CMA_CONNECT))
929 memset(&event, 0, sizeof event);
930 switch (ib_event->event) {
931 case IB_CM_REQ_ERROR:
932 case IB_CM_REP_ERROR:
933 event.event = RDMA_CM_EVENT_UNREACHABLE;
934 event.status = -ETIMEDOUT;
936 case IB_CM_REP_RECEIVED:
937 event.status = cma_verify_rep(id_priv, ib_event->private_data);
939 event.event = RDMA_CM_EVENT_CONNECT_ERROR;
940 else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
941 event.status = cma_rep_recv(id_priv);
942 event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
943 RDMA_CM_EVENT_ESTABLISHED;
945 event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
946 cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
947 ib_event->private_data);
949 case IB_CM_RTU_RECEIVED:
950 case IB_CM_USER_ESTABLISHED:
951 event.event = RDMA_CM_EVENT_ESTABLISHED;
953 case IB_CM_DREQ_ERROR:
954 event.status = -ETIMEDOUT; /* fall through */
955 case IB_CM_DREQ_RECEIVED:
956 case IB_CM_DREP_RECEIVED:
957 if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
959 event.event = RDMA_CM_EVENT_DISCONNECTED;
961 case IB_CM_TIMEWAIT_EXIT:
962 case IB_CM_MRA_RECEIVED:
965 case IB_CM_REJ_RECEIVED:
966 cma_modify_qp_err(id_priv);
967 event.status = ib_event->param.rej_rcvd.reason;
968 event.event = RDMA_CM_EVENT_REJECTED;
969 event.param.conn.private_data = ib_event->private_data;
970 event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
973 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
978 ret = id_priv->id.event_handler(&id_priv->id, &event);
980 /* Destroy the CM ID by returning a non-zero value. */
981 id_priv->cm_id.ib = NULL;
982 cma_exch(id_priv, CMA_DESTROYING);
983 cma_enable_remove(id_priv);
984 rdma_destroy_id(&id_priv->id);
988 cma_enable_remove(id_priv);
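/*
 * cma_new_conn_id()/cma_new_udp_id() build the child rdma_cm_id for an
 * incoming REQ or SIDR REQ: they inherit the listener's handler and
 * context, save the addressing carried in the private data header, and (for
 * connected port spaces) copy the primary/alternate path records out of the
 * REQ.
 */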
992 static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
993 struct ib_cm_event *ib_event)
995 struct rdma_id_private *id_priv;
996 struct rdma_cm_id *id;
997 struct rdma_route *rt;
998 union cma_ip_addr *src, *dst;
1002 if (cma_get_net_info(ib_event->private_data, listen_id->ps,
1003 &ip_ver, &port, &src, &dst))
1006 id = rdma_create_id(listen_id->event_handler, listen_id->context,
1011 cma_save_net_info(&id->route.addr, &listen_id->route.addr,
1012 ip_ver, port, src, dst);
1015 rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
1016 rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
1021 rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
1022 if (rt->num_paths == 2)
1023 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
1025 ib_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
1026 ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
1027 ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
1028 rt->addr.dev_addr.dev_type = RDMA_NODE_IB_CA;
1030 id_priv = container_of(id, struct rdma_id_private, id);
1031 id_priv->state = CMA_CONNECT;
1035 rdma_destroy_id(id);
1040 static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
1041 struct ib_cm_event *ib_event)
1043 struct rdma_id_private *id_priv;
1044 struct rdma_cm_id *id;
1045 union cma_ip_addr *src, *dst;
1050 id = rdma_create_id(listen_id->event_handler, listen_id->context,
1056 if (cma_get_net_info(ib_event->private_data, listen_id->ps,
1057 &ip_ver, &port, &src, &dst))
1060 cma_save_net_info(&id->route.addr, &listen_id->route.addr,
1061 ip_ver, port, src, dst);
1063 ret = rdma_translate_ip(&id->route.addr.src_addr,
1064 &id->route.addr.dev_addr);
1068 id_priv = container_of(id, struct rdma_id_private, id);
1069 id_priv->state = CMA_CONNECT;
1072 rdma_destroy_id(id);
1076 static void cma_set_req_event_data(struct rdma_cm_event *event,
1077 struct ib_cm_req_event_param *req_data,
1078 void *private_data, int offset)
1080 event->param.conn.private_data = private_data + offset;
1081 event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
1082 event->param.conn.responder_resources = req_data->responder_resources;
1083 event->param.conn.initiator_depth = req_data->initiator_depth;
1084 event->param.conn.flow_control = req_data->flow_control;
1085 event->param.conn.retry_count = req_data->retry_count;
1086 event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
1087 event->param.conn.srq = req_data->srq;
1088 event->param.conn.qp_num = req_data->remote_qpn;
1091 static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1093 struct rdma_id_private *listen_id, *conn_id;
1094 struct rdma_cm_event event;
1097 listen_id = cm_id->context;
1098 if (cma_disable_remove(listen_id, CMA_LISTEN))
1099 return -ECONNABORTED;
1101 memset(&event, 0, sizeof event);
1102 offset = cma_user_data_offset(listen_id->id.ps);
1103 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
1104 if (cma_is_ud_ps(listen_id->id.ps)) {
1105 conn_id = cma_new_udp_id(&listen_id->id, ib_event);
1106 event.param.ud.private_data = ib_event->private_data + offset;
1107 event.param.ud.private_data_len =
1108 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
1110 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
1111 conn_id = cma_new_conn_id(&listen_id->id, ib_event);
1112 cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
1113 ib_event->private_data, offset);
1120 atomic_inc(&conn_id->dev_remove);
1122 ret = cma_acquire_dev(conn_id);
1123 mutex_unlock(&lock);
1125 goto release_conn_id;
1127 conn_id->cm_id.ib = cm_id;
1128 cm_id->context = conn_id;
1129 cm_id->cm_handler = cma_ib_handler;
1131 ret = conn_id->id.event_handler(&conn_id->id, &event);
1133 cma_enable_remove(conn_id);
1137 /* Destroy the CM ID by returning a non-zero value. */
1138 conn_id->cm_id.ib = NULL;
1141 cma_exch(conn_id, CMA_DESTROYING);
1142 cma_enable_remove(conn_id);
1143 rdma_destroy_id(&conn_id->id);
1146 cma_enable_remove(listen_id);
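/*
 * The IB service ID used for both listens and connects encodes the RDMA
 * port space in the upper bits and the port number in the low 16 bits, so
 * the same port value in different port spaces maps to different service
 * IDs.
 */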
1150 static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
1152 return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
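/*
 * When a listen is bound to a specific IP address, cma_set_compare_data()
 * builds the ib_cm private-data compare mask/value so that the IB CM only
 * hands us REQs whose destination address (carried in the cma_hdr/sdp_hh
 * header) matches; wildcard listens skip the compare data entirely.
 */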
1155 static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
1156 struct ib_cm_compare_data *compare)
1158 struct cma_hdr *cma_data, *cma_mask;
1159 struct sdp_hh *sdp_data, *sdp_mask;
1161 struct in6_addr ip6_addr;
1163 memset(compare, 0, sizeof *compare);
1164 cma_data = (void *) compare->data;
1165 cma_mask = (void *) compare->mask;
1166 sdp_data = (void *) compare->data;
1167 sdp_mask = (void *) compare->mask;
1169 switch (addr->sa_family) {
1171 ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
1172 if (ps == RDMA_PS_SDP) {
1173 sdp_set_ip_ver(sdp_data, 4);
1174 sdp_set_ip_ver(sdp_mask, 0xF);
1175 sdp_data->dst_addr.ip4.addr = ip4_addr;
1176 sdp_mask->dst_addr.ip4.addr = ~0;
1178 cma_set_ip_ver(cma_data, 4);
1179 cma_set_ip_ver(cma_mask, 0xF);
1180 cma_data->dst_addr.ip4.addr = ip4_addr;
1181 cma_mask->dst_addr.ip4.addr = ~0;
1185 ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
1186 if (ps == RDMA_PS_SDP) {
1187 sdp_set_ip_ver(sdp_data, 6);
1188 sdp_set_ip_ver(sdp_mask, 0xF);
1189 sdp_data->dst_addr.ip6 = ip6_addr;
1190 memset(&sdp_mask->dst_addr.ip6, 0xFF,
1191 sizeof sdp_mask->dst_addr.ip6);
1193 cma_set_ip_ver(cma_data, 6);
1194 cma_set_ip_ver(cma_mask, 0xF);
1195 cma_data->dst_addr.ip6 = ip6_addr;
1196 memset(&cma_mask->dst_addr.ip6, 0xFF,
1197 sizeof cma_mask->dst_addr.ip6);
1205 static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
1207 struct rdma_id_private *id_priv = iw_id->context;
1208 struct rdma_cm_event event;
1209 struct sockaddr_in *sin;
1212 if (cma_disable_remove(id_priv, CMA_CONNECT))
1215 memset(&event, 0, sizeof event);
1216 switch (iw_event->event) {
1217 case IW_CM_EVENT_CLOSE:
1218 event.event = RDMA_CM_EVENT_DISCONNECTED;
1220 case IW_CM_EVENT_CONNECT_REPLY:
1221 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1222 *sin = iw_event->local_addr;
1223 sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
1224 *sin = iw_event->remote_addr;
1225 switch (iw_event->status) {
1227 event.event = RDMA_CM_EVENT_ESTABLISHED;
1231 event.event = RDMA_CM_EVENT_REJECTED;
1234 event.event = RDMA_CM_EVENT_UNREACHABLE;
1237 event.event = RDMA_CM_EVENT_CONNECT_ERROR;
1241 case IW_CM_EVENT_ESTABLISHED:
1242 event.event = RDMA_CM_EVENT_ESTABLISHED;
1248 event.status = iw_event->status;
1249 event.param.conn.private_data = iw_event->private_data;
1250 event.param.conn.private_data_len = iw_event->private_data_len;
1251 ret = id_priv->id.event_handler(&id_priv->id, &event);
1253 /* Destroy the CM ID by returning a non-zero value. */
1254 id_priv->cm_id.iw = NULL;
1255 cma_exch(id_priv, CMA_DESTROYING);
1256 cma_enable_remove(id_priv);
1257 rdma_destroy_id(&id_priv->id);
1261 cma_enable_remove(id_priv);
1265 static int iw_conn_req_handler(struct iw_cm_id *cm_id,
1266 struct iw_cm_event *iw_event)
1268 struct rdma_cm_id *new_cm_id;
1269 struct rdma_id_private *listen_id, *conn_id;
1270 struct sockaddr_in *sin;
1271 struct net_device *dev = NULL;
1272 struct rdma_cm_event event;
1274 struct ib_device_attr attr;
1276 listen_id = cm_id->context;
1277 if (cma_disable_remove(listen_id, CMA_LISTEN))
1278 return -ECONNABORTED;
1280 /* Create a new RDMA id for the new IW CM ID */
1281 new_cm_id = rdma_create_id(listen_id->id.event_handler,
1282 listen_id->id.context,
1288 conn_id = container_of(new_cm_id, struct rdma_id_private, id);
1289 atomic_inc(&conn_id->dev_remove);
1290 conn_id->state = CMA_CONNECT;
1292 dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
1294 ret = -EADDRNOTAVAIL;
1295 cma_enable_remove(conn_id);
1296 rdma_destroy_id(new_cm_id);
1299 ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
1301 cma_enable_remove(conn_id);
1302 rdma_destroy_id(new_cm_id);
1307 ret = cma_acquire_dev(conn_id);
1308 mutex_unlock(&lock);
1310 cma_enable_remove(conn_id);
1311 rdma_destroy_id(new_cm_id);
1315 conn_id->cm_id.iw = cm_id;
1316 cm_id->context = conn_id;
1317 cm_id->cm_handler = cma_iw_handler;
1319 sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
1320 *sin = iw_event->local_addr;
1321 sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
1322 *sin = iw_event->remote_addr;
1324 ret = ib_query_device(conn_id->id.device, &attr);
1326 cma_enable_remove(conn_id);
1327 rdma_destroy_id(new_cm_id);
1331 memset(&event, 0, sizeof event);
1332 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
1333 event.param.conn.private_data = iw_event->private_data;
1334 event.param.conn.private_data_len = iw_event->private_data_len;
1335 event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
1336 event.param.conn.responder_resources = attr.max_qp_rd_atom;
1337 ret = conn_id->id.event_handler(&conn_id->id, &event);
1339 /* User wants to destroy the CM ID */
1340 conn_id->cm_id.iw = NULL;
1341 cma_exch(conn_id, CMA_DESTROYING);
1342 cma_enable_remove(conn_id);
1343 rdma_destroy_id(&conn_id->id);
1349 cma_enable_remove(listen_id);
1353 static int cma_ib_listen(struct rdma_id_private *id_priv)
1355 struct ib_cm_compare_data compare_data;
1356 struct sockaddr *addr;
1360 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
1362 if (IS_ERR(id_priv->cm_id.ib))
1363 return PTR_ERR(id_priv->cm_id.ib);
1365 addr = &id_priv->id.route.addr.src_addr;
1366 svc_id = cma_get_service_id(id_priv->id.ps, addr);
1367 if (cma_any_addr(addr))
1368 ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
1370 cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
1371 ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
1375 ib_destroy_cm_id(id_priv->cm_id.ib);
1376 id_priv->cm_id.ib = NULL;
1382 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
1385 struct sockaddr_in *sin;
1387 id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
1388 iw_conn_req_handler,
1390 if (IS_ERR(id_priv->cm_id.iw))
1391 return PTR_ERR(id_priv->cm_id.iw);
1393 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1394 id_priv->cm_id.iw->local_addr = *sin;
1396 ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
1399 iw_destroy_cm_id(id_priv->cm_id.iw);
1400 id_priv->cm_id.iw = NULL;
1406 static int cma_listen_handler(struct rdma_cm_id *id,
1407 struct rdma_cm_event *event)
1409 struct rdma_id_private *id_priv = id->context;
1411 id->context = id_priv->id.context;
1412 id->event_handler = id_priv->id.event_handler;
1413 return id_priv->id.event_handler(id, event);
1416 static void cma_listen_on_dev(struct rdma_id_private *id_priv,
1417 struct cma_device *cma_dev)
1419 struct rdma_id_private *dev_id_priv;
1420 struct rdma_cm_id *id;
1423 id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
1427 dev_id_priv = container_of(id, struct rdma_id_private, id);
1429 dev_id_priv->state = CMA_ADDR_BOUND;
1430 memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
1431 ip_addr_size(&id_priv->id.route.addr.src_addr));
1433 cma_attach_to_dev(dev_id_priv, cma_dev);
1434 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
1435 atomic_inc(&id_priv->refcount);
1436 dev_id_priv->internal_id = 1;
1438 ret = rdma_listen(id, id_priv->backlog);
1440 printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
1441 "listening on device %s", ret, cma_dev->device->name);
1444 static void cma_listen_on_all(struct rdma_id_private *id_priv)
1446 struct cma_device *cma_dev;
1449 list_add_tail(&id_priv->list, &listen_any_list);
1450 list_for_each_entry(cma_dev, &dev_list, list)
1451 cma_listen_on_dev(id_priv, cma_dev);
1452 mutex_unlock(&lock);
1455 static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
1457 struct sockaddr_in addr_in;
1459 memset(&addr_in, 0, sizeof addr_in);
1460 addr_in.sin_family = af;
1461 return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
1464 int rdma_listen(struct rdma_cm_id *id, int backlog)
1466 struct rdma_id_private *id_priv;
1469 id_priv = container_of(id, struct rdma_id_private, id);
1470 if (id_priv->state == CMA_IDLE) {
1471 ret = cma_bind_any(id, AF_INET);
1476 if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
1479 id_priv->backlog = backlog;
1481 switch (rdma_node_get_transport(id->device->node_type)) {
1482 case RDMA_TRANSPORT_IB:
1483 ret = cma_ib_listen(id_priv);
1487 case RDMA_TRANSPORT_IWARP:
1488 ret = cma_iw_listen(id_priv, backlog);
1497 cma_listen_on_all(id_priv);
1501 id_priv->backlog = 0;
1502 cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
1505 EXPORT_SYMBOL(rdma_listen);
1507 void rdma_set_service_type(struct rdma_cm_id *id, int tos)
1509 struct rdma_id_private *id_priv;
1511 id_priv = container_of(id, struct rdma_id_private, id);
1512 id_priv->tos = (u8) tos;
1514 EXPORT_SYMBOL(rdma_set_service_type);
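/*
 * IB route resolution is an SA path record query keyed by SGID/DGID, P_Key
 * and service ID (plus QoS class or traffic class for the TOS).  The work
 * item set up in cma_resolve_ib_route() reports RDMA_CM_EVENT_ROUTE_RESOLVED
 * on success; cma_query_handler() rewrites it on error so the id falls back
 * to CMA_ADDR_RESOLVED and the user sees RDMA_CM_EVENT_ROUTE_ERROR instead.
 */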
1516 static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
1519 struct cma_work *work = context;
1520 struct rdma_route *route;
1522 route = &work->id->id.route;
1525 route->num_paths = 1;
1526 *route->path_rec = *path_rec;
1528 work->old_state = CMA_ROUTE_QUERY;
1529 work->new_state = CMA_ADDR_RESOLVED;
1530 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
1531 work->event.status = status;
1534 queue_work(cma_wq, &work->work);
1537 static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
1538 struct cma_work *work)
1540 struct rdma_addr *addr = &id_priv->id.route.addr;
1541 struct ib_sa_path_rec path_rec;
1542 ib_sa_comp_mask comp_mask;
1543 struct sockaddr_in6 *sin6;
1545 memset(&path_rec, 0, sizeof path_rec);
1546 ib_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
1547 ib_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
1548 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
1549 path_rec.numb_path = 1;
1550 path_rec.reversible = 1;
1551 path_rec.service_id = cma_get_service_id(id_priv->id.ps, &addr->dst_addr);
1553 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
1554 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
1555 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
1557 if (addr->src_addr.sa_family == AF_INET) {
1558 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
1559 comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
1561 sin6 = (struct sockaddr_in6 *) &addr->src_addr;
1562 path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
1563 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
1566 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
1567 id_priv->id.port_num, &path_rec,
1568 comp_mask, timeout_ms,
1569 GFP_KERNEL, cma_query_handler,
1570 work, &id_priv->query);
1572 return (id_priv->query_id < 0) ? id_priv->query_id : 0;
1575 static void cma_work_handler(struct work_struct *_work)
1577 struct cma_work *work = container_of(_work, struct cma_work, work);
1578 struct rdma_id_private *id_priv = work->id;
1581 atomic_inc(&id_priv->dev_remove);
1582 if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
1585 if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
1586 cma_exch(id_priv, CMA_DESTROYING);
1590 cma_enable_remove(id_priv);
1591 cma_deref_id(id_priv);
1593 rdma_destroy_id(&id_priv->id);
1597 static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
1599 struct rdma_route *route = &id_priv->id.route;
1600 struct cma_work *work;
1603 work = kzalloc(sizeof *work, GFP_KERNEL);
1608 INIT_WORK(&work->work, cma_work_handler);
1609 work->old_state = CMA_ROUTE_QUERY;
1610 work->new_state = CMA_ROUTE_RESOLVED;
1611 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1613 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
1614 if (!route->path_rec) {
1619 ret = cma_query_ib_route(id_priv, timeout_ms, work);
1625 kfree(route->path_rec);
1626 route->path_rec = NULL;
1632 int rdma_set_ib_paths(struct rdma_cm_id *id,
1633 struct ib_sa_path_rec *path_rec, int num_paths)
1635 struct rdma_id_private *id_priv;
1638 id_priv = container_of(id, struct rdma_id_private, id);
1639 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
1642 id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
1643 if (!id->route.path_rec) {
1648 memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
1651 cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
1654 EXPORT_SYMBOL(rdma_set_ib_paths);
1656 static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
1658 struct cma_work *work;
1660 work = kzalloc(sizeof *work, GFP_KERNEL);
1665 INIT_WORK(&work->work, cma_work_handler);
1666 work->old_state = CMA_ROUTE_QUERY;
1667 work->new_state = CMA_ROUTE_RESOLVED;
1668 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1669 queue_work(cma_wq, &work->work);
1673 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
1675 struct rdma_id_private *id_priv;
1678 id_priv = container_of(id, struct rdma_id_private, id);
1679 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
1682 atomic_inc(&id_priv->refcount);
1683 switch (rdma_node_get_transport(id->device->node_type)) {
1684 case RDMA_TRANSPORT_IB:
1685 ret = cma_resolve_ib_route(id_priv, timeout_ms);
1687 case RDMA_TRANSPORT_IWARP:
1688 ret = cma_resolve_iw_route(id_priv, timeout_ms);
1699 cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
1700 cma_deref_id(id_priv);
1703 EXPORT_SYMBOL(rdma_resolve_route);
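/*
 * cma_bind_loopback() handles resolving a wildcard/loopback destination
 * when no source is bound: it picks the first ACTIVE port on any registered
 * device (falling back to the first device otherwise) and seeds the
 * dev_addr with that port's GID and P_Key, so the id behaves as if it were
 * bound there.
 */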
1705 static int cma_bind_loopback(struct rdma_id_private *id_priv)
1707 struct cma_device *cma_dev;
1708 struct ib_port_attr port_attr;
1715 if (list_empty(&dev_list)) {
1719 list_for_each_entry(cma_dev, &dev_list, list)
1720 for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
1721 if (!ib_query_port(cma_dev->device, p, &port_attr) &&
1722 port_attr.state == IB_PORT_ACTIVE)
1726 cma_dev = list_entry(dev_list.next, struct cma_device, list);
1729 ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
1733 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
1737 ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1738 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
1739 id_priv->id.port_num = p;
1740 cma_attach_to_dev(id_priv, cma_dev);
1742 mutex_unlock(&lock);
1746 static void addr_handler(int status, struct sockaddr *src_addr,
1747 struct rdma_dev_addr *dev_addr, void *context)
1749 struct rdma_id_private *id_priv = context;
1750 struct rdma_cm_event event;
1752 memset(&event, 0, sizeof event);
1753 atomic_inc(&id_priv->dev_remove);
1756 * Grab mutex to block rdma_destroy_id() from removing the device while
1757 * we're trying to acquire it.
1760 if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
1761 mutex_unlock(&lock);
1765 if (!status && !id_priv->cma_dev)
1766 status = cma_acquire_dev(id_priv);
1767 mutex_unlock(&lock);
1770 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
1772 event.event = RDMA_CM_EVENT_ADDR_ERROR;
1773 event.status = status;
1775 memcpy(&id_priv->id.route.addr.src_addr, src_addr,
1776 ip_addr_size(src_addr));
1777 event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
1780 if (id_priv->id.event_handler(&id_priv->id, &event)) {
1781 cma_exch(id_priv, CMA_DESTROYING);
1782 cma_enable_remove(id_priv);
1783 cma_deref_id(id_priv);
1784 rdma_destroy_id(&id_priv->id);
1788 cma_enable_remove(id_priv);
1789 cma_deref_id(id_priv);
1792 static int cma_resolve_loopback(struct rdma_id_private *id_priv)
1794 struct cma_work *work;
1795 struct sockaddr_in *src_in, *dst_in;
1799 work = kzalloc(sizeof *work, GFP_KERNEL);
1803 if (!id_priv->cma_dev) {
1804 ret = cma_bind_loopback(id_priv);
1809 ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1810 ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
1812 if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
1813 src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
1814 dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
1815 src_in->sin_family = dst_in->sin_family;
1816 src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
1820 INIT_WORK(&work->work, cma_work_handler);
1821 work->old_state = CMA_ADDR_QUERY;
1822 work->new_state = CMA_ADDR_RESOLVED;
1823 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
1824 queue_work(cma_wq, &work->work);
1831 static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
1832 struct sockaddr *dst_addr)
1834 if (src_addr && src_addr->sa_family)
1835 return rdma_bind_addr(id, src_addr);
1837 return cma_bind_any(id, dst_addr->sa_family);
1840 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
1841 struct sockaddr *dst_addr, int timeout_ms)
1843 struct rdma_id_private *id_priv;
1846 id_priv = container_of(id, struct rdma_id_private, id);
1847 if (id_priv->state == CMA_IDLE) {
1848 ret = cma_bind_addr(id, src_addr, dst_addr);
1853 if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
1856 atomic_inc(&id_priv->refcount);
1857 memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
1858 if (cma_any_addr(dst_addr))
1859 ret = cma_resolve_loopback(id_priv);
1861 ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
1862 dst_addr, &id->route.addr.dev_addr,
1863 timeout_ms, addr_handler, id_priv);
1869 cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
1870 cma_deref_id(id_priv);
1873 EXPORT_SYMBOL(rdma_resolve_addr);
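/*
 * Illustrative active-side caller sequence for the exports above (not taken
 * from any in-tree ULP; my_handler, ctx, dst_addr, pd, qp_init_attr and
 * conn_param are placeholders, and error handling is omitted):
 *
 *	id = rdma_create_id(my_handler, ctx, RDMA_PS_TCP);
 *	rdma_resolve_addr(id, NULL, dst_addr, 2000);
 *	... handler sees RDMA_CM_EVENT_ADDR_RESOLVED ...
 *	rdma_resolve_route(id, 2000);
 *	... handler sees RDMA_CM_EVENT_ROUTE_RESOLVED ...
 *	rdma_create_qp(id, pd, &qp_init_attr);
 *	rdma_connect(id, &conn_param);
 *	... handler sees RDMA_CM_EVENT_ESTABLISHED ...
 */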
1875 static void cma_bind_port(struct rdma_bind_list *bind_list,
1876 struct rdma_id_private *id_priv)
1878 struct sockaddr_in *sin;
1880 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1881 sin->sin_port = htons(bind_list->port);
1882 id_priv->bind_list = bind_list;
1883 hlist_add_head(&id_priv->node, &bind_list->owners);
1886 static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
1887 unsigned short snum)
1889 struct rdma_bind_list *bind_list;
1892 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
1897 ret = idr_get_new_above(ps, bind_list, snum, &port);
1898 } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
1904 ret = -EADDRNOTAVAIL;
1909 bind_list->port = (unsigned short) port;
1910 cma_bind_port(bind_list, id_priv);
1913 idr_remove(ps, port);
1919 static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
1921 struct rdma_bind_list *bind_list;
1922 int port, ret, low, high;
1924 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
1929 	/* FIXME: add proper port randomization, as done in inet_csk_get_port() */
1931 ret = idr_get_new_above(ps, bind_list, next_port, &port);
1932 } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
1937 inet_get_local_port_range(&low, &high);
1939 if (next_port != low) {
1940 idr_remove(ps, port);
1944 ret = -EADDRNOTAVAIL;
1951 next_port = port + 1;
1954 bind_list->port = (unsigned short) port;
1955 cma_bind_port(bind_list, id_priv);
1958 idr_remove(ps, port);
1964 static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
1966 struct rdma_id_private *cur_id;
1967 struct sockaddr_in *sin, *cur_sin;
1968 struct rdma_bind_list *bind_list;
1969 struct hlist_node *node;
1970 unsigned short snum;
1972 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1973 snum = ntohs(sin->sin_port);
1974 if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
1977 bind_list = idr_find(ps, snum);
1979 return cma_alloc_port(ps, id_priv, snum);
1982 * We don't support binding to any address if anyone is bound to
1983 * a specific address on the same port.
1985 if (cma_any_addr(&id_priv->id.route.addr.src_addr))
1986 return -EADDRNOTAVAIL;
1988 hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
1989 if (cma_any_addr(&cur_id->id.route.addr.src_addr))
1990 return -EADDRNOTAVAIL;
1992 cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
1993 if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
1997 cma_bind_port(bind_list, id_priv);
2001 static int cma_get_port(struct rdma_id_private *id_priv)
2006 switch (id_priv->id.ps) {
2020 return -EPROTONOSUPPORT;
2024 if (cma_any_port(&id_priv->id.route.addr.src_addr))
2025 ret = cma_alloc_any_port(ps, id_priv);
2027 ret = cma_use_port(ps, id_priv);
2028 mutex_unlock(&lock);
2033 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
2035 struct rdma_id_private *id_priv;
2038 if (addr->sa_family != AF_INET)
2039 return -EAFNOSUPPORT;
2041 id_priv = container_of(id, struct rdma_id_private, id);
2042 if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
2045 if (!cma_any_addr(addr)) {
2046 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
2051 ret = cma_acquire_dev(id_priv);
2052 mutex_unlock(&lock);
2057 memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
2058 ret = cma_get_port(id_priv);
2064 if (!cma_any_addr(addr)) {
2066 cma_detach_from_dev(id_priv);
2067 mutex_unlock(&lock);
2070 cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
2073 EXPORT_SYMBOL(rdma_bind_addr);
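/*
 * On the active side the same headers parsed by cma_get_net_info() are
 * built by cma_format_hdr() and placed at the start of the CM private data
 * (offset cma_user_data_offset()), ahead of the user's own private data.
 * Only the IPv4 case is handled here.
 */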
2075 static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
2076 struct rdma_route *route)
2078 struct sockaddr_in *src4, *dst4;
2079 struct cma_hdr *cma_hdr;
2080 struct sdp_hh *sdp_hdr;
2082 src4 = (struct sockaddr_in *) &route->addr.src_addr;
2083 dst4 = (struct sockaddr_in *) &route->addr.dst_addr;
2088 if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
2090 sdp_set_ip_ver(sdp_hdr, 4);
2091 sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
2092 sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
2093 sdp_hdr->port = src4->sin_port;
2097 cma_hdr->cma_version = CMA_VERSION;
2098 cma_set_ip_ver(cma_hdr, 4);
2099 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
2100 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
2101 cma_hdr->port = src4->sin_port;
2107 static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
2108 struct ib_cm_event *ib_event)
2110 struct rdma_id_private *id_priv = cm_id->context;
2111 struct rdma_cm_event event;
2112 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
2115 if (cma_disable_remove(id_priv, CMA_CONNECT))
2118 memset(&event, 0, sizeof event);
2119 switch (ib_event->event) {
2120 case IB_CM_SIDR_REQ_ERROR:
2121 event.event = RDMA_CM_EVENT_UNREACHABLE;
2122 event.status = -ETIMEDOUT;
2124 case IB_CM_SIDR_REP_RECEIVED:
2125 event.param.ud.private_data = ib_event->private_data;
2126 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
2127 if (rep->status != IB_SIDR_SUCCESS) {
2128 event.event = RDMA_CM_EVENT_UNREACHABLE;
2129 event.status = ib_event->param.sidr_rep_rcvd.status;
2132 if (id_priv->qkey != rep->qkey) {
2133 event.event = RDMA_CM_EVENT_UNREACHABLE;
2134 event.status = -EINVAL;
2137 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
2138 id_priv->id.route.path_rec,
2139 &event.param.ud.ah_attr);
2140 event.param.ud.qp_num = rep->qpn;
2141 event.param.ud.qkey = rep->qkey;
2142 event.event = RDMA_CM_EVENT_ESTABLISHED;
2146 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
2151 ret = id_priv->id.event_handler(&id_priv->id, &event);
2153 /* Destroy the CM ID by returning a non-zero value. */
2154 id_priv->cm_id.ib = NULL;
2155 cma_exch(id_priv, CMA_DESTROYING);
2156 cma_enable_remove(id_priv);
2157 rdma_destroy_id(&id_priv->id);
2161 cma_enable_remove(id_priv);
2165 static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
2166 struct rdma_conn_param *conn_param)
2168 struct ib_cm_sidr_req_param req;
2169 struct rdma_route *route;
2172 req.private_data_len = sizeof(struct cma_hdr) +
2173 conn_param->private_data_len;
2174 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
2175 if (!req.private_data)
2178 if (conn_param->private_data && conn_param->private_data_len)
2179 memcpy((void *) req.private_data + sizeof(struct cma_hdr),
2180 conn_param->private_data, conn_param->private_data_len);
2182 route = &id_priv->id.route;
2183 ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
2187 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
2188 cma_sidr_rep_handler, id_priv);
2189 if (IS_ERR(id_priv->cm_id.ib)) {
2190 ret = PTR_ERR(id_priv->cm_id.ib);
2194 req.path = route->path_rec;
2195 req.service_id = cma_get_service_id(id_priv->id.ps,
2196 &route->addr.dst_addr);
2197 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
2198 req.max_cm_retries = CMA_MAX_CM_RETRIES;
2200 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
2202 ib_destroy_cm_id(id_priv->cm_id.ib);
2203 id_priv->cm_id.ib = NULL;
2206 kfree(req.private_data);
2210 static int cma_connect_ib(struct rdma_id_private *id_priv,
2211 struct rdma_conn_param *conn_param)
2213 struct ib_cm_req_param req;
2214 struct rdma_route *route;
2218 memset(&req, 0, sizeof req);
2219 offset = cma_user_data_offset(id_priv->id.ps);
2220 req.private_data_len = offset + conn_param->private_data_len;
2221 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
2225 if (conn_param->private_data && conn_param->private_data_len)
2226 memcpy(private_data + offset, conn_param->private_data,
2227 conn_param->private_data_len);
2229 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
2231 if (IS_ERR(id_priv->cm_id.ib)) {
2232 ret = PTR_ERR(id_priv->cm_id.ib);
2236 route = &id_priv->id.route;
2237 ret = cma_format_hdr(private_data, id_priv->id.ps, route);
2240 req.private_data = private_data;
2242 req.primary_path = &route->path_rec[0];
2243 if (route->num_paths == 2)
2244 req.alternate_path = &route->path_rec[1];
2246 req.service_id = cma_get_service_id(id_priv->id.ps,
2247 &route->addr.dst_addr);
2248 req.qp_num = id_priv->qp_num;
2249 req.qp_type = IB_QPT_RC;
2250 req.starting_psn = id_priv->seq_num;
2251 req.responder_resources = conn_param->responder_resources;
2252 req.initiator_depth = conn_param->initiator_depth;
2253 req.flow_control = conn_param->flow_control;
2254 req.retry_count = conn_param->retry_count;
2255 req.rnr_retry_count = conn_param->rnr_retry_count;
2256 req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
2257 req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
2258 req.max_cm_retries = CMA_MAX_CM_RETRIES;
2259 req.srq = id_priv->srq ? 1 : 0;
2261 ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
2263 if (ret && !IS_ERR(id_priv->cm_id.ib)) {
2264 ib_destroy_cm_id(id_priv->cm_id.ib);
2265 id_priv->cm_id.ib = NULL;
2268 kfree(private_data);
2272 static int cma_connect_iw(struct rdma_id_private *id_priv,
2273 struct rdma_conn_param *conn_param)
2275 struct iw_cm_id *cm_id;
2276 struct sockaddr_in* sin;
2278 struct iw_cm_conn_param iw_param;
2280 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
2281 if (IS_ERR(cm_id)) {
2282 ret = PTR_ERR(cm_id);
2286 id_priv->cm_id.iw = cm_id;
2288 sin = (struct sockaddr_in*) &id_priv->id.route.addr.src_addr;
2289 cm_id->local_addr = *sin;
2291 sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr;
2292 cm_id->remote_addr = *sin;
2294 ret = cma_modify_qp_rtr(id_priv, conn_param);
2298 iw_param.ord = conn_param->initiator_depth;
2299 iw_param.ird = conn_param->responder_resources;
2300 iw_param.private_data = conn_param->private_data;
2301 iw_param.private_data_len = conn_param->private_data_len;
2303 iw_param.qpn = id_priv->qp_num;
2305 iw_param.qpn = conn_param->qp_num;
2306 ret = iw_cm_connect(cm_id, &iw_param);
2308 if (ret && !IS_ERR(cm_id)) {
2309 iw_destroy_cm_id(cm_id);
2310 id_priv->cm_id.iw = NULL;
2315 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2317 struct rdma_id_private *id_priv;
2320 id_priv = container_of(id, struct rdma_id_private, id);
2321 if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
2325 id_priv->qp_num = conn_param->qp_num;
2326 id_priv->srq = conn_param->srq;
2329 switch (rdma_node_get_transport(id->device->node_type)) {
2330 case RDMA_TRANSPORT_IB:
2331 if (cma_is_ud_ps(id->ps))
2332 ret = cma_resolve_ib_udp(id_priv, conn_param);
2334 ret = cma_connect_ib(id_priv, conn_param);
2336 case RDMA_TRANSPORT_IWARP:
2337 ret = cma_connect_iw(id_priv, conn_param);
2348 cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
2351 EXPORT_SYMBOL(rdma_connect);
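/*
 * On the passive side, rdma_accept() either answers a SIDR REQ with
 * cma_send_sidr_rep() (UD port spaces), or for connected port spaces moves
 * the QP to RTR/RTS and sends the CM REP via cma_accept_ib() or
 * iw_cm_accept().  If anything fails, the QP is put in error and the
 * connection is rejected.
 */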
2353 static int cma_accept_ib(struct rdma_id_private *id_priv,
2354 struct rdma_conn_param *conn_param)
2356 struct ib_cm_rep_param rep;
2359 ret = cma_modify_qp_rtr(id_priv, conn_param);
2363 ret = cma_modify_qp_rts(id_priv, conn_param);
2367 memset(&rep, 0, sizeof rep);
2368 rep.qp_num = id_priv->qp_num;
2369 rep.starting_psn = id_priv->seq_num;
2370 rep.private_data = conn_param->private_data;
2371 rep.private_data_len = conn_param->private_data_len;
2372 rep.responder_resources = conn_param->responder_resources;
2373 rep.initiator_depth = conn_param->initiator_depth;
2374 rep.failover_accepted = 0;
2375 rep.flow_control = conn_param->flow_control;
2376 rep.rnr_retry_count = conn_param->rnr_retry_count;
2377 rep.srq = id_priv->srq ? 1 : 0;
2379 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
2384 static int cma_accept_iw(struct rdma_id_private *id_priv,
2385 struct rdma_conn_param *conn_param)
2387 struct iw_cm_conn_param iw_param;
2390 ret = cma_modify_qp_rtr(id_priv, conn_param);
2394 iw_param.ord = conn_param->initiator_depth;
2395 iw_param.ird = conn_param->responder_resources;
2396 iw_param.private_data = conn_param->private_data;
2397 iw_param.private_data_len = conn_param->private_data_len;
2398 if (id_priv->id.qp) {
2399 iw_param.qpn = id_priv->qp_num;
2401 iw_param.qpn = conn_param->qp_num;
2403 return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
2406 static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
2407 enum ib_cm_sidr_status status,
2408 const void *private_data, int private_data_len)
2410 struct ib_cm_sidr_rep_param rep;
2412 memset(&rep, 0, sizeof rep);
2413 rep.status = status;
2414 if (status == IB_SIDR_SUCCESS) {
2415 rep.qp_num = id_priv->qp_num;
2416 rep.qkey = id_priv->qkey;
2418 rep.private_data = private_data;
2419 rep.private_data_len = private_data_len;
2421 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
2424 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2426 struct rdma_id_private *id_priv;
2429 id_priv = container_of(id, struct rdma_id_private, id);
2430 if (!cma_comp(id_priv, CMA_CONNECT))
2433 if (!id->qp && conn_param) {
2434 id_priv->qp_num = conn_param->qp_num;
2435 id_priv->srq = conn_param->srq;
2438 switch (rdma_node_get_transport(id->device->node_type)) {
2439 case RDMA_TRANSPORT_IB:
2440 if (cma_is_ud_ps(id->ps))
2441 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
2442 conn_param->private_data,
2443 conn_param->private_data_len);
2444 else if (conn_param)
2445 ret = cma_accept_ib(id_priv, conn_param);
2447 ret = cma_rep_recv(id_priv);
2449 case RDMA_TRANSPORT_IWARP:
2450 ret = cma_accept_iw(id_priv, conn_param);
2462 cma_modify_qp_err(id_priv);
2463 rdma_reject(id, NULL, 0);
2466 EXPORT_SYMBOL(rdma_accept);
2468 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
2470 struct rdma_id_private *id_priv;
2473 id_priv = container_of(id, struct rdma_id_private, id);
2474 if (!cma_has_cm_dev(id_priv))
2477 switch (id->device->node_type) {
2478 case RDMA_NODE_IB_CA:
2479 ret = ib_cm_notify(id_priv->cm_id.ib, event);
2487 EXPORT_SYMBOL(rdma_notify);
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_reject);
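/*
 * rdma_disconnect() tears down an established connection.  On IB the QP is
 * first moved to the error state and a DREQ is sent (or a DREP, if the remote
 * peer already initiated the disconnect); on iWARP the disconnect is
 * delegated to the iw_cm.
 */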
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
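/*
 * Completion handler for an SA multicast join started by
 * cma_join_ib_multicast().  On success the id's QP (if any) is attached to
 * the group and RDMA_CM_EVENT_MULTICAST_JOIN is reported along with the AH
 * attributes, Q_Key and the multicast QPN (0xFFFFFF); otherwise
 * RDMA_CM_EVENT_MULTICAST_ERROR is reported.  A non-zero return from the
 * consumer's handler destroys the id.
 */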
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret;

	id_priv = mc->id_priv;
	if (cma_disable_remove(id_priv, CMA_ADDR_BOUND) &&
	    cma_disable_remove(id_priv, CMA_ADDR_RESOLVED))
		return 0;

	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 multicast->rec.mlid);
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, CMA_DESTROYING);
		cma_enable_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

	cma_enable_remove(id_priv);
	return 0;
}
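/*
 * Derive the multicast GID from the caller-supplied address: the wildcard
 * address maps to the zero MGID, an IPv6 address that already looks like an
 * SA-assigned MGID is used as-is, and anything else goes through the IPv4
 * IP-to-IB multicast mapping, with one byte overwritten by the RDMA CM
 * signature for the UDP port space.
 */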
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFF10A01B) ==
		    0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}
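/*
 * Build an MCMember record for the requested group from the bound device
 * address, then ask the SA to join.  Completion is reported asynchronously
 * through cma_ib_mc_handler().
 */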
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, &mc->addr, &rec.mgid);
	if (id_priv->id.ps == RDMA_PS_UDP)
		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
	ib_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = 1;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	if (IS_ERR(mc->multicast.ib))
		return PTR_ERR(mc->multicast.ib);

	return 0;
}
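/*
 * rdma_join_multicast() may be used once the id has a bound or resolved
 * address.  The join is tracked on the id's mc_list so it can be found again
 * by rdma_leave_multicast() and released when the id is destroyed.
 *
 * A rough consumer sketch (assumed usage, not taken from this file):
 *
 *	ret = rdma_join_multicast(id, mcast_addr, my_ctx);
 *	...
 *	rdma_leave_multicast(id, mcast_addr);
 *
 * where the join result arrives later as RDMA_CM_EVENT_MULTICAST_JOIN or
 * RDMA_CM_EVENT_MULTICAST_ERROR with my_ctx in param.ud.private_data.
 */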
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, ip_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_join_ib_multicast(id_priv, mc);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
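/*
 * rdma_leave_multicast() finds the matching join on the id's mc_list,
 * detaches the QP from the group if one is attached, and frees the SA
 * multicast join.
 */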
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						mc->multicast.ib->rec.mlid);
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
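/*
 * ib_client add callback: allocate per-device state and replicate every
 * wildcard listen in listen_any_list onto the newly added device.
 */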
static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
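/*
 * Report device removal to a single id: switch it to CMA_DEVICE_REMOVAL,
 * cancel any outstanding operation, wait for callbacks in flight to drain via
 * wait_remove, and then deliver RDMA_CM_EVENT_DEVICE_REMOVAL unless the id is
 * already being destroyed.
 */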
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum cma_state state;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
	if (state == CMA_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	wait_event(id_priv->wait_remove, !atomic_read(&id_priv->dev_remove));

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
		return 0;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	return id_priv->id.event_handler(&id_priv->id, &event);
}
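/*
 * Walk every id bound to a device that is going away.  Internal listens are
 * destroyed outright; other ids get cma_remove_id_dev() and are destroyed
 * only if their event handler asks for it.  The final deref and
 * wait_for_completion() block until no id still references the device.
 */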
static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}
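/*
 * ib_client remove callback: unlink the per-device state from dev_list, run
 * removal handling for every id bound to the device, then free it.
 */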
static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}
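/*
 * Module init: seed the ephemeral port allocator with a random offset into
 * the local port range, create the single-threaded workqueue used for
 * deferred CMA work, and register with the SA, address resolution and IB
 * client frameworks.
 */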
static int cma_init(void)
{
	int ret, low, high, remaining;

	get_random_bytes(&next_port, sizeof next_port);
	inet_get_local_port_range(&low, &high);
	remaining = (high - low) + 1;
	next_port = ((unsigned int) next_port % remaining) + low;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;
	return 0;

err:
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}
static void cma_cleanup(void)
{
	ib_unregister_client(&cma_client);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&sdp_ps);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);