/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/list.h>
#include <linux/rcupdate.h>

#include "ipath_verbs.h"
/*
 * Global table of GID to attached QPs.
 * The table is global to all ipath devices since a send from one QP/device
 * needs to be locally routed to any locally attached QPs on the same
 * or different device.
 */
static struct rb_root mcast_tree;
static DEFINE_SPINLOCK(mcast_lock);
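
/*
 * Locking overview (a summary of the functions below): mcast_lock
 * serializes all modifications to mcast_tree and to each group's
 * qp_list.  Readers on the receive path do not take mcast_lock; they
 * hold a reference obtained from ipath_mcast_find() and walk qp_list
 * with list_for_each_entry_rcu(), which is why updates use the _rcu
 * list primitives and detach waits on mcast->wait for the refcount to
 * drain before freeing anything.
 */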
/**
 * ipath_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
 * @qp: the QP to link
 */
static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp)
{
	struct ipath_mcast_qp *mqp;

	mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
	if (!mqp)
		goto bail;

	mqp->qp = qp;
	/* Hold a QP reference for as long as the QP is on a mcast list. */
	atomic_inc(&qp->refcount);

bail:
	return mqp;
}
static void ipath_mcast_qp_free(struct ipath_mcast_qp *mqp)
{
	struct ipath_qp *qp = mqp->qp;

	/* Notify ipath_destroy_qp() if it is waiting. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);

	kfree(mqp);
}
/**
 * ipath_mcast_alloc - allocate the multicast GID structure
 * @mgid: the multicast GID
 *
 * A list of QPs will be attached to this structure.
 */
static struct ipath_mcast *ipath_mcast_alloc(union ib_gid *mgid)
{
	struct ipath_mcast *mcast;

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast)
		goto bail;

	mcast->mgid = *mgid;
	INIT_LIST_HEAD(&mcast->qp_list);
	init_waitqueue_head(&mcast->wait);
	atomic_set(&mcast->refcount, 0);
	mcast->n_attached = 0;

bail:
	return mcast;
}
static void ipath_mcast_free(struct ipath_mcast *mcast)
{
	struct ipath_mcast_qp *p, *tmp;

	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
		ipath_mcast_qp_free(p);

	kfree(mcast);
}
/**
 * ipath_mcast_find - search the global table for the given multicast GID
 * @mgid: the multicast GID to search for
 *
 * Returns NULL if not found.
 *
 * The caller is responsible for decrementing the reference count if found.
 */
struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid)
{
	struct rb_node *n;
	unsigned long flags;
	struct ipath_mcast *mcast;

	spin_lock_irqsave(&mcast_lock, flags);
	n = mcast_tree.rb_node;
	while (n) {
		int ret;

		mcast = rb_entry(n, struct ipath_mcast, rb_node);

		ret = memcmp(mgid->raw, mcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else {
			/* Found it: take a reference before dropping the lock. */
			atomic_inc(&mcast->refcount);
			spin_unlock_irqrestore(&mcast_lock, flags);
			goto bail;
		}
	}
	spin_unlock_irqrestore(&mcast_lock, flags);

	mcast = NULL;

bail:
	return mcast;
}
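
/*
 * Typical caller pattern (a sketch only; the actual receive-path code
 * lives elsewhere in the driver and its names may differ): look up the
 * group by the packet's destination GID, deliver to every attached QP,
 * then drop the reference and wake anyone waiting in
 * ipath_multicast_detach():
 *
 *	mcast = ipath_mcast_find(&grh->dgid);
 *	if (mcast) {
 *		list_for_each_entry_rcu(p, &mcast->qp_list, list)
 *			... deliver the packet to p->qp ...
 *		if (atomic_dec_return(&mcast->refcount) <= 1)
 *			wake_up(&mcast->wait);
 *	}
 */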
/**
 * ipath_mcast_add - insert mcast GID into table and attach QP struct
 * @dev: the ipath device on which the group count is tracked
 * @mcast: the mcast GID table
 * @mqp: the QP to attach
 *
 * Return zero if both were added.  Return EEXIST if the GID was already in
 * the table but the QP was added.  Return ESRCH if the QP was already
 * attached and neither structure was added.
 */
static int ipath_mcast_add(struct ipath_ibdev *dev,
			   struct ipath_mcast *mcast,
			   struct ipath_mcast_qp *mqp)
{
	struct rb_node **n = &mcast_tree.rb_node;
	struct rb_node *pn = NULL;
	int ret;

	spin_lock_irq(&mcast_lock);

	while (*n) {
		struct ipath_mcast *tmcast;
		struct ipath_mcast_qp *p;

		pn = *n;
		tmcast = rb_entry(pn, struct ipath_mcast, rb_node);

		ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0) {
			n = &pn->rb_left;
			continue;
		}
		if (ret > 0) {
			n = &pn->rb_right;
			continue;
		}

		/* Search the QP list to see if this is already there. */
		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
			if (p->qp == mqp->qp) {
				ret = ESRCH;
				goto bail;
			}
		}
		if (tmcast->n_attached == ib_ipath_max_mcast_qp_attached) {
			ret = ENOMEM;
			goto bail;
		}

		tmcast->n_attached++;

		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
		ret = EEXIST;
		goto bail;
	}

	spin_lock(&dev->n_mcast_grps_lock);
	if (dev->n_mcast_grps_allocated == ib_ipath_max_mcast_grps) {
		spin_unlock(&dev->n_mcast_grps_lock);
		ret = ENOMEM;
		goto bail;
	}

	dev->n_mcast_grps_allocated++;
	spin_unlock(&dev->n_mcast_grps_lock);

	mcast->n_attached++;

	list_add_tail_rcu(&mqp->list, &mcast->qp_list);

	atomic_inc(&mcast->refcount);
	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &mcast_tree);

	ret = 0;

bail:
	spin_unlock_irq(&mcast_lock);

	return ret;
}
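
/*
 * Note: ipath_mcast_add() returns positive ESRCH/EEXIST/ENOMEM values so
 * the verbs entry point below can tell "QP added to an existing group"
 * apart from real failures; ipath_multicast_attach() maps them onto the
 * negative errno values returned to the InfiniBand core.
 */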
int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_mcast *mcast;
	struct ipath_mcast_qp *mqp;
	int ret;

	/*
	 * Allocate data structures since it's better to do this outside of
	 * spin locks and it will most likely be needed.
	 */
	mcast = ipath_mcast_alloc(gid);
	if (mcast == NULL) {
		ret = -ENOMEM;
		goto bail;
	}
	mqp = ipath_mcast_qp_alloc(qp);
	if (mqp == NULL) {
		ipath_mcast_free(mcast);
		ret = -ENOMEM;
		goto bail;
	}
	switch (ipath_mcast_add(dev, mcast, mqp)) {
	case ESRCH:
		/* Neither was used: can't attach the same QP twice. */
		ipath_mcast_qp_free(mqp);
		ipath_mcast_free(mcast);
		ret = -EINVAL;
		goto bail;
	case EEXIST:		/* The mcast wasn't used */
		ipath_mcast_free(mcast);
		break;
	case ENOMEM:
		/* Exceeded the maximum number of mcast groups. */
		ipath_mcast_qp_free(mqp);
		ipath_mcast_free(mcast);
		ret = -ENOMEM;
		goto bail;
	default:
		break;
	}

	ret = 0;

bail:
	return ret;
}
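
/*
 * ipath_multicast_attach() and ipath_multicast_detach() are the driver's
 * attach_mcast/detach_mcast verbs; they are wired into the ib_device
 * during device registration elsewhere in the driver (ipath_verbs.c).
 */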
int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_mcast *mcast = NULL;
	struct ipath_mcast_qp *p, *tmp;
	struct rb_node *n;
	int last = 0;
	int ret;

	spin_lock_irq(&mcast_lock);

	/* Find the GID in the mcast table. */
	n = mcast_tree.rb_node;
	while (1) {
		if (n == NULL) {
			spin_unlock_irq(&mcast_lock);
			ret = -EINVAL;
			goto bail;
		}

		mcast = rb_entry(n, struct ipath_mcast, rb_node);
		ret = memcmp(gid->raw, mcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			break;
	}

	/* Search the QP list. */
	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
		if (p->qp != qp)
			continue;
		/*
		 * We found it, so remove it, but don't poison the forward
		 * link until we are sure there are no list walkers.
		 */
		list_del_rcu(&p->list);
		mcast->n_attached--;

		/* If this was the last attached QP, remove the GID too. */
		if (list_empty(&mcast->qp_list)) {
			rb_erase(&mcast->rb_node, &mcast_tree);
			last = 1;
		}
		break;
	}

	spin_unlock_irq(&mcast_lock);

	if (p->qp == qp) {
		/*
		 * Wait for any list walkers to finish before freeing the
		 * list element.
		 */
		wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
		ipath_mcast_qp_free(p);
	}
	if (last) {
		/* Drop the tree's reference, wait out readers, then free. */
		atomic_dec(&mcast->refcount);
		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
		ipath_mcast_free(mcast);
		spin_lock_irq(&dev->n_mcast_grps_lock);
		dev->n_mcast_grps_allocated--;
		spin_unlock_irq(&dev->n_mcast_grps_lock);
	}

	ret = 0;

bail:
	return ret;
}
int ipath_mcast_tree_empty(void)
{
	return mcast_tree.rb_node == NULL;
}
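
/*
 * ipath_mcast_tree_empty() is a sanity check: because the table is
 * global, it lets the rest of the driver (e.g. at unregister time)
 * verify that every multicast group was detached and freed; a non-empty
 * tree at that point would indicate a leak.
 */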