/*
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mad.c 1389 2004-12-27 22:56:47Z roland $
 */
#include <linux/dma-mapping.h>

#include "mad_priv.h"
#include "smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");
kmem_cache_t *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static spinlock_t ib_mad_port_list_lock;
/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
                         struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
                                        struct ib_mad_port_private *port_priv,
                                        struct ib_mad *mad, int solicited);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
                                    struct ib_mad_send_wc *mad_send_wc);
static void timeout_sends(void *data);
static void cancel_sends(void *data);
static void local_completions(void *data);
static int solicited_mad(struct ib_mad *mad);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                              struct ib_mad_agent_private *agent_priv,
                              u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                           struct ib_mad_agent_private *agent_priv);
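
/*
 * Registration state is kept per port: each ib_mad_port_private holds a
 * per-class-version array whose entries point at a class table (indexed
 * by management class) and a vendor class table (indexed by OUI slot),
 * each of which resolves to a method table mapping method numbers to the
 * registered ib_mad_agent_private.  Unsolicited receives are routed
 * through these tables; solicited receives are routed by the high 32
 * bits of the transaction ID.
 */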
/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is being held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
        struct ib_mad_port_private *entry;

        list_for_each_entry(entry, &ib_mad_port_list, port_list) {
                if (entry->device == device && entry->port_num == port_num)
                        return entry;
        }
        return NULL;
}
/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
        struct ib_mad_port_private *entry;
        unsigned long flags;

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        entry = __ib_get_mad_port(device, port_num);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        return entry;
}
static inline u8 convert_mgmt_class(u8 mgmt_class)
{
        /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
        return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
                0 : mgmt_class;
}
static int get_spl_qp_index(enum ib_qp_type qp_type)
{
        switch (qp_type)
        {
        case IB_QPT_SMI:
                return 0;
        case IB_QPT_GSI:
                return 1;
        default:
                return -1;
        }
}
static int vendor_class_index(u8 mgmt_class)
{
        return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}
static int is_vendor_class(u8 mgmt_class)
{
        if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
            (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
                return 0;
        return 1;
}
static int is_vendor_oui(char *oui)
{
        if (oui[0] || oui[1] || oui[2])
                return 1;
        return 0;
}
static int is_vendor_method_in_use(
                struct ib_mad_mgmt_vendor_class *vendor_class,
                struct ib_mad_reg_req *mad_reg_req)
{
        struct ib_mad_mgmt_method_table *method;
        int i;

        for (i = 0; i < MAX_MGMT_OUI; i++) {
                if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
                        method = vendor_class->method_table[i];
                        if (method) {
                                if (method_in_use(&method, mad_reg_req))
                                        return 1;
                                else
                                        break;
                        }
                }
        }
        return 0;
}
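
/*
 * Example usage (a sketch, not part of this file): a GSI client that
 * wants to receive Performance Management GET requests on port 1 could
 * register roughly as follows.  my_send_handler, my_recv_handler and
 * my_context are hypothetical names supplied by the caller.
 *
 *	struct ib_mad_reg_req req;
 *	struct ib_mad_agent *agent;
 *
 *	memset(&req, 0, sizeof req);
 *	req.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
 *	req.mgmt_class_version = 1;
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, 1, IB_QPT_GSI, &req, 0,
 *				      my_send_handler, my_recv_handler,
 *				      my_context);
 *	if (IS_ERR(agent))
 *		... handle registration failure ...
 */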
/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
                                           u8 port_num,
                                           enum ib_qp_type qp_type,
                                           struct ib_mad_reg_req *mad_reg_req,
                                           u8 rmpp_version,
                                           ib_mad_send_handler send_handler,
                                           ib_mad_recv_handler recv_handler,
                                           void *context)
{
        struct ib_mad_port_private *port_priv;
        struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_reg_req *reg_req = NULL;
        struct ib_mad_mgmt_class_table *class;
        struct ib_mad_mgmt_vendor_class_table *vendor;
        struct ib_mad_mgmt_vendor_class *vendor_class;
        struct ib_mad_mgmt_method_table *method;
        int ret2, qpn;
        unsigned long flags;
        u8 mgmt_class, vclass;

        /* Validate parameters */
        qpn = get_spl_qp_index(qp_type);
        if (qpn == -1)
                goto error1;

        if (rmpp_version)
                goto error1;    /* XXX: until RMPP implemented */
        /* Validate MAD registration request if supplied */
        if (mad_reg_req) {
                if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
                        goto error1;
                if (!recv_handler)
                        goto error1;
                if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
                        /*
                         * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
                         * one in this range currently allowed
                         */
                        if (mad_reg_req->mgmt_class !=
                            IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                                goto error1;
                } else if (mad_reg_req->mgmt_class == 0) {
                        /*
                         * Class 0 is reserved in IBA and is used for
                         * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
                         */
                        goto error1;
                } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
                        /*
                         * If class is in "new" vendor range,
                         * ensure supplied OUI is not zero
                         */
                        if (!is_vendor_oui(mad_reg_req->oui))
                                goto error1;
                }
                /* Make sure class supplied is consistent with QP type */
                if (qp_type == IB_QPT_SMI) {
                        if ((mad_reg_req->mgmt_class !=
                                        IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
                            (mad_reg_req->mgmt_class !=
                                        IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
                                goto error1;
                } else {
                        if ((mad_reg_req->mgmt_class ==
                                        IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
                            (mad_reg_req->mgmt_class ==
                                        IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
                                goto error1;
                }
        } else {
                /* No registration request supplied */
                if (!send_handler)
                        goto error1;
        }
        /* Validate device and port */
        port_priv = ib_get_mad_port(device, port_num);
        if (!port_priv) {
                ret = ERR_PTR(-ENODEV);
                goto error1;
        }

        /* Allocate structures */
        mad_agent_priv = kmalloc(sizeof *mad_agent_priv, GFP_KERNEL);
        if (!mad_agent_priv) {
                ret = ERR_PTR(-ENOMEM);
                goto error1;
        }
        memset(mad_agent_priv, 0, sizeof *mad_agent_priv);

        mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
                                                 IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(mad_agent_priv->agent.mr)) {
                ret = ERR_PTR(-ENOMEM);
                goto error2;
        }

        if (mad_reg_req) {
                reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
                if (!reg_req) {
                        ret = ERR_PTR(-ENOMEM);
                        goto error3;
                }
                /* Make a copy of the MAD registration request */
                memcpy(reg_req, mad_reg_req, sizeof *reg_req);
        }
        /* Now, fill in the various structures */
        mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
        mad_agent_priv->reg_req = reg_req;
        mad_agent_priv->rmpp_version = rmpp_version;
        mad_agent_priv->agent.device = device;
        mad_agent_priv->agent.recv_handler = recv_handler;
        mad_agent_priv->agent.send_handler = send_handler;
        mad_agent_priv->agent.context = context;
        mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
        mad_agent_priv->agent.port_num = port_num;

        spin_lock_irqsave(&port_priv->reg_lock, flags);
        mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
        /*
         * Make sure MAD registration (if supplied)
         * is non overlapping with any existing ones
         */
        if (mad_reg_req) {
                mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
                if (!is_vendor_class(mgmt_class)) {
                        class = port_priv->version[mad_reg_req->
                                                   mgmt_class_version].class;
                        if (class) {
                                method = class->method_table[mgmt_class];
                                if (method) {
                                        if (method_in_use(&method,
                                                          mad_reg_req))
                                                goto error4;
                                }
                        }
                        ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
                                                  mgmt_class);
                } else {
                        /* "New" vendor class range */
                        vendor = port_priv->version[mad_reg_req->
                                                    mgmt_class_version].vendor;
                        if (vendor) {
                                vclass = vendor_class_index(mgmt_class);
                                vendor_class = vendor->vendor_class[vclass];
                                if (vendor_class) {
                                        if (is_vendor_method_in_use(
                                                        vendor_class,
                                                        mad_reg_req))
                                                goto error4;
                                }
                        }
                        ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
                }
                if (ret2) {
                        ret = ERR_PTR(ret2);
                        goto error4;
                }
        }

        /* Add mad agent into port's agent list */
        list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
        spin_unlock_irqrestore(&port_priv->reg_lock, flags);
        spin_lock_init(&mad_agent_priv->lock);
        INIT_LIST_HEAD(&mad_agent_priv->send_list);
        INIT_LIST_HEAD(&mad_agent_priv->wait_list);
        INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
        INIT_LIST_HEAD(&mad_agent_priv->local_list);
        INIT_WORK(&mad_agent_priv->local_work, local_completions,
                  mad_agent_priv);
        INIT_LIST_HEAD(&mad_agent_priv->canceled_list);
        INIT_WORK(&mad_agent_priv->canceled_work, cancel_sends, mad_agent_priv);
        atomic_set(&mad_agent_priv->refcount, 1);
        init_waitqueue_head(&mad_agent_priv->wait);

        return &mad_agent_priv->agent;

error4:
        spin_unlock_irqrestore(&port_priv->reg_lock, flags);
        kfree(reg_req);
error3:
        /*
         * Unwind in reverse order of allocation: release the MR before
         * freeing the structure that holds it.
         */
        ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
        kfree(mad_agent_priv);
error1:
        return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
static inline int is_snooping_sends(int mad_snoop_flags)
{
        return (mad_snoop_flags &
                (/*IB_MAD_SNOOP_POSTED_SENDS |
                 IB_MAD_SNOOP_RMPP_SENDS |*/
                 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
                 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
        return (mad_snoop_flags &
                (IB_MAD_SNOOP_RECVS /*|
                 IB_MAD_SNOOP_RMPP_RECVS*/));
}
static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
                                struct ib_mad_snoop_private *mad_snoop_priv)
{
        struct ib_mad_snoop_private **new_snoop_table;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        /* Check for empty slot in array. */
        for (i = 0; i < qp_info->snoop_table_size; i++)
                if (!qp_info->snoop_table[i])
                        break;

        if (i == qp_info->snoop_table_size) {
                /*
                 * Grow table by one entry.  Note the parentheses: the
                 * allocation must be sizeof(entry) * (size + 1), not
                 * sizeof(entry) * size + 1.
                 */
                new_snoop_table = kmalloc(sizeof mad_snoop_priv *
                                          (qp_info->snoop_table_size + 1),
                                          GFP_ATOMIC);
                if (!new_snoop_table) {
                        i = -ENOMEM;
                        goto out;
                }
                if (qp_info->snoop_table) {
                        memcpy(new_snoop_table, qp_info->snoop_table,
                               sizeof mad_snoop_priv *
                               qp_info->snoop_table_size);
                        kfree(qp_info->snoop_table);
                }
                qp_info->snoop_table = new_snoop_table;
                qp_info->snoop_table_size++;
        }
        qp_info->snoop_table[i] = mad_snoop_priv;
        atomic_inc(&qp_info->snoop_count);
out:
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
        return i;
}
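
/*
 * Snoop agents share the send/receive paths with normal agents: each
 * dispatch loop below takes a reference on the snoop entry, drops
 * snoop_lock while calling the client handler, and retakes the lock
 * afterwards, so handlers may sleep without holding the lock.
 */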
struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
                                           u8 port_num,
                                           enum ib_qp_type qp_type,
                                           int mad_snoop_flags,
                                           ib_mad_snoop_handler snoop_handler,
                                           ib_mad_recv_handler recv_handler,
                                           void *context)
{
        struct ib_mad_port_private *port_priv;
        struct ib_mad_agent *ret;
        struct ib_mad_snoop_private *mad_snoop_priv;
        int qpn;

        /* Validate parameters */
        if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
            (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
                ret = ERR_PTR(-EINVAL);
                goto error1;
        }
        qpn = get_spl_qp_index(qp_type);
        if (qpn == -1) {
                ret = ERR_PTR(-EINVAL);
                goto error1;
        }
        port_priv = ib_get_mad_port(device, port_num);
        if (!port_priv) {
                ret = ERR_PTR(-ENODEV);
                goto error1;
        }
        /* Allocate structures */
        mad_snoop_priv = kmalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
        if (!mad_snoop_priv) {
                ret = ERR_PTR(-ENOMEM);
                goto error1;
        }

        /* Now, fill in the various structures */
        memset(mad_snoop_priv, 0, sizeof *mad_snoop_priv);
        mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
        mad_snoop_priv->agent.device = device;
        mad_snoop_priv->agent.recv_handler = recv_handler;
        mad_snoop_priv->agent.snoop_handler = snoop_handler;
        mad_snoop_priv->agent.context = context;
        mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
        mad_snoop_priv->agent.port_num = port_num;
        mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
        init_waitqueue_head(&mad_snoop_priv->wait);
        mad_snoop_priv->snoop_index = register_snoop_agent(
                                                &port_priv->qp_info[qpn],
                                                mad_snoop_priv);
        if (mad_snoop_priv->snoop_index < 0) {
                ret = ERR_PTR(mad_snoop_priv->snoop_index);
                goto error2;
        }

        atomic_set(&mad_snoop_priv->refcount, 1);
        return &mad_snoop_priv->agent;

error2:
        kfree(mad_snoop_priv);
error1:
        return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
        struct ib_mad_port_private *port_priv;
        unsigned long flags;

        /* Note that we could still be handling received MADs */

        /*
         * Canceling all sends results in dropping received response
         * MADs, preventing us from queuing additional work
         */
        cancel_mads(mad_agent_priv);
        port_priv = mad_agent_priv->qp_info->port_priv;
        cancel_delayed_work(&mad_agent_priv->timed_work);

        spin_lock_irqsave(&port_priv->reg_lock, flags);
        remove_mad_reg_req(mad_agent_priv);
        list_del(&mad_agent_priv->agent_list);
        spin_unlock_irqrestore(&port_priv->reg_lock, flags);

        flush_workqueue(port_priv->wq);

        atomic_dec(&mad_agent_priv->refcount);
        wait_event(mad_agent_priv->wait,
                   !atomic_read(&mad_agent_priv->refcount));

        if (mad_agent_priv->reg_req)
                kfree(mad_agent_priv->reg_req);
        ib_dereg_mr(mad_agent_priv->agent.mr);
        kfree(mad_agent_priv);
}
static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
        struct ib_mad_qp_info *qp_info;
        unsigned long flags;

        qp_info = mad_snoop_priv->qp_info;
        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
        atomic_dec(&qp_info->snoop_count);
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

        atomic_dec(&mad_snoop_priv->refcount);
        wait_event(mad_snoop_priv->wait,
                   !atomic_read(&mad_snoop_priv->refcount));

        kfree(mad_snoop_priv);
}
/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_snoop_private *mad_snoop_priv;

        /* If the TID is zero, the agent can only snoop. */
        if (mad_agent->hi_tid) {
                mad_agent_priv = container_of(mad_agent,
                                              struct ib_mad_agent_private,
                                              agent);
                unregister_mad_agent(mad_agent_priv);
        } else {
                mad_snoop_priv = container_of(mad_agent,
                                              struct ib_mad_snoop_private,
                                              agent);
                unregister_mad_snoop(mad_snoop_priv);
        }
        return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
        struct ib_mad_queue *mad_queue;
        unsigned long flags;

        BUG_ON(!mad_list->mad_queue);
        mad_queue = mad_list->mad_queue;
        spin_lock_irqsave(&mad_queue->lock, flags);
        list_del(&mad_list->list);
        mad_queue->count--;
        spin_unlock_irqrestore(&mad_queue->lock, flags);
}
static void snoop_send(struct ib_mad_qp_info *qp_info,
                       struct ib_send_wr *send_wr,
                       struct ib_mad_send_wc *mad_send_wc,
                       int mad_snoop_flags)
{
        struct ib_mad_snoop_private *mad_snoop_priv;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        for (i = 0; i < qp_info->snoop_table_size; i++) {
                mad_snoop_priv = qp_info->snoop_table[i];
                if (!mad_snoop_priv ||
                    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
                        continue;

                atomic_inc(&mad_snoop_priv->refcount);
                spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
                mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
                                                    send_wr, mad_send_wc);
                if (atomic_dec_and_test(&mad_snoop_priv->refcount))
                        wake_up(&mad_snoop_priv->wait);
                spin_lock_irqsave(&qp_info->snoop_lock, flags);
        }
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
static void snoop_recv(struct ib_mad_qp_info *qp_info,
                       struct ib_mad_recv_wc *mad_recv_wc,
                       int mad_snoop_flags)
{
        struct ib_mad_snoop_private *mad_snoop_priv;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        for (i = 0; i < qp_info->snoop_table_size; i++) {
                mad_snoop_priv = qp_info->snoop_table[i];
                if (!mad_snoop_priv ||
                    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
                        continue;

                atomic_inc(&mad_snoop_priv->refcount);
                spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
                mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
                                                   mad_recv_wc);
                if (atomic_dec_and_test(&mad_snoop_priv->refcount))
                        wake_up(&mad_snoop_priv->wait);
                spin_lock_irqsave(&qp_info->snoop_lock, flags);
        }
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
                         struct ib_wc *wc)
{
        memset(wc, 0, sizeof *wc);
        wc->wr_id = wr_id;
        wc->status = IB_WC_SUCCESS;
        wc->opcode = IB_WC_RECV;
        wc->pkey_index = pkey_index;
        wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
        wc->src_qp = IB_QP0;
        wc->qp_num = IB_QP0;
        wc->slid = slid;
        wc->sl = 0;
        wc->dlid_path_bits = 0;
        wc->port_num = port_num;
}
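
/*
 * build_smp_wc() fakes the receive-side work completion that a directed
 * route SMP would have generated had it actually arrived on QP0; it is
 * used below when an outgoing DR SMP is consumed locally and handed
 * straight to device->process_mad().
 */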
/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
                                  struct ib_smp *smp,
                                  struct ib_send_wr *send_wr)
{
        int ret, solicited;
        unsigned long flags;
        struct ib_mad_local_private *local;
        struct ib_mad_private *mad_priv;
        struct ib_mad_port_private *port_priv;
        struct ib_mad_agent_private *recv_mad_agent = NULL;
        struct ib_device *device = mad_agent_priv->agent.device;
        u8 port_num = mad_agent_priv->agent.port_num;
        struct ib_wc mad_wc;

        if (!smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
                ret = -EINVAL;
                printk(KERN_ERR PFX "Invalid directed route\n");
                goto out;
        }
        /* Check to post send on QP or process locally */
        ret = smi_check_local_dr_smp(smp, device, port_num);
        if (!ret || !device->process_mad)
                goto out;

        local = kmalloc(sizeof *local, GFP_ATOMIC);
        if (!local) {
                ret = -ENOMEM;
                printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
                goto out;
        }
        local->mad_priv = NULL;
        local->recv_mad_agent = NULL;
        mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
        if (!mad_priv) {
                ret = -ENOMEM;
                printk(KERN_ERR PFX "No memory for local response MAD\n");
                kfree(local);
                goto out;
        }

        build_smp_wc(send_wr->wr_id, smp->dr_slid, send_wr->wr.ud.pkey_index,
                     send_wr->wr.ud.port_num, &mad_wc);

        /* No GRH for DR SMP */
        ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
                                  (struct ib_mad *)smp,
                                  (struct ib_mad *)&mad_priv->mad);
        switch (ret)
        {
        case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
                /*
                 * See if response is solicited and
                 * there is a recv handler
                 */
                if (solicited_mad(&mad_priv->mad.mad) &&
                    mad_agent_priv->agent.recv_handler) {
                        local->mad_priv = mad_priv;
                        local->recv_mad_agent = mad_agent_priv;
                        /*
                         * Reference MAD agent until receive
                         * side of local completion handled
                         */
                        atomic_inc(&mad_agent_priv->refcount);
                } else
                        kmem_cache_free(ib_mad_cache, mad_priv);
                break;
        case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
                kmem_cache_free(ib_mad_cache, mad_priv);
                break;
        case IB_MAD_RESULT_SUCCESS:
                /* Treat like an incoming receive MAD */
                solicited = solicited_mad(&mad_priv->mad.mad);
                port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
                                            mad_agent_priv->agent.port_num);
                if (port_priv) {
                        mad_priv->mad.mad.mad_hdr.tid =
                                ((struct ib_mad *)smp)->mad_hdr.tid;
                        recv_mad_agent = find_mad_agent(port_priv,
                                                        &mad_priv->mad.mad,
                                                        solicited);
                }
                if (!port_priv || !recv_mad_agent) {
                        kmem_cache_free(ib_mad_cache, mad_priv);
                        kfree(local);
                        ret = 0;
                        goto out;
                }
                local->mad_priv = mad_priv;
                local->recv_mad_agent = recv_mad_agent;
                break;
        default:
                kmem_cache_free(ib_mad_cache, mad_priv);
                kfree(local);
                ret = -EINVAL;
                goto out;
        }

        local->send_wr = *send_wr;
        local->send_wr.sg_list = local->sg_list;
        memcpy(local->sg_list, send_wr->sg_list,
               sizeof *send_wr->sg_list * send_wr->num_sge);
        local->send_wr.next = NULL;
        local->tid = send_wr->wr.ud.mad_hdr->tid;
        local->wr_id = send_wr->wr_id;
        /* Reference MAD agent until send side of local completion handled */
        atomic_inc(&mad_agent_priv->refcount);
        /* Queue local completion to local list */
        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
        queue_work(mad_agent_priv->qp_info->port_priv->wq,
                   &mad_agent_priv->local_work);
        ret = 1;
out:
        return ret;
}
static int ib_send_mad(struct ib_mad_agent_private *mad_agent_priv,
                       struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_mad_qp_info *qp_info;
        struct ib_send_wr *bad_send_wr;
        unsigned long flags;
        int ret;

        /* Replace user's WR ID with our own to find WR upon completion */
        qp_info = mad_agent_priv->qp_info;
        mad_send_wr->wr_id = mad_send_wr->send_wr.wr_id;
        mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
        mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

        spin_lock_irqsave(&qp_info->send_queue.lock, flags);
        if (qp_info->send_queue.count++ < qp_info->send_queue.max_active) {
                list_add_tail(&mad_send_wr->mad_list.list,
                              &qp_info->send_queue.list);
                spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
                ret = ib_post_send(mad_agent_priv->agent.qp,
                                   &mad_send_wr->send_wr, &bad_send_wr);
                if (ret) {
                        printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
                        dequeue_mad(&mad_send_wr->mad_list);
                }
        } else {
                list_add_tail(&mad_send_wr->mad_list.list,
                              &qp_info->overflow_list);
                spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
                ret = 0;
        }
        return ret;
}
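
/*
 * Send-side flow control: at most send_queue.max_active WRs are posted
 * to the QP at once; any excess waits on overflow_list and is posted by
 * ib_mad_send_done_handler() as completions drain the send queue.
 */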
/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_agent *mad_agent,
                     struct ib_send_wr *send_wr,
                     struct ib_send_wr **bad_send_wr)
{
        int ret = -EINVAL;
        struct ib_mad_agent_private *mad_agent_priv;

        /* Validate supplied parameters */
        if (!bad_send_wr)
                goto error1;

        if (!mad_agent || !send_wr)
                goto error2;

        if (!mad_agent->send_handler)
                goto error2;

        mad_agent_priv = container_of(mad_agent,
                                      struct ib_mad_agent_private,
                                      agent);

        /* Walk list of send WRs and post each on send list */
        while (send_wr) {
                unsigned long                   flags;
                struct ib_send_wr               *next_send_wr;
                struct ib_mad_send_wr_private   *mad_send_wr;
                struct ib_smp                   *smp;

                /* Validate more parameters */
                if (send_wr->num_sge > IB_MAD_SEND_REQ_MAX_SG)
                        goto error2;

                if (send_wr->wr.ud.timeout_ms && !mad_agent->recv_handler)
                        goto error2;

                if (!send_wr->wr.ud.mad_hdr) {
                        printk(KERN_ERR PFX "MAD header must be supplied "
                               "in WR %p\n", send_wr);
                        goto error2;
                }

                /*
                 * Save pointer to next work request to post in case the
                 * current one completes, and the user modifies the work
                 * request associated with the completion
                 */
                next_send_wr = (struct ib_send_wr *)send_wr->next;

                smp = (struct ib_smp *)send_wr->wr.ud.mad_hdr;
                if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                        ret = handle_outgoing_dr_smp(mad_agent_priv, smp,
                                                     send_wr);
                        if (ret < 0)            /* error */
                                goto error2;
                        else if (ret == 1)      /* locally consumed */
                                goto next;
                }

                /* Allocate MAD send WR tracking structure */
                mad_send_wr = kmalloc(sizeof *mad_send_wr, GFP_ATOMIC);
                if (!mad_send_wr) {
                        printk(KERN_ERR PFX "No memory for "
                               "ib_mad_send_wr_private\n");
                        ret = -ENOMEM;
                        goto error2;
                }

                mad_send_wr->send_wr = *send_wr;
                mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
                memcpy(mad_send_wr->sg_list, send_wr->sg_list,
                       sizeof *send_wr->sg_list * send_wr->num_sge);
                mad_send_wr->send_wr.next = NULL;
                mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid;
                mad_send_wr->agent = mad_agent;
                /* Timeout will be updated after send completes */
                mad_send_wr->timeout = msecs_to_jiffies(send_wr->wr.
                                                        ud.timeout_ms);
                mad_send_wr->retry = 0;
                /* One reference for each work request to QP + response */
                mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
                mad_send_wr->status = IB_WC_SUCCESS;

                /* Reference MAD agent until send completes */
                atomic_inc(&mad_agent_priv->refcount);
                spin_lock_irqsave(&mad_agent_priv->lock, flags);
                list_add_tail(&mad_send_wr->agent_list,
                              &mad_agent_priv->send_list);
                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

                ret = ib_send_mad(mad_agent_priv, mad_send_wr);
                if (ret) {
                        /* Fail send request */
                        spin_lock_irqsave(&mad_agent_priv->lock, flags);
                        list_del(&mad_send_wr->agent_list);
                        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
                        atomic_dec(&mad_agent_priv->refcount);
                        goto error2;
                }
next:
                send_wr = next_send_wr;
        }
        return 0;

error2:
        *bad_send_wr = send_wr;
error1:
        return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
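
/*
 * Lifetime notes for a send: the client's wr_id is saved and replaced
 * with a pointer to the tracking structure's mad_list so the WR can be
 * found on completion; refcount is 1 for the QP completion plus 1 if a
 * response is expected (timeout_ms != 0), and the agent itself is
 * referenced until the send fully completes.
 */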
/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_mad_recv_buf *entry;
        struct ib_mad_private_header *mad_priv_hdr;
        struct ib_mad_private *priv;

        mad_priv_hdr = container_of(mad_recv_wc,
                                    struct ib_mad_private_header,
                                    recv_wc);
        priv = container_of(mad_priv_hdr, struct ib_mad_private, header);

        /*
         * Walk receive buffer list associated with this WC
         * No need to remove them from list of receive buffers
         * (With RMPP unimplemented the list is always empty, so this
         * loop body never runs.)
         */
        list_for_each_entry(entry, &mad_recv_wc->recv_buf.list, list) {
                /* Free previous receive buffer */
                kmem_cache_free(ib_mad_cache, priv);
                mad_priv_hdr = container_of(mad_recv_wc,
                                            struct ib_mad_private_header,
                                            recv_wc);
                priv = container_of(mad_priv_hdr, struct ib_mad_private,
                                    header);
        }

        /* Free last buffer */
        kmem_cache_free(ib_mad_cache, priv);
}
EXPORT_SYMBOL(ib_free_recv_mad);
void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc,
                          void *buf)
{
        printk(KERN_ERR PFX "ib_coalesce_recv_mad() not implemented yet\n");
}
EXPORT_SYMBOL(ib_coalesce_recv_mad);
struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
                                        u8 rmpp_version,
                                        ib_mad_send_handler send_handler,
                                        ib_mad_recv_handler recv_handler,
                                        void *context)
{
        return ERR_PTR(-EINVAL);        /* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);
int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
                      struct ib_wc *wc)
{
        printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
        return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);
static int method_in_use(struct ib_mad_mgmt_method_table **method,
                         struct ib_mad_reg_req *mad_reg_req)
{
        int i;

        for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
             i < IB_MGMT_MAX_METHODS;
             i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
                               1+i)) {
                if ((*method)->agent[i]) {
                        printk(KERN_ERR PFX "Method %d already in use\n", i);
                        return -EINVAL;
                }
        }
        return 0;
}
static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
        /* Allocate management method table */
        *method = kmalloc(sizeof **method, GFP_ATOMIC);
        if (!*method) {
                printk(KERN_ERR PFX "No memory for "
                       "ib_mad_mgmt_method_table\n");
                return -ENOMEM;
        }
        /* Clear management method table */
        memset(*method, 0, sizeof **method);

        return 0;
}
/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
        int i;

        for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
                if (method->agent[i])
                        return 1;
        return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
        int i;

        for (i = 0; i < MAX_MGMT_CLASS; i++)
                if (class->method_table[i])
                        return 1;
        return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
        int i;

        for (i = 0; i < MAX_MGMT_OUI; i++)
                if (vendor_class->method_table[i])
                        return 1;
        return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
                           char *oui)
{
        int i;

        for (i = 0; i < MAX_MGMT_OUI; i++)
                /* Is there matching OUI for this vendor class ? */
                if (!memcmp(vendor_class->oui[i], oui, 3))
                        return i;
        return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
        int i;

        for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
                if (vendor->vendor_class[i])
                        return 1;
        return 0;
}
static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
                                     struct ib_mad_agent_private *agent)
{
        int i;

        /* Remove any methods for this mad agent */
        for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
                if (method->agent[i] == agent) {
                        method->agent[i] = NULL;
                }
        }
}
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                              struct ib_mad_agent_private *agent_priv,
                              u8 mgmt_class)
{
        struct ib_mad_port_private *port_priv;
        struct ib_mad_mgmt_class_table **class;
        struct ib_mad_mgmt_method_table **method;
        int i, ret;

        port_priv = agent_priv->qp_info->port_priv;
        class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
        if (!*class) {
                /* Allocate management class table for "new" class version */
                *class = kmalloc(sizeof **class, GFP_ATOMIC);
                if (!*class) {
                        printk(KERN_ERR PFX "No memory for "
                               "ib_mad_mgmt_class_table\n");
                        ret = -ENOMEM;
                        goto error1;
                }
                /* Clear management class table */
                memset(*class, 0, sizeof(**class));
                /* Allocate method table for this management class */
                method = &(*class)->method_table[mgmt_class];
                if ((ret = allocate_method_table(method)))
                        goto error2;
        } else {
                method = &(*class)->method_table[mgmt_class];
                if (!*method) {
                        /* Allocate method table for this management class */
                        if ((ret = allocate_method_table(method)))
                                goto error1;
                }
        }

        /* Now, make sure methods are not already in use */
        if (method_in_use(method, mad_reg_req))
                goto error3;

        /* Finally, add in methods being registered */
        for (i = find_first_bit(mad_reg_req->method_mask,
                                IB_MGMT_MAX_METHODS);
             i < IB_MGMT_MAX_METHODS;
             i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
                               1+i)) {
                (*method)->agent[i] = agent_priv;
        }
        return 0;

error3:
        /* Remove any methods for this mad agent */
        remove_methods_mad_agent(*method, agent_priv);
        /* Now, check to see if there are any methods in use */
        if (!check_method_table(*method)) {
                /* If not, release management method table */
                kfree(*method);
                *method = NULL;
        }
        ret = -EINVAL;
        goto error1;
error2:
        kfree(*class);
        *class = NULL;
error1:
        return ret;
}
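
/*
 * Class, vendor and method tables are allocated lazily on first
 * registration and freed again as the error paths and
 * remove_mad_reg_req() find them empty, so an idle port carries no
 * routing state.
 */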
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                           struct ib_mad_agent_private *agent_priv)
{
        struct ib_mad_port_private *port_priv;
        struct ib_mad_mgmt_vendor_class_table **vendor_table;
        struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
        struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
        struct ib_mad_mgmt_method_table **method;
        int i, ret = -ENOMEM;
        u8 vclass;

        /* "New" vendor (with OUI) class */
        vclass = vendor_class_index(mad_reg_req->mgmt_class);
        port_priv = agent_priv->qp_info->port_priv;
        vendor_table = &port_priv->version[
                                mad_reg_req->mgmt_class_version].vendor;
        if (!*vendor_table) {
                /* Allocate mgmt vendor class table for "new" class version */
                vendor = kmalloc(sizeof *vendor, GFP_ATOMIC);
                if (!vendor) {
                        printk(KERN_ERR PFX "No memory for "
                               "ib_mad_mgmt_vendor_class_table\n");
                        goto error1;
                }
                /* Clear management vendor class table */
                memset(vendor, 0, sizeof(*vendor));
                *vendor_table = vendor;
        }
        if (!(*vendor_table)->vendor_class[vclass]) {
                /* Allocate table for this management vendor class */
                vendor_class = kmalloc(sizeof *vendor_class, GFP_ATOMIC);
                if (!vendor_class) {
                        printk(KERN_ERR PFX "No memory for "
                               "ib_mad_mgmt_vendor_class\n");
                        goto error2;
                }
                memset(vendor_class, 0, sizeof(*vendor_class));
                (*vendor_table)->vendor_class[vclass] = vendor_class;
        }
        for (i = 0; i < MAX_MGMT_OUI; i++) {
                /* Is there matching OUI for this vendor class ? */
                if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
                            mad_reg_req->oui, 3)) {
                        method = &(*vendor_table)->vendor_class[
                                                vclass]->method_table[i];
                        BUG_ON(!*method);
                        goto check_in_use;
                }
        }
        for (i = 0; i < MAX_MGMT_OUI; i++) {
                /* OUI slot available ? */
                if (!is_vendor_oui((*vendor_table)->vendor_class[
                                vclass]->oui[i])) {
                        method = &(*vendor_table)->vendor_class[
                                vclass]->method_table[i];
                        BUG_ON(*method);
                        /* Allocate method table for this OUI */
                        if ((ret = allocate_method_table(method)))
                                goto error3;
                        memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
                               mad_reg_req->oui, 3);
                        goto check_in_use;
                }
        }
        printk(KERN_ERR PFX "All OUI slots in use\n");
        goto error3;

check_in_use:
        /* Now, make sure methods are not already in use */
        if (method_in_use(method, mad_reg_req))
                goto error4;

        /* Finally, add in methods being registered */
        for (i = find_first_bit(mad_reg_req->method_mask,
                                IB_MGMT_MAX_METHODS);
             i < IB_MGMT_MAX_METHODS;
             i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
                               1+i)) {
                (*method)->agent[i] = agent_priv;
        }
        return 0;

error4:
        /* Remove any methods for this mad agent */
        remove_methods_mad_agent(*method, agent_priv);
        /* Now, check to see if there are any methods in use */
        if (!check_method_table(*method)) {
                /* If not, release management method table */
                kfree(*method);
                *method = NULL;
        }
        ret = -EINVAL;
error3:
        if (vendor_class) {
                (*vendor_table)->vendor_class[vclass] = NULL;
                kfree(vendor_class);
        }
error2:
        if (vendor) {
                *vendor_table = NULL;
                kfree(vendor);
        }
error1:
        return ret;
}
static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
        struct ib_mad_port_private *port_priv;
        struct ib_mad_mgmt_class_table *class;
        struct ib_mad_mgmt_method_table *method;
        struct ib_mad_mgmt_vendor_class_table *vendor;
        struct ib_mad_mgmt_vendor_class *vendor_class;
        int index;
        u8 mgmt_class;

        /*
         * Was MAD registration request supplied
         * with original registration ?
         */
        if (!agent_priv->reg_req) {
                goto out;
        }

        port_priv = agent_priv->qp_info->port_priv;
        mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
        class = port_priv->version[
                        agent_priv->reg_req->mgmt_class_version].class;
        if (!class)
                goto vendor_check;

        method = class->method_table[mgmt_class];
        if (method) {
                /* Remove any methods for this mad agent */
                remove_methods_mad_agent(method, agent_priv);
                /* Now, check to see if there are any methods still in use */
                if (!check_method_table(method)) {
                        /* If not, release management method table */
                        kfree(method);
                        class->method_table[mgmt_class] = NULL;
                        /* Any management classes left ? */
                        if (!check_class_table(class)) {
                                /* If not, release management class table */
                                kfree(class);
                                port_priv->version[
                                        agent_priv->reg_req->
                                        mgmt_class_version].class = NULL;
                        }
                }
        }

vendor_check:
        if (!is_vendor_class(mgmt_class))
                goto out;

        /* normalize mgmt_class to vendor range 2 */
        mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
        vendor = port_priv->version[
                        agent_priv->reg_req->mgmt_class_version].vendor;

        if (!vendor)
                goto out;

        vendor_class = vendor->vendor_class[mgmt_class];
        if (vendor_class) {
                index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
                if (index < 0)
                        goto out;
                method = vendor_class->method_table[index];
                if (method) {
                        /* Remove any methods for this mad agent */
                        remove_methods_mad_agent(method, agent_priv);
                        /*
                         * Now, check to see if there are
                         * any methods still in use
                         */
                        if (!check_method_table(method)) {
                                /* If not, release management method table */
                                kfree(method);
                                vendor_class->method_table[index] = NULL;
                                memset(vendor_class->oui[index], 0, 3);
                                /* Any OUIs left ? */
                                if (!check_vendor_class(vendor_class)) {
                                        /* If not, release vendor class table */
                                        kfree(vendor_class);
                                        vendor->vendor_class[mgmt_class] = NULL;
                                        /* Any other vendor classes left ? */
                                        if (!check_vendor_table(vendor)) {
                                                kfree(vendor);
                                                port_priv->version[
                                                        agent_priv->reg_req->
                                                        mgmt_class_version].
                                                        vendor = NULL;
                                        }
                                }
                        }
                }
        }

out:
        return;
}
static int response_mad(struct ib_mad *mad)
{
        /* Trap represses are responses although response bit is reset */
        return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
                (mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
}
static int solicited_mad(struct ib_mad *mad)
{
        /* CM MADs are never solicited */
        if (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CM) {
                return 0;
        }

        /* XXX: Determine whether MAD is using RMPP */

        /* Not using RMPP */
        /* Is this MAD a response to a previous MAD ? */
        return response_mad(mad);
}
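
/*
 * Receive routing: solicited MADs (responses) are matched to a sender
 * by the high 32 bits of the transaction ID, which were stamped with
 * the agent's hi_tid at registration; unsolicited MADs are looked up
 * through the version/class/method tables (plus OUI for "new" vendor
 * classes).
 */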
static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
               struct ib_mad *mad,
               int solicited)
{
        struct ib_mad_agent_private *mad_agent = NULL;
        unsigned long flags;

        spin_lock_irqsave(&port_priv->reg_lock, flags);

        /*
         * Whether MAD was solicited determines type of routing to
         * MAD client.
         */
        if (solicited) {
                u32 hi_tid;
                struct ib_mad_agent_private *entry;

                /*
                 * Routing is based on high 32 bits of transaction ID
                 * of MAD
                 */
                hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
                list_for_each_entry(entry, &port_priv->agent_list,
                                    agent_list) {
                        if (entry->agent.hi_tid == hi_tid) {
                                mad_agent = entry;
                                break;
                        }
                }
        } else {
                struct ib_mad_mgmt_class_table *class;
                struct ib_mad_mgmt_method_table *method;
                struct ib_mad_mgmt_vendor_class_table *vendor;
                struct ib_mad_mgmt_vendor_class *vendor_class;
                struct ib_vendor_mad *vendor_mad;
                int index;

                /*
                 * Routing is based on version, class, and method
                 * For "newer" vendor MADs, also based on OUI
                 */
                if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
                        goto out;
                if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
                        class = port_priv->version[
                                        mad->mad_hdr.class_version].class;
                        if (!class)
                                goto out;
                        method = class->method_table[convert_mgmt_class(
                                                mad->mad_hdr.mgmt_class)];
                        if (method)
                                mad_agent = method->agent[mad->mad_hdr.method &
                                                          ~IB_MGMT_METHOD_RESP];
                } else {
                        vendor = port_priv->version[
                                        mad->mad_hdr.class_version].vendor;
                        if (!vendor)
                                goto out;
                        vendor_class = vendor->vendor_class[vendor_class_index(
                                                mad->mad_hdr.mgmt_class)];
                        if (!vendor_class)
                                goto out;
                        /* Find matching OUI */
                        vendor_mad = (struct ib_vendor_mad *)mad;
                        index = find_vendor_oui(vendor_class, vendor_mad->oui);
                        if (index < 0)
                                goto out;
                        method = vendor_class->method_table[index];
                        if (method)
                                mad_agent = method->agent[mad->mad_hdr.method &
                                                          ~IB_MGMT_METHOD_RESP];
                }
        }

        if (mad_agent) {
                if (mad_agent->agent.recv_handler)
                        atomic_inc(&mad_agent->refcount);
                else {
                        printk(KERN_NOTICE PFX "No receive handler for client "
                               "%p on port %d\n",
                               &mad_agent->agent, port_priv->port_num);
                        mad_agent = NULL;
                }
        }
out:
        spin_unlock_irqrestore(&port_priv->reg_lock, flags);

        return mad_agent;
}
static int validate_mad(struct ib_mad *mad, u32 qp_num)
{
        int valid = 0;

        /* Make sure MAD base version is understood */
        if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
                printk(KERN_ERR PFX "MAD received with unsupported base "
                       "version %d\n", mad->mad_hdr.base_version);
                goto out;
        }

        /* Filter SMI packets sent to other than QP0 */
        if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
            (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
                if (qp_num == 0)
                        valid = 1;
        } else {
                /* Filter GSI packets sent to QP0 */
                if (qp_num != 0)
                        valid = 1;
        }

out:
        return valid;
}
/*
 * Return start of fully reassembled MAD, or NULL, if MAD isn't assembled yet
 */
static struct ib_mad_private *
reassemble_recv(struct ib_mad_agent_private *mad_agent_priv,
                struct ib_mad_private *recv)
{
        /* Until we have RMPP, all receives are reassembled!... */
        INIT_LIST_HEAD(&recv->header.recv_wc.recv_buf.list);
        return recv;
}
static struct ib_mad_send_wr_private*
find_send_req(struct ib_mad_agent_private *mad_agent_priv,
              u64 tid)
{
        struct ib_mad_send_wr_private *mad_send_wr;

        list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
                            agent_list) {
                if (mad_send_wr->tid == tid)
                        return mad_send_wr;
        }

        /*
         * It's possible to receive the response before we've
         * been notified that the send has completed
         */
        list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
                            agent_list) {
                if (mad_send_wr->tid == tid && mad_send_wr->timeout) {
                        /* Verify request has not been canceled */
                        return (mad_send_wr->status == IB_WC_SUCCESS) ?
                                mad_send_wr : NULL;
                }
        }
        return NULL;
}
static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
                                 struct ib_mad_private *recv,
                                 int solicited)
{
        struct ib_mad_send_wr_private *mad_send_wr;
        struct ib_mad_send_wc mad_send_wc;
        unsigned long flags;

        /* Fully reassemble receive before processing */
        recv = reassemble_recv(mad_agent_priv, recv);
        if (!recv) {
                if (atomic_dec_and_test(&mad_agent_priv->refcount))
                        wake_up(&mad_agent_priv->wait);
                return;
        }

        /* Complete corresponding request */
        if (solicited) {
                spin_lock_irqsave(&mad_agent_priv->lock, flags);
                mad_send_wr = find_send_req(mad_agent_priv,
                                            recv->mad.mad.mad_hdr.tid);
                if (!mad_send_wr) {
                        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
                        ib_free_recv_mad(&recv->header.recv_wc);
                        if (atomic_dec_and_test(&mad_agent_priv->refcount))
                                wake_up(&mad_agent_priv->wait);
                        return;
                }
                /* Timeout = 0 means that we won't wait for a response */
                mad_send_wr->timeout = 0;
                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

                /* Defined behavior is to complete response before request */
                recv->header.recv_wc.wc->wr_id = mad_send_wr->wr_id;
                mad_agent_priv->agent.recv_handler(
                                &mad_agent_priv->agent,
                                &recv->header.recv_wc);
                atomic_dec(&mad_agent_priv->refcount);

                mad_send_wc.status = IB_WC_SUCCESS;
                mad_send_wc.vendor_err = 0;
                mad_send_wc.wr_id = mad_send_wr->wr_id;
                ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
        } else {
                mad_agent_priv->agent.recv_handler(
                                &mad_agent_priv->agent,
                                &recv->header.recv_wc);
                if (atomic_dec_and_test(&mad_agent_priv->refcount))
                        wake_up(&mad_agent_priv->wait);
        }
}
static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
                                     struct ib_wc *wc)
{
        struct ib_mad_qp_info *qp_info;
        struct ib_mad_private_header *mad_priv_hdr;
        struct ib_mad_private *recv, *response;
        struct ib_mad_list_head *mad_list;
        struct ib_mad_agent_private *mad_agent;
        int solicited;

        response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
        if (!response)
                printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
                       "for response buffer\n");

        mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
        qp_info = mad_list->mad_queue->qp_info;
        dequeue_mad(mad_list);

        mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
                                    mad_list);
        recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
        dma_unmap_single(port_priv->device->dma_device,
                         pci_unmap_addr(&recv->header, mapping),
                         sizeof(struct ib_mad_private) -
                         sizeof(struct ib_mad_private_header),
                         DMA_FROM_DEVICE);

        /* Setup MAD receive work completion from "normal" work completion */
        recv->header.wc = *wc;
        recv->header.recv_wc.wc = &recv->header.wc;
        recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
        recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
        recv->header.recv_wc.recv_buf.grh = &recv->grh;

        if (atomic_read(&qp_info->snoop_count))
                snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

        /* Validate MAD */
        if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
                goto out;

        if (recv->mad.mad.mad_hdr.mgmt_class ==
            IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                if (!smi_handle_dr_smp_recv(&recv->mad.smp,
                                            port_priv->device->node_type,
                                            port_priv->port_num,
                                            port_priv->device->phys_port_cnt))
                        goto out;
                if (!smi_check_forward_dr_smp(&recv->mad.smp))
                        goto local;
                if (!smi_handle_dr_smp_send(&recv->mad.smp,
                                            port_priv->device->node_type,
                                            port_priv->port_num))
                        goto out;
                if (!smi_check_local_dr_smp(&recv->mad.smp,
                                            port_priv->device,
                                            port_priv->port_num))
                        goto out;
        }

local:
        /* Give driver "right of first refusal" on incoming MAD */
        if (port_priv->device->process_mad) {
                int ret;

                if (!response) {
                        printk(KERN_ERR PFX "No memory for response MAD\n");
                        /*
                         * Is it better to assume that
                         * it wouldn't be processed ?
                         */
                        goto out;
                }

                ret = port_priv->device->process_mad(port_priv->device, 0,
                                                     port_priv->port_num,
                                                     wc, &recv->grh,
                                                     &recv->mad.mad,
                                                     &response->mad.mad);
                if (ret & IB_MAD_RESULT_SUCCESS) {
                        if (ret & IB_MAD_RESULT_CONSUMED)
                                goto out;
                        if (ret & IB_MAD_RESULT_REPLY) {
                                /* Send response */
                                if (!agent_send(response, &recv->grh, wc,
                                                port_priv->device,
                                                port_priv->port_num))
                                        response = NULL;
                                goto out;
                        }
                }
        }

        /* Determine corresponding MAD agent for incoming receive MAD */
        solicited = solicited_mad(&recv->mad.mad);
        mad_agent = find_mad_agent(port_priv, &recv->mad.mad, solicited);
        if (mad_agent) {
                ib_mad_complete_recv(mad_agent, recv, solicited);
                /*
                 * recv is freed up in error cases in ib_mad_complete_recv
                 * or via recv_handler in ib_mad_complete_recv()
                 */
                recv = NULL;
        }

out:
        /* Post another receive request for this QP */
        if (response) {
                ib_mad_post_receive_mads(qp_info, response);
                if (recv)
                        kmem_cache_free(ib_mad_cache, recv);
        } else
                ib_mad_post_receive_mads(qp_info, recv);
}
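
/*
 * Receive buffer recycling: "response" is allocated up front so the
 * driver's process_mad() has somewhere to build a reply; whichever of
 * "recv"/"response" is not consumed above is reposted to the QP so the
 * receive queue does not shrink.
 */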
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
        struct ib_mad_send_wr_private *mad_send_wr;
        unsigned long delay;

        if (list_empty(&mad_agent_priv->wait_list)) {
                cancel_delayed_work(&mad_agent_priv->timed_work);
        } else {
                mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
                                         struct ib_mad_send_wr_private,
                                         agent_list);

                if (time_after(mad_agent_priv->timeout,
                               mad_send_wr->timeout)) {
                        mad_agent_priv->timeout = mad_send_wr->timeout;
                        cancel_delayed_work(&mad_agent_priv->timed_work);
                        delay = mad_send_wr->timeout - jiffies;
                        if ((long)delay <= 0)
                                delay = 1;
                        queue_delayed_work(mad_agent_priv->qp_info->
                                           port_priv->wq,
                                           &mad_agent_priv->timed_work, delay);
                }
        }
}
static void wait_for_response(struct ib_mad_agent_private *mad_agent_priv,
                              struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_mad_send_wr_private *temp_mad_send_wr;
        struct list_head *list_item;
        unsigned long delay;

        list_del(&mad_send_wr->agent_list);

        delay = mad_send_wr->timeout;
        mad_send_wr->timeout += jiffies;

        list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
                temp_mad_send_wr = list_entry(list_item,
                                              struct ib_mad_send_wr_private,
                                              agent_list);
                if (time_after(mad_send_wr->timeout,
                               temp_mad_send_wr->timeout))
                        break;
        }
        list_add(&mad_send_wr->agent_list, list_item);

        /* Reschedule a work item if we have a shorter timeout */
        if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
                cancel_delayed_work(&mad_agent_priv->timed_work);
                queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
                                   &mad_agent_priv->timed_work, delay);
        }
}
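
/*
 * wait_list is kept sorted by absolute timeout (jiffies), so its head
 * is always the next request to expire; adjust_timeout() and
 * timeout_sends() only ever need to look at the front of the list.
 */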
/*
 * Process a send work completion
 */
static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
                                    struct ib_mad_send_wc *mad_send_wc)
{
        struct ib_mad_agent_private *mad_agent_priv;
        unsigned long flags;

        mad_agent_priv = container_of(mad_send_wr->agent,
                                      struct ib_mad_agent_private, agent);

        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        if (mad_send_wc->status != IB_WC_SUCCESS &&
            mad_send_wr->status == IB_WC_SUCCESS) {
                mad_send_wr->status = mad_send_wc->status;
                mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
        }

        if (--mad_send_wr->refcount > 0) {
                if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
                    mad_send_wr->status == IB_WC_SUCCESS) {
                        wait_for_response(mad_agent_priv, mad_send_wr);
                }
                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
                return;
        }

        /* Remove send from MAD agent and notify client of completion */
        list_del(&mad_send_wr->agent_list);
        adjust_timeout(mad_agent_priv);
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

        if (mad_send_wr->status != IB_WC_SUCCESS)
                mad_send_wc->status = mad_send_wr->status;
        mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
                                           mad_send_wc);

        /* Release reference on agent taken when sending */
        if (atomic_dec_and_test(&mad_agent_priv->refcount))
                wake_up(&mad_agent_priv->wait);

        kfree(mad_send_wr);
}
static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
                                     struct ib_wc *wc)
{
        struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
        struct ib_mad_list_head *mad_list;
        struct ib_mad_qp_info *qp_info;
        struct ib_mad_queue *send_queue;
        struct ib_send_wr *bad_send_wr;
        unsigned long flags;
        int ret;

        mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
        mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
                                   mad_list);
        send_queue = mad_list->mad_queue;
        qp_info = send_queue->qp_info;

retry:
        queued_send_wr = NULL;
        spin_lock_irqsave(&send_queue->lock, flags);
        list_del(&mad_list->list);

        /* Move queued send to the send queue */
        if (send_queue->count-- > send_queue->max_active) {
                mad_list = container_of(qp_info->overflow_list.next,
                                        struct ib_mad_list_head, list);
                queued_send_wr = container_of(mad_list,
                                        struct ib_mad_send_wr_private,
                                        mad_list);
                list_del(&mad_list->list);
                list_add_tail(&mad_list->list, &send_queue->list);
        }
        spin_unlock_irqrestore(&send_queue->lock, flags);

        /* Restore client wr_id in WC and complete send */
        wc->wr_id = mad_send_wr->wr_id;
        if (atomic_read(&qp_info->snoop_count))
                snoop_send(qp_info, &mad_send_wr->send_wr,
                           (struct ib_mad_send_wc *)wc,
                           IB_MAD_SNOOP_SEND_COMPLETIONS);
        ib_mad_complete_send_wr(mad_send_wr, (struct ib_mad_send_wc *)wc);

        if (queued_send_wr) {
                ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
                                   &bad_send_wr);
                if (ret) {
                        printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
                        mad_send_wr = queued_send_wr;
                        wc->status = IB_WC_LOC_QP_OP_ERR;
                        goto retry;
                }
        }
}
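
/*
 * If reposting a queued overflow send fails, the failed WR is completed
 * synthetically with IB_WC_LOC_QP_OP_ERR via the retry label, which may
 * in turn pull the next overflow entry onto the send queue.
 */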
static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
        struct ib_mad_send_wr_private *mad_send_wr;
        struct ib_mad_list_head *mad_list;
        unsigned long flags;

        spin_lock_irqsave(&qp_info->send_queue.lock, flags);
        list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
                mad_send_wr = container_of(mad_list,
                                           struct ib_mad_send_wr_private,
                                           mad_list);
                mad_send_wr->retry = 1;
        }
        spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}
static void mad_error_handler(struct ib_mad_port_private *port_priv,
                              struct ib_wc *wc)
{
        struct ib_mad_list_head *mad_list;
        struct ib_mad_qp_info *qp_info;
        struct ib_mad_send_wr_private *mad_send_wr;
        int ret;

        /* Determine if failure was a send or receive */
        mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
        qp_info = mad_list->mad_queue->qp_info;
        if (mad_list->mad_queue == &qp_info->recv_queue)
                /*
                 * Receive errors indicate that the QP has entered the error
                 * state - error handling/shutdown code will cleanup
                 */
                return;

        /*
         * Send errors will transition the QP to SQE - move
         * QP to RTS and repost flushed work requests
         */
        mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
                                   mad_list);
        if (wc->status == IB_WC_WR_FLUSH_ERR) {
                if (mad_send_wr->retry) {
                        /* Repost send */
                        struct ib_send_wr *bad_send_wr;

                        mad_send_wr->retry = 0;
                        ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
                                           &bad_send_wr);
                        if (ret)
                                ib_mad_send_done_handler(port_priv, wc);
                } else
                        ib_mad_send_done_handler(port_priv, wc);
        } else {
                struct ib_qp_attr *attr;

                /* Transition QP to RTS and fail offending send */
                attr = kmalloc(sizeof *attr, GFP_KERNEL);
                if (attr) {
                        attr->qp_state = IB_QPS_RTS;
                        attr->cur_qp_state = IB_QPS_SQE;
                        ret = ib_modify_qp(qp_info->qp, attr,
                                           IB_QP_STATE | IB_QP_CUR_STATE);
                        kfree(attr);
                        if (ret)
                                printk(KERN_ERR PFX "mad_error_handler - "
                                       "ib_modify_qp to RTS : %d\n", ret);
                        else
                                mark_sends_for_retry(qp_info);
                }
                ib_mad_send_done_handler(port_priv, wc);
        }
}
/*
 * IB MAD completion callback
 */
static void ib_mad_completion_handler(void *data)
{
        struct ib_mad_port_private *port_priv;
        struct ib_wc wc;

        port_priv = (struct ib_mad_port_private *)data;
        ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

        while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
                if (wc.status == IB_WC_SUCCESS) {
                        switch (wc.opcode) {
                        case IB_WC_SEND:
                                ib_mad_send_done_handler(port_priv, &wc);
                                break;
                        case IB_WC_RECV:
                                ib_mad_recv_done_handler(port_priv, &wc);
                                break;
                        default:
                                BUG_ON(1);
                                break;
                        }
                } else
                        mad_error_handler(port_priv, &wc);
        }
}
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
        unsigned long flags;
        struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
        struct ib_mad_send_wc mad_send_wc;
        struct list_head cancel_list;

        INIT_LIST_HEAD(&cancel_list);

        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
                                 &mad_agent_priv->send_list, agent_list) {
                if (mad_send_wr->status == IB_WC_SUCCESS) {
                        mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
                        mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
                }
        }

        /* Empty wait list to prevent receives from finding a request */
        list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

        /* Report all cancelled requests */
        mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
        mad_send_wc.vendor_err = 0;

        list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
                                 &cancel_list, agent_list) {
                mad_send_wc.wr_id = mad_send_wr->wr_id;
                mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
                                                   &mad_send_wc);

                list_del(&mad_send_wr->agent_list);
                kfree(mad_send_wr);
                atomic_dec(&mad_agent_priv->refcount);
        }
}
static struct ib_mad_send_wr_private*
find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv,
                   u64 wr_id)
{
        struct ib_mad_send_wr_private *mad_send_wr;

        list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
                            agent_list) {
                if (mad_send_wr->wr_id == wr_id)
                        return mad_send_wr;
        }

        list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
                            agent_list) {
                if (mad_send_wr->wr_id == wr_id)
                        return mad_send_wr;
        }
        return NULL;
}
/* Declared static above; add the qualifier here to match. */
static void cancel_sends(void *data)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;
        struct ib_mad_send_wc mad_send_wc;
        unsigned long flags;

        mad_agent_priv = data;

        mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
        mad_send_wc.vendor_err = 0;

        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        while (!list_empty(&mad_agent_priv->canceled_list)) {
                mad_send_wr = list_entry(mad_agent_priv->canceled_list.next,
                                         struct ib_mad_send_wr_private,
                                         agent_list);

                list_del(&mad_send_wr->agent_list);
                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

                mad_send_wc.wr_id = mad_send_wr->wr_id;
                mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
                                                   &mad_send_wc);

                kfree(mad_send_wr);
                if (atomic_dec_and_test(&mad_agent_priv->refcount))
                        wake_up(&mad_agent_priv->wait);
                spin_lock_irqsave(&mad_agent_priv->lock, flags);
        }
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
void ib_cancel_mad(struct ib_mad_agent *mad_agent,
                   u64 wr_id)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;
        unsigned long flags;

        mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
                                      agent);
        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        mad_send_wr = find_send_by_wr_id(mad_agent_priv, wr_id);
        if (!mad_send_wr) {
                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
                goto out;
        }

        if (mad_send_wr->status == IB_WC_SUCCESS)
                mad_send_wr->refcount -= (mad_send_wr->timeout > 0);

        if (mad_send_wr->refcount != 0) {
                mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
                goto out;
        }

        list_del(&mad_send_wr->agent_list);
        list_add_tail(&mad_send_wr->agent_list, &mad_agent_priv->canceled_list);
        adjust_timeout(mad_agent_priv);
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

        queue_work(mad_agent_priv->qp_info->port_priv->wq,
                   &mad_agent_priv->canceled_work);
out:
        return;
}
EXPORT_SYMBOL(ib_cancel_mad);
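
/*
 * Cancellation is two-phase: ib_cancel_mad() moves the request to
 * canceled_list under the agent lock and queues canceled_work;
 * cancel_sends() then reports IB_WC_WR_FLUSH_ERR to the client from
 * workqueue context, where the send handler may sleep.
 */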
static void local_completions(void *data)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_local_private *local;
        struct ib_mad_agent_private *recv_mad_agent;
        unsigned long flags;
        struct ib_wc wc;
        struct ib_mad_send_wc mad_send_wc;

        mad_agent_priv = (struct ib_mad_agent_private *)data;

        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        while (!list_empty(&mad_agent_priv->local_list)) {
                local = list_entry(mad_agent_priv->local_list.next,
                                   struct ib_mad_local_private,
                                   completion_list);
                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
                if (local->mad_priv) {
                        recv_mad_agent = local->recv_mad_agent;
                        if (!recv_mad_agent) {
                                printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
                                kmem_cache_free(ib_mad_cache, local->mad_priv);
                                goto local_send_completion;
                        }

                        /*
                         * Defined behavior is to complete response
                         * before request
                         */
                        build_smp_wc(local->wr_id, IB_LID_PERMISSIVE,
                                     0 /* pkey index */,
                                     recv_mad_agent->agent.port_num, &wc);

                        local->mad_priv->header.recv_wc.wc = &wc;
                        local->mad_priv->header.recv_wc.mad_len =
                                                sizeof(struct ib_mad);
                        INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.recv_buf.list);
                        local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
                        local->mad_priv->header.recv_wc.recv_buf.mad =
                                                &local->mad_priv->mad.mad;
                        if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
                                snoop_recv(recv_mad_agent->qp_info,
                                           &local->mad_priv->header.recv_wc,
                                           IB_MAD_SNOOP_RECVS);
                        recv_mad_agent->agent.recv_handler(
                                                &recv_mad_agent->agent,
                                                &local->mad_priv->header.recv_wc);
                        spin_lock_irqsave(&recv_mad_agent->lock, flags);
                        atomic_dec(&recv_mad_agent->refcount);
                        spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
                }

local_send_completion:
                /* Complete send */
                mad_send_wc.status = IB_WC_SUCCESS;
                mad_send_wc.vendor_err = 0;
                mad_send_wc.wr_id = local->wr_id;
                if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
                        snoop_send(mad_agent_priv->qp_info, &local->send_wr,
                                   &mad_send_wc,
                                   IB_MAD_SNOOP_SEND_COMPLETIONS);
                mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
                                                   &mad_send_wc);

                spin_lock_irqsave(&mad_agent_priv->lock, flags);
                list_del(&local->completion_list);
                atomic_dec(&mad_agent_priv->refcount);
                kfree(local);
        }
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
static void timeout_sends(void *data)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;
        struct ib_mad_send_wc mad_send_wc;
        unsigned long flags, delay;

        mad_agent_priv = (struct ib_mad_agent_private *)data;

        mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
        mad_send_wc.vendor_err = 0;

        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        while (!list_empty(&mad_agent_priv->wait_list)) {
                mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
                                         struct ib_mad_send_wr_private,
                                         agent_list);

                if (time_after(mad_send_wr->timeout, jiffies)) {
                        delay = mad_send_wr->timeout - jiffies;
                        if ((long)delay <= 0)
                                delay = 1;
                        queue_delayed_work(mad_agent_priv->qp_info->
                                           port_priv->wq,
                                           &mad_agent_priv->timed_work, delay);
                        break;
                }

                list_del(&mad_send_wr->agent_list);
                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

                mad_send_wc.wr_id = mad_send_wr->wr_id;
                mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
                                                   &mad_send_wc);

                kfree(mad_send_wr);
                atomic_dec(&mad_agent_priv->refcount);
                spin_lock_irqsave(&mad_agent_priv->lock, flags);
        }
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
static void ib_mad_thread_completion_handler(struct ib_cq *cq)
{
        struct ib_mad_port_private *port_priv = cq->cq_context;

        queue_work(port_priv->wq, &port_priv->work);
}
/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
	sg_list.lkey = (*qp_info->port_priv->mr).lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

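	/*
	 * Refill the receive queue to its full depth: "post" stays true
	 * until recv_queue->count reaches max_active.  A MAD buffer passed
	 * in by the caller (one being recycled) is consumed before new
	 * buffers are allocated from the slab cache.
	 */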
	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
			if (!mad_priv) {
				printk(KERN_ERR PFX "No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.addr = dma_map_single(qp_info->port_priv->
						device->dma_device,
					      &mad_priv->grh,
					      sizeof *mad_priv -
						sizeof mad_priv->header,
					      DMA_FROM_DEVICE);
		pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
		mad_priv->header.mad_list.mad_queue = recv_queue;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			dma_unmap_single(qp_info->port_priv->device->dma_device,
					 pci_unmap_addr(&mad_priv->header,
							mapping),
					 sizeof *mad_priv -
						sizeof mad_priv->header,
					 DMA_FROM_DEVICE);
			kmem_cache_free(ib_mad_cache, mad_priv);
			printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}

/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		dma_unmap_single(qp_info->port_priv->device->dma_device,
				 pci_unmap_addr(&recv->header, mapping),
				 sizeof(struct ib_mad_private) -
				 sizeof(struct ib_mad_private_header),
				 DMA_FROM_DEVICE);
		kmem_cache_free(ib_mad_cache, recv);
	}

	qp_info->recv_queue.count = 0;
}

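/*
 * Start the port: move both special QPs (QP0 and QP1) through the
 * INIT -> RTR -> RTS transitions, request CQ notification, and prime
 * the receive queues.
 */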
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = 0;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "INIT: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTR: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTS: %d\n", i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		printk(KERN_ERR PFX "Failed to request completion "
		       "notification: %d\n", ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}

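/*
 * Fatal QP error handler.  The MAD QPs are not recovered here; the
 * event is only logged.
 */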
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
	       event->event, qp_info->qp->qp_num);
}

static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}

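/*
 * Create one of the two special MAD QPs.  Both QPs share the port's
 * CQ, which ib_mad_port_open sizes for the combined send and receive
 * depth of both queues.
 */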
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
	qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
		       get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
	qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
	return 0;

error:
	return ret;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	ib_destroy_qp(qp_info->qp);
	if (qp_info->snoop_table)
		kfree(qp_info->snoop_table);
}

/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];

	/* First, check if port already open at MAD layer */
	port_priv = ib_get_mad_port(device, port_num);
	if (port_priv) {
		printk(KERN_DEBUG PFX "%s port %d already open\n",
		       device->name, port_num);
		return 0;
	}

	/* Create new device info */
	port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}
	memset(port_priv, 0, sizeof *port_priv);
	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
	port_priv->cq = ib_create_cq(port_priv->device,
				     (ib_comp_handler)
					ib_mad_thread_completion_handler,
				     NULL, port_priv, cq_size);
	if (IS_ERR(port_priv->cq)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device);
	if (IS_ERR(port_priv->pd)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(port_priv->mr)) {
		printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
		ret = PTR_ERR(port_priv->mr);
		goto error5;
	}

	ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
	if (ret)
		goto error6;
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = create_singlethread_workqueue(name);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		printk(KERN_ERR PFX "Couldn't start port\n");
		goto error9;
	}

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return 0;

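	/*
	 * Error unwinding: each label below releases everything acquired
	 * after the corresponding failure point, in reverse order of
	 * allocation.
	 */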
error9:
	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dereg_mr(port_priv->mr);
error5:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);

	return ret;
}

/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		printk(KERN_ERR PFX "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	/* Stop processing completions. */
	flush_workqueue(port_priv->wq);
	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dereg_mr(port_priv->mr);
	ib_dealloc_pd(port_priv->pd);
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}

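/*
 * ib_client add callback: bring up MAD services on every port of a
 * newly registered device.  A switch exposes MAD services only on
 * port 0; a CA or router exposes them on ports 1 through
 * phys_port_cnt.
 */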
static void ib_mad_init_device(struct ib_device *device)
{
	int ret, num_ports, cur_port, i, ret2;

	if (device->node_type == IB_NODE_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		ret = ib_mad_port_open(device, cur_port);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't open %s port %d\n",
			       device->name, cur_port);
			goto error_device_open;
		}
		ret = ib_agent_port_open(device, cur_port);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't open %s port %d "
			       "for agents\n",
			       device->name, cur_port);
			goto error_device_open;
		}
	}

	goto error_device_query;	/* success: skip the unwind below */

error_device_open:
	while (i > 0) {
		cur_port--;
		ret2 = ib_agent_port_close(device, cur_port);
		if (ret2) {
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, cur_port);
		}
		ret2 = ib_mad_port_close(device, cur_port);
		if (ret2) {
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, cur_port);
		}
		i--;
	}

error_device_query:
	return;
}

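/*
 * ib_client remove callback: tear down agent and MAD services on every
 * port.  Failures are logged but otherwise ignored, since device
 * removal cannot be aborted at this point.
 */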
static void ib_mad_remove_device(struct ib_device *device)
{
	int ret = 0, i, num_ports, cur_port, ret2;

	if (device->node_type == IB_NODE_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		ret2 = ib_agent_port_close(device, cur_port);
		if (ret2) {
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, cur_port);
			if (!ret)
				ret = ret2;
		}
		ret2 = ib_mad_port_close(device, cur_port);
		if (ret2) {
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, cur_port);
			if (!ret)
				ret = ret2;
		}
	}
}

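/*
 * Register with the IB core as a client so MAD services are added to
 * and removed from devices as they come and go.
 */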
static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};

static int __init ib_mad_init_module(void)
{
	int ret;

	spin_lock_init(&ib_mad_port_list_lock);
	spin_lock_init(&ib_agent_port_list_lock);

	ib_mad_cache = kmem_cache_create("ib_mad",
					 sizeof(struct ib_mad_private),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL,
					 NULL);
	if (!ib_mad_cache) {
		printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
		ret = -ENOMEM;
		goto error1;
	}

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
		ret = -EINVAL;
		goto error2;
	}

	return 0;

error2:
	kmem_cache_destroy(ib_mad_cache);
error1:
	return ret;
}

static void __exit ib_mad_cleanup_module(void)
{
	ib_unregister_client(&mad_client);

	if (kmem_cache_destroy(ib_mad_cache)) {
		printk(KERN_DEBUG PFX "Failed to destroy ib_mad cache\n");
	}
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);