/*
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mad.c 5596 2006-03-03 01:00:07Z sean.hefty $
 */
#include <linux/dma-mapping.h>
#include <rdma/ib_cache.h>

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");

static kmem_cache_t *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

static spinlock_t ib_mad_port_list_lock;

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(void *data);
static void local_completions(void *data);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
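
/*
 * Map a special QP type to its index within the port's qp_info[] array:
 * QP0 (SMI) maps to 0 and QP1 (GSI) maps to 1; any other QP type is
 * invalid here and yields a negative value.
 */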
static int get_spl_qp_index(enum ib_qp_type qp_type)

static int vendor_class_index(u8 mgmt_class)
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;

static int is_vendor_class(u8 mgmt_class)
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))

static int is_vendor_oui(char *oui)
	if (oui[0] || oui[1] || oui[2])

static int is_vendor_method_in_use(
			struct ib_mad_mgmt_vendor_class *vendor_class,
			struct ib_mad_reg_req *mad_reg_req)
	struct ib_mad_mgmt_method_table *method;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
				if (method_in_use(&method, mad_reg_req))

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)

	/* Validate MAD registration request if supplied */
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		} else if (mad_reg_req->mgmt_class == 0) {
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			if (!is_vendor_oui(mad_reg_req->oui))
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
			     IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
			     IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
			if ((mad_reg_req->mgmt_class ==
			     IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
			     IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
		/* No registration request supplied */

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
		ret = ERR_PTR(-ENODEV);

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);

	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
						 IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mad_agent_priv->agent.mr)) {
		ret = ERR_PTR(-ENOMEM);

		reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
			ret = ERR_PTR(-ENOMEM);
		/* Make a copy of the MAD registration request */
		memcpy(reg_req, mad_reg_req, sizeof *reg_req);

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
				method = class->method_table[mgmt_class];
					if (method_in_use(&method,
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
					if (is_vendor_method_in_use(
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions,
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	return &mad_agent_priv->agent;

	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
EXPORT_SYMBOL(ib_register_mad_agent);
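
/*
 * Illustrative sketch (not part of this file): a client registering on the
 * GSI QP for SA-class MADs.  my_send_handler, my_recv_handler and my_context
 * are hypothetical client-supplied values.
 *
 *	struct ib_mad_reg_req reg_req = {
 *		.mgmt_class	    = IB_MGMT_CLASS_SUBN_ADM,
 *		.mgmt_class_version = 2,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &reg_req,
 *				      0, my_send_handler, my_recv_handler,
 *				      my_context);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */
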
static inline int is_snooping_sends(int mad_snoop_flags)
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		   IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		   IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));

static inline int is_snooping_recvs(int mad_snoop_flags)
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		   IB_MAD_SNOOP_RMPP_RECVS*/));

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
	struct ib_mad_snoop_private **new_snoop_table;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])

	if (i == qp_info->snoop_table_size) {
		new_snoop_table = kmalloc(sizeof mad_snoop_priv *
					  (qp_info->snoop_table_size + 1),
		if (!new_snoop_table) {
		if (qp_info->snoop_table) {
			memcpy(new_snoop_table, qp_info->snoop_table,
			       sizeof mad_snoop_priv *
			       qp_info->snoop_table_size);
			kfree(qp_info->snoop_table);
		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   enum ib_qp_type qp_type,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
	qpn = get_spl_qp_index(qp_type);
		ret = ERR_PTR(-EINVAL);
	port_priv = ib_get_mad_port(device, port_num);
		ret = ERR_PTR(-ENODEV);
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

	kfree(mad_snoop_priv);
EXPORT_SYMBOL(ib_register_mad_snoop);

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
	struct ib_mad_port_private *port_priv;

	/* Note that we could still be handling received MADs */
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
	struct ib_mad_qp_info *qp_info;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
		unregister_mad_agent(mad_agent_priv);
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
		unregister_mad_snoop(mad_snoop_priv);
EXPORT_SYMBOL(ib_unregister_mad_agent);

static inline int response_mad(struct ib_mad *mad)
	/* Trap represses are responses although response bit is reset */
	return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		(mad->mad_hdr.method & IB_MGMT_METHOD_RESP));

static void dequeue_mad(struct ib_mad_list_head *mad_list)
	struct ib_mad_queue *mad_queue;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	spin_unlock_irqrestore(&mad_queue->lock, flags);

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
	struct ib_mad_snoop_private *mad_snoop_priv;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
	struct ib_mad_snoop_private *mad_snoop_priv;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
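
/*
 * Fabricate a receive work completion for a directed route SMP that is
 * being processed locally, so it can be handed to the device's
 * process_mad() and the normal receive path as if it had arrived on the
 * wire.
 */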
static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
	memset(wc, 0, sizeof *wc);
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;

	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
	if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
	    !smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
		printk(KERN_ERR PFX "Invalid directed route\n");
	/* Check to post send on QP or process locally */
	ret = smi_check_local_smp(smp, device);

	local = kmalloc(sizeof *local, GFP_ATOMIC);
		printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
		printk(KERN_ERR PFX "No memory for local response MAD\n");

	build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid),
		     send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (struct ib_mad *)smp,
				  (struct ib_mad *)&mad_priv->mad);
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (response_mad(&mad_priv->mad.mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			 * Reference MAD agent until receive
			 * side of local completion handled
			atomic_inc(&mad_agent_priv->refcount);
			kmem_cache_free(ib_mad_cache, mad_priv);
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kmem_cache_free(ib_mad_cache, mad_priv);
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
			mad_priv->mad.mad.mad_hdr.tid =
				((struct ib_mad *)smp)->mad_hdr.tid;
			recv_mad_agent = find_mad_agent(port_priv,
		if (!port_priv || !recv_mad_agent) {
			kmem_cache_free(ib_mad_cache, mad_priv);
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		kmem_cache_free(ib_mad_cache, mad_priv);

	local->mad_send_wr = mad_send_wr;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
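
/*
 * Compute the number of padding bytes needed so that the RMPP data fills
 * a whole number of segments; returns 0 when data_len already ends on a
 * segment boundary.
 */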
static int get_pad_size(int hdr_len, int data_len)
	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
	seg_size = send_buf->seg_size;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
			printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
			       "alloc failed for len %zd, gfp %#x\n",
			       sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);

	/* Zero any padding */
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;

struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int hdr_len, int data_len,
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
	pad = get_pad_size(hdr_len, data_len);
	message_size = hdr_len + data_len + pad;

	if ((!mad_agent->rmpp_version &&
	     (rmpp_active || message_size > sizeof(struct ib_mad))) ||
	    (!rmpp_active && message_size > sizeof(struct ib_mad)))
		return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
	mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
	mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;

	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.num_sge = 2;
	mad_send_wr->send_wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

		ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
EXPORT_SYMBOL(ib_create_send_mad);

int ib_get_mad_data_offset(u8 mgmt_class)
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
		return IB_MGMT_MAD_HDR;
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
	return mad_send_wr->cur_seg->data;
EXPORT_SYMBOL(ib_get_rmpp_segment);
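
/*
 * Return a pointer to the data payload of the work request: the current
 * RMPP segment for a segmented send, or the area immediately following
 * the MAD header for a single-segment send.
 */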
static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = dma_map_single(mad_agent->device->dma_device,
				     mad_send_wr->send_buf.mad,
	pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr);

	sge[1].addr = dma_map_single(mad_agent->device->dma_device,
				     ib_get_payload(mad_send_wr),
	pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr);

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
		list = &qp_info->send_queue.list;
		list = &qp_info->overflow_list;

		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
		dma_unmap_single(mad_agent->device->dma_device,
				 pci_unmap_addr(mad_send_wr, header_mapping),
				 sge[0].length, DMA_TO_DEVICE);
		dma_unmap_single(mad_agent->device->dma_device,
				 pci_unmap_addr(mad_send_wr, payload_mapping),
				 sge[1].length, DMA_TO_DEVICE);

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 * with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {

		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
			if (ret < 0)		/* error */
			else if (ret == 1)	/* locally consumed */

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->retries = send_buf->retries;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_agent_priv->agent.rmpp_version) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
			ret = ib_send_mad(mad_send_wr);
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);

	*bad_send_buf = send_buf;
EXPORT_SYMBOL(ib_post_send_mad);
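
/*
 * Illustrative sketch (not part of this file): allocating and posting a
 * single, non-RMPP MAD.  "agent", "ah", "remote_qpn" and "pkey_index" are
 * assumed to have been obtained elsewhere.
 *
 *	struct ib_mad_send_buf *msg;
 *	int ret;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 *				 GFP_KERNEL);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *
 *	msg->ah = ah;
 *	msg->timeout_ms = 1000;
 *	msg->retries = 3;
 *	... fill in msg->mad here ...
 *	ret = ib_post_send_mad(msg, NULL);
 *	if (ret)
 *		ib_free_send_mad(msg);
 */
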
/*
 * ib_free_recv_mad - Returns data buffers used to receive
 * a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
		kmem_cache_free(ib_mad_cache, priv);
EXPORT_SYMBOL(ib_free_recv_mad);

struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
	return ERR_PTR(-EINVAL);	/* XXX: for now */
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
	printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
EXPORT_SYMBOL(ib_process_mad_wc);

static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)

	for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
		if ((*method)->agent[i]) {
			printk(KERN_ERR PFX "Method %d already in use\n", i);

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
		printk(KERN_ERR PFX "No memory for "
		       "ib_mad_mgmt_method_table\n");

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;

static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_class_table\n");

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
		method = &(*class)->method_table[mgmt_class];
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))

	/* Finally, add in methods being registered */
	for (i = find_first_bit(mad_reg_req->method_mask,
				IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
		(*method)->agent[i] = agent_priv;

	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */

static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class_table\n");

		*vendor_table = vendor;
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class\n");

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			/* Allocate method table for this OUI */
			if ((ret = allocate_method_table(method)))
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
	printk(KERN_ERR PFX "All OUI slots in use\n");

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))

	/* Finally, add in methods being registered */
	for (i = find_first_bit(mad_reg_req->method_mask,
				IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
		(*method)->agent[i] = agent_priv;

	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
	(*vendor_table)->vendor_class[vclass] = NULL;
	kfree(vendor_class);
	*vendor_table = NULL;

static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;

	 * Was MAD registration request supplied
	 * with original registration ?
	if (!agent_priv->reg_req) {

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;

	method = class->method_table[mgmt_class];
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;

	if (!is_vendor_class(mgmt_class))

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	vendor_class = vendor->vendor_class[mgmt_class];
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		method = vendor_class->method_table[index];
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			 * Now, check to see if there are
			 * any methods still in use
			if (!check_method_table(method)) {
				/* If not, release management method table */
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left ? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left ? */
					if (!check_vendor_table(vendor)) {
						agent_priv->reg_req->
						mgmt_class_version].

static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (response_mad(mad)) {
		struct ib_mad_agent_private *entry;

		 * Routing is based on high 32 bits of transaction ID
		hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		struct ib_vendor_mad *vendor_mad;

		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
		if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
			class = port_priv->version[
					mad->mad_hdr.class_version].class;
			method = class->method_table[convert_mgmt_class(
						mad->mad_hdr.mgmt_class)];
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
			vendor = port_priv->version[
					mad->mad_hdr.class_version].vendor;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad->mad_hdr.mgmt_class)];

			/* Find matching OUI */
			vendor_mad = (struct ib_vendor_mad *)mad;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			method = vendor_class->method_table[index];
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];

		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
			printk(KERN_NOTICE PFX "No receive handler for client "
			       &mad_agent->agent, port_priv->port_num);

	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

static int validate_mad(struct ib_mad *mad, u32 qp_num)

	/* Make sure MAD base version is understood */
	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
		printk(KERN_ERR PFX "MAD received with unsupported base "
		       "version %d\n", mad->mad_hdr.base_version);

	/* Filter SMI packets sent to other than QP0 */
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {

	/* Filter GSI packets sent to QP0 */
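
/*
 * A send WR is considered a "data" MAD if the agent is not using RMPP,
 * if the RMPP active flag is not set, or if the segment is an RMPP DATA
 * segment (rather than an ACK/STOP/ABORT), i.e. it is one that can match
 * a received response.
 */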
static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
		       struct ib_mad_hdr *mad_hdr)
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
		  IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);

static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
				     struct ib_mad_recv_wc *rwc)
	return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;

static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
				   struct ib_mad_send_wr_private *wr,
				   struct ib_mad_recv_wc *rwc)
	struct ib_ah_attr attr;
	u8 send_resp, rcv_resp;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;

	send_resp = ((struct ib_mad *)(wr->send_buf.mad))->
		     mad_hdr.method & IB_MGMT_METHOD_RESP;
	rcv_resp = rwc->recv_buf.mad->mad_hdr.method & IB_MGMT_METHOD_RESP;

	if (send_resp == rcv_resp)
		/* both requests, or both responses. GIDs different */

	if (ib_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */

	if (!!(attr.ah_flags & IB_AH_GRH) !=
	    !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has GID, other does not. Assume different */

	if (!send_resp && rcv_resp) {
		/* is request/response. */
		if (!(attr.ah_flags & IB_AH_GRH)) {
			if (ib_get_cached_lmc(device, port_num, &lmc))
			return (!lmc || !((attr.src_path_bits ^
					   rwc->wc->dlid_path_bits) &
			if (ib_get_cached_gid(device, port_num,
					      attr.grh.sgid_index, &sgid))
			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,

	if (!(attr.ah_flags & IB_AH_GRH))
		return attr.dlid == rwc->wc->slid;
	return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,

static inline int is_direct(u8 class)
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
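
/*
 * Locate the send WR that a received response completes: search the wait
 * list first, then the send list (a response can arrive before the send
 * completion), matching on TID, management class and, for LID routed
 * MADs, the GID/LID of the peer.
 */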
struct ib_mad_send_wr_private*
ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
		 struct ib_mad_recv_wc *wc)
	struct ib_mad_send_wr_private *wr;

	mad = (struct ib_mad *)wc->recv_buf.mad;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad->mad_hdr.tid) &&
		    rcv_has_same_class(wr, wc) &&
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		    (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))

	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad->mad_hdr.tid &&
		    rcv_has_same_class(wr, wc) &&
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		    (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;

void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1) {
		list_del(&mad_send_wr->agent_list);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->done_list);

static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (mad_agent_priv->agent.rmpp_version) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
			deref_mad_agent(mad_agent_priv);

	/* Complete corresponding request */
	if (response_mad(mad_recv_wc->recv_buf.mad)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			ib_free_recv_mad(mad_recv_wc);
			deref_mad_agent(mad_agent_priv);
		ib_mark_mad_done(mad_send_wr);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		/* Defined behavior is to complete response before request */
		mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
		atomic_dec(&mad_agent_priv->refcount);

		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
		deref_mad_agent(mad_agent_priv);

static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_agent_private *mad_agent;

	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
		       "for response buffer\n");

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	dma_unmap_single(port_priv->device->dma_device,
			 pci_unmap_addr(&recv->header, mapping),
			 sizeof(struct ib_mad_private) -
			 sizeof(struct ib_mad_private_header),

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;
	recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
	recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))

	if (recv->mad.mad.mad_hdr.mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (!smi_handle_dr_smp_recv(&recv->mad.smp,
					    port_priv->device->node_type,
					    port_priv->port_num,
					    port_priv->device->phys_port_cnt))
		if (!smi_check_forward_dr_smp(&recv->mad.smp))
		if (!smi_handle_dr_smp_send(&recv->mad.smp,
					    port_priv->device->node_type,
					    port_priv->port_num))
		if (!smi_check_local_smp(&recv->mad.smp, port_priv->device))

	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
			printk(KERN_ERR PFX "No memory for response MAD\n");
			 * Is it better to assume that
			 * it wouldn't be processed ?

		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     &response->mad.mad);
		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response(&response->mad.mad,
						    port_priv->port_num,
						    qp_info->qp->qp_num);

	mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()

	/* Post another receive request for this QP */
		ib_mad_post_receive_mads(qp_info, response);
		kmem_cache_free(ib_mad_cache, recv);
		ib_mad_post_receive_mads(qp_info, recv);
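
/*
 * If the wait list is empty, cancel the delayed timeout work; otherwise,
 * if the request at the head of the list times out earlier than the
 * currently scheduled work, reschedule the work for that earlier time.
 */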
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			cancel_delayed_work(&mad_agent_priv->timed_work);
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
			queue_delayed_work(mad_agent_priv->qp_info->
					   &mad_agent_priv->timed_work, delay);
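
/*
 * Move a send WR that is awaiting a response onto the wait list, keeping
 * the list sorted by absolute timeout, and reschedule the timeout work if
 * this request now expires first.
 */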
1970 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
1972 struct ib_mad_agent_private *mad_agent_priv;
1973 struct ib_mad_send_wr_private *temp_mad_send_wr;
1974 struct list_head *list_item;
1975 unsigned long delay;
1977 mad_agent_priv = mad_send_wr->mad_agent_priv;
1978 list_del(&mad_send_wr->agent_list);
1980 delay = mad_send_wr->timeout;
1981 mad_send_wr->timeout += jiffies;
1984 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
1985 temp_mad_send_wr = list_entry(list_item,
1986 struct ib_mad_send_wr_private,
1988 if (time_after(mad_send_wr->timeout,
1989 temp_mad_send_wr->timeout))
1994 list_item = &mad_agent_priv->wait_list;
1995 list_add(&mad_send_wr->agent_list, list_item);
1997 /* Reschedule a work item if we have a shorter timeout */
1998 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
1999 cancel_delayed_work(&mad_agent_priv->timed_work);
2000 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2001 &mad_agent_priv->timed_work, delay);
2005 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2008 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2009 wait_for_response(mad_send_wr);
2013 * Process a send work completion
2015 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2016 struct ib_mad_send_wc *mad_send_wc)
2018 struct ib_mad_agent_private *mad_agent_priv;
2019 unsigned long flags;
2022 mad_agent_priv = mad_send_wr->mad_agent_priv;
2023 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2024 if (mad_agent_priv->agent.rmpp_version) {
2025 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2026 if (ret == IB_RMPP_RESULT_CONSUMED)
2029 ret = IB_RMPP_RESULT_UNHANDLED;
2031 if (mad_send_wc->status != IB_WC_SUCCESS &&
2032 mad_send_wr->status == IB_WC_SUCCESS) {
2033 mad_send_wr->status = mad_send_wc->status;
2034 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2037 if (--mad_send_wr->refcount > 0) {
2038 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2039 mad_send_wr->status == IB_WC_SUCCESS) {
2040 wait_for_response(mad_send_wr);
2045 /* Remove send from MAD agent and notify client of completion */
2046 list_del(&mad_send_wr->agent_list);
2047 adjust_timeout(mad_agent_priv);
2048 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2050 if (mad_send_wr->status != IB_WC_SUCCESS )
2051 mad_send_wc->status = mad_send_wr->status;
2052 if (ret == IB_RMPP_RESULT_INTERNAL)
2053 ib_rmpp_send_handler(mad_send_wc);
2055 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2058 /* Release reference on agent taken when sending */
2059 deref_mad_agent(mad_agent_priv);
2062 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2065 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2068 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2069 struct ib_mad_list_head *mad_list;
2070 struct ib_mad_qp_info *qp_info;
2071 struct ib_mad_queue *send_queue;
2072 struct ib_send_wr *bad_send_wr;
2073 struct ib_mad_send_wc mad_send_wc;
2074 unsigned long flags;
2077 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2078 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2080 send_queue = mad_list->mad_queue;
2081 qp_info = send_queue->qp_info;
2084 dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
2085 pci_unmap_addr(mad_send_wr, header_mapping),
2086 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2087 dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
2088 pci_unmap_addr(mad_send_wr, payload_mapping),
2089 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2090 queued_send_wr = NULL;
2091 spin_lock_irqsave(&send_queue->lock, flags);
2092 list_del(&mad_list->list);
2094 /* Move queued send to the send queue */
2095 if (send_queue->count-- > send_queue->max_active) {
2096 mad_list = container_of(qp_info->overflow_list.next,
2097 struct ib_mad_list_head, list);
2098 queued_send_wr = container_of(mad_list,
2099 struct ib_mad_send_wr_private,
2101 list_del(&mad_list->list);
2102 list_add_tail(&mad_list->list, &send_queue->list);
2104 spin_unlock_irqrestore(&send_queue->lock, flags);
2106 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2107 mad_send_wc.status = wc->status;
2108 mad_send_wc.vendor_err = wc->vendor_err;
2109 if (atomic_read(&qp_info->snoop_count))
2110 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2111 IB_MAD_SNOOP_SEND_COMPLETIONS);
2112 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2114 if (queued_send_wr) {
2115 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
2118 printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
2119 mad_send_wr = queued_send_wr;
2120 wc->status = IB_WC_LOC_QP_OP_ERR;
2126 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2128 struct ib_mad_send_wr_private *mad_send_wr;
2129 struct ib_mad_list_head *mad_list;
2130 unsigned long flags;
2132 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2133 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2134 mad_send_wr = container_of(mad_list,
2135 struct ib_mad_send_wr_private,
2137 mad_send_wr->retry = 1;
2139 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2142 static void mad_error_handler(struct ib_mad_port_private *port_priv,
2145 struct ib_mad_list_head *mad_list;
2146 struct ib_mad_qp_info *qp_info;
2147 struct ib_mad_send_wr_private *mad_send_wr;
2150 /* Determine if failure was a send or receive */
2151 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2152 qp_info = mad_list->mad_queue->qp_info;
2153 if (mad_list->mad_queue == &qp_info->recv_queue)
2155 * Receive errors indicate that the QP has entered the error
2156 * state - error handling/shutdown code will cleanup
2161 * Send errors will transition the QP to SQE - move
2162 * QP to RTS and repost flushed work requests
2164 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2166 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2167 if (mad_send_wr->retry) {
2169 struct ib_send_wr *bad_send_wr;
2171 mad_send_wr->retry = 0;
2172 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2175 ib_mad_send_done_handler(port_priv, wc);
2177 ib_mad_send_done_handler(port_priv, wc);
2179 struct ib_qp_attr *attr;
2181 /* Transition QP to RTS and fail offending send */
2182 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2183 if (attr) {
2184 attr->qp_state = IB_QPS_RTS;
2185 attr->cur_qp_state = IB_QPS_SQE;
2186 ret = ib_modify_qp(qp_info->qp, attr,
2187 IB_QP_STATE | IB_QP_CUR_STATE);
2188 kfree(attr);
2189 if (ret)
2190 printk(KERN_ERR PFX "mad_error_handler - "
2191 "ib_modify_qp to RTS : %d\n", ret);
2192 else
2193 mark_sends_for_retry(qp_info);
2194 }
2195 ib_mad_send_done_handler(port_priv, wc);
2196 }
2197 }
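/*
 * The completion handler below runs from the per-port workqueue (queued by
 * ib_mad_thread_completion_handler); it drains the CQ and dispatches each
 * work completion to the send, receive, or error path.
 */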
2199 /*
2200  * IB MAD completion callback
2201  */
2202 static void ib_mad_completion_handler(void *data)
2203 {
2204 struct ib_mad_port_private *port_priv;
2205 struct ib_wc wc;
2207 port_priv = (struct ib_mad_port_private *)data;
2208 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2210 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2211 if (wc.status == IB_WC_SUCCESS) {
2212 switch (wc.opcode) {
2213 case IB_WC_SEND:
2214 ib_mad_send_done_handler(port_priv, &wc);
2215 break;
2216 case IB_WC_RECV:
2217 ib_mad_recv_done_handler(port_priv, &wc);
2218 break;
2219 default:
2220 BUG_ON(1);
2221 break;
2222 }
2223 } else
2224 mad_error_handler(port_priv, &wc);
2225 }
2226 }
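/*
 * cancel_mads() flushes every send still owned by an agent when the agent
 * unregisters: outstanding sends are marked IB_WC_WR_FLUSH_ERR, the wait
 * and local completion lists are spliced onto a private cancel list, and
 * each request is then reported to the agent's send handler.
 */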
2228 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2229 {
2230 unsigned long flags;
2231 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2232 struct ib_mad_send_wc mad_send_wc;
2233 struct list_head cancel_list;
2235 INIT_LIST_HEAD(&cancel_list);
2237 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2238 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2239 &mad_agent_priv->send_list, agent_list) {
2240 if (mad_send_wr->status == IB_WC_SUCCESS) {
2241 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2242 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2243 }
2244 }
2246 /* Empty wait list to prevent receives from finding a request */
2247 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2248 /* Empty local completion list as well */
2249 list_splice_init(&mad_agent_priv->local_list, &cancel_list);
2250 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2252 /* Report all cancelled requests */
2253 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2254 mad_send_wc.vendor_err = 0;
2256 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2257 &cancel_list, agent_list) {
2258 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2259 list_del(&mad_send_wr->agent_list);
2260 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2261 &mad_send_wc);
2262 atomic_dec(&mad_agent_priv->refcount);
2263 }
2264 }
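/*
 * find_send_wr() looks a send buffer up on the wait list first and then on
 * the active send list, where only entries accepted by is_data_mad() are
 * considered.
 */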
2266 static struct ib_mad_send_wr_private*
2267 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2268 struct ib_mad_send_buf *send_buf)
2269 {
2270 struct ib_mad_send_wr_private *mad_send_wr;
2272 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2273 agent_list) {
2274 if (&mad_send_wr->send_buf == send_buf)
2275 return mad_send_wr;
2276 }
2278 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2279 agent_list) {
2280 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
2281 &mad_send_wr->send_buf == send_buf)
2282 return mad_send_wr;
2283 }
2284 return NULL;
2285 }
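/*
 * ib_modify_mad() updates the timeout of a queued send, or cancels it when
 * timeout_ms is 0.  A hypothetical caller might use it like this (sketch
 * only, error handling omitted; the 2000 ms value is illustrative):
 *
 *	ib_modify_mad(agent, send_buf, 2000);	 extend timeout to 2s
 *	...
 *	ib_cancel_mad(agent, send_buf);		 equivalent to timeout 0
 */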
2287 int ib_modify_mad(struct ib_mad_agent *mad_agent,
2288 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2289 {
2290 struct ib_mad_agent_private *mad_agent_priv;
2291 struct ib_mad_send_wr_private *mad_send_wr;
2292 unsigned long flags;
2293 int active;
2295 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2296 agent);
2297 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2298 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2299 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2300 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2301 return -EINVAL;
2302 }
2304 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2305 if (!timeout_ms) {
2306 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2307 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2308 }
2310 mad_send_wr->send_buf.timeout_ms = timeout_ms;
2311 if (active)
2312 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2313 else
2314 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2316 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2317 return 0;
2318 }
2319 EXPORT_SYMBOL(ib_modify_mad);
2321 void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2322 struct ib_mad_send_buf *send_buf)
2323 {
2324 ib_modify_mad(mad_agent, send_buf, 0);
2325 }
2326 EXPORT_SYMBOL(ib_cancel_mad);
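/*
 * local_completions() runs from the agent workqueue and completes MADs that
 * were handled entirely on the local port: a synthesized receive completion
 * is delivered to the receiving agent, if any, followed by the send
 * completion for the originating agent.
 */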
2328 static void local_completions(void *data)
2329 {
2330 struct ib_mad_agent_private *mad_agent_priv;
2331 struct ib_mad_local_private *local;
2332 struct ib_mad_agent_private *recv_mad_agent;
2333 unsigned long flags;
2334 int recv = 0;
2335 struct ib_wc wc;
2336 struct ib_mad_send_wc mad_send_wc;
2338 mad_agent_priv = (struct ib_mad_agent_private *)data;
2340 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2341 while (!list_empty(&mad_agent_priv->local_list)) {
2342 local = list_entry(mad_agent_priv->local_list.next,
2343 struct ib_mad_local_private,
2344 completion_list);
2345 list_del(&local->completion_list);
2346 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2347 if (local->mad_priv) {
2348 recv_mad_agent = local->recv_mad_agent;
2349 if (!recv_mad_agent) {
2350 printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
2351 goto local_send_completion;
2352 }
2354 recv = 1;
2355 /*
2356 * Defined behavior is to complete response
2357 * before request
2358 */
2359 build_smp_wc((unsigned long) local->mad_send_wr,
2360 be16_to_cpu(IB_LID_PERMISSIVE),
2361 0, recv_mad_agent->agent.port_num, &wc);
2363 local->mad_priv->header.recv_wc.wc = &wc;
2364 local->mad_priv->header.recv_wc.mad_len =
2365 sizeof(struct ib_mad);
2366 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2367 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2368 &local->mad_priv->header.recv_wc.rmpp_list);
2369 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2370 local->mad_priv->header.recv_wc.recv_buf.mad =
2371 &local->mad_priv->mad.mad;
2372 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2373 snoop_recv(recv_mad_agent->qp_info,
2374 &local->mad_priv->header.recv_wc,
2375 IB_MAD_SNOOP_RECVS);
2376 recv_mad_agent->agent.recv_handler(
2377 &recv_mad_agent->agent,
2378 &local->mad_priv->header.recv_wc);
2379 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2380 atomic_dec(&recv_mad_agent->refcount);
2381 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2382 }
2384 local_send_completion:
2385 /* Complete send */
2386 mad_send_wc.status = IB_WC_SUCCESS;
2387 mad_send_wc.vendor_err = 0;
2388 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2389 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2390 snoop_send(mad_agent_priv->qp_info,
2391 &local->mad_send_wr->send_buf,
2392 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2393 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2394 &mad_send_wc);
2396 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2397 atomic_dec(&mad_agent_priv->refcount);
2398 if (!recv)
2399 kmem_cache_free(ib_mad_cache, local->mad_priv);
2400 kfree(local);
2401 }
2402 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2403 }
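/*
 * retry_send() consumes one retry, rearms the timeout, and resends either
 * through the RMPP layer or directly via ib_send_mad(); on success the
 * request is placed back on the agent's send list.
 */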
2405 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2406 {
2407 int ret;
2409 if (!mad_send_wr->retries--)
2410 return -ETIMEDOUT;
2412 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2414 if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
2415 ret = ib_retry_rmpp(mad_send_wr);
2416 switch (ret) {
2417 case IB_RMPP_RESULT_UNHANDLED:
2418 ret = ib_send_mad(mad_send_wr);
2419 break;
2420 case IB_RMPP_RESULT_CONSUMED:
2421 ret = 0;
2422 break;
2423 default:
2424 ret = -ECOMM;
2425 break;
2426 }
2427 } else
2428 ret = ib_send_mad(mad_send_wr);
2430 if (!ret) {
2431 mad_send_wr->refcount++;
2432 list_add_tail(&mad_send_wr->agent_list,
2433 &mad_send_wr->mad_agent_priv->send_list);
2434 }
2435 return ret;
2436 }
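/*
 * timeout_sends() runs from the agent's delayed work: expired requests are
 * retried when possible, otherwise completed with IB_WC_RESP_TIMEOUT_ERR;
 * if the head of the wait list has not expired yet the work is requeued
 * with the remaining delay.
 */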
2438 static void timeout_sends(void *data)
2439 {
2440 struct ib_mad_agent_private *mad_agent_priv;
2441 struct ib_mad_send_wr_private *mad_send_wr;
2442 struct ib_mad_send_wc mad_send_wc;
2443 unsigned long flags, delay;
2445 mad_agent_priv = (struct ib_mad_agent_private *)data;
2446 mad_send_wc.vendor_err = 0;
2448 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2449 while (!list_empty(&mad_agent_priv->wait_list)) {
2450 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2451 struct ib_mad_send_wr_private,
2452 agent_list);
2454 if (time_after(mad_send_wr->timeout, jiffies)) {
2455 delay = mad_send_wr->timeout - jiffies;
2456 if ((long)delay <= 0)
2457 delay = 1;
2458 queue_delayed_work(mad_agent_priv->qp_info->
2459 port_priv->wq,
2460 &mad_agent_priv->timed_work, delay);
2461 break;
2462 }
2464 list_del(&mad_send_wr->agent_list);
2465 if (mad_send_wr->status == IB_WC_SUCCESS &&
2466 !retry_send(mad_send_wr))
2467 continue;
2469 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2471 if (mad_send_wr->status == IB_WC_SUCCESS)
2472 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2473 else
2474 mad_send_wc.status = mad_send_wr->status;
2475 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2476 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2477 &mad_send_wc);
2479 atomic_dec(&mad_agent_priv->refcount);
2480 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2481 }
2482 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2483 }
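/*
 * CQ event callback: completion processing is deferred to the port
 * workqueue so that the CQ is drained in process context.
 */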
2485 static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
2486 {
2487 struct ib_mad_port_private *port_priv = cq->cq_context;
2488 unsigned long flags;
2490 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2491 if (!list_empty(&port_priv->port_list))
2492 queue_work(port_priv->wq, &port_priv->work);
2493 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2494 }
2496 /*
2497  * Allocate receive MADs and post receive WRs for them
2498  */
2499 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2500 struct ib_mad_private *mad)
2501 {
2502 unsigned long flags;
2503 int post, ret;
2504 struct ib_mad_private *mad_priv;
2505 struct ib_sge sg_list;
2506 struct ib_recv_wr recv_wr, *bad_recv_wr;
2507 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2509 /* Initialize common scatter list fields */
2510 sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2511 sg_list.lkey = (*qp_info->port_priv->mr).lkey;
2513 /* Initialize common receive WR fields */
2514 recv_wr.next = NULL;
2515 recv_wr.sg_list = &sg_list;
2516 recv_wr.num_sge = 1;
2518 do {
2519 /* Allocate and map receive buffer */
2520 if (mad) {
2521 mad_priv = mad;
2522 mad = NULL;
2523 } else {
2524 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
2525 if (!mad_priv) {
2526 printk(KERN_ERR PFX "No memory for receive buffer\n");
2527 ret = -ENOMEM;
2528 break;
2529 }
2530 }
2531 sg_list.addr = dma_map_single(qp_info->port_priv->
2532 device->dma_device,
2533 &mad_priv->grh,
2534 sizeof *mad_priv -
2535 sizeof mad_priv->header,
2536 DMA_FROM_DEVICE);
2537 pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
2538 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2539 mad_priv->header.mad_list.mad_queue = recv_queue;
2541 /* Post receive WR */
2542 spin_lock_irqsave(&recv_queue->lock, flags);
2543 post = (++recv_queue->count < recv_queue->max_active);
2544 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2545 spin_unlock_irqrestore(&recv_queue->lock, flags);
2546 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2547 if (ret) {
2548 spin_lock_irqsave(&recv_queue->lock, flags);
2549 list_del(&mad_priv->header.mad_list.list);
2550 recv_queue->count--;
2551 spin_unlock_irqrestore(&recv_queue->lock, flags);
2552 dma_unmap_single(qp_info->port_priv->device->dma_device,
2553 pci_unmap_addr(&mad_priv->header,
2554 mapping),
2555 sizeof *mad_priv -
2556 sizeof mad_priv->header,
2557 DMA_FROM_DEVICE);
2558 kmem_cache_free(ib_mad_cache, mad_priv);
2559 printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
2560 break;
2561 }
2562 } while (post);
2564 return ret;
2565 }
2567 /*
2568  * Return all the posted receive MADs
2569  */
2570 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2571 {
2572 struct ib_mad_private_header *mad_priv_hdr;
2573 struct ib_mad_private *recv;
2574 struct ib_mad_list_head *mad_list;
2576 while (!list_empty(&qp_info->recv_queue.list)) {
2578 mad_list = list_entry(qp_info->recv_queue.list.next,
2579 struct ib_mad_list_head, list);
2580 mad_priv_hdr = container_of(mad_list,
2581 struct ib_mad_private_header,
2582 mad_list);
2583 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2584 header);
2586 /* Remove from posted receive MAD list */
2587 list_del(&mad_list->list);
2589 dma_unmap_single(qp_info->port_priv->device->dma_device,
2590 pci_unmap_addr(&recv->header, mapping),
2591 sizeof(struct ib_mad_private) -
2592 sizeof(struct ib_mad_private_header),
2593 DMA_FROM_DEVICE);
2594 kmem_cache_free(ib_mad_cache, recv);
2595 }
2597 qp_info->recv_queue.count = 0;
2598 }
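/*
 * ib_mad_port_start() brings both special QPs through the standard
 * Reset -> Init -> RTR -> RTS sequence, requests CQ notification, and
 * pre-posts the initial set of receive MADs on each QP.
 */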
2603 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2604 {
2605 int ret, i;
2606 struct ib_qp_attr *attr;
2607 struct ib_qp *qp;
2609 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2610 if (!attr) {
2611 printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
2612 return -ENOMEM;
2613 }
2615 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2616 qp = port_priv->qp_info[i].qp;
2617 /*
2618 * PKey index for QP1 is irrelevant but
2619 * one is needed for the Reset to Init transition
2620 */
2621 attr->qp_state = IB_QPS_INIT;
2622 attr->pkey_index = 0;
2623 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2624 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2625 IB_QP_PKEY_INDEX | IB_QP_QKEY);
2626 if (ret) {
2627 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2628 "INIT: %d\n", i, ret);
2629 goto out;
2630 }
2632 attr->qp_state = IB_QPS_RTR;
2633 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2634 if (ret) {
2635 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2636 "RTR: %d\n", i, ret);
2637 goto out;
2638 }
2640 attr->qp_state = IB_QPS_RTS;
2641 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2642 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2643 if (ret) {
2644 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2645 "RTS: %d\n", i, ret);
2646 goto out;
2647 }
2648 }
2650 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2651 if (ret) {
2652 printk(KERN_ERR PFX "Failed to request completion "
2653 "notification: %d\n", ret);
2654 goto out;
2655 }
2657 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2658 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2659 if (ret) {
2660 printk(KERN_ERR PFX "Couldn't post receive WRs\n");
2661 goto out;
2662 }
2663 }
2665 out:
2666 kfree(attr);
2667 return ret;
2668 }
2669 static void qp_event_handler(struct ib_event *event, void *qp_context)
2670 {
2671 struct ib_mad_qp_info *qp_info = qp_context;
2673 /* It's worse than that! He's dead, Jim! */
2674 printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
2675 event->event, qp_info->qp->qp_num);
2676 }
2678 static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2679 struct ib_mad_queue *mad_queue)
2680 {
2681 mad_queue->qp_info = qp_info;
2682 mad_queue->count = 0;
2683 spin_lock_init(&mad_queue->lock);
2684 INIT_LIST_HEAD(&mad_queue->list);
2685 }
2687 static void init_mad_qp(struct ib_mad_port_private *port_priv,
2688 struct ib_mad_qp_info *qp_info)
2689 {
2690 qp_info->port_priv = port_priv;
2691 init_mad_queue(qp_info, &qp_info->send_queue);
2692 init_mad_queue(qp_info, &qp_info->recv_queue);
2693 INIT_LIST_HEAD(&qp_info->overflow_list);
2694 spin_lock_init(&qp_info->snoop_lock);
2695 qp_info->snoop_table = NULL;
2696 qp_info->snoop_table_size = 0;
2697 atomic_set(&qp_info->snoop_count, 0);
2698 }
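/*
 * create_mad_qp() creates one special QP (SMI or GSI) on the shared CQ and
 * PD, using the fixed IB_MAD_QP_SEND_SIZE/IB_MAD_QP_RECV_SIZE work request
 * depths and IB_MAD_*_REQ_MAX_SG scatter/gather limits.
 */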
2700 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2701 enum ib_qp_type qp_type)
2702 {
2703 struct ib_qp_init_attr qp_init_attr;
2704 int ret;
2706 memset(&qp_init_attr, 0, sizeof qp_init_attr);
2707 qp_init_attr.send_cq = qp_info->port_priv->cq;
2708 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2709 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2710 qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
2711 qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
2712 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2713 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2714 qp_init_attr.qp_type = qp_type;
2715 qp_init_attr.port_num = qp_info->port_priv->port_num;
2716 qp_init_attr.qp_context = qp_info;
2717 qp_init_attr.event_handler = qp_event_handler;
2718 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2719 if (IS_ERR(qp_info->qp)) {
2720 printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
2721 get_spl_qp_index(qp_type));
2722 ret = PTR_ERR(qp_info->qp);
2723 goto error;
2724 }
2725 /* Use minimum queue sizes unless the CQ is resized */
2726 qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
2727 qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
2728 return 0;
2730 error:
2731 return ret;
2732 }
2734 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2735 {
2736 ib_destroy_qp(qp_info->qp);
2737 kfree(qp_info->snoop_table);
2738 }
2740 /*
2741  * Open the port
2742  * Create the QP, PD, MR, and CQ if needed
2743  */
2744 static int ib_mad_port_open(struct ib_device *device,
2745 int port_num)
2746 {
2747 int ret, cq_size;
2748 struct ib_mad_port_private *port_priv;
2749 unsigned long flags;
2750 char name[sizeof "ib_mad123"];
2752 /* Create new device info */
2753 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
2754 if (!port_priv) {
2755 printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
2756 return -ENOMEM;
2757 }
2759 port_priv->device = device;
2760 port_priv->port_num = port_num;
2761 spin_lock_init(&port_priv->reg_lock);
2762 INIT_LIST_HEAD(&port_priv->agent_list);
2763 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2764 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2766 cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
2767 port_priv->cq = ib_create_cq(port_priv->device,
2768 ib_mad_thread_completion_handler,
2769 NULL, port_priv, cq_size);
2770 if (IS_ERR(port_priv->cq)) {
2771 printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
2772 ret = PTR_ERR(port_priv->cq);
2773 goto error3;
2774 }
2776 port_priv->pd = ib_alloc_pd(device);
2777 if (IS_ERR(port_priv->pd)) {
2778 printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
2779 ret = PTR_ERR(port_priv->pd);
2780 goto error4;
2781 }
2783 port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
2784 if (IS_ERR(port_priv->mr)) {
2785 printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
2786 ret = PTR_ERR(port_priv->mr);
2787 goto error5;
2788 }
2790 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2791 if (ret)
2792 goto error6;
2793 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2794 if (ret)
2795 goto error7;
2797 snprintf(name, sizeof name, "ib_mad%d", port_num);
2798 port_priv->wq = create_singlethread_workqueue(name);
2799 if (!port_priv->wq) {
2800 ret = -ENOMEM;
2801 goto error8;
2802 }
2803 INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
2805 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2806 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
2807 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2809 ret = ib_mad_port_start(port_priv);
2811 printk(KERN_ERR PFX "Couldn't start port\n");
2818 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2819 list_del_init(&port_priv->port_list);
2820 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2822 destroy_workqueue(port_priv->wq);
2823 error8:
2824 destroy_mad_qp(&port_priv->qp_info[1]);
2825 error7:
2826 destroy_mad_qp(&port_priv->qp_info[0]);
2827 error6:
2828 ib_dereg_mr(port_priv->mr);
2829 error5:
2830 ib_dealloc_pd(port_priv->pd);
2831 error4:
2832 ib_destroy_cq(port_priv->cq);
2833 cleanup_recv_queue(&port_priv->qp_info[1]);
2834 cleanup_recv_queue(&port_priv->qp_info[0]);
2835 error3:
2836 kfree(port_priv);
2838 return ret;
2839 }
2841 /*
2842  * Close the port
2843  * If there are no classes using the port, free the port
2844  * resources (CQ, MR, PD, QP) and remove the port's info structure
2845  */
2846 static int ib_mad_port_close(struct ib_device *device, int port_num)
2847 {
2848 struct ib_mad_port_private *port_priv;
2849 unsigned long flags;
2851 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2852 port_priv = __ib_get_mad_port(device, port_num);
2853 if (port_priv == NULL) {
2854 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2855 printk(KERN_ERR PFX "Port %d not found\n", port_num);
2858 list_del_init(&port_priv->port_list);
2859 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2861 destroy_workqueue(port_priv->wq);
2862 destroy_mad_qp(&port_priv->qp_info[1]);
2863 destroy_mad_qp(&port_priv->qp_info[0]);
2864 ib_dereg_mr(port_priv->mr);
2865 ib_dealloc_pd(port_priv->pd);
2866 ib_destroy_cq(port_priv->cq);
2867 cleanup_recv_queue(&port_priv->qp_info[1]);
2868 cleanup_recv_queue(&port_priv->qp_info[0]);
2869 /* XXX: Handle deallocation of MAD registration tables */
2871 kfree(port_priv);
2873 return 0;
2874 }
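/*
 * ib_mad_init_device() is the ib_client add callback: switches expose MADs
 * only on port 0, while other devices use ports 1..phys_port_cnt.  Any
 * failure unwinds the ports opened so far.
 */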
2876 static void ib_mad_init_device(struct ib_device *device)
2877 {
2878 int start, end, i;
2880 if (device->node_type == IB_NODE_SWITCH) {
2881 start = 0;
2882 end = 0;
2883 } else {
2884 start = 1;
2885 end = device->phys_port_cnt;
2886 }
2888 for (i = start; i <= end; i++) {
2889 if (ib_mad_port_open(device, i)) {
2890 printk(KERN_ERR PFX "Couldn't open %s port %d\n",
2894 if (ib_agent_port_open(device, i)) {
2895 printk(KERN_ERR PFX "Couldn't open %s port %d "
2904 if (ib_mad_port_close(device, i))
2905 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2911 while (i >= start) {
2912 if (ib_agent_port_close(device, i))
2913 printk(KERN_ERR PFX "Couldn't close %s port %d "
2916 if (ib_mad_port_close(device, i))
2917 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2923 static void ib_mad_remove_device(struct ib_device *device)
2924 {
2925 int i, num_ports, cur_port;
2927 if (device->node_type == IB_NODE_SWITCH) {
2928 num_ports = 1;
2929 cur_port = 0;
2930 } else {
2931 num_ports = device->phys_port_cnt;
2932 cur_port = 1;
2933 }
2934 for (i = 0; i < num_ports; i++, cur_port++) {
2935 if (ib_agent_port_close(device, cur_port))
2936 printk(KERN_ERR PFX "Couldn't close %s port %d "
2938 device->name, cur_port);
2939 if (ib_mad_port_close(device, cur_port))
2940 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2941 device->name, cur_port);
2942 }
2943 }
2945 static struct ib_client mad_client = {
2946 .name = "mad",
2947 .add = ib_mad_init_device,
2948 .remove = ib_mad_remove_device
2949 };
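/*
 * Module init creates the slab cache used for MAD buffers and registers the
 * MAD client so ports are opened as devices are added.
 */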
2951 static int __init ib_mad_init_module(void)
2952 {
2953 int ret;
2955 spin_lock_init(&ib_mad_port_list_lock);
2957 ib_mad_cache = kmem_cache_create("ib_mad",
2958 sizeof(struct ib_mad_private),
2959 0,
2960 SLAB_HWCACHE_ALIGN,
2961 NULL,
2962 NULL);
2963 if (!ib_mad_cache) {
2964 printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
2969 INIT_LIST_HEAD(&ib_mad_port_list);
2971 if (ib_register_client(&mad_client)) {
2972 printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
2980 kmem_cache_destroy(ib_mad_cache);
2985 static void __exit ib_mad_cleanup_module(void)
2986 {
2987 ib_unregister_client(&mad_client);
2989 if (kmem_cache_destroy(ib_mad_cache)) {
2990 printk(KERN_DEBUG PFX "Failed to destroy ib_mad cache\n");
2991 }
2992 }
2994 module_init(ib_mad_init_module);
2995 module_exit(ib_mad_cleanup_module);