2 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * $Id: mad.c 1389 2004-12-27 22:56:47Z roland $
35 #include <linux/dma-mapping.h>
41 MODULE_LICENSE("Dual BSD/GPL");
42 MODULE_DESCRIPTION("kernel IB MAD API");
43 MODULE_AUTHOR("Hal Rosenstock");
44 MODULE_AUTHOR("Sean Hefty");
47 kmem_cache_t *ib_mad_cache;
48 static struct list_head ib_mad_port_list;
49 static u32 ib_mad_client_id = 0;
52 static spinlock_t ib_mad_port_list_lock;
55 /* Forward declarations */
56 static int method_in_use(struct ib_mad_mgmt_method_table **method,
57 struct ib_mad_reg_req *mad_reg_req);
58 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
59 static struct ib_mad_agent_private *find_mad_agent(
60 struct ib_mad_port_private *port_priv,
62 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
63 struct ib_mad_private *mad);
64 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
65 static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
66 struct ib_mad_send_wc *mad_send_wc);
67 static void timeout_sends(void *data);
68 static void local_completions(void *data);
69 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
70 struct ib_mad_agent_private *agent_priv,
72 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
73 struct ib_mad_agent_private *agent_priv);
76 * Returns an ib_mad_port_private structure or NULL for a device/port
77 * Assumes ib_mad_port_list_lock is held
79 static inline struct ib_mad_port_private *
80 __ib_get_mad_port(struct ib_device *device, int port_num)
82 struct ib_mad_port_private *entry;
84 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
85 if (entry->device == device && entry->port_num == port_num)
92 * Wrapper function to return an ib_mad_port_private structure or NULL
95 static inline struct ib_mad_port_private *
96 ib_get_mad_port(struct ib_device *device, int port_num)
98 struct ib_mad_port_private *entry;
101 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
102 entry = __ib_get_mad_port(device, port_num);
103 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
108 static inline u8 convert_mgmt_class(u8 mgmt_class)
110 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
111 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
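/*
 * Map a special QP type to its QP index (per the IBA special QP
 * numbering: QP0 carries SMI traffic, QP1 carries GSI traffic); any
 * other QP type is rejected.
 */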
115 static int get_spl_qp_index(enum ib_qp_type qp_type)
128 static int vendor_class_index(u8 mgmt_class)
130 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
133 static int is_vendor_class(u8 mgmt_class)
135 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
136 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
141 static int is_vendor_oui(char *oui)
143 if (oui[0] || oui[1] || oui[2])
148 static int is_vendor_method_in_use(
149 struct ib_mad_mgmt_vendor_class *vendor_class,
150 struct ib_mad_reg_req *mad_reg_req)
152 struct ib_mad_mgmt_method_table *method;
155 for (i = 0; i < MAX_MGMT_OUI; i++) {
156 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
157 method = vendor_class->method_table[i];
159 if (method_in_use(&method, mad_reg_req))
170 * ib_register_mad_agent - Register to send/receive MADs
172 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
174 enum ib_qp_type qp_type,
175 struct ib_mad_reg_req *mad_reg_req,
177 ib_mad_send_handler send_handler,
178 ib_mad_recv_handler recv_handler,
181 struct ib_mad_port_private *port_priv;
182 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
183 struct ib_mad_agent_private *mad_agent_priv;
184 struct ib_mad_reg_req *reg_req = NULL;
185 struct ib_mad_mgmt_class_table *class;
186 struct ib_mad_mgmt_vendor_class_table *vendor;
187 struct ib_mad_mgmt_vendor_class *vendor_class;
188 struct ib_mad_mgmt_method_table *method;
191 u8 mgmt_class, vclass;
193 /* Validate parameters */
194 qpn = get_spl_qp_index(qp_type);
199 goto error1; /* XXX: until RMPP implemented */
201 /* Validate MAD registration request if supplied */
203 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
207 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
209 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
210 * one in this range currently allowed
212 if (mad_reg_req->mgmt_class !=
213 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
215 } else if (mad_reg_req->mgmt_class == 0) {
217 * Class 0 is reserved in IBA and is used for
218 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
221 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
223 * If class is in "new" vendor range,
224 * ensure supplied OUI is not zero
226 if (!is_vendor_oui(mad_reg_req->oui))
229 /* Make sure class supplied is consistent with QP type */
230 if (qp_type == IB_QPT_SMI) {
231 if ((mad_reg_req->mgmt_class !=
232 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
233 (mad_reg_req->mgmt_class !=
234 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
237 if ((mad_reg_req->mgmt_class ==
238 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
239 (mad_reg_req->mgmt_class ==
240 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
244 /* No registration request supplied */
249 /* Validate device and port */
250 port_priv = ib_get_mad_port(device, port_num);
252 ret = ERR_PTR(-ENODEV);
256 /* Allocate structures */
257 mad_agent_priv = kmalloc(sizeof *mad_agent_priv, GFP_KERNEL);
258 if (!mad_agent_priv) {
259 ret = ERR_PTR(-ENOMEM);
262 memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
264 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
265 IB_ACCESS_LOCAL_WRITE);
266 if (IS_ERR(mad_agent_priv->agent.mr)) {
267 ret = ERR_PTR(-ENOMEM);
272 reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
274 ret = ERR_PTR(-ENOMEM);
277 /* Make a copy of the MAD registration request */
278 memcpy(reg_req, mad_reg_req, sizeof *reg_req);
281 /* Now, fill in the various structures */
282 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
283 mad_agent_priv->reg_req = reg_req;
284 mad_agent_priv->rmpp_version = rmpp_version;
285 mad_agent_priv->agent.device = device;
286 mad_agent_priv->agent.recv_handler = recv_handler;
287 mad_agent_priv->agent.send_handler = send_handler;
288 mad_agent_priv->agent.context = context;
289 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
290 mad_agent_priv->agent.port_num = port_num;
292 spin_lock_irqsave(&port_priv->reg_lock, flags);
293 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
296 * Make sure MAD registration (if supplied)
297 * is non-overlapping with any existing ones
300 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
301 if (!is_vendor_class(mgmt_class)) {
302 class = port_priv->version[mad_reg_req->
303 mgmt_class_version].class;
305 method = class->method_table[mgmt_class];
307 if (method_in_use(&method,
312 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
315 /* "New" vendor class range */
316 vendor = port_priv->version[mad_reg_req->
317 mgmt_class_version].vendor;
319 vclass = vendor_class_index(mgmt_class);
320 vendor_class = vendor->vendor_class[vclass];
322 if (is_vendor_method_in_use(
328 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
336 /* Add mad agent into port's agent list */
337 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
338 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
340 spin_lock_init(&mad_agent_priv->lock);
341 INIT_LIST_HEAD(&mad_agent_priv->send_list);
342 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
343 INIT_LIST_HEAD(&mad_agent_priv->done_list);
344 INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
345 INIT_LIST_HEAD(&mad_agent_priv->local_list);
346 INIT_WORK(&mad_agent_priv->local_work, local_completions,
348 atomic_set(&mad_agent_priv->refcount, 1);
349 init_waitqueue_head(&mad_agent_priv->wait);
351 return &mad_agent_priv->agent;
354 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
357 kfree(mad_agent_priv);
359 ib_dereg_mr(mad_agent_priv->agent.mr);
363 EXPORT_SYMBOL(ib_register_mad_agent);
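/*
 * Illustrative caller flow (a sketch, not taken from this file; the
 * handler and context names below are hypothetical):
 *
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
 *				      &reg_req, 0, my_send_handler,
 *				      my_recv_handler, my_context);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *
 * The agent is later torn down with ib_unregister_mad_agent(agent).
 */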
365 static inline int is_snooping_sends(int mad_snoop_flags)
367 return (mad_snoop_flags &
368 (/*IB_MAD_SNOOP_POSTED_SENDS |
369 IB_MAD_SNOOP_RMPP_SENDS |*/
370 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
371 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
374 static inline int is_snooping_recvs(int mad_snoop_flags)
376 return (mad_snoop_flags &
377 (IB_MAD_SNOOP_RECVS /*|
378 IB_MAD_SNOOP_RMPP_RECVS*/));
381 static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
382 struct ib_mad_snoop_private *mad_snoop_priv)
384 struct ib_mad_snoop_private **new_snoop_table;
388 spin_lock_irqsave(&qp_info->snoop_lock, flags);
389 /* Check for empty slot in array. */
390 for (i = 0; i < qp_info->snoop_table_size; i++)
391 if (!qp_info->snoop_table[i])
394 if (i == qp_info->snoop_table_size) {
396 new_snoop_table = kmalloc(sizeof mad_snoop_priv *
397 (qp_info->snoop_table_size + 1),
399 if (!new_snoop_table) {
403 if (qp_info->snoop_table) {
404 memcpy(new_snoop_table, qp_info->snoop_table,
405 sizeof mad_snoop_priv *
406 qp_info->snoop_table_size);
407 kfree(qp_info->snoop_table);
409 qp_info->snoop_table = new_snoop_table;
410 qp_info->snoop_table_size++;
412 qp_info->snoop_table[i] = mad_snoop_priv;
413 atomic_inc(&qp_info->snoop_count);
415 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
419 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
421 enum ib_qp_type qp_type,
423 ib_mad_snoop_handler snoop_handler,
424 ib_mad_recv_handler recv_handler,
427 struct ib_mad_port_private *port_priv;
428 struct ib_mad_agent *ret;
429 struct ib_mad_snoop_private *mad_snoop_priv;
432 /* Validate parameters */
433 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
434 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
435 ret = ERR_PTR(-EINVAL);
438 qpn = get_spl_qp_index(qp_type);
440 ret = ERR_PTR(-EINVAL);
443 port_priv = ib_get_mad_port(device, port_num);
445 ret = ERR_PTR(-ENODEV);
448 /* Allocate structures */
449 mad_snoop_priv = kmalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
450 if (!mad_snoop_priv) {
451 ret = ERR_PTR(-ENOMEM);
455 /* Now, fill in the various structures */
456 memset(mad_snoop_priv, 0, sizeof *mad_snoop_priv);
457 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
458 mad_snoop_priv->agent.device = device;
459 mad_snoop_priv->agent.recv_handler = recv_handler;
460 mad_snoop_priv->agent.snoop_handler = snoop_handler;
461 mad_snoop_priv->agent.context = context;
462 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
463 mad_snoop_priv->agent.port_num = port_num;
464 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
465 init_waitqueue_head(&mad_snoop_priv->wait);
466 mad_snoop_priv->snoop_index = register_snoop_agent(
467 &port_priv->qp_info[qpn],
469 if (mad_snoop_priv->snoop_index < 0) {
470 ret = ERR_PTR(mad_snoop_priv->snoop_index);
474 atomic_set(&mad_snoop_priv->refcount, 1);
475 return &mad_snoop_priv->agent;
478 kfree(mad_snoop_priv);
482 EXPORT_SYMBOL(ib_register_mad_snoop);
484 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
486 struct ib_mad_port_private *port_priv;
489 /* Note that we could still be handling received MADs */
492 * Canceling all sends results in dropping received response
493 * MADs, preventing us from queuing additional work
495 cancel_mads(mad_agent_priv);
496 port_priv = mad_agent_priv->qp_info->port_priv;
497 cancel_delayed_work(&mad_agent_priv->timed_work);
499 spin_lock_irqsave(&port_priv->reg_lock, flags);
500 remove_mad_reg_req(mad_agent_priv);
501 list_del(&mad_agent_priv->agent_list);
502 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
504 flush_workqueue(port_priv->wq);
506 atomic_dec(&mad_agent_priv->refcount);
507 wait_event(mad_agent_priv->wait,
508 !atomic_read(&mad_agent_priv->refcount));
510 if (mad_agent_priv->reg_req)
511 kfree(mad_agent_priv->reg_req);
512 ib_dereg_mr(mad_agent_priv->agent.mr);
513 kfree(mad_agent_priv);
516 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
518 struct ib_mad_qp_info *qp_info;
521 qp_info = mad_snoop_priv->qp_info;
522 spin_lock_irqsave(&qp_info->snoop_lock, flags);
523 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
524 atomic_dec(&qp_info->snoop_count);
525 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
527 atomic_dec(&mad_snoop_priv->refcount);
528 wait_event(mad_snoop_priv->wait,
529 !atomic_read(&mad_snoop_priv->refcount));
531 kfree(mad_snoop_priv);
535 * ib_unregister_mad_agent - Unregisters a client from using MAD services
537 int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
539 struct ib_mad_agent_private *mad_agent_priv;
540 struct ib_mad_snoop_private *mad_snoop_priv;
542 /* If the TID is zero, the agent can only snoop. */
543 if (mad_agent->hi_tid) {
544 mad_agent_priv = container_of(mad_agent,
545 struct ib_mad_agent_private,
547 unregister_mad_agent(mad_agent_priv);
549 mad_snoop_priv = container_of(mad_agent,
550 struct ib_mad_snoop_private,
552 unregister_mad_snoop(mad_snoop_priv);
556 EXPORT_SYMBOL(ib_unregister_mad_agent);
558 static inline int response_mad(struct ib_mad *mad)
560 /* Trap represses are responses although response bit is reset */
561 return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
562 (mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
565 static void dequeue_mad(struct ib_mad_list_head *mad_list)
567 struct ib_mad_queue *mad_queue;
570 BUG_ON(!mad_list->mad_queue);
571 mad_queue = mad_list->mad_queue;
572 spin_lock_irqsave(&mad_queue->lock, flags);
573 list_del(&mad_list->list);
575 spin_unlock_irqrestore(&mad_queue->lock, flags);
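/*
 * The snoop dispatchers below take a reference on each matching snoop
 * agent and drop snoop_lock before invoking its handler, retaking the
 * lock afterwards; the refcount keeps the agent alive across the
 * callback.
 */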
578 static void snoop_send(struct ib_mad_qp_info *qp_info,
579 struct ib_send_wr *send_wr,
580 struct ib_mad_send_wc *mad_send_wc,
583 struct ib_mad_snoop_private *mad_snoop_priv;
587 spin_lock_irqsave(&qp_info->snoop_lock, flags);
588 for (i = 0; i < qp_info->snoop_table_size; i++) {
589 mad_snoop_priv = qp_info->snoop_table[i];
590 if (!mad_snoop_priv ||
591 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
594 atomic_inc(&mad_snoop_priv->refcount);
595 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
596 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
597 send_wr, mad_send_wc);
598 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
599 wake_up(&mad_snoop_priv->wait);
600 spin_lock_irqsave(&qp_info->snoop_lock, flags);
602 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
605 static void snoop_recv(struct ib_mad_qp_info *qp_info,
606 struct ib_mad_recv_wc *mad_recv_wc,
609 struct ib_mad_snoop_private *mad_snoop_priv;
613 spin_lock_irqsave(&qp_info->snoop_lock, flags);
614 for (i = 0; i < qp_info->snoop_table_size; i++) {
615 mad_snoop_priv = qp_info->snoop_table[i];
616 if (!mad_snoop_priv ||
617 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
620 atomic_inc(&mad_snoop_priv->refcount);
621 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
622 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
624 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
625 wake_up(&mad_snoop_priv->wait);
626 spin_lock_irqsave(&qp_info->snoop_lock, flags);
628 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
631 static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
634 memset(wc, 0, sizeof *wc);
636 wc->status = IB_WC_SUCCESS;
637 wc->opcode = IB_WC_RECV;
638 wc->pkey_index = pkey_index;
639 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
644 wc->dlid_path_bits = 0;
645 wc->port_num = port_num;
649 * Return 0 if SMP is to be sent
650 * Return 1 if SMP was consumed locally (whether or not solicited)
651 * Return < 0 if error
653 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
655 struct ib_send_wr *send_wr)
659 struct ib_mad_local_private *local;
660 struct ib_mad_private *mad_priv;
661 struct ib_mad_port_private *port_priv;
662 struct ib_mad_agent_private *recv_mad_agent = NULL;
663 struct ib_device *device = mad_agent_priv->agent.device;
664 u8 port_num = mad_agent_priv->agent.port_num;
667 if (!smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
669 printk(KERN_ERR PFX "Invalid directed route\n");
672 /* Check to post send on QP or process locally */
673 ret = smi_check_local_dr_smp(smp, device, port_num);
674 if (!ret || !device->process_mad)
677 local = kmalloc(sizeof *local, GFP_ATOMIC);
680 printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
683 local->mad_priv = NULL;
684 local->recv_mad_agent = NULL;
685 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
688 printk(KERN_ERR PFX "No memory for local response MAD\n");
693 build_smp_wc(send_wr->wr_id, smp->dr_slid, send_wr->wr.ud.pkey_index,
694 send_wr->wr.ud.port_num, &mad_wc);
696 /* No GRH for DR SMP */
697 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
698 (struct ib_mad *)smp,
699 (struct ib_mad *)&mad_priv->mad);
702 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
703 if (response_mad(&mad_priv->mad.mad) &&
704 mad_agent_priv->agent.recv_handler) {
705 local->mad_priv = mad_priv;
706 local->recv_mad_agent = mad_agent_priv;
708 * Reference MAD agent until receive
709 * side of local completion handled
711 atomic_inc(&mad_agent_priv->refcount);
713 kmem_cache_free(ib_mad_cache, mad_priv);
715 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
716 kmem_cache_free(ib_mad_cache, mad_priv);
718 case IB_MAD_RESULT_SUCCESS:
719 /* Treat like an incoming receive MAD */
720 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
721 mad_agent_priv->agent.port_num);
723 mad_priv->mad.mad.mad_hdr.tid =
724 ((struct ib_mad *)smp)->mad_hdr.tid;
725 recv_mad_agent = find_mad_agent(port_priv,
728 if (!port_priv || !recv_mad_agent) {
729 kmem_cache_free(ib_mad_cache, mad_priv);
734 local->mad_priv = mad_priv;
735 local->recv_mad_agent = recv_mad_agent;
738 kmem_cache_free(ib_mad_cache, mad_priv);
744 local->send_wr = *send_wr;
745 local->send_wr.sg_list = local->sg_list;
746 memcpy(local->sg_list, send_wr->sg_list,
747 sizeof *send_wr->sg_list * send_wr->num_sge);
748 local->send_wr.next = NULL;
749 local->tid = send_wr->wr.ud.mad_hdr->tid;
750 local->wr_id = send_wr->wr_id;
751 /* Reference MAD agent until send side of local completion handled */
752 atomic_inc(&mad_agent_priv->refcount);
753 /* Queue local completion to local list */
754 spin_lock_irqsave(&mad_agent_priv->lock, flags);
755 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
756 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
757 queue_work(mad_agent_priv->qp_info->port_priv->wq,
758 &mad_agent_priv->local_work);
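/*
 * A MAD on the wire is always sizeof(struct ib_mad) (256 bytes); the
 * caller supplies separate header and data lengths, and the data is
 * padded out to a full segment.  For example (hypothetical values),
 * hdr_len = 24 and data_len = 200 give seg_size = 256 - 24 = 232,
 * pad = 232 - 200 = 32, and a total buffer of 24 + 200 + 32 = 256.
 */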
764 static int get_buf_length(int hdr_len, int data_len)
768 seg_size = sizeof(struct ib_mad) - hdr_len;
769 if (data_len && seg_size) {
770 pad = seg_size - data_len % seg_size;
775 return hdr_len + data_len + pad;
778 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
779 u32 remote_qpn, u16 pkey_index,
781 int hdr_len, int data_len,
782 unsigned int __nocast gfp_mask)
784 struct ib_mad_agent_private *mad_agent_priv;
785 struct ib_mad_send_buf *send_buf;
789 mad_agent_priv = container_of(mad_agent,
790 struct ib_mad_agent_private, agent);
791 buf_size = get_buf_length(hdr_len, data_len);
793 buf = kmalloc(sizeof *send_buf + buf_size, gfp_mask);
795 return ERR_PTR(-ENOMEM);
796 memset(buf, 0, sizeof *send_buf + buf_size);
798 send_buf = buf + buf_size;
801 send_buf->sge.addr = dma_map_single(mad_agent->device->dma_device,
802 buf, buf_size, DMA_TO_DEVICE);
803 pci_unmap_addr_set(send_buf, mapping, send_buf->sge.addr);
804 send_buf->sge.length = buf_size;
805 send_buf->sge.lkey = mad_agent->mr->lkey;
807 send_buf->send_wr.wr_id = (unsigned long) send_buf;
808 send_buf->send_wr.sg_list = &send_buf->sge;
809 send_buf->send_wr.num_sge = 1;
810 send_buf->send_wr.opcode = IB_WR_SEND;
811 send_buf->send_wr.send_flags = IB_SEND_SIGNALED;
812 send_buf->send_wr.wr.ud.ah = ah;
813 send_buf->send_wr.wr.ud.mad_hdr = &send_buf->mad->mad_hdr;
814 send_buf->send_wr.wr.ud.remote_qpn = remote_qpn;
815 send_buf->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
816 send_buf->send_wr.wr.ud.pkey_index = pkey_index;
817 send_buf->mad_agent = mad_agent;
818 atomic_inc(&mad_agent_priv->refcount);
821 EXPORT_SYMBOL(ib_create_send_mad);
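/*
 * Note the layout produced above: a single allocation holds the MAD
 * payload first, followed by the struct ib_mad_send_buf itself
 * (send_buf = buf + buf_size), and only the payload portion is DMA
 * mapped for the send.
 */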
823 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
825 struct ib_mad_agent_private *mad_agent_priv;
827 mad_agent_priv = container_of(send_buf->mad_agent,
828 struct ib_mad_agent_private, agent);
830 dma_unmap_single(send_buf->mad_agent->device->dma_device,
831 pci_unmap_addr(send_buf, mapping),
832 send_buf->sge.length, DMA_TO_DEVICE);
833 kfree(send_buf->mad);
835 if (atomic_dec_and_test(&mad_agent_priv->refcount))
836 wake_up(&mad_agent_priv->wait);
838 EXPORT_SYMBOL(ib_free_send_mad);
840 static int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
842 struct ib_mad_qp_info *qp_info;
843 struct ib_send_wr *bad_send_wr;
847 /* Set WR ID to find mad_send_wr upon completion */
848 qp_info = mad_send_wr->mad_agent_priv->qp_info;
849 mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
850 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
852 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
853 if (qp_info->send_queue.count++ < qp_info->send_queue.max_active) {
854 list_add_tail(&mad_send_wr->mad_list.list,
855 &qp_info->send_queue.list);
856 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
857 ret = ib_post_send(mad_send_wr->mad_agent_priv->agent.qp,
858 &mad_send_wr->send_wr, &bad_send_wr);
860 printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
861 dequeue_mad(&mad_send_wr->mad_list);
864 list_add_tail(&mad_send_wr->mad_list.list,
865 &qp_info->overflow_list);
866 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
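/*
 * ib_send_mad() above throttles the hardware send queue: while fewer
 * than max_active sends are outstanding the WR is posted immediately,
 * otherwise it is parked on overflow_list and posted later from the
 * send completion handler.
 */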
873 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
874 * with the registered client
876 int ib_post_send_mad(struct ib_mad_agent *mad_agent,
877 struct ib_send_wr *send_wr,
878 struct ib_send_wr **bad_send_wr)
881 struct ib_mad_agent_private *mad_agent_priv;
883 /* Validate supplied parameters */
887 if (!mad_agent || !send_wr)
890 if (!mad_agent->send_handler)
893 mad_agent_priv = container_of(mad_agent,
894 struct ib_mad_agent_private,
897 /* Walk list of send WRs and post each on send list */
900 struct ib_send_wr *next_send_wr;
901 struct ib_mad_send_wr_private *mad_send_wr;
904 /* Validate more parameters */
905 if (send_wr->num_sge > IB_MAD_SEND_REQ_MAX_SG)
908 if (send_wr->wr.ud.timeout_ms && !mad_agent->recv_handler)
911 if (!send_wr->wr.ud.mad_hdr) {
912 printk(KERN_ERR PFX "MAD header must be supplied "
913 "in WR %p\n", send_wr);
918 * Save pointer to next work request to post in case the
919 * current one completes, and the user modifies the work
920 * request associated with the completion
922 next_send_wr = (struct ib_send_wr *)send_wr->next;
924 smp = (struct ib_smp *)send_wr->wr.ud.mad_hdr;
925 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
926 ret = handle_outgoing_dr_smp(mad_agent_priv, smp,
928 if (ret < 0) /* error */
930 else if (ret == 1) /* locally consumed */
934 /* Allocate MAD send WR tracking structure */
935 mad_send_wr = kmalloc(sizeof *mad_send_wr, GFP_ATOMIC);
937 printk(KERN_ERR PFX "No memory for "
938 "ib_mad_send_wr_private\n");
943 mad_send_wr->send_wr = *send_wr;
944 mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
945 memcpy(mad_send_wr->sg_list, send_wr->sg_list,
946 sizeof *send_wr->sg_list * send_wr->num_sge);
947 mad_send_wr->wr_id = mad_send_wr->send_wr.wr_id;
948 mad_send_wr->send_wr.next = NULL;
949 mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid;
950 mad_send_wr->mad_agent_priv = mad_agent_priv;
951 /* Timeout will be updated after send completes */
952 mad_send_wr->timeout = msecs_to_jiffies(send_wr->wr.
954 mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries;
955 /* One reference for the work request posted to the QP, plus one if a response is expected */
956 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
957 mad_send_wr->status = IB_WC_SUCCESS;
959 /* Reference MAD agent until send completes */
960 atomic_inc(&mad_agent_priv->refcount);
961 spin_lock_irqsave(&mad_agent_priv->lock, flags);
962 list_add_tail(&mad_send_wr->agent_list,
963 &mad_agent_priv->send_list);
964 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
966 ret = ib_send_mad(mad_send_wr);
968 /* Fail send request */
969 spin_lock_irqsave(&mad_agent_priv->lock, flags);
970 list_del(&mad_send_wr->agent_list);
971 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
972 atomic_dec(&mad_agent_priv->refcount);
976 send_wr = next_send_wr;
981 *bad_send_wr = send_wr;
985 EXPORT_SYMBOL(ib_post_send_mad);
988 * ib_free_recv_mad - Returns data buffers used to receive
989 * a MAD to the access layer
991 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
993 struct ib_mad_recv_buf *entry;
994 struct ib_mad_private_header *mad_priv_hdr;
995 struct ib_mad_private *priv;
997 mad_priv_hdr = container_of(mad_recv_wc,
998 struct ib_mad_private_header,
1000 priv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1003 * Walk receive buffer list associated with this WC
1004 * No need to remove them from list of receive buffers
1006 list_for_each_entry(entry, &mad_recv_wc->recv_buf.list, list) {
1007 /* Free previous receive buffer */
1008 kmem_cache_free(ib_mad_cache, priv);
1009 mad_priv_hdr = container_of(mad_recv_wc,
1010 struct ib_mad_private_header,
1012 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1016 /* Free last buffer */
1017 kmem_cache_free(ib_mad_cache, priv);
1019 EXPORT_SYMBOL(ib_free_recv_mad);
1021 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1023 ib_mad_send_handler send_handler,
1024 ib_mad_recv_handler recv_handler,
1027 return ERR_PTR(-EINVAL); /* XXX: for now */
1029 EXPORT_SYMBOL(ib_redirect_mad_qp);
1031 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1034 printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
1037 EXPORT_SYMBOL(ib_process_mad_wc);
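/*
 * The registration tables consulted below form a small hierarchy: the
 * per-port version[] entries point to a class table (or, for the "new"
 * vendor classes, a vendor table keyed first by class and then by OUI),
 * and each leaf is a method table mapping method numbers to the owning
 * agent.
 */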
1039 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1040 struct ib_mad_reg_req *mad_reg_req)
1044 for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
1045 i < IB_MGMT_MAX_METHODS;
1046 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1048 if ((*method)->agent[i]) {
1049 printk(KERN_ERR PFX "Method %d already in use\n", i);
1056 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1058 /* Allocate management method table */
1059 *method = kmalloc(sizeof **method, GFP_ATOMIC);
1061 printk(KERN_ERR PFX "No memory for "
1062 "ib_mad_mgmt_method_table\n");
1065 /* Clear management method table */
1066 memset(*method, 0, sizeof **method);
1072 * Check to see if there are any methods still in use
1074 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1078 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1079 if (method->agent[i])
1085 * Check to see if there are any method tables for this class still in use
1087 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1091 for (i = 0; i < MAX_MGMT_CLASS; i++)
1092 if (class->method_table[i])
1097 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1101 for (i = 0; i < MAX_MGMT_OUI; i++)
1102 if (vendor_class->method_table[i])
1107 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1112 for (i = 0; i < MAX_MGMT_OUI; i++)
1113 /* Is there a matching OUI for this vendor class? */
1114 if (!memcmp(vendor_class->oui[i], oui, 3))
1120 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1124 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1125 if (vendor->vendor_class[i])
1131 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1132 struct ib_mad_agent_private *agent)
1136 /* Remove any methods for this mad agent */
1137 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1138 if (method->agent[i] == agent) {
1139 method->agent[i] = NULL;
1144 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1145 struct ib_mad_agent_private *agent_priv,
1148 struct ib_mad_port_private *port_priv;
1149 struct ib_mad_mgmt_class_table **class;
1150 struct ib_mad_mgmt_method_table **method;
1153 port_priv = agent_priv->qp_info->port_priv;
1154 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1156 /* Allocate management class table for "new" class version */
1157 *class = kmalloc(sizeof **class, GFP_ATOMIC);
1159 printk(KERN_ERR PFX "No memory for "
1160 "ib_mad_mgmt_class_table\n");
1164 /* Clear management class table */
1165 memset(*class, 0, sizeof(**class));
1166 /* Allocate method table for this management class */
1167 method = &(*class)->method_table[mgmt_class];
1168 if ((ret = allocate_method_table(method)))
1171 method = &(*class)->method_table[mgmt_class];
1173 /* Allocate method table for this management class */
1174 if ((ret = allocate_method_table(method)))
1179 /* Now, make sure methods are not already in use */
1180 if (method_in_use(method, mad_reg_req))
1183 /* Finally, add in methods being registered */
1184 for (i = find_first_bit(mad_reg_req->method_mask,
1185 IB_MGMT_MAX_METHODS);
1186 i < IB_MGMT_MAX_METHODS;
1187 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1189 (*method)->agent[i] = agent_priv;
1194 /* Remove any methods for this mad agent */
1195 remove_methods_mad_agent(*method, agent_priv);
1196 /* Now, check to see if there are any methods in use */
1197 if (!check_method_table(*method)) {
1198 /* If not, release management method table */
1211 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1212 struct ib_mad_agent_private *agent_priv)
1214 struct ib_mad_port_private *port_priv;
1215 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1216 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1217 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1218 struct ib_mad_mgmt_method_table **method;
1219 int i, ret = -ENOMEM;
1222 /* "New" vendor (with OUI) class */
1223 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1224 port_priv = agent_priv->qp_info->port_priv;
1225 vendor_table = &port_priv->version[
1226 mad_reg_req->mgmt_class_version].vendor;
1227 if (!*vendor_table) {
1228 /* Allocate mgmt vendor class table for "new" class version */
1229 vendor = kmalloc(sizeof *vendor, GFP_ATOMIC);
1231 printk(KERN_ERR PFX "No memory for "
1232 "ib_mad_mgmt_vendor_class_table\n");
1235 /* Clear management vendor class table */
1236 memset(vendor, 0, sizeof(*vendor));
1237 *vendor_table = vendor;
1239 if (!(*vendor_table)->vendor_class[vclass]) {
1240 /* Allocate table for this management vendor class */
1241 vendor_class = kmalloc(sizeof *vendor_class, GFP_ATOMIC);
1242 if (!vendor_class) {
1243 printk(KERN_ERR PFX "No memory for "
1244 "ib_mad_mgmt_vendor_class\n");
1247 memset(vendor_class, 0, sizeof(*vendor_class));
1248 (*vendor_table)->vendor_class[vclass] = vendor_class;
1250 for (i = 0; i < MAX_MGMT_OUI; i++) {
1251 /* Is there a matching OUI for this vendor class? */
1252 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1253 mad_reg_req->oui, 3)) {
1254 method = &(*vendor_table)->vendor_class[
1255 vclass]->method_table[i];
1260 for (i = 0; i < MAX_MGMT_OUI; i++) {
1261 /* OUI slot available ? */
1262 if (!is_vendor_oui((*vendor_table)->vendor_class[
1264 method = &(*vendor_table)->vendor_class[
1265 vclass]->method_table[i];
1267 /* Allocate method table for this OUI */
1268 if ((ret = allocate_method_table(method)))
1270 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1271 mad_reg_req->oui, 3);
1275 printk(KERN_ERR PFX "All OUI slots in use\n");
1279 /* Now, make sure methods are not already in use */
1280 if (method_in_use(method, mad_reg_req))
1283 /* Finally, add in methods being registered */
1284 for (i = find_first_bit(mad_reg_req->method_mask,
1285 IB_MGMT_MAX_METHODS);
1286 i < IB_MGMT_MAX_METHODS;
1287 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1289 (*method)->agent[i] = agent_priv;
1294 /* Remove any methods for this mad agent */
1295 remove_methods_mad_agent(*method, agent_priv);
1296 /* Now, check to see if there are any methods in use */
1297 if (!check_method_table(*method)) {
1298 /* If not, release management method table */
1305 (*vendor_table)->vendor_class[vclass] = NULL;
1306 kfree(vendor_class);
1310 *vendor_table = NULL;
1317 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1319 struct ib_mad_port_private *port_priv;
1320 struct ib_mad_mgmt_class_table *class;
1321 struct ib_mad_mgmt_method_table *method;
1322 struct ib_mad_mgmt_vendor_class_table *vendor;
1323 struct ib_mad_mgmt_vendor_class *vendor_class;
1328 * Was a MAD registration request supplied
1329 * with the original registration?
1331 if (!agent_priv->reg_req) {
1335 port_priv = agent_priv->qp_info->port_priv;
1336 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1337 class = port_priv->version[
1338 agent_priv->reg_req->mgmt_class_version].class;
1342 method = class->method_table[mgmt_class];
1344 /* Remove any methods for this mad agent */
1345 remove_methods_mad_agent(method, agent_priv);
1346 /* Now, check to see if there are any methods still in use */
1347 if (!check_method_table(method)) {
1348 /* If not, release management method table */
1350 class->method_table[mgmt_class] = NULL;
1351 /* Any management classes left ? */
1352 if (!check_class_table(class)) {
1353 /* If not, release management class table */
1356 agent_priv->reg_req->
1357 mgmt_class_version].class = NULL;
1363 if (!is_vendor_class(mgmt_class))
1366 /* normalize mgmt_class to vendor range 2 */
1367 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1368 vendor = port_priv->version[
1369 agent_priv->reg_req->mgmt_class_version].vendor;
1374 vendor_class = vendor->vendor_class[mgmt_class];
1376 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1379 method = vendor_class->method_table[index];
1381 /* Remove any methods for this mad agent */
1382 remove_methods_mad_agent(method, agent_priv);
1384 * Now, check to see if there are
1385 * any methods still in use
1387 if (!check_method_table(method)) {
1388 /* If not, release management method table */
1390 vendor_class->method_table[index] = NULL;
1391 memset(vendor_class->oui[index], 0, 3);
1392 /* Any OUIs left ? */
1393 if (!check_vendor_class(vendor_class)) {
1394 /* If not, release vendor class table */
1395 kfree(vendor_class);
1396 vendor->vendor_class[mgmt_class] = NULL;
1397 /* Any other vendor classes left ? */
1398 if (!check_vendor_table(vendor)) {
1401 agent_priv->reg_req->
1402 mgmt_class_version].
1414 static struct ib_mad_agent_private *
1415 find_mad_agent(struct ib_mad_port_private *port_priv,
1418 struct ib_mad_agent_private *mad_agent = NULL;
1419 unsigned long flags;
1421 spin_lock_irqsave(&port_priv->reg_lock, flags);
1422 if (response_mad(mad)) {
1424 struct ib_mad_agent_private *entry;
1427 * Routing is based on high 32 bits of transaction ID
1430 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
1431 list_for_each_entry(entry, &port_priv->agent_list,
1433 if (entry->agent.hi_tid == hi_tid) {
1439 struct ib_mad_mgmt_class_table *class;
1440 struct ib_mad_mgmt_method_table *method;
1441 struct ib_mad_mgmt_vendor_class_table *vendor;
1442 struct ib_mad_mgmt_vendor_class *vendor_class;
1443 struct ib_vendor_mad *vendor_mad;
1447 * Routing is based on version, class, and method
1448 * For "newer" vendor MADs, also based on OUI
1450 if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
1452 if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1453 class = port_priv->version[
1454 mad->mad_hdr.class_version].class;
1457 method = class->method_table[convert_mgmt_class(
1458 mad->mad_hdr.mgmt_class)];
1460 mad_agent = method->agent[mad->mad_hdr.method &
1461 ~IB_MGMT_METHOD_RESP];
1463 vendor = port_priv->version[
1464 mad->mad_hdr.class_version].vendor;
1467 vendor_class = vendor->vendor_class[vendor_class_index(
1468 mad->mad_hdr.mgmt_class)];
1471 /* Find matching OUI */
1472 vendor_mad = (struct ib_vendor_mad *)mad;
1473 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1476 method = vendor_class->method_table[index];
1478 mad_agent = method->agent[mad->mad_hdr.method &
1479 ~IB_MGMT_METHOD_RESP];
1485 if (mad_agent->agent.recv_handler)
1486 atomic_inc(&mad_agent->refcount);
1488 printk(KERN_NOTICE PFX "No receive handler for client "
1490 &mad_agent->agent, port_priv->port_num);
1495 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1500 static int validate_mad(struct ib_mad *mad, u32 qp_num)
1504 /* Make sure MAD base version is understood */
1505 if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
1506 printk(KERN_ERR PFX "MAD received with unsupported base "
1507 "version %d\n", mad->mad_hdr.base_version);
1511 /* Filter SMI packets sent to other than QP0 */
1512 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1513 (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1517 /* Filter GSI packets sent to QP0 */
1526 static struct ib_mad_send_wr_private*
1527 find_send_req(struct ib_mad_agent_private *mad_agent_priv,
1530 struct ib_mad_send_wr_private *mad_send_wr;
1532 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
1534 if (mad_send_wr->tid == tid)
1539 * It's possible to receive the response before we've
1540 * been notified that the send has completed
1542 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
1544 if (mad_send_wr->tid == tid && mad_send_wr->timeout) {
1545 /* Verify request has not been canceled */
1546 return (mad_send_wr->status == IB_WC_SUCCESS) ?
1553 static void ib_mark_req_done(struct ib_mad_send_wr_private *mad_send_wr)
1555 mad_send_wr->timeout = 0;
1556 if (mad_send_wr->refcount == 1) {
1557 list_del(&mad_send_wr->agent_list);
1558 list_add_tail(&mad_send_wr->agent_list,
1559 &mad_send_wr->mad_agent_priv->done_list);
1563 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1564 struct ib_mad_recv_wc *mad_recv_wc)
1566 struct ib_mad_send_wr_private *mad_send_wr;
1567 struct ib_mad_send_wc mad_send_wc;
1568 unsigned long flags;
1571 INIT_LIST_HEAD(&mad_recv_wc->recv_buf.list);
1572 /* Complete corresponding request */
1573 if (response_mad(mad_recv_wc->recv_buf.mad)) {
1574 tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid;
1575 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1576 mad_send_wr = find_send_req(mad_agent_priv, tid);
1578 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1579 ib_free_recv_mad(mad_recv_wc);
1580 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1581 wake_up(&mad_agent_priv->wait);
1584 ib_mark_req_done(mad_send_wr);
1585 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1587 /* Defined behavior is to complete response before request */
1588 mad_recv_wc->wc->wr_id = mad_send_wr->wr_id;
1589 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1591 atomic_dec(&mad_agent_priv->refcount);
1593 mad_send_wc.status = IB_WC_SUCCESS;
1594 mad_send_wc.vendor_err = 0;
1595 mad_send_wc.wr_id = mad_send_wr->wr_id;
1596 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1598 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1600 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1601 wake_up(&mad_agent_priv->wait);
1605 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1608 struct ib_mad_qp_info *qp_info;
1609 struct ib_mad_private_header *mad_priv_hdr;
1610 struct ib_mad_private *recv, *response;
1611 struct ib_mad_list_head *mad_list;
1612 struct ib_mad_agent_private *mad_agent;
1614 response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1616 printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
1617 "for response buffer\n");
1619 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1620 qp_info = mad_list->mad_queue->qp_info;
1621 dequeue_mad(mad_list);
1623 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1625 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1626 dma_unmap_single(port_priv->device->dma_device,
1627 pci_unmap_addr(&recv->header, mapping),
1628 sizeof(struct ib_mad_private) -
1629 sizeof(struct ib_mad_private_header),
1632 /* Setup MAD receive work completion from "normal" work completion */
1633 recv->header.wc = *wc;
1634 recv->header.recv_wc.wc = &recv->header.wc;
1635 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1636 recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1637 recv->header.recv_wc.recv_buf.grh = &recv->grh;
1639 if (atomic_read(&qp_info->snoop_count))
1640 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
1643 if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
1646 if (recv->mad.mad.mad_hdr.mgmt_class ==
1647 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1648 if (!smi_handle_dr_smp_recv(&recv->mad.smp,
1649 port_priv->device->node_type,
1650 port_priv->port_num,
1651 port_priv->device->phys_port_cnt))
1653 if (!smi_check_forward_dr_smp(&recv->mad.smp))
1655 if (!smi_handle_dr_smp_send(&recv->mad.smp,
1656 port_priv->device->node_type,
1657 port_priv->port_num))
1659 if (!smi_check_local_dr_smp(&recv->mad.smp,
1661 port_priv->port_num))
1666 /* Give driver "right of first refusal" on incoming MAD */
1667 if (port_priv->device->process_mad) {
1671 printk(KERN_ERR PFX "No memory for response MAD\n");
1673 * Is it better to assume that
1674 * it wouldn't be processed ?
1679 ret = port_priv->device->process_mad(port_priv->device, 0,
1680 port_priv->port_num,
1683 &response->mad.mad);
1684 if (ret & IB_MAD_RESULT_SUCCESS) {
1685 if (ret & IB_MAD_RESULT_CONSUMED)
1687 if (ret & IB_MAD_RESULT_REPLY) {
1689 if (!agent_send(response, &recv->grh, wc,
1691 port_priv->port_num))
1698 mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1700 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
1702 * recv is freed up in error cases in ib_mad_complete_recv
1703 * or via recv_handler in ib_mad_complete_recv()
1709 /* Post another receive request for this QP */
1711 ib_mad_post_receive_mads(qp_info, response);
1713 kmem_cache_free(ib_mad_cache, recv);
1715 ib_mad_post_receive_mads(qp_info, recv);
1718 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1720 struct ib_mad_send_wr_private *mad_send_wr;
1721 unsigned long delay;
1723 if (list_empty(&mad_agent_priv->wait_list)) {
1724 cancel_delayed_work(&mad_agent_priv->timed_work);
1726 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
1727 struct ib_mad_send_wr_private,
1730 if (time_after(mad_agent_priv->timeout,
1731 mad_send_wr->timeout)) {
1732 mad_agent_priv->timeout = mad_send_wr->timeout;
1733 cancel_delayed_work(&mad_agent_priv->timed_work);
1734 delay = mad_send_wr->timeout - jiffies;
1735 if ((long)delay <= 0)
1737 queue_delayed_work(mad_agent_priv->qp_info->
1739 &mad_agent_priv->timed_work, delay);
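/*
 * wait_for_response() below keeps wait_list sorted by absolute timeout,
 * earliest expiry at the head, so adjust_timeout() and timeout_sends()
 * only ever need to examine the first entry.
 */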
1744 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
1746 struct ib_mad_agent_private *mad_agent_priv;
1747 struct ib_mad_send_wr_private *temp_mad_send_wr;
1748 struct list_head *list_item;
1749 unsigned long delay;
1751 mad_agent_priv = mad_send_wr->mad_agent_priv;
1752 list_del(&mad_send_wr->agent_list);
1754 delay = mad_send_wr->timeout;
1755 mad_send_wr->timeout += jiffies;
1758 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
1759 temp_mad_send_wr = list_entry(list_item,
1760 struct ib_mad_send_wr_private,
1762 if (time_after(mad_send_wr->timeout,
1763 temp_mad_send_wr->timeout))
1768 list_item = &mad_agent_priv->wait_list;
1769 list_add(&mad_send_wr->agent_list, list_item);
1771 /* Reschedule a work item if we have a shorter timeout */
1772 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
1773 cancel_delayed_work(&mad_agent_priv->timed_work);
1774 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
1775 &mad_agent_priv->timed_work, delay);
1779 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
1782 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
1783 wait_for_response(mad_send_wr);
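/*
 * Send WR reference counting: refcount starts at 1 for the send
 * completion and is bumped to 2 when a response is expected (timeout
 * set).  ib_mad_complete_send_wr() drops one reference per event and
 * only completes the request back to the client when the count reaches
 * zero.
 */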
1787 * Process a send work completion
1789 static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
1790 struct ib_mad_send_wc *mad_send_wc)
1792 struct ib_mad_agent_private *mad_agent_priv;
1793 unsigned long flags;
1795 mad_agent_priv = mad_send_wr->mad_agent_priv;
1796 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1797 if (mad_send_wc->status != IB_WC_SUCCESS &&
1798 mad_send_wr->status == IB_WC_SUCCESS) {
1799 mad_send_wr->status = mad_send_wc->status;
1800 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
1803 if (--mad_send_wr->refcount > 0) {
1804 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
1805 mad_send_wr->status == IB_WC_SUCCESS) {
1806 wait_for_response(mad_send_wr);
1808 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1812 /* Remove send from MAD agent and notify client of completion */
1813 list_del(&mad_send_wr->agent_list);
1814 adjust_timeout(mad_agent_priv);
1815 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1817 if (mad_send_wr->status != IB_WC_SUCCESS)
1818 mad_send_wc->status = mad_send_wr->status;
1819 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
1822 /* Release reference on agent taken when sending */
1823 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1824 wake_up(&mad_agent_priv->wait);
1829 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
1832 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
1833 struct ib_mad_list_head *mad_list;
1834 struct ib_mad_qp_info *qp_info;
1835 struct ib_mad_queue *send_queue;
1836 struct ib_send_wr *bad_send_wr;
1837 unsigned long flags;
1840 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1841 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
1843 send_queue = mad_list->mad_queue;
1844 qp_info = send_queue->qp_info;
1847 queued_send_wr = NULL;
1848 spin_lock_irqsave(&send_queue->lock, flags);
1849 list_del(&mad_list->list);
1851 /* Move queued send to the send queue */
1852 if (send_queue->count-- > send_queue->max_active) {
1853 mad_list = container_of(qp_info->overflow_list.next,
1854 struct ib_mad_list_head, list);
1855 queued_send_wr = container_of(mad_list,
1856 struct ib_mad_send_wr_private,
1858 list_del(&mad_list->list);
1859 list_add_tail(&mad_list->list, &send_queue->list);
1861 spin_unlock_irqrestore(&send_queue->lock, flags);
1863 /* Restore client wr_id in WC and complete send */
1864 wc->wr_id = mad_send_wr->wr_id;
1865 if (atomic_read(&qp_info->snoop_count))
1866 snoop_send(qp_info, &mad_send_wr->send_wr,
1867 (struct ib_mad_send_wc *)wc,
1868 IB_MAD_SNOOP_SEND_COMPLETIONS);
1869 ib_mad_complete_send_wr(mad_send_wr, (struct ib_mad_send_wc *)wc);
1871 if (queued_send_wr) {
1872 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
1875 printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
1876 mad_send_wr = queued_send_wr;
1877 wc->status = IB_WC_LOC_QP_OP_ERR;
1883 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
1885 struct ib_mad_send_wr_private *mad_send_wr;
1886 struct ib_mad_list_head *mad_list;
1887 unsigned long flags;
1889 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1890 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
1891 mad_send_wr = container_of(mad_list,
1892 struct ib_mad_send_wr_private,
1894 mad_send_wr->retry = 1;
1896 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1899 static void mad_error_handler(struct ib_mad_port_private *port_priv,
1902 struct ib_mad_list_head *mad_list;
1903 struct ib_mad_qp_info *qp_info;
1904 struct ib_mad_send_wr_private *mad_send_wr;
1907 /* Determine if failure was a send or receive */
1908 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1909 qp_info = mad_list->mad_queue->qp_info;
1910 if (mad_list->mad_queue == &qp_info->recv_queue)
1912 * Receive errors indicate that the QP has entered the error
1913 * state - error handling/shutdown code will cleanup
1918 * Send errors will transition the QP to SQE - move
1919 * QP to RTS and repost flushed work requests
1921 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
1923 if (wc->status == IB_WC_WR_FLUSH_ERR) {
1924 if (mad_send_wr->retry) {
1926 struct ib_send_wr *bad_send_wr;
1928 mad_send_wr->retry = 0;
1929 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
1932 ib_mad_send_done_handler(port_priv, wc);
1934 ib_mad_send_done_handler(port_priv, wc);
1936 struct ib_qp_attr *attr;
1938 /* Transition QP to RTS and fail offending send */
1939 attr = kmalloc(sizeof *attr, GFP_KERNEL);
1941 attr->qp_state = IB_QPS_RTS;
1942 attr->cur_qp_state = IB_QPS_SQE;
1943 ret = ib_modify_qp(qp_info->qp, attr,
1944 IB_QP_STATE | IB_QP_CUR_STATE);
1947 printk(KERN_ERR PFX "mad_error_handler - "
1948 "ib_modify_qp to RTS : %d\n", ret);
1950 mark_sends_for_retry(qp_info);
1952 ib_mad_send_done_handler(port_priv, wc);
1957 * IB MAD completion callback
1959 static void ib_mad_completion_handler(void *data)
1961 struct ib_mad_port_private *port_priv;
1964 port_priv = (struct ib_mad_port_private *)data;
1965 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
1967 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
1968 if (wc.status == IB_WC_SUCCESS) {
1969 switch (wc.opcode) {
1971 ib_mad_send_done_handler(port_priv, &wc);
1974 ib_mad_recv_done_handler(port_priv, &wc);
1981 mad_error_handler(port_priv, &wc);
1985 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
1987 unsigned long flags;
1988 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
1989 struct ib_mad_send_wc mad_send_wc;
1990 struct list_head cancel_list;
1992 INIT_LIST_HEAD(&cancel_list);
1994 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1995 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
1996 &mad_agent_priv->send_list, agent_list) {
1997 if (mad_send_wr->status == IB_WC_SUCCESS) {
1998 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
1999 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2003 /* Empty wait list to prevent receives from finding a request */
2004 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2005 /* Empty local completion list as well */
2006 list_splice_init(&mad_agent_priv->local_list, &cancel_list);
2007 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2009 /* Report all cancelled requests */
2010 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2011 mad_send_wc.vendor_err = 0;
2013 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2014 &cancel_list, agent_list) {
2015 mad_send_wc.wr_id = mad_send_wr->wr_id;
2016 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2019 list_del(&mad_send_wr->agent_list);
2021 atomic_dec(&mad_agent_priv->refcount);
2025 static struct ib_mad_send_wr_private*
2026 find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv,
2029 struct ib_mad_send_wr_private *mad_send_wr;
2031 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2033 if (mad_send_wr->wr_id == wr_id)
2037 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2039 if (mad_send_wr->wr_id == wr_id)
2045 int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms)
2047 struct ib_mad_agent_private *mad_agent_priv;
2048 struct ib_mad_send_wr_private *mad_send_wr;
2049 unsigned long flags;
2051 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2053 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2054 mad_send_wr = find_send_by_wr_id(mad_agent_priv, wr_id);
2055 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2056 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2061 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2062 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2065 mad_send_wr->send_wr.wr.ud.timeout_ms = timeout_ms;
2066 if (!mad_send_wr->timeout || mad_send_wr->refcount > 1)
2067 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2069 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2071 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2074 EXPORT_SYMBOL(ib_modify_mad);
2076 void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id)
2078 ib_modify_mad(mad_agent, wr_id, 0);
2080 EXPORT_SYMBOL(ib_cancel_mad);
2082 static void local_completions(void *data)
2084 struct ib_mad_agent_private *mad_agent_priv;
2085 struct ib_mad_local_private *local;
2086 struct ib_mad_agent_private *recv_mad_agent;
2087 unsigned long flags;
2090 struct ib_mad_send_wc mad_send_wc;
2092 mad_agent_priv = (struct ib_mad_agent_private *)data;
2094 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2095 while (!list_empty(&mad_agent_priv->local_list)) {
2096 local = list_entry(mad_agent_priv->local_list.next,
2097 struct ib_mad_local_private,
2099 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2100 if (local->mad_priv) {
2101 recv_mad_agent = local->recv_mad_agent;
2102 if (!recv_mad_agent) {
2103 printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
2104 goto local_send_completion;
2109 * Defined behavior is to complete response
2112 build_smp_wc(local->wr_id, IB_LID_PERMISSIVE,
2114 recv_mad_agent->agent.port_num, &wc);
2116 local->mad_priv->header.recv_wc.wc = &wc;
2117 local->mad_priv->header.recv_wc.mad_len =
2118 sizeof(struct ib_mad);
2119 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.recv_buf.list);
2120 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2121 local->mad_priv->header.recv_wc.recv_buf.mad =
2122 &local->mad_priv->mad.mad;
2123 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2124 snoop_recv(recv_mad_agent->qp_info,
2125 &local->mad_priv->header.recv_wc,
2126 IB_MAD_SNOOP_RECVS);
2127 recv_mad_agent->agent.recv_handler(
2128 &recv_mad_agent->agent,
2129 &local->mad_priv->header.recv_wc);
2130 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2131 atomic_dec(&recv_mad_agent->refcount);
2132 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2135 local_send_completion:
2137 mad_send_wc.status = IB_WC_SUCCESS;
2138 mad_send_wc.vendor_err = 0;
2139 mad_send_wc.wr_id = local->wr_id;
2140 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2141 snoop_send(mad_agent_priv->qp_info, &local->send_wr,
2143 IB_MAD_SNOOP_SEND_COMPLETIONS);
2144 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2147 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2148 list_del(&local->completion_list);
2149 atomic_dec(&mad_agent_priv->refcount);
2151 kmem_cache_free(ib_mad_cache, local->mad_priv);
2154 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
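/*
 * retry_send(): consume one retry and attempt to repost the request; if
 * the repost succeeds the WR goes back on send_list with an extra
 * reference, otherwise timeout_sends() reports the timeout to the
 * client.
 */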
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries--)
		return -ETIMEDOUT;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_wr.
						wr.ud.timeout_ms);

	ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}

static void timeout_sends(void *data)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = (struct ib_mad_agent_private *)data;
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.wr_id = mad_send_wr->wr_id;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		kfree(mad_send_wr);
		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
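
/*
 * Completion callback for the MAD CQ: defer processing to the port's
 * single-threaded workqueue.
 */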
static void ib_mad_thread_completion_handler(struct ib_cq *cq)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;

	queue_work(port_priv->wq, &port_priv->work);
}

/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
	sg_list.lkey = (*qp_info->port_priv->mr).lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
			if (!mad_priv) {
				printk(KERN_ERR PFX "No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.addr = dma_map_single(qp_info->port_priv->
						device->dma_device,
					      &mad_priv->grh,
					      sizeof *mad_priv -
						sizeof mad_priv->header,
					      DMA_FROM_DEVICE);
		pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
		mad_priv->header.mad_list.mad_queue = recv_queue;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			dma_unmap_single(qp_info->port_priv->device->dma_device,
					 pci_unmap_addr(&mad_priv->header,
							mapping),
					 sizeof *mad_priv -
					   sizeof mad_priv->header,
					 DMA_FROM_DEVICE);
			kmem_cache_free(ib_mad_cache, mad_priv);
			printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}

/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	while (!list_empty(&qp_info->recv_queue.list)) {
		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		dma_unmap_single(qp_info->port_priv->device->dma_device,
				 pci_unmap_addr(&recv->header, mapping),
				 sizeof(struct ib_mad_private) -
				 sizeof(struct ib_mad_private_header),
				 DMA_FROM_DEVICE);
		kmem_cache_free(ib_mad_cache, recv);
	}

	qp_info->recv_queue.count = 0;
}
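
/*
 * Start the port: bring both MAD QPs to RTS and post the initial
 * set of receive WRs.
 */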
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}
	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = 0;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "INIT: %d\n", i, ret);
			goto out;
		}
		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTR: %d\n", i, ret);
			goto out;
		}
		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTS: %d\n", i, ret);
			goto out;
		}
	}
	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		printk(KERN_ERR PFX "Failed to request completion "
		       "notification: %d\n", ret);
		goto out;
	}
	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}
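
/*
 * Report fatal asynchronous errors on a MAD QP.
 */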
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
	       event->event, qp_info->qp->qp_num);
}
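
/*
 * Initialize a MAD send or receive queue.
 */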
static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}
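
/*
 * Initialize the send/receive queues and snoop state for one MAD QP.
 */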
static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}
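
/*
 * Create the special QP (SMI or GSI) used to send and receive MADs.
 */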
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
	qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
		       get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
	qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
	return 0;

error:
	return ret;
}
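
/*
 * Destroy a MAD QP and free its snoop table, if one was allocated.
 */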
static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	ib_destroy_qp(qp_info->qp);
	if (qp_info->snoop_table)
		kfree(qp_info->snoop_table);
}

/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];

	/* Create new device info */
	port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}
	memset(port_priv, 0, sizeof *port_priv);
	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
	port_priv->cq = ib_create_cq(port_priv->device,
				     (ib_comp_handler)
					ib_mad_thread_completion_handler,
				     NULL, port_priv, cq_size);
	if (IS_ERR(port_priv->cq)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device);
	if (IS_ERR(port_priv->pd)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(port_priv->mr)) {
		printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
		ret = PTR_ERR(port_priv->mr);
		goto error5;
	}

	ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
	if (ret)
		goto error6;
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = create_singlethread_workqueue(name);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		printk(KERN_ERR PFX "Couldn't start port\n");
		goto error9;
	}

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
	return 0;

error9:
	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dereg_mr(port_priv->mr);
error5:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);
	return ret;
}

/*
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		printk(KERN_ERR PFX "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	/* Stop processing completions. */
	flush_workqueue(port_priv->wq);
	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dereg_mr(port_priv->mr);
	ib_dealloc_pd(port_priv->pd);
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}
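
/*
 * ib_client add callback: open MAD and agent services on every port of a
 * newly registered device, rolling back on failure.
 */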
static void ib_mad_init_device(struct ib_device *device)
{
	int num_ports, cur_port, i;

	if (device->node_type == IB_NODE_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		if (ib_mad_port_open(device, cur_port)) {
			printk(KERN_ERR PFX "Couldn't open %s port %d\n",
			       device->name, cur_port);
			goto error_device_open;
		}
		if (ib_agent_port_open(device, cur_port)) {
			printk(KERN_ERR PFX "Couldn't open %s port %d "
			       "for agents\n", device->name, cur_port);
			goto error_device_open;
		}
	}
	return;

error_device_open:
	while (i > 0) {
		cur_port--;
		if (ib_agent_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n", device->name, cur_port);
		if (ib_mad_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, cur_port);
		i--;
	}
}
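
/*
 * ib_client remove callback: tear down MAD and agent services on each port.
 */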
static void ib_mad_remove_device(struct ib_device *device)
{
	int i, num_ports, cur_port;

	if (device->node_type == IB_NODE_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		if (ib_agent_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n", device->name, cur_port);
		if (ib_mad_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, cur_port);
	}
}
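
/*
 * Register with the IB core so the MAD layer is notified when devices
 * are added and removed.
 */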
static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};
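
/*
 * Module init: create the MAD slab cache and register as an IB client.
 */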
static int __init ib_mad_init_module(void)
{
	int ret;

	spin_lock_init(&ib_mad_port_list_lock);
	spin_lock_init(&ib_agent_port_list_lock);

	ib_mad_cache = kmem_cache_create("ib_mad",
					 sizeof(struct ib_mad_private),
					 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!ib_mad_cache) {
		printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
		ret = -ENOMEM;
		goto error1;
	}

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
		ret = -EINVAL;
		goto error2;
	}
	return 0;

error2:
	kmem_cache_destroy(ib_mad_cache);
error1:
	return ret;
}

static void __exit ib_mad_cleanup_module(void)
{
	ib_unregister_client(&mad_client);
	if (kmem_cache_destroy(ib_mad_cache)) {
		printk(KERN_DEBUG PFX "Failed to destroy ib_mad cache\n");
	}
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);