/*
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mad.c 2817 2005-07-07 11:29:26Z halr $
 */
#include <linux/dma-mapping.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");

kmem_cache_t *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static spinlock_t ib_mad_port_list_lock;
/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					struct ib_mad *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(void *data);
static void local_completions(void *data);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
/*
 * Returns an ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}
static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type)
	{
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}
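/*
 * Note on class routing: management classes 0x30-0x4F (vendor range 2 in
 * the IBA spec) carry an OUI and get a per-OUI method table each, while
 * every other class is routed through a single method table per class,
 * with IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE (0x81) aliased to slot 0 by
 * convert_mgmt_class() since class 0 is reserved.
 */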
/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1)
		goto error1;

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
		goto error1;

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
			goto error1;
		if (!recv_handler)
			goto error1;
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				goto error1;
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui))
				goto error1;
		}
		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kmalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}
	memset(mad_agent_priv, 0, sizeof *mad_agent_priv);

	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
						 IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mad_agent_priv->agent.mr)) {
		ret = ERR_PTR(-ENOMEM);
		goto error2;
	}

	if (mad_reg_req) {
		reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
		/* Make a copy of the MAD registration request */
		memcpy(reg_req, mad_reg_req, sizeof *reg_req);
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							  mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions,
		  mad_agent_priv);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_waitqueue_head(&mad_agent_priv->wait);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	/* Deregister the MR before freeing the structure that owns it */
	ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
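/*
 * Illustrative sketch only (not part of this file): a GSI client
 * registering to receive Performance Management GET/SET requests.
 * The handler and context names are hypothetical; error handling is
 * abbreviated.
 *
 *	struct ib_mad_reg_req reg_req;
 *	struct ib_mad_agent *agent;
 *
 *	memset(&reg_req, 0, sizeof reg_req);
 *	reg_req.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
 *	reg_req.mgmt_class_version = 1;
 *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *	set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
 *
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
 *				      &reg_req, 0, my_send_handler,
 *				      my_recv_handler, my_context);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */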
static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}
static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = kmalloc(sizeof mad_snoop_priv *
					  (qp_info->snoop_table_size + 1),
					  GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}
		if (qp_info->snoop_table) {
			memcpy(new_snoop_table, qp_info->snoop_table,
			       sizeof mad_snoop_priv *
			       qp_info->snoop_table_size);
			kfree(qp_info->snoop_table);
		}
		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}
struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kmalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	memset(mad_snoop_priv, 0, sizeof *mad_snoop_priv);
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_waitqueue_head(&mad_snoop_priv->wait);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);
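/*
 * Illustrative sketch (handler names hypothetical): snooping all received
 * MADs and all send completions on a port's GSI QP, e.g. for a MAD tracer:
 *
 *	agent = ib_register_mad_snoop(device, port_num, IB_QPT_GSI,
 *				      IB_MAD_SNOOP_RECVS |
 *				      IB_MAD_SNOOP_SEND_COMPLETIONS,
 *				      my_snoop_handler, my_recv_handler,
 *				      my_context);
 */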
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	atomic_dec(&mad_agent_priv->refcount);
	wait_event(mad_agent_priv->wait,
		   !atomic_read(&mad_agent_priv->refcount));

	kfree(mad_agent_priv->reg_req);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
}
static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	atomic_dec(&mad_snoop_priv->refcount);
	wait_event(mad_snoop_priv->wait,
		   !atomic_read(&mad_snoop_priv->refcount));

	kfree(mad_snoop_priv);
}
/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
static inline int response_mad(struct ib_mad *mad)
{
	/* Trap represses are responses although response bit is reset */
	return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		(mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
}

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}
static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_send_wr *send_wr,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_wr, mad_send_wc);
		if (atomic_dec_and_test(&mad_snoop_priv->refcount))
			wake_up(&mad_snoop_priv->wait);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
						   mad_recv_wc);
		if (atomic_dec_and_test(&mad_snoop_priv->refcount))
			wake_up(&mad_snoop_priv->wait);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
			 struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_id = wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp_num = IB_QP0;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}
/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_smp *smp,
				  struct ib_send_wr *send_wr)
{
	int ret;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	struct ib_wc mad_wc;

	if (!smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
		ret = -EINVAL;
		printk(KERN_ERR PFX "Invalid directed route\n");
		goto out;
	}
	/* Check to post send on QP or process locally */
	ret = smi_check_local_dr_smp(smp, device, port_num);
	if (!ret || !device->process_mad)
		goto out;

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(send_wr->wr_id, smp->dr_slid, send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (struct ib_mad *)smp,
				  (struct ib_mad *)&mad_priv->mad);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (response_mad(&mad_priv->mad.mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			mad_priv->mad.mad.mad_hdr.tid =
				((struct ib_mad *)smp)->mad_hdr.tid;
			recv_mad_agent = find_mad_agent(port_priv,
							&mad_priv->mad.mad);
		}
		if (!port_priv || !recv_mad_agent) {
			kmem_cache_free(ib_mad_cache, mad_priv);
			kfree(local);
			ret = 0;
			goto out;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kmem_cache_free(ib_mad_cache, mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->send_wr = *send_wr;
	local->send_wr.sg_list = local->sg_list;
	memcpy(local->sg_list, send_wr->sg_list,
	       sizeof *send_wr->sg_list * send_wr->num_sge);
	local->send_wr.next = NULL;
	local->tid = send_wr->wr.ud.mad_hdr->tid;
	local->wr_id = send_wr->wr_id;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}
static int get_buf_length(int hdr_len, int data_len)
{
	int seg_size, pad;

	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		if (pad == seg_size)
			pad = 0;
	} else
		pad = seg_size;
	return hdr_len + data_len + pad;
}
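/*
 * Worked example: with an hdr_len of 56 (e.g. an SA header), a 256-byte
 * MAD leaves a 200-byte data segment.  For data_len 500, pad is
 * 200 - (500 % 200) = 100, so the buffer length is 56 + 500 + 100 = 656;
 * i.e. the payload is rounded up to a whole number of RMPP segments.
 */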
struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    struct ib_ah *ah, int rmpp_active,
					    int hdr_len, int data_len,
					    unsigned int __nocast gfp_mask)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *send_buf;
	int buf_size;
	void *buf;

	mad_agent_priv = container_of(mad_agent,
				      struct ib_mad_agent_private, agent);
	buf_size = get_buf_length(hdr_len, data_len);

	if ((!mad_agent->rmpp_version &&
	     (rmpp_active || buf_size > sizeof(struct ib_mad))) ||
	    (!rmpp_active && buf_size > sizeof(struct ib_mad)))
		return ERR_PTR(-EINVAL);

	buf = kmalloc(sizeof *send_buf + buf_size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);
	memset(buf, 0, sizeof *send_buf + buf_size);

	send_buf = buf + buf_size;
	send_buf->mad = buf;

	send_buf->sge.addr = dma_map_single(mad_agent->device->dma_device,
					    buf, buf_size, DMA_TO_DEVICE);
	pci_unmap_addr_set(send_buf, mapping, send_buf->sge.addr);
	send_buf->sge.length = buf_size;
	send_buf->sge.lkey = mad_agent->mr->lkey;

	send_buf->send_wr.wr_id = (unsigned long) send_buf;
	send_buf->send_wr.sg_list = &send_buf->sge;
	send_buf->send_wr.num_sge = 1;
	send_buf->send_wr.opcode = IB_WR_SEND;
	send_buf->send_wr.send_flags = IB_SEND_SIGNALED;
	send_buf->send_wr.wr.ud.ah = ah;
	send_buf->send_wr.wr.ud.mad_hdr = &send_buf->mad->mad_hdr;
	send_buf->send_wr.wr.ud.remote_qpn = remote_qpn;
	send_buf->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	send_buf->send_wr.wr.ud.pkey_index = pkey_index;

	if (rmpp_active) {
		struct ib_rmpp_mad *rmpp_mad;
		rmpp_mad = (struct ib_rmpp_mad *)send_buf->mad;
		rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(hdr_len -
			offsetof(struct ib_rmpp_mad, data) + data_len);
		rmpp_mad->rmpp_hdr.rmpp_version = mad_agent->rmpp_version;
		rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
		ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr,
				  IB_MGMT_RMPP_FLAG_ACTIVE);
	}

	send_buf->mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
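/*
 * Illustrative sketch (not part of this file): allocating and posting a
 * single non-RMPP MAD.  "ah" is an address handle the caller created;
 * IB_MGMT_MAD_HDR/IB_MGMT_MAD_DATA give the header/data split for a
 * plain 256-byte MAD.
 *
 *	struct ib_mad_send_buf *msg;
 *	struct ib_send_wr *bad_wr;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, ah, 0,
 *				 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 *				 GFP_KERNEL);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	... fill in msg->mad, set msg->send_wr.wr.ud.timeout_ms ...
 *	if (ib_post_send_mad(agent, &msg->send_wr, &bad_wr))
 *		ib_free_send_mad(msg);
 */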
void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);

	dma_unmap_single(send_buf->mad_agent->device->dma_device,
			 pci_unmap_addr(send_buf, mapping),
			 send_buf->sge.length, DMA_TO_DEVICE);
	kfree(send_buf->mad);

	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		wake_up(&mad_agent_priv->wait);
}
EXPORT_SYMBOL(ib_free_send_mad);
int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_send_wr *bad_send_wr;
	struct list_head *list;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_send_wr->mad_agent_priv->agent.qp,
				   &mad_send_wr->send_wr, &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	return ret;
}
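/*
 * Flow-control note: sends beyond the QP's max_active limit are parked on
 * overflow_list rather than posted; ib_mad_send_done_handler() moves the
 * next queued send onto the hardware queue as completions free up slots,
 * so the MAD layer never overruns the send queue it sized at QP creation.
 */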
/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_agent *mad_agent,
		     struct ib_send_wr *send_wr,
		     struct ib_send_wr **bad_send_wr)
{
	int ret = -EINVAL;
	struct ib_mad_agent_private *mad_agent_priv;

	/* Validate supplied parameters */
	if (!bad_send_wr)
		goto error1;

	if (!mad_agent || !send_wr)
		goto error2;

	if (!mad_agent->send_handler)
		goto error2;

	mad_agent_priv = container_of(mad_agent,
				      struct ib_mad_agent_private,
				      agent);

	/* Walk list of send WRs and post each on send list */
	while (send_wr) {
		unsigned long			flags;
		struct ib_send_wr		*next_send_wr;
		struct ib_mad_send_wr_private	*mad_send_wr;
		struct ib_smp			*smp;

		/* Validate more parameters */
		if (send_wr->num_sge > IB_MAD_SEND_REQ_MAX_SG)
			goto error2;

		if (send_wr->wr.ud.timeout_ms && !mad_agent->recv_handler)
			goto error2;

		if (!send_wr->wr.ud.mad_hdr) {
			printk(KERN_ERR PFX "MAD header must be supplied "
			       "in WR %p\n", send_wr);
			goto error2;
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_wr = (struct ib_send_wr *)send_wr->next;

		smp = (struct ib_smp *)send_wr->wr.ud.mad_hdr;
		if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv, smp,
						     send_wr);
			if (ret < 0)		/* error */
				goto error2;
			else if (ret == 1)	/* locally consumed */
				goto next;
		}

		/* Allocate MAD send WR tracking structure */
		mad_send_wr = kmalloc(sizeof *mad_send_wr, GFP_ATOMIC);
		if (!mad_send_wr) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_send_wr_private\n");
			ret = -ENOMEM;
			goto error2;
		}
		memset(mad_send_wr, 0, sizeof *mad_send_wr);

		mad_send_wr->send_wr = *send_wr;
		mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
		memcpy(mad_send_wr->sg_list, send_wr->sg_list,
		       sizeof *send_wr->sg_list * send_wr->num_sge);
		mad_send_wr->wr_id = send_wr->wr_id;
		mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid;
		mad_send_wr->mad_agent_priv = mad_agent_priv;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_wr->wr.
							ud.timeout_ms);
		mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries;
		/* One reference for each work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_agent_priv->agent.rmpp_version) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error2;
		}
next:
		send_wr = next_send_wr;
	}
	return 0;

error2:
	*bad_send_wr = send_wr;
error1:
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
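/*
 * On failure *bad_send_wr points at the first work request that was not
 * posted, mirroring ib_post_send() semantics: WRs earlier in the chain
 * have already been handed to the QP (or consumed locally) and will
 * complete through the normal completion path.
 */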
/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
					&free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kmem_cache_free(ib_mad_cache, priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);
struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		if ((*method)->agent[i]) {
			printk(KERN_ERR PFX "Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kmalloc(sizeof **method, GFP_ATOMIC);
	if (!*method) {
		printk(KERN_ERR PFX "No memory for "
		       "ib_mad_mgmt_method_table\n");
		return -ENOMEM;
	}
	/* Clear management method table */
	memset(*method, 0, sizeof **method);

	return 0;
}
/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;

	return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kmalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_class_table\n");
			ret = -ENOMEM;
			goto error1;
		}
		/* Clear management class table */
		memset(*class, 0, sizeof(**class));
		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for (i = find_first_bit(mad_reg_req->method_mask,
				IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		(*method)->agent[i] = agent_priv;
	}
	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kmalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class_table\n");
			goto error1;
		}
		/* Clear management vendor class table */
		memset(vendor, 0, sizeof(*vendor));
		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kmalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class\n");
			goto error2;
		}
		memset(vendor_class, 0, sizeof(*vendor_class));
		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			BUG_ON(!*method);
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			BUG_ON(*method);
			/* Allocate method table for this OUI */
			if ((ret = allocate_method_table(method)))
				goto error3;
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	printk(KERN_ERR PFX "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for (i = find_first_bit(mad_reg_req->method_mask,
				IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		(*method)->agent[i] = agent_priv;
	}
	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}
static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req) {
		goto out;
	}

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;
	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left ? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left ? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}
static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       struct ib_mad *mad)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (response_mad(mad)) {
		u32 hi_tid;
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD
		 */
		hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list,
				    agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
				mad_agent = entry;
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
			class = port_priv->version[
					mad->mad_hdr.class_version].class;
			if (!class)
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad->mad_hdr.mgmt_class)];
			if (method)
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad->mad_hdr.class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad->mad_hdr.mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (struct ib_vendor_mad *)mad;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method)
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
		}
	}

	if (mad_agent) {
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			printk(KERN_NOTICE PFX "No receive handler for client "
			       "%p on port %d\n",
			       &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}
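/*
 * TID layout note: each agent is assigned a unique hi_tid
 * (++ib_mad_client_id) at registration, and clients are expected to put
 * that value in the upper 32 bits of the transaction IDs they send, so
 * the response routing above reduces to a list search on hi_tid.
 * Unsolicited MADs instead fall through to the (class, method[, OUI])
 * tables built at registration time.
 */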
static int validate_mad(struct ib_mad *mad, u32 qp_num)
{
	int valid = 0;

	/* Make sure MAD base version is understood */
	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
		printk(KERN_ERR PFX "MAD received with unsupported base "
		       "version %d\n", mad->mad_hdr.base_version);
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}
static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
		       struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}

struct ib_mad_send_wr_private*
ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (mad_send_wr->tid == tid)
			return mad_send_wr;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_data_mad(mad_agent_priv,
				mad_send_wr->send_wr.wr.ud.mad_hdr) &&
		    mad_send_wr->tid == tid && mad_send_wr->timeout) {
			/* Verify request has not been canceled */
			return (mad_send_wr->status == IB_WC_SUCCESS) ?
				mad_send_wr : NULL;
		}
	}
	return NULL;
}
void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1) {
		list_del(&mad_send_wr->agent_list);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->done_list);
	}
}
static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	u64 tid;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (mad_agent_priv->agent.rmpp_version) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			if (atomic_dec_and_test(&mad_agent_priv->refcount))
				wake_up(&mad_agent_priv->wait);
			return;
		}
	}

	/* Complete corresponding request */
	if (response_mad(mad_recv_wc->recv_buf.mad)) {
		tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid;
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, tid);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			ib_free_recv_mad(mad_recv_wc);
			if (atomic_dec_and_test(&mad_agent_priv->refcount))
				wake_up(&mad_agent_priv->wait);
			return;
		}
		ib_mark_mad_done(mad_send_wr);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		/* Defined behavior is to complete response before request */
		mad_recv_wc->wc->wr_id = mad_send_wr->wr_id;
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		atomic_dec(&mad_agent_priv->refcount);

		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.wr_id = mad_send_wr->wr_id;
		ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		if (atomic_dec_and_test(&mad_agent_priv->refcount))
			wake_up(&mad_agent_priv->wait);
	}
}
static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_agent_private *mad_agent;

	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
	if (!response)
		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
		       "for response buffer\n");

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	dma_unmap_single(port_priv->device->dma_device,
			 pci_unmap_addr(&recv->header, mapping),
			 sizeof(struct ib_mad_private) -
			 sizeof(struct ib_mad_private_header),
			 DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;
	recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
	recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
		goto out;

	if (recv->mad.mad.mad_hdr.mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (!smi_handle_dr_smp_recv(&recv->mad.smp,
					    port_priv->device->node_type,
					    port_priv->port_num,
					    port_priv->device->phys_port_cnt))
			goto out;
		if (!smi_check_forward_dr_smp(&recv->mad.smp))
			goto local;
		if (!smi_handle_dr_smp_send(&recv->mad.smp,
					    port_priv->device->node_type,
					    port_priv->port_num))
			goto out;
		if (!smi_check_local_dr_smp(&recv->mad.smp,
					    port_priv->device,
					    port_priv->port_num))
			goto out;
	}

local:
	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		int ret;

		if (!response) {
			printk(KERN_ERR PFX "No memory for response MAD\n");
			/*
			 * Is it better to assume that
			 * it wouldn't be processed ?
			 */
			goto out;
		}

		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     &recv->mad.mad,
						     &response->mad.mad);
		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				/* Send response */
				if (!agent_send(response, &recv->grh, wc,
						port_priv->device,
						port_priv->port_num))
					response = NULL;
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		if (recv)
			kmem_cache_free(ib_mad_cache, recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			cancel_delayed_work(&mad_agent_priv->timed_work);
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
		}
	}
}
static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						struct ib_mad_send_wr_private,
						agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				   &mad_agent_priv->timed_work, delay);
	}
}
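/*
 * The wait list is kept sorted by absolute timeout (earliest expiry at
 * the head), so timeout_sends() only ever needs to examine the front of
 * the list and a single delayed work item covers the whole agent.
 */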
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}
/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (mad_agent_priv->agent.rmpp_version) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret != IB_RMPP_RESULT_INTERNAL)
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		wake_up(&mad_agent_priv->wait);

	kfree(mad_send_wr);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_send_wr_private	*mad_send_wr, *queued_send_wr;
	struct ib_mad_list_head		*mad_list;
	struct ib_mad_qp_info		*qp_info;
	struct ib_mad_queue		*send_queue;
	struct ib_send_wr		*bad_send_wr;
	unsigned long flags;
	int ret;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_del(&mad_list->list);
		list_add_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	/* Restore client wr_id in WC and complete send */
	wc->wr_id = mad_send_wr->wr_id;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_wr,
			   (struct ib_mad_send_wc *)wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, (struct ib_mad_send_wc *)wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
				   &bad_send_wr);
		if (ret) {
			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}
static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}
static void mad_error_handler(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/* Determine if failure was a send or receive */
	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	if (mad_list->mad_queue == &qp_info->recv_queue)
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
					   &bad_send_wr);
			if (ret)
				ib_mad_send_done_handler(port_priv, wc);
		} else
			ib_mad_send_done_handler(port_priv, wc);
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				printk(KERN_ERR PFX "mad_error_handler - "
				       "ib_modify_qp to RTS : %d\n", ret);
			else
				mark_sends_for_retry(qp_info);
		}
		ib_mad_send_done_handler(port_priv, wc);
	}
}
/*
 * IB MAD completion callback
 */
static void ib_mad_completion_handler(void *data)
{
	struct ib_mad_port_private *port_priv;
	struct ib_wc wc;

	port_priv = (struct ib_mad_port_private *)data;
	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_mad_send_done_handler(port_priv, &wc);
				break;
			case IB_WC_RECV:
				ib_mad_recv_done_handler(port_priv, &wc);
				break;
			default:
				BUG_ON(1);
				break;
			}
		} else
			mad_error_handler(port_priv, &wc);
	}
}
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	/* Empty local completion list as well */
	list_splice_init(&mad_agent_priv->local_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.wr_id = mad_send_wr->wr_id;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		list_del(&mad_send_wr->agent_list);
		kfree(mad_send_wr);
		atomic_dec(&mad_agent_priv->refcount);
	}
}
static struct ib_mad_send_wr_private*
find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv, u64 wr_id)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (mad_send_wr->wr_id == wr_id)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_data_mad(mad_agent_priv,
				mad_send_wr->send_wr.wr.ud.mad_hdr) &&
		    mad_send_wr->wr_id == wr_id)
			return mad_send_wr;
	}
	return NULL;
}
int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_by_wr_id(mad_agent_priv, wr_id);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_wr.wr.ud.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);

void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id)
{
	ib_modify_mad(mad_agent, wr_id, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);
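/*
 * Illustrative usage: a client abandoning an outstanding request cancels
 * it by wr_id; the send then completes to its send_handler with status
 * IB_WC_WR_FLUSH_ERR rather than disappearing silently:
 *
 *	ib_cancel_mad(agent, wr_id);
 */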
/* Complete MADs that were routed locally: receive side first, then the send */
static void local_completions(void *data)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int recv = 0;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;

	mad_agent_priv = (struct ib_mad_agent_private *)data;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		if (local->mad_priv) {
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
				goto local_send_completion;
			}

			recv = 1;
			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(local->wr_id, IB_LID_PERMISSIVE,
				     0 /* pkey index */,
				     recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;
			local->mad_priv->header.recv_wc.mad_len =
						sizeof(struct ib_mad);
			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						&local->mad_priv->mad.mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.wr_id = local->wr_id;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info, &local->send_wr,
				   &mad_send_wc,
				   IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_del(&local->completion_list);
		atomic_dec(&mad_agent_priv->refcount);
		if (!recv)
			kmem_cache_free(ib_mad_cache, local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
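
/*
 * Retry a timed-out send: consume one retry, rearm the timeout, and
 * repost the request, letting the RMPP layer intervene for RMPP MADs.
 */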
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries--)
		return -ETIMEDOUT;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_wr.
						wr.ud.timeout_ms);

	if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}
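
/*
 * Work handler that expires sends on the wait list: requests with
 * retries remaining are resent, the rest are completed back to the
 * client (typically with IB_WC_RESP_TIMEOUT_ERR).  Requeues itself
 * for the next pending timeout.
 */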
static void timeout_sends(void *data)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = (struct ib_mad_agent_private *)data;
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.wr_id = mad_send_wr->wr_id;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
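
/*
 * CQ callback: typically invoked in interrupt context, so just kick
 * the per-port workqueue and do the real completion processing in
 * process context.
 */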
static void ib_mad_thread_completion_handler(struct ib_cq *cq)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;

	queue_work(port_priv->wq, &port_priv->work);
}
/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
	sg_list.lkey = (*qp_info->port_priv->mr).lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			/* Reuse the buffer handed in by the caller first */
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
			if (!mad_priv) {
				printk(KERN_ERR PFX "No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.addr = dma_map_single(qp_info->port_priv->
						device->dma_device,
					      &mad_priv->grh,
					      sizeof *mad_priv -
						sizeof mad_priv->header,
					      DMA_FROM_DEVICE);
		pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
		mad_priv->header.mad_list.mad_queue = recv_queue;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			dma_unmap_single(qp_info->port_priv->device->dma_device,
					 pci_unmap_addr(&mad_priv->header,
							mapping),
					 sizeof *mad_priv -
						sizeof mad_priv->header,
					 DMA_FROM_DEVICE);
			kmem_cache_free(ib_mad_cache, mad_priv);
			printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);	/* keep refilling until the queue is at max_active */

	return ret;
}
/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		dma_unmap_single(qp_info->port_priv->device->dma_device,
				 pci_unmap_addr(&recv->header, mapping),
				 sizeof(struct ib_mad_private) -
				 sizeof(struct ib_mad_private_header),
				 DMA_FROM_DEVICE);
		kmem_cache_free(ib_mad_cache, recv);
	}

	qp_info->recv_queue.count = 0;
}
/*
 * Start the port: bring both core QPs through Init -> RTR -> RTS
 * and post the initial set of receive WRs
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = 0;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "INIT: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTR: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTS: %d\n", i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		printk(KERN_ERR PFX "Failed to request completion "
		       "notification: %d\n", ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}
static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}
static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}
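
/*
 * Create one of the port's special QPs (SMI or GSI) against the
 * shared completion queue
 */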
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
	qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
		       get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
	qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
	return 0;

error:
	return ret;
}
static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	ib_destroy_qp(qp_info->qp);
	if (qp_info->snoop_table)
		kfree(qp_info->snoop_table);
}
/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];

	/* Create new device info */
	port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}
	memset(port_priv, 0, sizeof *port_priv);
	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
	port_priv->cq = ib_create_cq(port_priv->device,
				     (ib_comp_handler)
					ib_mad_thread_completion_handler,
				     NULL, port_priv, cq_size);
	if (IS_ERR(port_priv->cq)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device);
	if (IS_ERR(port_priv->pd)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(port_priv->mr)) {
		printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
		ret = PTR_ERR(port_priv->mr);
		goto error5;
	}

	ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
	if (ret)
		goto error6;
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = create_singlethread_workqueue(name);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		printk(KERN_ERR PFX "Couldn't start port\n");
		goto error9;
	}

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
	return 0;

	/* Unwind in the reverse order of acquisition */
error9:
	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dereg_mr(port_priv->mr);
error5:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);
	return ret;
}
/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		printk(KERN_ERR PFX "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	/* Stop processing completions. */
	flush_workqueue(port_priv->wq);
	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dereg_mr(port_priv->mr);
	ib_dealloc_pd(port_priv->pd);
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}
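
/*
 * Invoked for each existing or newly added device: bring up MAD
 * services on every physical port.  A switch carries management
 * traffic only on port 0; CAs and routers use ports 1..phys_port_cnt.
 */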
static void ib_mad_init_device(struct ib_device *device)
{
	int num_ports, cur_port, i;

	if (device->node_type == IB_NODE_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		if (ib_mad_port_open(device, cur_port)) {
			printk(KERN_ERR PFX "Couldn't open %s port %d\n",
			       device->name, cur_port);
			goto error_device_open;
		}
		if (ib_agent_port_open(device, cur_port)) {
			printk(KERN_ERR PFX "Couldn't open %s port %d "
			       "for agents\n",
			       device->name, cur_port);
			goto error_device_open;
		}
	}
	return;

error_device_open:
	/* Close whatever was opened before the failure */
	while (i > 0) {
		cur_port--;
		if (ib_agent_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, cur_port);
		if (ib_mad_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, cur_port);
		i--;
	}
}
static void ib_mad_remove_device(struct ib_device *device)
{
	int i, num_ports, cur_port;

	if (device->node_type == IB_NODE_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		if (ib_agent_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, cur_port);
		if (ib_mad_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, cur_port);
	}
}
static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};
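
/*
 * Example (illustrative sketch, not part of this file): other kernel
 * consumers hook device discovery the same way.  The "my_*" names
 * below are hypothetical:
 *
 *	static void my_add_one(struct ib_device *device)
 *	{
 *		...
 *	}
 *
 *	static void my_remove_one(struct ib_device *device)
 *	{
 *		...
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name   = "my_client",
 *		.add    = my_add_one,
 *		.remove = my_remove_one
 *	};
 *
 * ib_register_client(&my_client) then invokes .add for every device
 * already present and for each one added later; .remove runs on
 * device removal and at ib_unregister_client().
 */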
static int __init ib_mad_init_module(void)
{
	int ret;

	spin_lock_init(&ib_mad_port_list_lock);
	spin_lock_init(&ib_agent_port_list_lock);

	ib_mad_cache = kmem_cache_create("ib_mad",
					 sizeof(struct ib_mad_private),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL,
					 NULL);
	if (!ib_mad_cache) {
		printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
		ret = -ENOMEM;
		goto error1;
	}

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
		ret = -EINVAL;
		goto error2;
	}

	return 0;

error2:
	kmem_cache_destroy(ib_mad_cache);
error1:
	return ret;
}
static void __exit ib_mad_cleanup_module(void)
{
	ib_unregister_client(&mad_client);

	/* The cache must be empty (no outstanding MADs) for destroy to succeed */
	if (kmem_cache_destroy(ib_mad_cache)) {
		printk(KERN_DEBUG PFX "Failed to destroy ib_mad cache\n");
	}
}
module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);