/* drivers/infiniband/core/mad.c */
/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dma-mapping.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");

static struct kmem_cache *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);


/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
                         struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
                                        struct ib_mad_port_private *port_priv,
                                        struct ib_mad *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                              struct ib_mad_agent_private *agent_priv,
                              u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                           struct ib_mad_agent_private *agent_priv);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes the caller holds ib_mad_port_list_lock.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
        struct ib_mad_port_private *entry;

        list_for_each_entry(entry, &ib_mad_port_list, port_list) {
                if (entry->device == device && entry->port_num == port_num)
                        return entry;
        }
        return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
        struct ib_mad_port_private *entry;
        unsigned long flags;

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        entry = __ib_get_mad_port(device, port_num);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
        /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
        return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
                0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
        switch (qp_type) {
        case IB_QPT_SMI:
                return 0;
        case IB_QPT_GSI:
                return 1;
        default:
                return -1;
        }
}

static int vendor_class_index(u8 mgmt_class)
{
        return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
        if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
            (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
                return 0;
        return 1;
}

static int is_vendor_oui(char *oui)
{
        if (oui[0] || oui[1] || oui[2])
                return 1;
        return 0;
}

static int is_vendor_method_in_use(
                struct ib_mad_mgmt_vendor_class *vendor_class,
                struct ib_mad_reg_req *mad_reg_req)
{
        struct ib_mad_mgmt_method_table *method;
        int i;

        for (i = 0; i < MAX_MGMT_OUI; i++) {
                if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
                        method = vendor_class->method_table[i];
                        if (method) {
                                if (method_in_use(&method, mad_reg_req))
                                        return 1;
                                else
                                        break;
                        }
                }
        }
        return 0;
}

int ib_response_mad(struct ib_mad *mad)
{
        return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
                (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
                ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
                 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);
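
/*
 * A hedged usage sketch (illustrative, not part of this file; handler
 * names prefixed my_ are hypothetical): a client's receive handler might
 * use ib_response_mad() to separate answers to its own requests from
 * unsolicited MADs, e.g.:
 *
 *      static void my_recv_handler(struct ib_mad_agent *agent,
 *                                  struct ib_mad_recv_wc *wc)
 *      {
 *              if (ib_response_mad(wc->recv_buf.mad))
 *                      ;       // match against an outstanding send by TID
 *              else
 *                      ;       // process as an unsolicited request
 *              ib_free_recv_mad(wc);
 *      }
 */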

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
                                           u8 port_num,
                                           enum ib_qp_type qp_type,
                                           struct ib_mad_reg_req *mad_reg_req,
                                           u8 rmpp_version,
                                           ib_mad_send_handler send_handler,
                                           ib_mad_recv_handler recv_handler,
                                           void *context)
{
        struct ib_mad_port_private *port_priv;
        struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_reg_req *reg_req = NULL;
        struct ib_mad_mgmt_class_table *class;
        struct ib_mad_mgmt_vendor_class_table *vendor;
        struct ib_mad_mgmt_vendor_class *vendor_class;
        struct ib_mad_mgmt_method_table *method;
        int ret2, qpn;
        unsigned long flags;
        u8 mgmt_class, vclass;

        /* Validate parameters */
        qpn = get_spl_qp_index(qp_type);
        if (qpn == -1)
                goto error1;

        if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
                goto error1;

        /* Validate MAD registration request if supplied */
        if (mad_reg_req) {
                if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
                        goto error1;
                if (!recv_handler)
                        goto error1;
                if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
                        /*
                         * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
                         * one in this range currently allowed
                         */
                        if (mad_reg_req->mgmt_class !=
                            IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                                goto error1;
                } else if (mad_reg_req->mgmt_class == 0) {
                        /*
                         * Class 0 is reserved in IBA and is used for
                         * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
                         */
                        goto error1;
                } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
                        /*
                         * If class is in "new" vendor range,
                         * ensure supplied OUI is not zero
                         */
                        if (!is_vendor_oui(mad_reg_req->oui))
                                goto error1;
                }
                /* Make sure class supplied is consistent with RMPP */
                if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
                        if (rmpp_version)
                                goto error1;
                }
                /* Make sure class supplied is consistent with QP type */
                if (qp_type == IB_QPT_SMI) {
                        if ((mad_reg_req->mgmt_class !=
                                        IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
                            (mad_reg_req->mgmt_class !=
                                        IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
                                goto error1;
                } else {
                        if ((mad_reg_req->mgmt_class ==
                                        IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
                            (mad_reg_req->mgmt_class ==
                                        IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
                                goto error1;
                }
        } else {
                /* No registration request supplied */
                if (!send_handler)
                        goto error1;
        }

        /* Validate device and port */
        port_priv = ib_get_mad_port(device, port_num);
        if (!port_priv) {
                ret = ERR_PTR(-ENODEV);
                goto error1;
        }

        /* Allocate structures */
        mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
        if (!mad_agent_priv) {
                ret = ERR_PTR(-ENOMEM);
                goto error1;
        }

        mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
                                                 IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(mad_agent_priv->agent.mr)) {
                ret = ERR_PTR(-ENOMEM);
                goto error2;
        }

        if (mad_reg_req) {
                reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
                if (!reg_req) {
                        ret = ERR_PTR(-ENOMEM);
                        goto error3;
                }
                /* Make a copy of the MAD registration request */
                memcpy(reg_req, mad_reg_req, sizeof *reg_req);
        }

        /* Now, fill in the various structures */
        mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
        mad_agent_priv->reg_req = reg_req;
        mad_agent_priv->agent.rmpp_version = rmpp_version;
        mad_agent_priv->agent.device = device;
        mad_agent_priv->agent.recv_handler = recv_handler;
        mad_agent_priv->agent.send_handler = send_handler;
        mad_agent_priv->agent.context = context;
        mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
        mad_agent_priv->agent.port_num = port_num;

        spin_lock_irqsave(&port_priv->reg_lock, flags);
        mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

        /*
         * Make sure MAD registration (if supplied)
         * is non overlapping with any existing ones
         */
        if (mad_reg_req) {
                mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
                if (!is_vendor_class(mgmt_class)) {
                        class = port_priv->version[mad_reg_req->
                                                   mgmt_class_version].class;
                        if (class) {
                                method = class->method_table[mgmt_class];
                                if (method) {
                                        if (method_in_use(&method,
                                                           mad_reg_req))
                                                goto error4;
                                }
                        }
                        ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
                                                  mgmt_class);
                } else {
                        /* "New" vendor class range */
                        vendor = port_priv->version[mad_reg_req->
                                                    mgmt_class_version].vendor;
                        if (vendor) {
                                vclass = vendor_class_index(mgmt_class);
                                vendor_class = vendor->vendor_class[vclass];
                                if (vendor_class) {
                                        if (is_vendor_method_in_use(
                                                        vendor_class,
                                                        mad_reg_req))
                                                goto error4;
                                }
                        }
                        ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
                }
                if (ret2) {
                        ret = ERR_PTR(ret2);
                        goto error4;
                }
        }

        /* Add mad agent into port's agent list */
        list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
        spin_unlock_irqrestore(&port_priv->reg_lock, flags);

        spin_lock_init(&mad_agent_priv->lock);
        INIT_LIST_HEAD(&mad_agent_priv->send_list);
        INIT_LIST_HEAD(&mad_agent_priv->wait_list);
        INIT_LIST_HEAD(&mad_agent_priv->done_list);
        INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
        INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
        INIT_LIST_HEAD(&mad_agent_priv->local_list);
        INIT_WORK(&mad_agent_priv->local_work, local_completions);
        atomic_set(&mad_agent_priv->refcount, 1);
        init_completion(&mad_agent_priv->comp);

        return &mad_agent_priv->agent;

error4:
        spin_unlock_irqrestore(&port_priv->reg_lock, flags);
        kfree(reg_req);
error3:
        ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
        kfree(mad_agent_priv);
error1:
        return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
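
/*
 * A hedged usage sketch (illustrative only; my_* names are hypothetical):
 * a client wanting unsolicited PerfMgmt Get/Set MADs on the GSI QP could
 * register like this.  PerfMgmt is not an RMPP class, so rmpp_version
 * must be 0, per the validation above.
 *
 *      struct ib_mad_reg_req req = {
 *              .mgmt_class         = IB_MGMT_CLASS_PERF_MGMT,
 *              .mgmt_class_version = 1,
 *      };
 *      struct ib_mad_agent *agent;
 *
 *      set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *      set_bit(IB_MGMT_METHOD_SET, req.method_mask);
 *      agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req,
 *                                    0, my_send_handler, my_recv_handler,
 *                                    my_context);
 *      if (IS_ERR(agent))
 *              return PTR_ERR(agent);
 */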

static inline int is_snooping_sends(int mad_snoop_flags)
{
        return (mad_snoop_flags &
                (/*IB_MAD_SNOOP_POSTED_SENDS |
                 IB_MAD_SNOOP_RMPP_SENDS |*/
                 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
                 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
        return (mad_snoop_flags &
                (IB_MAD_SNOOP_RECVS /*|
                 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
                                struct ib_mad_snoop_private *mad_snoop_priv)
{
        struct ib_mad_snoop_private **new_snoop_table;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        /* Check for empty slot in array. */
        for (i = 0; i < qp_info->snoop_table_size; i++)
                if (!qp_info->snoop_table[i])
                        break;

        if (i == qp_info->snoop_table_size) {
                /* Grow table by one entry. */
                new_snoop_table = kmalloc(sizeof mad_snoop_priv *
                                          (qp_info->snoop_table_size + 1),
                                          GFP_ATOMIC);
                if (!new_snoop_table) {
                        i = -ENOMEM;
                        goto out;
                }
                if (qp_info->snoop_table) {
                        memcpy(new_snoop_table, qp_info->snoop_table,
                               sizeof mad_snoop_priv *
                               qp_info->snoop_table_size);
                        kfree(qp_info->snoop_table);
                }
                qp_info->snoop_table = new_snoop_table;
                qp_info->snoop_table_size++;
        }
        qp_info->snoop_table[i] = mad_snoop_priv;
        atomic_inc(&qp_info->snoop_count);
out:
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
        return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
                                           u8 port_num,
                                           enum ib_qp_type qp_type,
                                           int mad_snoop_flags,
                                           ib_mad_snoop_handler snoop_handler,
                                           ib_mad_recv_handler recv_handler,
                                           void *context)
{
        struct ib_mad_port_private *port_priv;
        struct ib_mad_agent *ret;
        struct ib_mad_snoop_private *mad_snoop_priv;
        int qpn;

        /* Validate parameters */
        if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
            (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
                ret = ERR_PTR(-EINVAL);
                goto error1;
        }
        qpn = get_spl_qp_index(qp_type);
        if (qpn == -1) {
                ret = ERR_PTR(-EINVAL);
                goto error1;
        }
        port_priv = ib_get_mad_port(device, port_num);
        if (!port_priv) {
                ret = ERR_PTR(-ENODEV);
                goto error1;
        }
        /* Allocate structures */
        mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
        if (!mad_snoop_priv) {
                ret = ERR_PTR(-ENOMEM);
                goto error1;
        }

        /* Now, fill in the various structures */
        mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
        mad_snoop_priv->agent.device = device;
        mad_snoop_priv->agent.recv_handler = recv_handler;
        mad_snoop_priv->agent.snoop_handler = snoop_handler;
        mad_snoop_priv->agent.context = context;
        mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
        mad_snoop_priv->agent.port_num = port_num;
        mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
        init_completion(&mad_snoop_priv->comp);
        mad_snoop_priv->snoop_index = register_snoop_agent(
                                                &port_priv->qp_info[qpn],
                                                mad_snoop_priv);
        if (mad_snoop_priv->snoop_index < 0) {
                ret = ERR_PTR(mad_snoop_priv->snoop_index);
                goto error2;
        }

        atomic_set(&mad_snoop_priv->refcount, 1);
        return &mad_snoop_priv->agent;

error2:
        kfree(mad_snoop_priv);
error1:
        return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);
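
/*
 * Illustrative sketch (hypothetical my_* names): a diagnostic module could
 * observe completed sends and received MADs on a port's GSI QP with:
 *
 *      agent = ib_register_mad_snoop(device, port_num, IB_QPT_GSI,
 *                                    IB_MAD_SNOOP_SEND_COMPLETIONS |
 *                                    IB_MAD_SNOOP_RECVS,
 *                                    my_snoop_handler, my_recv_handler,
 *                                    my_context);
 */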

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
        if (atomic_dec_and_test(&mad_agent_priv->refcount))
                complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
        if (atomic_dec_and_test(&mad_snoop_priv->refcount))
                complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
        struct ib_mad_port_private *port_priv;
        unsigned long flags;

        /* Note that we could still be handling received MADs */

        /*
         * Canceling all sends results in dropping received response
         * MADs, preventing us from queuing additional work
         */
        cancel_mads(mad_agent_priv);
        port_priv = mad_agent_priv->qp_info->port_priv;
        cancel_delayed_work(&mad_agent_priv->timed_work);

        spin_lock_irqsave(&port_priv->reg_lock, flags);
        remove_mad_reg_req(mad_agent_priv);
        list_del(&mad_agent_priv->agent_list);
        spin_unlock_irqrestore(&port_priv->reg_lock, flags);

        flush_workqueue(port_priv->wq);
        ib_cancel_rmpp_recvs(mad_agent_priv);

        deref_mad_agent(mad_agent_priv);
        wait_for_completion(&mad_agent_priv->comp);

        kfree(mad_agent_priv->reg_req);
        ib_dereg_mr(mad_agent_priv->agent.mr);
        kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
        struct ib_mad_qp_info *qp_info;
        unsigned long flags;

        qp_info = mad_snoop_priv->qp_info;
        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
        atomic_dec(&qp_info->snoop_count);
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

        deref_snoop_agent(mad_snoop_priv);
        wait_for_completion(&mad_snoop_priv->comp);

        kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_snoop_private *mad_snoop_priv;

        /* If the TID is zero, the agent can only snoop. */
        if (mad_agent->hi_tid) {
                mad_agent_priv = container_of(mad_agent,
                                              struct ib_mad_agent_private,
                                              agent);
                unregister_mad_agent(mad_agent_priv);
        } else {
                mad_snoop_priv = container_of(mad_agent,
                                              struct ib_mad_snoop_private,
                                              agent);
                unregister_mad_snoop(mad_snoop_priv);
        }
        return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
        struct ib_mad_queue *mad_queue;
        unsigned long flags;

        BUG_ON(!mad_list->mad_queue);
        mad_queue = mad_list->mad_queue;
        spin_lock_irqsave(&mad_queue->lock, flags);
        list_del(&mad_list->list);
        mad_queue->count--;
        spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
                       struct ib_mad_send_buf *send_buf,
                       struct ib_mad_send_wc *mad_send_wc,
                       int mad_snoop_flags)
{
        struct ib_mad_snoop_private *mad_snoop_priv;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        for (i = 0; i < qp_info->snoop_table_size; i++) {
                mad_snoop_priv = qp_info->snoop_table[i];
                if (!mad_snoop_priv ||
                    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
                        continue;

                atomic_inc(&mad_snoop_priv->refcount);
                spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
                mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
                                                    send_buf, mad_send_wc);
                deref_snoop_agent(mad_snoop_priv);
                spin_lock_irqsave(&qp_info->snoop_lock, flags);
        }
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
                       struct ib_mad_recv_wc *mad_recv_wc,
                       int mad_snoop_flags)
{
        struct ib_mad_snoop_private *mad_snoop_priv;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        for (i = 0; i < qp_info->snoop_table_size; i++) {
                mad_snoop_priv = qp_info->snoop_table[i];
                if (!mad_snoop_priv ||
                    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
                        continue;

                atomic_inc(&mad_snoop_priv->refcount);
                spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
                mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
                                                   mad_recv_wc);
                deref_snoop_agent(mad_snoop_priv);
                spin_lock_irqsave(&qp_info->snoop_lock, flags);
        }
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

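/*
 * Build a synthetic receive work completion so that a directed-route SMP
 * processed locally (see handle_outgoing_dr_smp below) looks to the
 * consumer as if it had arrived off the wire.
 */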
static void build_smp_wc(struct ib_qp *qp,
                         u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
                         struct ib_wc *wc)
{
        memset(wc, 0, sizeof *wc);
        wc->wr_id = wr_id;
        wc->status = IB_WC_SUCCESS;
        wc->opcode = IB_WC_RECV;
        wc->pkey_index = pkey_index;
        wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
        wc->src_qp = IB_QP0;
        wc->qp = qp;
        wc->slid = slid;
        wc->sl = 0;
        wc->dlid_path_bits = 0;
        wc->port_num = port_num;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
                                  struct ib_mad_send_wr_private *mad_send_wr)
{
        int ret = 0;
        struct ib_smp *smp = mad_send_wr->send_buf.mad;
        unsigned long flags;
        struct ib_mad_local_private *local;
        struct ib_mad_private *mad_priv;
        struct ib_mad_port_private *port_priv;
        struct ib_mad_agent_private *recv_mad_agent = NULL;
        struct ib_device *device = mad_agent_priv->agent.device;
        u8 port_num;
        struct ib_wc mad_wc;
        struct ib_send_wr *send_wr = &mad_send_wr->send_wr;

        if (device->node_type == RDMA_NODE_IB_SWITCH &&
            smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                port_num = send_wr->wr.ud.port_num;
        else
                port_num = mad_agent_priv->agent.port_num;

        /*
         * Directed route handling starts if the initial LID routed part of
         * a request or the ending LID routed part of a response is empty.
         * If we are at the start of the LID routed part, don't update the
         * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
         */
        if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
             IB_LID_PERMISSIVE &&
             smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
             IB_SMI_DISCARD) {
                ret = -EINVAL;
                printk(KERN_ERR PFX "Invalid directed route\n");
                goto out;
        }

        /* Check to post send on QP or process locally */
        if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD)
                goto out;

        local = kmalloc(sizeof *local, GFP_ATOMIC);
        if (!local) {
                ret = -ENOMEM;
                printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
                goto out;
        }
        local->mad_priv = NULL;
        local->recv_mad_agent = NULL;
        mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
        if (!mad_priv) {
                ret = -ENOMEM;
                printk(KERN_ERR PFX "No memory for local response MAD\n");
                kfree(local);
                goto out;
        }

        build_smp_wc(mad_agent_priv->agent.qp,
                     send_wr->wr_id, be16_to_cpu(smp->dr_slid),
                     send_wr->wr.ud.pkey_index,
                     send_wr->wr.ud.port_num, &mad_wc);

        /* No GRH for DR SMP */
        ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
                                  (struct ib_mad *)smp,
                                  (struct ib_mad *)&mad_priv->mad);
        switch (ret) {
        case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
                if (ib_response_mad(&mad_priv->mad.mad) &&
                    mad_agent_priv->agent.recv_handler) {
                        local->mad_priv = mad_priv;
                        local->recv_mad_agent = mad_agent_priv;
                        /*
                         * Reference MAD agent until receive
                         * side of local completion handled
                         */
                        atomic_inc(&mad_agent_priv->refcount);
                } else
                        kmem_cache_free(ib_mad_cache, mad_priv);
                break;
        case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
                kmem_cache_free(ib_mad_cache, mad_priv);
                break;
        case IB_MAD_RESULT_SUCCESS:
                /* Treat like an incoming receive MAD */
                port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
                                            mad_agent_priv->agent.port_num);
                if (port_priv) {
                        mad_priv->mad.mad.mad_hdr.tid =
                                ((struct ib_mad *)smp)->mad_hdr.tid;
                        recv_mad_agent = find_mad_agent(port_priv,
                                                        &mad_priv->mad.mad);
                }
                if (!port_priv || !recv_mad_agent) {
                        kmem_cache_free(ib_mad_cache, mad_priv);
                        kfree(local);
                        ret = 0;
                        goto out;
                }
                local->mad_priv = mad_priv;
                local->recv_mad_agent = recv_mad_agent;
                break;
        default:
                kmem_cache_free(ib_mad_cache, mad_priv);
                kfree(local);
                ret = -EINVAL;
                goto out;
        }

        local->mad_send_wr = mad_send_wr;
        /* Reference MAD agent until send side of local completion handled */
        atomic_inc(&mad_agent_priv->refcount);
        /* Queue local completion to local list */
        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
        queue_work(mad_agent_priv->qp_info->port_priv->wq,
                   &mad_agent_priv->local_work);
        ret = 1;
out:
        return ret;
}

static int get_pad_size(int hdr_len, int data_len)
{
        int seg_size, pad;

        seg_size = sizeof(struct ib_mad) - hdr_len;
        if (data_len && seg_size) {
                pad = seg_size - data_len % seg_size;
                return pad == seg_size ? 0 : pad;
        } else
                return seg_size;
}
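
/*
 * Worked example (illustrative, not from the original source): an SA MAD
 * has hdr_len = IB_MGMT_SA_HDR (56 bytes), so seg_size = 256 - 56 = 200.
 * For data_len = 500, pad = 200 - (500 % 200) = 100, giving 600 bytes of
 * payload spread over three RMPP segments, with the last 100 bytes zeroed
 * by alloc_send_rmpp_list() below.
 */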

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_rmpp_segment *s, *t;

        list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
                list_del(&s->list);
                kfree(s);
        }
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
                                gfp_t gfp_mask)
{
        struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
        struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
        struct ib_rmpp_segment *seg = NULL;
        int left, seg_size, pad;

        send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
        seg_size = send_buf->seg_size;
        pad = send_wr->pad;

        /* Allocate data segments. */
        for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
                seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
                if (!seg) {
                        printk(KERN_ERR "alloc_send_rmpp_list: RMPP mem "
                               "alloc failed for len %zd, gfp %#x\n",
                               sizeof (*seg) + seg_size, gfp_mask);
                        free_send_rmpp_list(send_wr);
                        return -ENOMEM;
                }
                seg->num = ++send_buf->seg_count;
                list_add_tail(&seg->list, &send_wr->rmpp_list);
        }

        /* Zero any padding */
        if (pad)
                memset(seg->data + seg_size - pad, 0, pad);

        rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
                                          agent.rmpp_version;
        rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
        ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

        send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
                                        struct ib_rmpp_segment, list);
        send_wr->last_ack_seg = send_wr->cur_seg;
        return 0;
}

struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
                                           u32 remote_qpn, u16 pkey_index,
                                           int rmpp_active,
                                           int hdr_len, int data_len,
                                           gfp_t gfp_mask)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;
        int pad, message_size, ret, size;
        void *buf;

        mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
                                      agent);
        pad = get_pad_size(hdr_len, data_len);
        message_size = hdr_len + data_len + pad;

        if ((!mad_agent->rmpp_version &&
             (rmpp_active || message_size > sizeof(struct ib_mad))) ||
            (!rmpp_active && message_size > sizeof(struct ib_mad)))
                return ERR_PTR(-EINVAL);

        size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
        buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        mad_send_wr = buf + size;
        INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
        mad_send_wr->send_buf.mad = buf;
        mad_send_wr->send_buf.hdr_len = hdr_len;
        mad_send_wr->send_buf.data_len = data_len;
        mad_send_wr->pad = pad;

        mad_send_wr->mad_agent_priv = mad_agent_priv;
        mad_send_wr->sg_list[0].length = hdr_len;
        mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
        mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
        mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;

        mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
        mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
        mad_send_wr->send_wr.num_sge = 2;
        mad_send_wr->send_wr.opcode = IB_WR_SEND;
        mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
        mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
        mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
        mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

        if (rmpp_active) {
                ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
                if (ret) {
                        kfree(buf);
                        return ERR_PTR(ret);
                }
        }

        mad_send_wr->send_buf.mad_agent = mad_agent;
        atomic_inc(&mad_agent_priv->refcount);
        return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
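
/*
 * A hedged usage sketch (illustrative; variable names are hypothetical):
 * allocating a single-packet MAD on an agent registered without RMPP.
 * IB_MGMT_MAD_HDR (24 bytes) leaves 256 - 24 = 232 payload bytes in the
 * one MAD.
 *
 *      struct ib_mad_send_buf *msg;
 *
 *      msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *                               IB_MGMT_MAD_HDR,
 *                               sizeof(struct ib_mad) - IB_MGMT_MAD_HDR,
 *                               GFP_KERNEL);
 *      if (IS_ERR(msg))
 *              return PTR_ERR(msg);
 *      // fill in msg->mad (MAD header + payload), msg->ah, msg->timeout_ms
 */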

int ib_get_mad_data_offset(u8 mgmt_class)
{
        if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
                return IB_MGMT_SA_HDR;
        else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
                 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
                 (mgmt_class == IB_MGMT_CLASS_BIS))
                return IB_MGMT_DEVICE_HDR;
        else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
                 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
                return IB_MGMT_VENDOR_HDR;
        else
                return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
        if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
            (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
            (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
            (mgmt_class == IB_MGMT_CLASS_BIS) ||
            ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
             (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
                return 1;
        return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
        struct ib_mad_send_wr_private *mad_send_wr;
        struct list_head *list;

        mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
                                   send_buf);
        list = &mad_send_wr->cur_seg->list;

        if (mad_send_wr->cur_seg->num < seg_num) {
                list_for_each_entry(mad_send_wr->cur_seg, list, list)
                        if (mad_send_wr->cur_seg->num == seg_num)
                                break;
        } else if (mad_send_wr->cur_seg->num > seg_num) {
                list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
                        if (mad_send_wr->cur_seg->num == seg_num)
                                break;
        }
        return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
        if (mad_send_wr->send_buf.seg_count)
                return ib_get_rmpp_segment(&mad_send_wr->send_buf,
                                           mad_send_wr->seg_num);
        else
                return mad_send_wr->send_buf.mad +
                       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;

        mad_agent_priv = container_of(send_buf->mad_agent,
                                      struct ib_mad_agent_private, agent);
        mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
                                   send_buf);

        free_send_rmpp_list(mad_send_wr);
        kfree(send_buf->mad);
        deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_mad_qp_info *qp_info;
        struct list_head *list;
        struct ib_send_wr *bad_send_wr;
        struct ib_mad_agent *mad_agent;
        struct ib_sge *sge;
        unsigned long flags;
        int ret;

        /* Set WR ID to find mad_send_wr upon completion */
        qp_info = mad_send_wr->mad_agent_priv->qp_info;
        mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
        mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

        mad_agent = mad_send_wr->send_buf.mad_agent;
        sge = mad_send_wr->sg_list;
        sge[0].addr = ib_dma_map_single(mad_agent->device,
                                        mad_send_wr->send_buf.mad,
                                        sge[0].length,
                                        DMA_TO_DEVICE);
        mad_send_wr->header_mapping = sge[0].addr;

        sge[1].addr = ib_dma_map_single(mad_agent->device,
                                        ib_get_payload(mad_send_wr),
                                        sge[1].length,
                                        DMA_TO_DEVICE);
        mad_send_wr->payload_mapping = sge[1].addr;

        spin_lock_irqsave(&qp_info->send_queue.lock, flags);
        if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
                ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
                                   &bad_send_wr);
                list = &qp_info->send_queue.list;
        } else {
                ret = 0;
                list = &qp_info->overflow_list;
        }

        if (!ret) {
                qp_info->send_queue.count++;
                list_add_tail(&mad_send_wr->mad_list.list, list);
        }
        spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
        if (ret) {
                ib_dma_unmap_single(mad_agent->device,
                                    mad_send_wr->header_mapping,
                                    sge[0].length, DMA_TO_DEVICE);
                ib_dma_unmap_single(mad_agent->device,
                                    mad_send_wr->payload_mapping,
                                    sge[1].length, DMA_TO_DEVICE);
        }
        return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
                     struct ib_mad_send_buf **bad_send_buf)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_buf *next_send_buf;
        struct ib_mad_send_wr_private *mad_send_wr;
        unsigned long flags;
        int ret = -EINVAL;

        /* Walk list of send WRs and post each on send list */
        for (; send_buf; send_buf = next_send_buf) {

                mad_send_wr = container_of(send_buf,
                                           struct ib_mad_send_wr_private,
                                           send_buf);
                mad_agent_priv = mad_send_wr->mad_agent_priv;

                if (!send_buf->mad_agent->send_handler ||
                    (send_buf->timeout_ms &&
                     !send_buf->mad_agent->recv_handler)) {
                        ret = -EINVAL;
                        goto error;
                }

                if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
                        if (mad_agent_priv->agent.rmpp_version) {
                                ret = -EINVAL;
                                goto error;
                        }
                }

                /*
                 * Save pointer to next work request to post in case the
                 * current one completes, and the user modifies the work
                 * request associated with the completion
                 */
                next_send_buf = send_buf->next;
                mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;

                if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
                    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                        ret = handle_outgoing_dr_smp(mad_agent_priv,
                                                     mad_send_wr);
                        if (ret < 0)            /* error */
                                goto error;
                        else if (ret == 1)      /* locally consumed */
                                continue;
                }

                mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
                /* Timeout will be updated after send completes */
                mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
                mad_send_wr->retries = send_buf->retries;
                /* Reference for work request to QP + response */
                mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
                mad_send_wr->status = IB_WC_SUCCESS;

                /* Reference MAD agent until send completes */
                atomic_inc(&mad_agent_priv->refcount);
                spin_lock_irqsave(&mad_agent_priv->lock, flags);
                list_add_tail(&mad_send_wr->agent_list,
                              &mad_agent_priv->send_list);
                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

                if (mad_agent_priv->agent.rmpp_version) {
                        ret = ib_send_rmpp_mad(mad_send_wr);
                        if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
                                ret = ib_send_mad(mad_send_wr);
                } else
                        ret = ib_send_mad(mad_send_wr);
                if (ret < 0) {
                        /* Fail send request */
                        spin_lock_irqsave(&mad_agent_priv->lock, flags);
                        list_del(&mad_send_wr->agent_list);
                        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
                        atomic_dec(&mad_agent_priv->refcount);
                        goto error;
                }
        }
        return 0;
error:
        if (bad_send_buf)
                *bad_send_buf = send_buf;
        return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
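
/*
 * Continuing the sketch above ib_create_send_mad() (hypothetical names):
 *
 *      ret = ib_post_send_mad(msg, NULL);
 *      if (ret)
 *              ib_free_send_mad(msg);
 *
 * On failure the caller still owns the unposted buffer and any buffers
 * chained after it via send_buf->next; bad_send_buf, when supplied,
 * reports where in the chain posting stopped.
 */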

/*
 * ib_free_recv_mad - Returns the data buffers used to receive
 *  a MAD back to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
        struct ib_mad_private_header *mad_priv_hdr;
        struct ib_mad_private *priv;
        struct list_head free_list;

        INIT_LIST_HEAD(&free_list);
        list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

        list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
                                        &free_list, list) {
                mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
                                           recv_buf);
                mad_priv_hdr = container_of(mad_recv_wc,
                                            struct ib_mad_private_header,
                                            recv_wc);
                priv = container_of(mad_priv_hdr, struct ib_mad_private,
                                    header);
                kmem_cache_free(ib_mad_cache, priv);
        }
}
EXPORT_SYMBOL(ib_free_recv_mad);

struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
                                        u8 rmpp_version,
                                        ib_mad_send_handler send_handler,
                                        ib_mad_recv_handler recv_handler,
                                        void *context)
{
        return ERR_PTR(-EINVAL);        /* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
                      struct ib_wc *wc)
{
        printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
        return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);

static int method_in_use(struct ib_mad_mgmt_method_table **method,
                         struct ib_mad_reg_req *mad_reg_req)
{
        int i;

        for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
             i < IB_MGMT_MAX_METHODS;
             i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
                               1+i)) {
                if ((*method)->agent[i]) {
                        printk(KERN_ERR PFX "Method %d already in use\n", i);
                        return -EINVAL;
                }
        }
        return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
        /* Allocate management method table */
        *method = kzalloc(sizeof **method, GFP_ATOMIC);
        if (!*method) {
                printk(KERN_ERR PFX "No memory for "
                       "ib_mad_mgmt_method_table\n");
                return -ENOMEM;
        }

        return 0;
}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
        int i;

        for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
                if (method->agent[i])
                        return 1;
        return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
        int i;

        for (i = 0; i < MAX_MGMT_CLASS; i++)
                if (class->method_table[i])
                        return 1;
        return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
        int i;

        for (i = 0; i < MAX_MGMT_OUI; i++)
                if (vendor_class->method_table[i])
                        return 1;
        return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
                           char *oui)
{
        int i;

        for (i = 0; i < MAX_MGMT_OUI; i++)
                /* Is there a matching OUI for this vendor class? */
                if (!memcmp(vendor_class->oui[i], oui, 3))
                        return i;

        return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
        int i;

        for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
                if (vendor->vendor_class[i])
                        return 1;

        return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
                                     struct ib_mad_agent_private *agent)
{
        int i;

        /* Remove any methods for this mad agent */
        for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
                if (method->agent[i] == agent) {
                        method->agent[i] = NULL;
                }
        }
}

static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                              struct ib_mad_agent_private *agent_priv,
                              u8 mgmt_class)
{
        struct ib_mad_port_private *port_priv;
        struct ib_mad_mgmt_class_table **class;
        struct ib_mad_mgmt_method_table **method;
        int i, ret;

        port_priv = agent_priv->qp_info->port_priv;
        class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
        if (!*class) {
                /* Allocate management class table for "new" class version */
                *class = kzalloc(sizeof **class, GFP_ATOMIC);
                if (!*class) {
                        printk(KERN_ERR PFX "No memory for "
                               "ib_mad_mgmt_class_table\n");
                        ret = -ENOMEM;
                        goto error1;
                }

                /* Allocate method table for this management class */
                method = &(*class)->method_table[mgmt_class];
                if ((ret = allocate_method_table(method)))
                        goto error2;
        } else {
                method = &(*class)->method_table[mgmt_class];
                if (!*method) {
                        /* Allocate method table for this management class */
                        if ((ret = allocate_method_table(method)))
                                goto error1;
                }
        }

        /* Now, make sure methods are not already in use */
        if (method_in_use(method, mad_reg_req))
                goto error3;

        /* Finally, add in methods being registered */
        for (i = find_first_bit(mad_reg_req->method_mask,
                                IB_MGMT_MAX_METHODS);
             i < IB_MGMT_MAX_METHODS;
             i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
                               1+i)) {
                (*method)->agent[i] = agent_priv;
        }
        return 0;

error3:
        /* Remove any methods for this mad agent */
        remove_methods_mad_agent(*method, agent_priv);
        /* Now, check to see if there are any methods in use */
        if (!check_method_table(*method)) {
                /* If not, release management method table */
                kfree(*method);
                *method = NULL;
        }
        ret = -EINVAL;
        goto error1;
error2:
        kfree(*class);
        *class = NULL;
error1:
        return ret;
}
1352
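/*
 * Vendor-range-2 counterpart of add_nonoui_reg_req(): after normalizing
 * the class with vendor_class_index(), find the method table matching
 * the request's 3-byte OUI (or claim a free OUI slot), then merge in the
 * requested methods.  The error labels unwind only what this call
 * allocated.
 */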
1353 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1354                            struct ib_mad_agent_private *agent_priv)
1355 {
1356         struct ib_mad_port_private *port_priv;
1357         struct ib_mad_mgmt_vendor_class_table **vendor_table;
1358         struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1359         struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1360         struct ib_mad_mgmt_method_table **method;
1361         int i, ret = -ENOMEM;
1362         u8 vclass;
1363
1364         /* "New" vendor (with OUI) class */
1365         vclass = vendor_class_index(mad_reg_req->mgmt_class);
1366         port_priv = agent_priv->qp_info->port_priv;
1367         vendor_table = &port_priv->version[
1368                                 mad_reg_req->mgmt_class_version].vendor;
1369         if (!*vendor_table) {
1370                 /* Allocate mgmt vendor class table for "new" class version */
1371                 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1372                 if (!vendor) {
1373                         printk(KERN_ERR PFX "No memory for "
1374                                "ib_mad_mgmt_vendor_class_table\n");
1375                         goto error1;
1376                 }
1377
1378                 *vendor_table = vendor;
1379         }
1380         if (!(*vendor_table)->vendor_class[vclass]) {
1381                 /* Allocate table for this management vendor class */
1382                 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1383                 if (!vendor_class) {
1384                         printk(KERN_ERR PFX "No memory for "
1385                                "ib_mad_mgmt_vendor_class\n");
1386                         goto error2;
1387                 }
1388
1389                 (*vendor_table)->vendor_class[vclass] = vendor_class;
1390         }
1391         for (i = 0; i < MAX_MGMT_OUI; i++) {
1392                 /* Is there a matching OUI for this vendor class? */
1393                 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1394                             mad_reg_req->oui, 3)) {
1395                         method = &(*vendor_table)->vendor_class[
1396                                                 vclass]->method_table[i];
1397                         BUG_ON(!*method);
1398                         goto check_in_use;
1399                 }
1400         }
1401         for (i = 0; i < MAX_MGMT_OUI; i++) {
1402                 /* OUI slot available ? */
1403                 if (!is_vendor_oui((*vendor_table)->vendor_class[
1404                                 vclass]->oui[i])) {
1405                         method = &(*vendor_table)->vendor_class[
1406                                 vclass]->method_table[i];
1407                         BUG_ON(*method);
1408                         /* Allocate method table for this OUI */
1409                         if ((ret = allocate_method_table(method)))
1410                                 goto error3;
1411                         memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1412                                mad_reg_req->oui, 3);
1413                         goto check_in_use;
1414                 }
1415         }
1416         printk(KERN_ERR PFX "All OUI slots in use\n");
1417         goto error3;
1418
1419 check_in_use:
1420         /* Now, make sure methods are not already in use */
1421         if (method_in_use(method, mad_reg_req))
1422                 goto error4;
1423
1424         /* Finally, add in methods being registered */
1425         for (i = find_first_bit(mad_reg_req->method_mask,
1426                                 IB_MGMT_MAX_METHODS);
1427              i < IB_MGMT_MAX_METHODS;
1428              i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1429                                1+i)) {
1430                 (*method)->agent[i] = agent_priv;
1431         }
1432         return 0;
1433
1434 error4:
1435         /* Remove any methods for this mad agent */
1436         remove_methods_mad_agent(*method, agent_priv);
1437         /* Now, check to see if there are any methods in use */
1438         if (!check_method_table(*method)) {
1439                 /* If not, release management method table */
1440                 kfree(*method);
1441                 *method = NULL;
1442         }
1443         ret = -EINVAL;
1444 error3:
1445         if (vendor_class) {
1446                 (*vendor_table)->vendor_class[vclass] = NULL;
1447                 kfree(vendor_class);
1448         }
1449 error2:
1450         if (vendor) {
1451                 *vendor_table = NULL;
1452                 kfree(vendor);
1453         }
1454 error1:
1455         return ret;
1456 }
1457
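/*
 * Undo a registration: remove this agent's methods, then free each
 * enclosing table (method, class or vendor class, vendor) as it becomes
 * empty, tearing down the same hierarchy that the add_*_reg_req()
 * helpers build up.
 */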
1458 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1459 {
1460         struct ib_mad_port_private *port_priv;
1461         struct ib_mad_mgmt_class_table *class;
1462         struct ib_mad_mgmt_method_table *method;
1463         struct ib_mad_mgmt_vendor_class_table *vendor;
1464         struct ib_mad_mgmt_vendor_class *vendor_class;
1465         int index;
1466         u8 mgmt_class;
1467
1468         /*
1469          * Was a MAD registration request supplied
1470          * with the original registration?
1471          */
1472         if (!agent_priv->reg_req) {
1473                 goto out;
1474         }
1475
1476         port_priv = agent_priv->qp_info->port_priv;
1477         mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1478         class = port_priv->version[
1479                         agent_priv->reg_req->mgmt_class_version].class;
1480         if (!class)
1481                 goto vendor_check;
1482
1483         method = class->method_table[mgmt_class];
1484         if (method) {
1485                 /* Remove any methods for this mad agent */
1486                 remove_methods_mad_agent(method, agent_priv);
1487                 /* Now, check to see if there are any methods still in use */
1488                 if (!check_method_table(method)) {
1489                         /* If not, release management method table */
1490                         kfree(method);
1491                         class->method_table[mgmt_class] = NULL;
1492                         /* Any management classes left ? */
1493                         if (!check_class_table(class)) {
1494                                 /* If not, release management class table */
1495                                 kfree(class);
1496                                 port_priv->version[
1497                                         agent_priv->reg_req->
1498                                         mgmt_class_version].class = NULL;
1499                         }
1500                 }
1501         }
1502
1503 vendor_check:
1504         if (!is_vendor_class(mgmt_class))
1505                 goto out;
1506
1507         /* normalize mgmt_class to vendor range 2 */
1508         mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1509         vendor = port_priv->version[
1510                         agent_priv->reg_req->mgmt_class_version].vendor;
1511
1512         if (!vendor)
1513                 goto out;
1514
1515         vendor_class = vendor->vendor_class[mgmt_class];
1516         if (vendor_class) {
1517                 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1518                 if (index < 0)
1519                         goto out;
1520                 method = vendor_class->method_table[index];
1521                 if (method) {
1522                         /* Remove any methods for this mad agent */
1523                         remove_methods_mad_agent(method, agent_priv);
1524                         /*
1525                          * Now, check to see if there are
1526                          * any methods still in use
1527                          */
1528                         if (!check_method_table(method)) {
1529                                 /* If not, release management method table */
1530                                 kfree(method);
1531                                 vendor_class->method_table[index] = NULL;
1532                                 memset(vendor_class->oui[index], 0, 3);
1533                                 /* Any OUIs left ? */
1534                                 if (!check_vendor_class(vendor_class)) {
1535                                         /* If not, release vendor class table */
1536                                         kfree(vendor_class);
1537                                         vendor->vendor_class[mgmt_class] = NULL;
1538                                         /* Any other vendor classes left ? */
1539                                         if (!check_vendor_table(vendor)) {
1540                                                 kfree(vendor);
1541                                                 port_priv->version[
1542                                                         agent_priv->reg_req->
1543                                                         mgmt_class_version].
1544                                                         vendor = NULL;
1545                                         }
1546                                 }
1547                         }
1548                 }
1549         }
1550
1551 out:
1552         return;
1553 }
1554
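/*
 * Route an incoming MAD to the agent that should receive it.  Responses
 * are matched on the high 32 bits of the transaction ID, which carry the
 * sending agent's hi_tid; requests go through the version/class/method
 * tables, plus the OUI for vendor classes.  A reference is taken on the
 * agent that is returned.
 */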
1555 static struct ib_mad_agent_private *
1556 find_mad_agent(struct ib_mad_port_private *port_priv,
1557                struct ib_mad *mad)
1558 {
1559         struct ib_mad_agent_private *mad_agent = NULL;
1560         unsigned long flags;
1561
1562         spin_lock_irqsave(&port_priv->reg_lock, flags);
1563         if (ib_response_mad(mad)) {
1564                 u32 hi_tid;
1565                 struct ib_mad_agent_private *entry;
1566
1567                 /*
1568                  * Routing is based on the high 32 bits of the
1569                  * MAD's transaction ID.
1570                  */
1571                 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
1572                 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1573                         if (entry->agent.hi_tid == hi_tid) {
1574                                 mad_agent = entry;
1575                                 break;
1576                         }
1577                 }
1578         } else {
1579                 struct ib_mad_mgmt_class_table *class;
1580                 struct ib_mad_mgmt_method_table *method;
1581                 struct ib_mad_mgmt_vendor_class_table *vendor;
1582                 struct ib_mad_mgmt_vendor_class *vendor_class;
1583                 struct ib_vendor_mad *vendor_mad;
1584                 int index;
1585
1586                 /*
1587                  * Routing is based on version, class, and method;
1588                  * for "newer" vendor MADs, it is also based on the OUI
1589                  */
1590                 if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
1591                         goto out;
1592                 if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1593                         class = port_priv->version[
1594                                         mad->mad_hdr.class_version].class;
1595                         if (!class)
1596                                 goto out;
1597                         method = class->method_table[convert_mgmt_class(
1598                                                         mad->mad_hdr.mgmt_class)];
1599                         if (method)
1600                                 mad_agent = method->agent[mad->mad_hdr.method &
1601                                                           ~IB_MGMT_METHOD_RESP];
1602                 } else {
1603                         vendor = port_priv->version[
1604                                         mad->mad_hdr.class_version].vendor;
1605                         if (!vendor)
1606                                 goto out;
1607                         vendor_class = vendor->vendor_class[vendor_class_index(
1608                                                 mad->mad_hdr.mgmt_class)];
1609                         if (!vendor_class)
1610                                 goto out;
1611                         /* Find matching OUI */
1612                         vendor_mad = (struct ib_vendor_mad *)mad;
1613                         index = find_vendor_oui(vendor_class, vendor_mad->oui);
1614                         if (index == -1)
1615                                 goto out;
1616                         method = vendor_class->method_table[index];
1617                         if (method) {
1618                                 mad_agent = method->agent[mad->mad_hdr.method &
1619                                                           ~IB_MGMT_METHOD_RESP];
1620                         }
1621                 }
1622         }
1623
1624         if (mad_agent) {
1625                 if (mad_agent->agent.recv_handler)
1626                         atomic_inc(&mad_agent->refcount);
1627                 else {
1628                         printk(KERN_NOTICE PFX "No receive handler for client "
1629                                "%p on port %d\n",
1630                                &mad_agent->agent, port_priv->port_num);
1631                         mad_agent = NULL;
1632                 }
1633         }
1634 out:
1635         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1636
1637         return mad_agent;
1638 }
1639
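/*
 * Sanity check a received MAD: the base version must be one we support,
 * and the management class must match the QP it arrived on - SMI classes
 * are valid only on QP0, and all other classes only on a non-zero QP.
 * Returns 1 if the MAD is acceptable.
 */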
1640 static int validate_mad(struct ib_mad *mad, u32 qp_num)
1641 {
1642         int valid = 0;
1643
1644         /* Make sure MAD base version is understood */
1645         if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
1646                 printk(KERN_ERR PFX "MAD received with unsupported base "
1647                        "version %d\n", mad->mad_hdr.base_version);
1648                 goto out;
1649         }
1650
1651         /* Filter SMI packets sent to QPs other than QP0 */
1652         if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1653             (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1654                 if (qp_num == 0)
1655                         valid = 1;
1656         } else {
1657                 /* Filter GSI packets sent to QP0 */
1658                 if (qp_num != 0)
1659                         valid = 1;
1660         }
1661
1662 out:
1663         return valid;
1664 }
1665
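/*
 * True if this outbound MAD carries request data that a response could
 * be matched against: the agent doesn't use RMPP, RMPP isn't active on
 * this MAD, or this is an RMPP DATA segment (ACKs, STOPs and ABORTs
 * carry no payload to match).
 */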
1666 static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
1667                        struct ib_mad_hdr *mad_hdr)
1668 {
1669         struct ib_rmpp_mad *rmpp_mad;
1670
1671         rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1672         return !mad_agent_priv->agent.rmpp_version ||
1673                 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1674                                     IB_MGMT_RMPP_FLAG_ACTIVE) ||
1675                 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1676 }
1677
1678 static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
1679                                      struct ib_mad_recv_wc *rwc)
1680 {
1681         return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
1682                 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1683 }
1684
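/*
 * Decide whether a received MAD plausibly involves the same peer as the
 * given outstanding send, comparing LIDs/path bits or GIDs depending on
 * whether a GRH is present.  Lookup failures deliberately report "no
 * match" to avoid false positives.
 */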
1685 static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
1686                                    struct ib_mad_send_wr_private *wr,
1687                                    struct ib_mad_recv_wc *rwc)
1688 {
1689         struct ib_ah_attr attr;
1690         u8 send_resp, rcv_resp;
1691         union ib_gid sgid;
1692         struct ib_device *device = mad_agent_priv->agent.device;
1693         u8 port_num = mad_agent_priv->agent.port_num;
1694         u8 lmc;
1695
1696         send_resp = ((struct ib_mad *)(wr->send_buf.mad))->
1697                      mad_hdr.method & IB_MGMT_METHOD_RESP;
1698         rcv_resp = rwc->recv_buf.mad->mad_hdr.method & IB_MGMT_METHOD_RESP;
1699
1700         if (send_resp == rcv_resp)
1701                 /* both requests, or both responses: treat GIDs as different */
1702                 return 0;
1703
1704         if (ib_query_ah(wr->send_buf.ah, &attr))
1705                 /* Assume not equal, to avoid false positives. */
1706                 return 0;
1707
1708         if (!!(attr.ah_flags & IB_AH_GRH) !=
1709             !!(rwc->wc->wc_flags & IB_WC_GRH))
1710                 /* one has GID, other does not.  Assume different */
1711                 return 0;
1712
1713         if (!send_resp && rcv_resp) {
1714                 /* we sent a request, this is the response */
1715                 if (!(attr.ah_flags & IB_AH_GRH)) {
1716                         if (ib_get_cached_lmc(device, port_num, &lmc))
1717                                 return 0;
1718                         return (!lmc || !((attr.src_path_bits ^
1719                                            rwc->wc->dlid_path_bits) &
1720                                           ((1 << lmc) - 1)));
1721                 } else {
1722                         if (ib_get_cached_gid(device, port_num,
1723                                               attr.grh.sgid_index, &sgid))
1724                                 return 0;
1725                         return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1726                                        16);
1727                 }
1728         }
1729
1730         if (!(attr.ah_flags & IB_AH_GRH))
1731                 return attr.dlid == rwc->wc->slid;
1732         else
1733                 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1734                                16);
1735 }
1736
1737 static inline int is_direct(u8 class)
1738 {
1739         return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1740 }
1741
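/*
 * Find the outstanding request that a received response completes.  The
 * wait list is scanned first, then the send list, since a response can
 * arrive before its request's send completion has been processed.
 * Matching is by TID, management class and - except for directed route
 * MADs, which may use permissive LIDs - the GID/path check above.
 * Called with mad_agent_priv->lock held.
 */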
1742 struct ib_mad_send_wr_private *
1743 ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
1744                  struct ib_mad_recv_wc *wc)
1745 {
1746         struct ib_mad_send_wr_private *wr;
1747         struct ib_mad *mad;
1748
1749         mad = (struct ib_mad *)wc->recv_buf.mad;
1750
1751         list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1752                 if ((wr->tid == mad->mad_hdr.tid) &&
1753                     rcv_has_same_class(wr, wc) &&
1754                     /*
1755                      * Don't check GID for direct routed MADs.
1756                      * These might have permissive LIDs.
1757                      */
1758                     (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1759                      rcv_has_same_gid(mad_agent_priv, wr, wc)))
1760                         return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1761         }
1762
1763         /*
1764          * It's possible to receive the response before we've
1765          * been notified that the send has completed
1766          */
1767         list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1768                 if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1769                     wr->tid == mad->mad_hdr.tid &&
1770                     wr->timeout &&
1771                     rcv_has_same_class(wr, wc) &&
1772                     /*
1773                      * Don't check GID for direct routed MADs.
1774                      * These might have permissive LIDs.
1775                      */
1776                     (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1777                      rcv_has_same_gid(mad_agent_priv, wr, wc)))
1778                         /* Verify request has not been canceled */
1779                         return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1780         }
1781         return NULL;
1782 }
1783
1784 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1785 {
1786         mad_send_wr->timeout = 0;
1787         if (mad_send_wr->refcount == 1)
1788                 list_move_tail(&mad_send_wr->agent_list,
1789                               &mad_send_wr->mad_agent_priv->done_list);
1790 }
1791
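/*
 * Deliver a fully reassembled receive to the agent.  A response first
 * completes its matching request (responses are completed before
 * requests, per the comment below); the agent reference taken in
 * find_mad_agent() is dropped on every path.
 */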
1792 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1793                                  struct ib_mad_recv_wc *mad_recv_wc)
1794 {
1795         struct ib_mad_send_wr_private *mad_send_wr;
1796         struct ib_mad_send_wc mad_send_wc;
1797         unsigned long flags;
1798
1799         INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1800         list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1801         if (mad_agent_priv->agent.rmpp_version) {
1802                 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1803                                                       mad_recv_wc);
1804                 if (!mad_recv_wc) {
1805                         deref_mad_agent(mad_agent_priv);
1806                         return;
1807                 }
1808         }
1809
1810         /* Complete corresponding request */
1811         if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
1812                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1813                 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1814                 if (!mad_send_wr) {
1815                         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1816                         ib_free_recv_mad(mad_recv_wc);
1817                         deref_mad_agent(mad_agent_priv);
1818                         return;
1819                 }
1820                 ib_mark_mad_done(mad_send_wr);
1821                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1822
1823                 /* Defined behavior is to complete response before request */
1824                 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
1825                 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1826                                                    mad_recv_wc);
1827                 atomic_dec(&mad_agent_priv->refcount);
1828
1829                 mad_send_wc.status = IB_WC_SUCCESS;
1830                 mad_send_wc.vendor_err = 0;
1831                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1832                 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1833         } else {
1834                 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1835                                                    mad_recv_wc);
1836                 deref_mad_agent(mad_agent_priv);
1837         }
1838 }
1839
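/*
 * Receive completion path: unmap the buffer, validate the MAD, let the
 * SMI code handle or forward directed route SMPs, give the driver's
 * process_mad() the right of first refusal, and hand whatever is left
 * to the matching agent.  Any buffer still owned at the end is recycled
 * by reposting it as a new receive.
 */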
1840 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1841                                      struct ib_wc *wc)
1842 {
1843         struct ib_mad_qp_info *qp_info;
1844         struct ib_mad_private_header *mad_priv_hdr;
1845         struct ib_mad_private *recv, *response = NULL;
1846         struct ib_mad_list_head *mad_list;
1847         struct ib_mad_agent_private *mad_agent;
1848         int port_num;
1849
1850         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1851         qp_info = mad_list->mad_queue->qp_info;
1852         dequeue_mad(mad_list);
1853
1854         mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1855                                     mad_list);
1856         recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1857         ib_dma_unmap_single(port_priv->device,
1858                             recv->header.mapping,
1859                             sizeof(struct ib_mad_private) -
1860                               sizeof(struct ib_mad_private_header),
1861                             DMA_FROM_DEVICE);
1862
1863         /* Set up the MAD receive work completion from the "normal" one */
1864         recv->header.wc = *wc;
1865         recv->header.recv_wc.wc = &recv->header.wc;
1866         recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1867         recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1868         recv->header.recv_wc.recv_buf.grh = &recv->grh;
1869
1870         if (atomic_read(&qp_info->snoop_count))
1871                 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
1872
1873         /* Validate MAD */
1874         if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
1875                 goto out;
1876
1877         response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1878         if (!response) {
1879                 printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
1880                        "for response buffer\n");
1881                 goto out;
1882         }
1883
1884         if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
1885                 port_num = wc->port_num;
1886         else
1887                 port_num = port_priv->port_num;
1888
1889         if (recv->mad.mad.mad_hdr.mgmt_class ==
1890             IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1891                 enum smi_forward_action retsmi;
1892
1893                 if (smi_handle_dr_smp_recv(&recv->mad.smp,
1894                                            port_priv->device->node_type,
1895                                            port_num,
1896                                            port_priv->device->phys_port_cnt) ==
1897                                            IB_SMI_DISCARD)
1898                         goto out;
1899
1900                 retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
1901                 if (retsmi == IB_SMI_LOCAL)
1902                         goto local;
1903
1904                 if (retsmi == IB_SMI_SEND) { /* don't forward */
1905                         if (smi_handle_dr_smp_send(&recv->mad.smp,
1906                                                    port_priv->device->node_type,
1907                                                    port_num) == IB_SMI_DISCARD)
1908                                 goto out;
1909
1910                         if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
1911                                 goto out;
1912                 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
1913                         /* forward case for switches */
1914                         memcpy(response, recv, sizeof(*response));
1915                         response->header.recv_wc.wc = &response->header.wc;
1916                         response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1917                         response->header.recv_wc.recv_buf.grh = &response->grh;
1918
1919                         agent_send_response(&response->mad.mad,
1920                                             &response->grh, wc,
1921                                             port_priv->device,
1922                                             smi_get_fwd_port(&recv->mad.smp),
1923                                             qp_info->qp->qp_num);
1924
1925                         goto out;
1926                 }
1927         }
1928
1929 local:
1930         /* Give driver "right of first refusal" on incoming MAD */
1931         if (port_priv->device->process_mad) {
1932                 int ret;
1933
1934                 if (!response) {
1935                         printk(KERN_ERR PFX "No memory for response MAD\n");
1936                         /*
1937                          * Is it better to assume that
1938                          * it wouldn't be processed ?
1939                          */
1940                         goto out;
1941                 }
1942
1943                 ret = port_priv->device->process_mad(port_priv->device, 0,
1944                                                      port_priv->port_num,
1945                                                      wc, &recv->grh,
1946                                                      &recv->mad.mad,
1947                                                      &response->mad.mad);
1948                 if (ret & IB_MAD_RESULT_SUCCESS) {
1949                         if (ret & IB_MAD_RESULT_CONSUMED)
1950                                 goto out;
1951                         if (ret & IB_MAD_RESULT_REPLY) {
1952                                 agent_send_response(&response->mad.mad,
1953                                                     &recv->grh, wc,
1954                                                     port_priv->device,
1955                                                     port_num,
1956                                                     qp_info->qp->qp_num);
1957                                 goto out;
1958                         }
1959                 }
1960         }
1961
1962         mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1963         if (mad_agent) {
1964                 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
1965                 /*
1966                  * recv is freed in the error paths of ib_mad_complete_recv()
1967                  * or handed to the agent's recv_handler, which takes ownership
1968                  */
1969                 recv = NULL;
1970         }
1971
1972 out:
1973         /* Post another receive request for this QP */
1974         if (response) {
1975                 ib_mad_post_receive_mads(qp_info, response);
1976                 if (recv)
1977                         kmem_cache_free(ib_mad_cache, recv);
1978         } else
1979                 ib_mad_post_receive_mads(qp_info, recv);
1980 }
1981
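/*
 * Re-arm the delayed timeout work against whatever is now at the head
 * of the wait list, or cancel it outright if the list went empty.
 */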
1982 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1983 {
1984         struct ib_mad_send_wr_private *mad_send_wr;
1985         unsigned long delay;
1986
1987         if (list_empty(&mad_agent_priv->wait_list)) {
1988                 cancel_delayed_work(&mad_agent_priv->timed_work);
1989         } else {
1990                 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
1991                                          struct ib_mad_send_wr_private,
1992                                          agent_list);
1993
1994                 if (time_after(mad_agent_priv->timeout,
1995                                mad_send_wr->timeout)) {
1996                         mad_agent_priv->timeout = mad_send_wr->timeout;
1997                         cancel_delayed_work(&mad_agent_priv->timed_work);
1998                         delay = mad_send_wr->timeout - jiffies;
1999                         if ((long)delay <= 0)
2000                                 delay = 1;
2001                         queue_delayed_work(mad_agent_priv->qp_info->
2002                                            port_priv->wq,
2003                                            &mad_agent_priv->timed_work, delay);
2004                 }
2005         }
2006 }
2007
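/*
 * Put a sent request on the wait list, which is kept sorted by absolute
 * timeout (earliest at the head) so the timeout work only ever has to
 * look at the first entry.  Called with mad_agent_priv->lock held.
 */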
2008 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2009 {
2010         struct ib_mad_agent_private *mad_agent_priv;
2011         struct ib_mad_send_wr_private *temp_mad_send_wr;
2012         struct list_head *list_item;
2013         unsigned long delay;
2014
2015         mad_agent_priv = mad_send_wr->mad_agent_priv;
2016         list_del(&mad_send_wr->agent_list);
2017
2018         delay = mad_send_wr->timeout;
2019         mad_send_wr->timeout += jiffies;
2020
2021         if (delay) {
2022                 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2023                         temp_mad_send_wr = list_entry(list_item,
2024                                                 struct ib_mad_send_wr_private,
2025                                                 agent_list);
2026                         if (time_after(mad_send_wr->timeout,
2027                                        temp_mad_send_wr->timeout))
2028                                 break;
2029                 }
2030         } else
2031                 list_item = &mad_agent_priv->wait_list;
2033         list_add(&mad_send_wr->agent_list, list_item);
2034
2035         /* Reschedule a work item if we have a shorter timeout */
2036         if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
2037                 cancel_delayed_work(&mad_agent_priv->timed_work);
2038                 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2039                                    &mad_agent_priv->timed_work, delay);
2040         }
2041 }
2042
2043 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2044                           int timeout_ms)
2045 {
2046         mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2047         wait_for_response(mad_send_wr);
2048 }
2049
2050 /*
2051  * Process a send work completion
2052  */
2053 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2054                              struct ib_mad_send_wc *mad_send_wc)
2055 {
2056         struct ib_mad_agent_private     *mad_agent_priv;
2057         unsigned long                   flags;
2058         int                             ret;
2059
2060         mad_agent_priv = mad_send_wr->mad_agent_priv;
2061         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2062         if (mad_agent_priv->agent.rmpp_version) {
2063                 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2064                 if (ret == IB_RMPP_RESULT_CONSUMED)
2065                         goto done;
2066         } else
2067                 ret = IB_RMPP_RESULT_UNHANDLED;
2068
2069         if (mad_send_wc->status != IB_WC_SUCCESS &&
2070             mad_send_wr->status == IB_WC_SUCCESS) {
2071                 mad_send_wr->status = mad_send_wc->status;
2072                 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2073         }
2074
2075         if (--mad_send_wr->refcount > 0) {
2076                 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2077                     mad_send_wr->status == IB_WC_SUCCESS) {
2078                         wait_for_response(mad_send_wr);
2079                 }
2080                 goto done;
2081         }
2082
2083         /* Remove send from MAD agent and notify client of completion */
2084         list_del(&mad_send_wr->agent_list);
2085         adjust_timeout(mad_agent_priv);
2086         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2087
2088         if (mad_send_wr->status != IB_WC_SUCCESS)
2089                 mad_send_wc->status = mad_send_wr->status;
2090         if (ret == IB_RMPP_RESULT_INTERNAL)
2091                 ib_rmpp_send_handler(mad_send_wc);
2092         else
2093                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2094                                                    mad_send_wc);
2095
2096         /* Release reference on agent taken when sending */
2097         deref_mad_agent(mad_agent_priv);
2098         return;
2099 done:
2100         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2101 }
2102
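/*
 * Send completion path: unmap the header and payload buffers, promote a
 * queued send from the overflow list if there is one, and complete the
 * finished request.  If reposting the promoted send fails, it is
 * completed with IB_WC_LOC_QP_OP_ERR via the retry loop.
 */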
2103 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2104                                      struct ib_wc *wc)
2105 {
2106         struct ib_mad_send_wr_private   *mad_send_wr, *queued_send_wr;
2107         struct ib_mad_list_head         *mad_list;
2108         struct ib_mad_qp_info           *qp_info;
2109         struct ib_mad_queue             *send_queue;
2110         struct ib_send_wr               *bad_send_wr;
2111         struct ib_mad_send_wc           mad_send_wc;
2112         unsigned long flags;
2113         int ret;
2114
2115         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2116         mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2117                                    mad_list);
2118         send_queue = mad_list->mad_queue;
2119         qp_info = send_queue->qp_info;
2120
2121 retry:
2122         ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2123                             mad_send_wr->header_mapping,
2124                             mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2125         ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2126                             mad_send_wr->payload_mapping,
2127                             mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2128         queued_send_wr = NULL;
2129         spin_lock_irqsave(&send_queue->lock, flags);
2130         list_del(&mad_list->list);
2131
2132         /* Move queued send to the send queue */
2133         if (send_queue->count-- > send_queue->max_active) {
2134                 mad_list = container_of(qp_info->overflow_list.next,
2135                                         struct ib_mad_list_head, list);
2136                 queued_send_wr = container_of(mad_list,
2137                                         struct ib_mad_send_wr_private,
2138                                         mad_list);
2139                 list_move_tail(&mad_list->list, &send_queue->list);
2140         }
2141         spin_unlock_irqrestore(&send_queue->lock, flags);
2142
2143         mad_send_wc.send_buf = &mad_send_wr->send_buf;
2144         mad_send_wc.status = wc->status;
2145         mad_send_wc.vendor_err = wc->vendor_err;
2146         if (atomic_read(&qp_info->snoop_count))
2147                 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2148                            IB_MAD_SNOOP_SEND_COMPLETIONS);
2149         ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2150
2151         if (queued_send_wr) {
2152                 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
2153                                    &bad_send_wr);
2154                 if (ret) {
2155                         printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
2156                         mad_send_wr = queued_send_wr;
2157                         wc->status = IB_WC_LOC_QP_OP_ERR;
2158                         goto retry;
2159                 }
2160         }
2161 }
2162
2163 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2164 {
2165         struct ib_mad_send_wr_private *mad_send_wr;
2166         struct ib_mad_list_head *mad_list;
2167         unsigned long flags;
2168
2169         spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2170         list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2171                 mad_send_wr = container_of(mad_list,
2172                                            struct ib_mad_send_wr_private,
2173                                            mad_list);
2174                 mad_send_wr->retry = 1;
2175         }
2176         spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2177 }
2178
2179 static void mad_error_handler(struct ib_mad_port_private *port_priv,
2180                               struct ib_wc *wc)
2181 {
2182         struct ib_mad_list_head *mad_list;
2183         struct ib_mad_qp_info *qp_info;
2184         struct ib_mad_send_wr_private *mad_send_wr;
2185         int ret;
2186
2187         /* Determine if failure was a send or receive */
2188         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2189         qp_info = mad_list->mad_queue->qp_info;
2190         if (mad_list->mad_queue == &qp_info->recv_queue)
2191                 /*
2192                  * Receive errors indicate that the QP has entered the error
2193                  * state - error handling/shutdown code will cleanup
2194                  */
2195                 return;
2196
2197         /*
2198          * Send errors will transition the QP to SQE - move
2199          * QP to RTS and repost flushed work requests
2200          */
2201         mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2202                                    mad_list);
2203         if (wc->status == IB_WC_WR_FLUSH_ERR) {
2204                 if (mad_send_wr->retry) {
2205                         /* Repost send */
2206                         struct ib_send_wr *bad_send_wr;
2207
2208                         mad_send_wr->retry = 0;
2209                         ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2210                                         &bad_send_wr);
2211                         if (ret)
2212                                 ib_mad_send_done_handler(port_priv, wc);
2213                 } else
2214                         ib_mad_send_done_handler(port_priv, wc);
2215         } else {
2216                 struct ib_qp_attr *attr;
2217
2218                 /* Transition QP to RTS and fail offending send */
2219                 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2220                 if (attr) {
2221                         attr->qp_state = IB_QPS_RTS;
2222                         attr->cur_qp_state = IB_QPS_SQE;
2223                         ret = ib_modify_qp(qp_info->qp, attr,
2224                                            IB_QP_STATE | IB_QP_CUR_STATE);
2225                         kfree(attr);
2226                         if (ret)
2227                                 printk(KERN_ERR PFX "mad_error_handler - "
2228                                        "ib_modify_qp to RTS : %d\n", ret);
2229                         else
2230                                 mark_sends_for_retry(qp_info);
2231                 }
2232                 ib_mad_send_done_handler(port_priv, wc);
2233         }
2234 }
2235
2236 /*
2237  * IB MAD completion callback
2238  */
2239 static void ib_mad_completion_handler(struct work_struct *work)
2240 {
2241         struct ib_mad_port_private *port_priv;
2242         struct ib_wc wc;
2243
2244         port_priv = container_of(work, struct ib_mad_port_private, work);
2245         ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2246
2247         while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2248                 if (wc.status == IB_WC_SUCCESS) {
2249                         switch (wc.opcode) {
2250                         case IB_WC_SEND:
2251                                 ib_mad_send_done_handler(port_priv, &wc);
2252                                 break;
2253                         case IB_WC_RECV:
2254                                 ib_mad_recv_done_handler(port_priv, &wc);
2255                                 break;
2256                         default:
2257                                 BUG_ON(1);
2258                                 break;
2259                         }
2260                 } else
2261                         mad_error_handler(port_priv, &wc);
2262         }
2263 }
2264
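/*
 * Fail every outstanding send for an agent that is being torn down:
 * flush the send, wait and local lists and report each request to the
 * client with IB_WC_WR_FLUSH_ERR.
 */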
2265 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2266 {
2267         unsigned long flags;
2268         struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2269         struct ib_mad_send_wc mad_send_wc;
2270         struct list_head cancel_list;
2271
2272         INIT_LIST_HEAD(&cancel_list);
2273
2274         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2275         list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2276                                  &mad_agent_priv->send_list, agent_list) {
2277                 if (mad_send_wr->status == IB_WC_SUCCESS) {
2278                         mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2279                         mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2280                 }
2281         }
2282
2283         /* Empty wait list to prevent receives from finding a request */
2284         list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2285         /* Empty local completion list as well */
2286         list_splice_init(&mad_agent_priv->local_list, &cancel_list);
2287         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2288
2289         /* Report all cancelled requests */
2290         mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2291         mad_send_wc.vendor_err = 0;
2292
2293         list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2294                                  &cancel_list, agent_list) {
2295                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2296                 list_del(&mad_send_wr->agent_list);
2297                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2298                                                    &mad_send_wc);
2299                 atomic_dec(&mad_agent_priv->refcount);
2300         }
2301 }
2302
2303 static struct ib_mad_send_wr_private *
2304 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2305              struct ib_mad_send_buf *send_buf)
2306 {
2307         struct ib_mad_send_wr_private *mad_send_wr;
2308
2309         list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2310                             agent_list) {
2311                 if (&mad_send_wr->send_buf == send_buf)
2312                         return mad_send_wr;
2313         }
2314
2315         list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2316                             agent_list) {
2317                 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
2318                     &mad_send_wr->send_buf == send_buf)
2319                         return mad_send_wr;
2320         }
2321         return NULL;
2322 }
2323
2324 int ib_modify_mad(struct ib_mad_agent *mad_agent,
2325                   struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2326 {
2327         struct ib_mad_agent_private *mad_agent_priv;
2328         struct ib_mad_send_wr_private *mad_send_wr;
2329         unsigned long flags;
2330         int active;
2331
2332         mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2333                                       agent);
2334         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2335         mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2336         if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2337                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2338                 return -EINVAL;
2339         }
2340
2341         active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2342         if (!timeout_ms) {
2343                 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2344                 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2345         }
2346
2347         mad_send_wr->send_buf.timeout_ms = timeout_ms;
2348         if (active)
2349                 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2350         else
2351                 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2352
2353         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2354         return 0;
2355 }
2356 EXPORT_SYMBOL(ib_modify_mad);
2357
2358 void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2359                    struct ib_mad_send_buf *send_buf)
2360 {
2361         ib_modify_mad(mad_agent, send_buf, 0);
2362 }
2363 EXPORT_SYMBOL(ib_cancel_mad);
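
/*
 * A minimal usage sketch for the two exported calls above (illustrative
 * only - 'agent' and 'send_buf' stand for a registered MAD agent and an
 * outstanding send created with ib_create_send_mad() and posted with
 * ib_post_send_mad()):
 *
 *	ib_modify_mad(agent, send_buf, 2000);	// stretch the timeout to 2s
 *	ib_cancel_mad(agent, send_buf);		// or give up on it entirely
 *
 * A canceled send is still reported through the agent's send_handler,
 * with status IB_WC_WR_FLUSH_ERR.
 */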
2364
2365 static void local_completions(struct work_struct *work)
2366 {
2367         struct ib_mad_agent_private *mad_agent_priv;
2368         struct ib_mad_local_private *local;
2369         struct ib_mad_agent_private *recv_mad_agent;
2370         unsigned long flags;
2371         int recv = 0;
2372         struct ib_wc wc;
2373         struct ib_mad_send_wc mad_send_wc;
2374
2375         mad_agent_priv =
2376                 container_of(work, struct ib_mad_agent_private, local_work);
2377
2378         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2379         while (!list_empty(&mad_agent_priv->local_list)) {
2380                 local = list_entry(mad_agent_priv->local_list.next,
2381                                    struct ib_mad_local_private,
2382                                    completion_list);
2383                 list_del(&local->completion_list);
2384                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2385                 if (local->mad_priv) {
2386                         recv_mad_agent = local->recv_mad_agent;
2387                         if (!recv_mad_agent) {
2388                                 printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
2389                                 goto local_send_completion;
2390                         }
2391
2392                         recv = 1;
2393                         /*
2394                          * Defined behavior is to complete response
2395                          * before request
2396                          */
2397                         build_smp_wc(recv_mad_agent->agent.qp,
2398                                      (unsigned long) local->mad_send_wr,
2399                                      be16_to_cpu(IB_LID_PERMISSIVE),
2400                                      0, recv_mad_agent->agent.port_num, &wc);
2401
2402                         local->mad_priv->header.recv_wc.wc = &wc;
2403                         local->mad_priv->header.recv_wc.mad_len =
2404                                                 sizeof(struct ib_mad);
2405                         INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2406                         list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2407                                  &local->mad_priv->header.recv_wc.rmpp_list);
2408                         local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2409                         local->mad_priv->header.recv_wc.recv_buf.mad =
2410                                                 &local->mad_priv->mad.mad;
2411                         if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2412                                 snoop_recv(recv_mad_agent->qp_info,
2413                                            &local->mad_priv->header.recv_wc,
2414                                            IB_MAD_SNOOP_RECVS);
2415                         recv_mad_agent->agent.recv_handler(
2416                                                 &recv_mad_agent->agent,
2417                                                 &local->mad_priv->header.recv_wc);
2418                         spin_lock_irqsave(&recv_mad_agent->lock, flags);
2419                         atomic_dec(&recv_mad_agent->refcount);
2420                         spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2421                 }
2422
2423 local_send_completion:
2424                 /* Complete send */
2425                 mad_send_wc.status = IB_WC_SUCCESS;
2426                 mad_send_wc.vendor_err = 0;
2427                 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2428                 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2429                         snoop_send(mad_agent_priv->qp_info,
2430                                    &local->mad_send_wr->send_buf,
2431                                    &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2432                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2433                                                    &mad_send_wc);
2434
2435                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2436                 atomic_dec(&mad_agent_priv->refcount);
2437                 if (!recv)
2438                         kmem_cache_free(ib_mad_cache, local->mad_priv);
2439                 kfree(local);
2440         }
2441         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2442 }
2443
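/*
 * Resend a timed-out request if it has retries left, deferring to the
 * RMPP code for RMPP-capable agents.  On success the request goes back
 * on the send list with an extra reference; a nonzero return tells the
 * caller to complete the send as timed out.
 */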
2444 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2445 {
2446         int ret;
2447
2448         if (!mad_send_wr->retries--)
2449                 return -ETIMEDOUT;
2450
2451         mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2452
2453         if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
2454                 ret = ib_retry_rmpp(mad_send_wr);
2455                 switch (ret) {
2456                 case IB_RMPP_RESULT_UNHANDLED:
2457                         ret = ib_send_mad(mad_send_wr);
2458                         break;
2459                 case IB_RMPP_RESULT_CONSUMED:
2460                         ret = 0;
2461                         break;
2462                 default:
2463                         ret = -ECOMM;
2464                         break;
2465                 }
2466         } else
2467                 ret = ib_send_mad(mad_send_wr);
2468
2469         if (!ret) {
2470                 mad_send_wr->refcount++;
2471                 list_add_tail(&mad_send_wr->agent_list,
2472                               &mad_send_wr->mad_agent_priv->send_list);
2473         }
2474         return ret;
2475 }
2476
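/*
 * Delayed work that expires requests at the head of the wait list:
 * anything that can still be retried is resent, the rest complete with
 * IB_WC_RESP_TIMEOUT_ERR (or their previously recorded error status).
 */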
2477 static void timeout_sends(struct work_struct *work)
2478 {
2479         struct ib_mad_agent_private *mad_agent_priv;
2480         struct ib_mad_send_wr_private *mad_send_wr;
2481         struct ib_mad_send_wc mad_send_wc;
2482         unsigned long flags, delay;
2483
2484         mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2485                                       timed_work.work);
2486         mad_send_wc.vendor_err = 0;
2487
2488         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2489         while (!list_empty(&mad_agent_priv->wait_list)) {
2490                 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2491                                          struct ib_mad_send_wr_private,
2492                                          agent_list);
2493
2494                 if (time_after(mad_send_wr->timeout, jiffies)) {
2495                         delay = mad_send_wr->timeout - jiffies;
2496                         if ((long)delay <= 0)
2497                                 delay = 1;
2498                         queue_delayed_work(mad_agent_priv->qp_info->
2499                                            port_priv->wq,
2500                                            &mad_agent_priv->timed_work, delay);
2501                         break;
2502                 }
2503
2504                 list_del(&mad_send_wr->agent_list);
2505                 if (mad_send_wr->status == IB_WC_SUCCESS &&
2506                     !retry_send(mad_send_wr))
2507                         continue;
2508
2509                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2510
2511                 if (mad_send_wr->status == IB_WC_SUCCESS)
2512                         mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2513                 else
2514                         mad_send_wc.status = mad_send_wr->status;
2515                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2516                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2517                                                    &mad_send_wc);
2518
2519                 atomic_dec(&mad_agent_priv->refcount);
2520                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2521         }
2522         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2523 }
2524
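/*
 * CQ event callback.  This may run in interrupt context, so it only
 * queues the port's completion work; ib_mad_completion_handler() does
 * the actual polling from the workqueue.
 */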
2525 static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
2526 {
2527         struct ib_mad_port_private *port_priv = cq->cq_context;
2528         unsigned long flags;
2529
2530         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2531         if (!list_empty(&port_priv->port_list))
2532                 queue_work(port_priv->wq, &port_priv->work);
2533         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2534 }
2535
2536 /*
2537  * Allocate receive MADs and post receive WRs for them
2538  */
2539 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2540                                     struct ib_mad_private *mad)
2541 {
2542         unsigned long flags;
2543         int post, ret;
2544         struct ib_mad_private *mad_priv;
2545         struct ib_sge sg_list;
2546         struct ib_recv_wr recv_wr, *bad_recv_wr;
2547         struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2548
2549         /* Initialize common scatter list fields */
2550         sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2551         sg_list.lkey = qp_info->port_priv->mr->lkey;
2552
2553         /* Initialize common receive WR fields */
2554         recv_wr.next = NULL;
2555         recv_wr.sg_list = &sg_list;
2556         recv_wr.num_sge = 1;
2557
2558         do {
2559                 /* Allocate and map receive buffer */
2560                 if (mad) {
2561                         mad_priv = mad;
2562                         mad = NULL;
2563                 } else {
2564                         mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
2565                         if (!mad_priv) {
2566                                 printk(KERN_ERR PFX "No memory for receive buffer\n");
2567                                 ret = -ENOMEM;
2568                                 break;
2569                         }
2570                 }
2571                 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2572                                                  &mad_priv->grh,
2573                                                  sizeof *mad_priv -
2574                                                    sizeof mad_priv->header,
2575                                                  DMA_FROM_DEVICE);
2576                 mad_priv->header.mapping = sg_list.addr;
2577                 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2578                 mad_priv->header.mad_list.mad_queue = recv_queue;
2579
2580                 /* Post receive WR */
2581                 spin_lock_irqsave(&recv_queue->lock, flags);
2582                 post = (++recv_queue->count < recv_queue->max_active);
2583                 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2584                 spin_unlock_irqrestore(&recv_queue->lock, flags);
2585                 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2586                 if (ret) {
2587                         spin_lock_irqsave(&recv_queue->lock, flags);
2588                         list_del(&mad_priv->header.mad_list.list);
2589                         recv_queue->count--;
2590                         spin_unlock_irqrestore(&recv_queue->lock, flags);
2591                         ib_dma_unmap_single(qp_info->port_priv->device,
2592                                             mad_priv->header.mapping,
2593                                             sizeof *mad_priv -
2594                                               sizeof mad_priv->header,
2595                                             DMA_FROM_DEVICE);
2596                         kmem_cache_free(ib_mad_cache, mad_priv);
2597                         printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
2598                         break;
2599                 }
2600         } while (post);
2601
2602         return ret;
2603 }
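
/*
 * Illustrative sketch (excluded from the build): how a receive
 * completion recovers the ib_mad_private posted above from wc->wr_id,
 * mirroring the container_of() walk done in cleanup_recv_queue().
 */
#if 0
static struct ib_mad_private *example_wc_to_recv_mad(struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list =
		(struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	struct ib_mad_private_header *mad_priv_hdr =
		container_of(mad_list, struct ib_mad_private_header,
			     mad_list);

	return container_of(mad_priv_hdr, struct ib_mad_private, header);
}
#endif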
2604
/*
 * Unmap and return all the posted receive MADs to the MAD cache
 */
2608 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2609 {
2610         struct ib_mad_private_header *mad_priv_hdr;
2611         struct ib_mad_private *recv;
2612         struct ib_mad_list_head *mad_list;
2613
2614         while (!list_empty(&qp_info->recv_queue.list)) {
2615
2616                 mad_list = list_entry(qp_info->recv_queue.list.next,
2617                                       struct ib_mad_list_head, list);
2618                 mad_priv_hdr = container_of(mad_list,
2619                                             struct ib_mad_private_header,
2620                                             mad_list);
2621                 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2622                                     header);
2623
2624                 /* Remove from posted receive MAD list */
2625                 list_del(&mad_list->list);
2626
2627                 ib_dma_unmap_single(qp_info->port_priv->device,
2628                                     recv->header.mapping,
2629                                     sizeof(struct ib_mad_private) -
2630                                       sizeof(struct ib_mad_private_header),
2631                                     DMA_FROM_DEVICE);
2632                 kmem_cache_free(ib_mad_cache, recv);
2633         }
2634
2635         qp_info->recv_queue.count = 0;
2636 }
2637
2638 /*
2639  * Start the port
2640  */
2641 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2642 {
2643         int ret, i;
2644         struct ib_qp_attr *attr;
2645         struct ib_qp *qp;
2646
2647         attr = kmalloc(sizeof *attr, GFP_KERNEL);
2648         if (!attr) {
2649                 printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
2650                 return -ENOMEM;
2651         }
2652
2653         for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2654                 qp = port_priv->qp_info[i].qp;
2655                 /*
2656                  * PKey index for QP1 is irrelevant but
2657                  * one is needed for the Reset to Init transition
2658                  */
2659                 attr->qp_state = IB_QPS_INIT;
2660                 attr->pkey_index = 0;
2661                 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2662                 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2663                                              IB_QP_PKEY_INDEX | IB_QP_QKEY);
2664                 if (ret) {
2665                         printk(KERN_ERR PFX "Couldn't change QP%d state to "
2666                                "INIT: %d\n", i, ret);
2667                         goto out;
2668                 }
2669
2670                 attr->qp_state = IB_QPS_RTR;
2671                 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2672                 if (ret) {
2673                         printk(KERN_ERR PFX "Couldn't change QP%d state to "
2674                                "RTR: %d\n", i, ret);
2675                         goto out;
2676                 }
2677
2678                 attr->qp_state = IB_QPS_RTS;
2679                 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2680                 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2681                 if (ret) {
2682                         printk(KERN_ERR PFX "Couldn't change QP%d state to "
2683                                "RTS: %d\n", i, ret);
2684                         goto out;
2685                 }
2686         }
2687
2688         ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2689         if (ret) {
2690                 printk(KERN_ERR PFX "Failed to request completion "
2691                        "notification: %d\n", ret);
2692                 goto out;
2693         }
2694
2695         for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2696                 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2697                 if (ret) {
2698                         printk(KERN_ERR PFX "Couldn't post receive WRs\n");
2699                         goto out;
2700                 }
2701         }
2702 out:
2703         kfree(attr);
2704         return ret;
2705 }
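
/*
 * Illustrative sketch (excluded from the build): verifying that the
 * INIT -> RTR -> RTS ladder above actually landed in RTS, using
 * ib_query_qp(); the helper name is an example only.
 */
#if 0
static int example_verify_rts(struct ib_qp *qp)
{
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;
	int ret;

	ret = ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr);
	if (ret)
		return ret;
	return (attr.qp_state == IB_QPS_RTS) ? 0 : -EINVAL;
}
#endif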
2706
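/*
 * Any asynchronous event on a MAD QP (e.g. IB_EVENT_QP_FATAL) is
 * unexpected, so just log it; no recovery is attempted.
 */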
2707 static void qp_event_handler(struct ib_event *event, void *qp_context)
2708 {
2709         struct ib_mad_qp_info   *qp_info = qp_context;
2710
2711         /* It's worse than that! He's dead, Jim! */
2712         printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
2713                 event->event, qp_info->qp->qp_num);
2714 }
2715
2716 static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2717                            struct ib_mad_queue *mad_queue)
2718 {
2719         mad_queue->qp_info = qp_info;
2720         mad_queue->count = 0;
2721         spin_lock_init(&mad_queue->lock);
2722         INIT_LIST_HEAD(&mad_queue->list);
2723 }
2724
2725 static void init_mad_qp(struct ib_mad_port_private *port_priv,
2726                         struct ib_mad_qp_info *qp_info)
2727 {
2728         qp_info->port_priv = port_priv;
2729         init_mad_queue(qp_info, &qp_info->send_queue);
2730         init_mad_queue(qp_info, &qp_info->recv_queue);
2731         INIT_LIST_HEAD(&qp_info->overflow_list);
2732         spin_lock_init(&qp_info->snoop_lock);
2733         qp_info->snoop_table = NULL;
2734         qp_info->snoop_table_size = 0;
2735         atomic_set(&qp_info->snoop_count, 0);
2736 }
2737
2738 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2739                          enum ib_qp_type qp_type)
2740 {
2741         struct ib_qp_init_attr  qp_init_attr;
2743
2744         memset(&qp_init_attr, 0, sizeof qp_init_attr);
2745         qp_init_attr.send_cq = qp_info->port_priv->cq;
2746         qp_init_attr.recv_cq = qp_info->port_priv->cq;
2747         qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2748         qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
2749         qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
2750         qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2751         qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2752         qp_init_attr.qp_type = qp_type;
2753         qp_init_attr.port_num = qp_info->port_priv->port_num;
2754         qp_init_attr.qp_context = qp_info;
2755         qp_init_attr.event_handler = qp_event_handler;
2756         qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
		       get_spl_qp_index(qp_type));
		return PTR_ERR(qp_info->qp);
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
	qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
	return 0;
}
2771
2772 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2773 {
2774         ib_destroy_qp(qp_info->qp);
2775         kfree(qp_info->snoop_table);
2776 }
2777
/*
 * Open the port
 * Create the CQ, PD, MR, and QPs for it, then start it
 */
2782 static int ib_mad_port_open(struct ib_device *device,
2783                             int port_num)
2784 {
2785         int ret, cq_size;
2786         struct ib_mad_port_private *port_priv;
2787         unsigned long flags;
2788         char name[sizeof "ib_mad123"];
2789
2790         /* Create new device info */
2791         port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
2792         if (!port_priv) {
2793                 printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
2794                 return -ENOMEM;
2795         }
2796
2797         port_priv->device = device;
2798         port_priv->port_num = port_num;
2799         spin_lock_init(&port_priv->reg_lock);
2800         INIT_LIST_HEAD(&port_priv->agent_list);
2801         init_mad_qp(port_priv, &port_priv->qp_info[0]);
2802         init_mad_qp(port_priv, &port_priv->qp_info[1]);
2803
2804         cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
2805         port_priv->cq = ib_create_cq(port_priv->device,
2806                                      ib_mad_thread_completion_handler,
2807                                      NULL, port_priv, cq_size, 0);
2808         if (IS_ERR(port_priv->cq)) {
2809                 printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
2810                 ret = PTR_ERR(port_priv->cq);
2811                 goto error3;
2812         }
2813
2814         port_priv->pd = ib_alloc_pd(device);
2815         if (IS_ERR(port_priv->pd)) {
2816                 printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
2817                 ret = PTR_ERR(port_priv->pd);
2818                 goto error4;
2819         }
2820
2821         port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
2822         if (IS_ERR(port_priv->mr)) {
2823                 printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
2824                 ret = PTR_ERR(port_priv->mr);
2825                 goto error5;
2826         }
2827
2828         ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2829         if (ret)
2830                 goto error6;
2831         ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2832         if (ret)
2833                 goto error7;
2834
2835         snprintf(name, sizeof name, "ib_mad%d", port_num);
2836         port_priv->wq = create_singlethread_workqueue(name);
2837         if (!port_priv->wq) {
2838                 ret = -ENOMEM;
2839                 goto error8;
2840         }
2841         INIT_WORK(&port_priv->work, ib_mad_completion_handler);
2842
2843         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2844         list_add_tail(&port_priv->port_list, &ib_mad_port_list);
2845         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2846
2847         ret = ib_mad_port_start(port_priv);
2848         if (ret) {
2849                 printk(KERN_ERR PFX "Couldn't start port\n");
2850                 goto error9;
2851         }
2852
2853         return 0;
2854
2855 error9:
2856         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2857         list_del_init(&port_priv->port_list);
2858         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2859
2860         destroy_workqueue(port_priv->wq);
2861 error8:
2862         destroy_mad_qp(&port_priv->qp_info[1]);
2863 error7:
2864         destroy_mad_qp(&port_priv->qp_info[0]);
2865 error6:
2866         ib_dereg_mr(port_priv->mr);
2867 error5:
2868         ib_dealloc_pd(port_priv->pd);
2869 error4:
2870         ib_destroy_cq(port_priv->cq);
2871         cleanup_recv_queue(&port_priv->qp_info[1]);
2872         cleanup_recv_queue(&port_priv->qp_info[0]);
2873 error3:
2874         kfree(port_priv);
2875
2876         return ret;
2877 }
2878
/*
 * Close the port
 * Free the port resources (workqueue, QPs, MR, PD, CQ) and
 * remove the port's info structure
 */
2884 static int ib_mad_port_close(struct ib_device *device, int port_num)
2885 {
2886         struct ib_mad_port_private *port_priv;
2887         unsigned long flags;
2888
2889         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2890         port_priv = __ib_get_mad_port(device, port_num);
2891         if (port_priv == NULL) {
2892                 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2893                 printk(KERN_ERR PFX "Port %d not found\n", port_num);
2894                 return -ENODEV;
2895         }
2896         list_del_init(&port_priv->port_list);
2897         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2898
2899         destroy_workqueue(port_priv->wq);
2900         destroy_mad_qp(&port_priv->qp_info[1]);
2901         destroy_mad_qp(&port_priv->qp_info[0]);
2902         ib_dereg_mr(port_priv->mr);
2903         ib_dealloc_pd(port_priv->pd);
2904         ib_destroy_cq(port_priv->cq);
2905         cleanup_recv_queue(&port_priv->qp_info[1]);
2906         cleanup_recv_queue(&port_priv->qp_info[0]);
2907         /* XXX: Handle deallocation of MAD registration tables */
2908
2909         kfree(port_priv);
2910
2911         return 0;
2912 }
2913
2914 static void ib_mad_init_device(struct ib_device *device)
2915 {
2916         int start, end, i;
2917
2918         if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
2919                 return;
2920
2921         if (device->node_type == RDMA_NODE_IB_SWITCH) {
2922                 start = 0;
2923                 end   = 0;
2924         } else {
2925                 start = 1;
2926                 end   = device->phys_port_cnt;
2927         }
2928
2929         for (i = start; i <= end; i++) {
2930                 if (ib_mad_port_open(device, i)) {
2931                         printk(KERN_ERR PFX "Couldn't open %s port %d\n",
2932                                device->name, i);
2933                         goto error;
2934                 }
2935                 if (ib_agent_port_open(device, i)) {
2936                         printk(KERN_ERR PFX "Couldn't open %s port %d "
2937                                "for agents\n",
2938                                device->name, i);
2939                         goto error_agent;
2940                 }
2941         }
2942         return;
2943
2944 error_agent:
2945         if (ib_mad_port_close(device, i))
2946                 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2947                        device->name, i);
2948
2949 error:
2950         i--;
2951
2952         while (i >= start) {
2953                 if (ib_agent_port_close(device, i))
2954                         printk(KERN_ERR PFX "Couldn't close %s port %d "
2955                                "for agents\n",
2956                                device->name, i);
2957                 if (ib_mad_port_close(device, i))
2958                         printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2959                                device->name, i);
2960                 i--;
2961         }
2962 }
2963
2964 static void ib_mad_remove_device(struct ib_device *device)
2965 {
	int i, num_ports, cur_port;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
2969                 num_ports = 1;
2970                 cur_port = 0;
2971         } else {
2972                 num_ports = device->phys_port_cnt;
2973                 cur_port = 1;
2974         }
2975         for (i = 0; i < num_ports; i++, cur_port++) {
2976                 if (ib_agent_port_close(device, cur_port))
2977                         printk(KERN_ERR PFX "Couldn't close %s port %d "
2978                                "for agents\n",
2979                                device->name, cur_port);
2980                 if (ib_mad_port_close(device, cur_port))
2981                         printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2982                                device->name, cur_port);
2983         }
2984 }
2985
2986 static struct ib_client mad_client = {
2987         .name   = "mad",
2988         .add = ib_mad_init_device,
2989         .remove = ib_mad_remove_device
2990 };
2991
2992 static int __init ib_mad_init_module(void)
2993 {
2994         int ret;
2995
2996         spin_lock_init(&ib_mad_port_list_lock);
2997
2998         ib_mad_cache = kmem_cache_create("ib_mad",
2999                                          sizeof(struct ib_mad_private),
3000                                          0,
3001                                          SLAB_HWCACHE_ALIGN,
3002                                          NULL);
3003         if (!ib_mad_cache) {
3004                 printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
3005                 ret = -ENOMEM;
3006                 goto error1;
3007         }
3008
3009         INIT_LIST_HEAD(&ib_mad_port_list);
3010
	ret = ib_register_client(&mad_client);
	if (ret) {
		printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
		goto error2;
	}
3016
3017         return 0;
3018
3019 error2:
3020         kmem_cache_destroy(ib_mad_cache);
3021 error1:
3022         return ret;
3023 }
3024
3025 static void __exit ib_mad_cleanup_module(void)
3026 {
3027         ib_unregister_client(&mad_client);
3028         kmem_cache_destroy(ib_mad_cache);
3029 }
3030
3031 module_init(ib_mad_init_module);
3032 module_exit(ib_mad_cleanup_module);
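
/*
 * Illustrative sketch (excluded from the build): a minimal consumer
 * of the API this module exports -- register a GSI MAD agent for GET
 * methods of the performance management class, then release it.  The
 * handler bodies and the class choice are assumptions of the example.
 */
#if 0
static void example_send_handler(struct ib_mad_agent *agent,
				 struct ib_mad_send_wc *mad_send_wc)
{
	ib_free_send_mad(mad_send_wc->send_buf);
}

static void example_recv_handler(struct ib_mad_agent *agent,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	ib_free_recv_mad(mad_recv_wc);
}

static int example_use_mad_api(struct ib_device *device, u8 port_num)
{
	struct ib_mad_reg_req reg_req;
	struct ib_mad_agent *agent;

	memset(&reg_req, 0, sizeof reg_req);
	reg_req.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
	reg_req.mgmt_class_version = 1;
	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);

	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
				      &reg_req, 0, example_send_handler,
				      example_recv_handler, NULL);
	if (IS_ERR(agent))
		return PTR_ERR(agent);

	/* build sends with ib_create_send_mad() and post them here */

	ib_unregister_mad_agent(agent);
	return 0;
}
#endif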
3033