1 /*
2  * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
3  * Copyright (c) 2005 Intel Corporation.  All rights reserved.
4  * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  *
34  * $Id: mad.c 5596 2006-03-03 01:00:07Z sean.hefty $
35  */
36 #include <linux/dma-mapping.h>
37 #include <rdma/ib_cache.h>
38
39 #include "mad_priv.h"
40 #include "mad_rmpp.h"
41 #include "smi.h"
42 #include "agent.h"
43
44 MODULE_LICENSE("Dual BSD/GPL");
45 MODULE_DESCRIPTION("kernel IB MAD API");
46 MODULE_AUTHOR("Hal Rosenstock");
47 MODULE_AUTHOR("Sean Hefty");
48
49 static kmem_cache_t *ib_mad_cache;
50
51 static struct list_head ib_mad_port_list;
52 static u32 ib_mad_client_id = 0;
53
54 /* Port list lock */
55 static spinlock_t ib_mad_port_list_lock;
56
57
58 /* Forward declarations */
59 static int method_in_use(struct ib_mad_mgmt_method_table **method,
60                          struct ib_mad_reg_req *mad_reg_req);
61 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
62 static struct ib_mad_agent_private *find_mad_agent(
63                                         struct ib_mad_port_private *port_priv,
64                                         struct ib_mad *mad);
65 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
66                                     struct ib_mad_private *mad);
67 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
68 static void timeout_sends(void *data);
69 static void local_completions(void *data);
70 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
71                               struct ib_mad_agent_private *agent_priv,
72                               u8 mgmt_class);
73 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
74                            struct ib_mad_agent_private *agent_priv);
75
76 /*
77  * Returns an ib_mad_port_private structure or NULL for a device/port
78  * Assumes ib_mad_port_list_lock is held
79  */
80 static inline struct ib_mad_port_private *
81 __ib_get_mad_port(struct ib_device *device, int port_num)
82 {
83         struct ib_mad_port_private *entry;
84
85         list_for_each_entry(entry, &ib_mad_port_list, port_list) {
86                 if (entry->device == device && entry->port_num == port_num)
87                         return entry;
88         }
89         return NULL;
90 }
91
92 /*
93  * Wrapper function to return an ib_mad_port_private structure or NULL
94  * for a device/port
95  */
96 static inline struct ib_mad_port_private *
97 ib_get_mad_port(struct ib_device *device, int port_num)
98 {
99         struct ib_mad_port_private *entry;
100         unsigned long flags;
101
102         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
103         entry = __ib_get_mad_port(device, port_num);
104         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
105
106         return entry;
107 }
108
109 static inline u8 convert_mgmt_class(u8 mgmt_class)
110 {
111         /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
112         return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
113                 0 : mgmt_class;
114 }
115
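/*
 * Map a special QP type to its index in the per-port qp_info array:
 * 0 for the SMI QP (QP0), 1 for the GSI QP (QP1), -1 otherwise.
 */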
116 static int get_spl_qp_index(enum ib_qp_type qp_type)
117 {
118         switch (qp_type)
119         {
120         case IB_QPT_SMI:
121                 return 0;
122         case IB_QPT_GSI:
123                 return 1;
124         default:
125                 return -1;
126         }
127 }
128
129 static int vendor_class_index(u8 mgmt_class)
130 {
131         return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
132 }
133
134 static int is_vendor_class(u8 mgmt_class)
135 {
136         if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
137             (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
138                 return 0;
139         return 1;
140 }
141
142 static int is_vendor_oui(char *oui)
143 {
144         if (oui[0] || oui[1] || oui[2])
145                 return 1;
146         return 0;
147 }
148
149 static int is_vendor_method_in_use(
150                 struct ib_mad_mgmt_vendor_class *vendor_class,
151                 struct ib_mad_reg_req *mad_reg_req)
152 {
153         struct ib_mad_mgmt_method_table *method;
154         int i;
155
156         for (i = 0; i < MAX_MGMT_OUI; i++) {
157                 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
158                         method = vendor_class->method_table[i];
159                         if (method) {
160                                 if (method_in_use(&method, mad_reg_req))
161                                         return 1;
162                                 else
163                                         break;
164                         }
165                 }
166         }
167         return 0;
168 }
169
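/*
 * A MAD is a response if the response bit is set in its method, the
 * method is a TrapRepress, or it is a BM MAD with the response bit
 * set in its attribute modifier.
 */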
170 int ib_response_mad(struct ib_mad *mad)
171 {
172         return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
173                 (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
174                 ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
175                  (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
176 }
177 EXPORT_SYMBOL(ib_response_mad);
178
179 /*
180  * ib_register_mad_agent - Register to send/receive MADs
181  */
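/*
 * Minimal registration sketch (illustrative only; device, port_num and
 * the handler/context names are assumed to be supplied by the caller):
 *
 *	struct ib_mad_reg_req reg_req = {
 *		.mgmt_class	    = IB_MGMT_CLASS_PERF_MGMT,
 *		.mgmt_class_version = 1,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
 *				      &reg_req, 0, send_handler,
 *				      recv_handler, context);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */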
182 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
183                                            u8 port_num,
184                                            enum ib_qp_type qp_type,
185                                            struct ib_mad_reg_req *mad_reg_req,
186                                            u8 rmpp_version,
187                                            ib_mad_send_handler send_handler,
188                                            ib_mad_recv_handler recv_handler,
189                                            void *context)
190 {
191         struct ib_mad_port_private *port_priv;
192         struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
193         struct ib_mad_agent_private *mad_agent_priv;
194         struct ib_mad_reg_req *reg_req = NULL;
195         struct ib_mad_mgmt_class_table *class;
196         struct ib_mad_mgmt_vendor_class_table *vendor;
197         struct ib_mad_mgmt_vendor_class *vendor_class;
198         struct ib_mad_mgmt_method_table *method;
199         int ret2, qpn;
200         unsigned long flags;
201         u8 mgmt_class, vclass;
202
203         /* Validate parameters */
204         qpn = get_spl_qp_index(qp_type);
205         if (qpn == -1)
206                 goto error1;
207
208         if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
209                 goto error1;
210
211         /* Validate MAD registration request if supplied */
212         if (mad_reg_req) {
213                 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
214                         goto error1;
215                 if (!recv_handler)
216                         goto error1;
217                 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
218                         /*
219                          * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
220                          * one in this range currently allowed
221                          */
222                         if (mad_reg_req->mgmt_class !=
223                             IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
224                                 goto error1;
225                 } else if (mad_reg_req->mgmt_class == 0) {
226                         /*
227                          * Class 0 is reserved in IBA and is used for
228                          * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
229                          */
230                         goto error1;
231                 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
232                         /*
233                          * If class is in "new" vendor range,
234                          * ensure supplied OUI is not zero
235                          */
236                         if (!is_vendor_oui(mad_reg_req->oui))
237                                 goto error1;
238                 }
239                 /* Make sure class supplied is consistent with RMPP */
240                 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
241                         if (rmpp_version)
242                                 goto error1;
243                 }
244                 /* Make sure class supplied is consistent with QP type */
245                 if (qp_type == IB_QPT_SMI) {
246                         if ((mad_reg_req->mgmt_class !=
247                                         IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
248                             (mad_reg_req->mgmt_class !=
249                                         IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
250                                 goto error1;
251                 } else {
252                         if ((mad_reg_req->mgmt_class ==
253                                         IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
254                             (mad_reg_req->mgmt_class ==
255                                         IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
256                                 goto error1;
257                 }
258         } else {
259                 /* No registration request supplied */
260                 if (!send_handler)
261                         goto error1;
262         }
263
264         /* Validate device and port */
265         port_priv = ib_get_mad_port(device, port_num);
266         if (!port_priv) {
267                 ret = ERR_PTR(-ENODEV);
268                 goto error1;
269         }
270
271         /* Allocate structures */
272         mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
273         if (!mad_agent_priv) {
274                 ret = ERR_PTR(-ENOMEM);
275                 goto error1;
276         }
277
278         mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
279                                                  IB_ACCESS_LOCAL_WRITE);
280         if (IS_ERR(mad_agent_priv->agent.mr)) {
281                 ret = ERR_PTR(-ENOMEM);
282                 goto error2;
283         }
284
285         if (mad_reg_req) {
286                 reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
287                 if (!reg_req) {
288                         ret = ERR_PTR(-ENOMEM);
289                         goto error3;
290                 }
291                 /* Make a copy of the MAD registration request */
292                 memcpy(reg_req, mad_reg_req, sizeof *reg_req);
293         }
294
295         /* Now, fill in the various structures */
296         mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
297         mad_agent_priv->reg_req = reg_req;
298         mad_agent_priv->agent.rmpp_version = rmpp_version;
299         mad_agent_priv->agent.device = device;
300         mad_agent_priv->agent.recv_handler = recv_handler;
301         mad_agent_priv->agent.send_handler = send_handler;
302         mad_agent_priv->agent.context = context;
303         mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
304         mad_agent_priv->agent.port_num = port_num;
305
306         spin_lock_irqsave(&port_priv->reg_lock, flags);
307         mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
308
309         /*
310          * Make sure MAD registration (if supplied)
311          * does not overlap with any existing registrations
312          */
313         if (mad_reg_req) {
314                 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
315                 if (!is_vendor_class(mgmt_class)) {
316                         class = port_priv->version[mad_reg_req->
317                                                    mgmt_class_version].class;
318                         if (class) {
319                                 method = class->method_table[mgmt_class];
320                                 if (method) {
321                                         if (method_in_use(&method,
322                                                            mad_reg_req))
323                                                 goto error4;
324                                 }
325                         }
326                         ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
327                                                   mgmt_class);
328                 } else {
329                         /* "New" vendor class range */
330                         vendor = port_priv->version[mad_reg_req->
331                                                     mgmt_class_version].vendor;
332                         if (vendor) {
333                                 vclass = vendor_class_index(mgmt_class);
334                                 vendor_class = vendor->vendor_class[vclass];
335                                 if (vendor_class) {
336                                         if (is_vendor_method_in_use(
337                                                         vendor_class,
338                                                         mad_reg_req))
339                                                 goto error4;
340                                 }
341                         }
342                         ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
343                 }
344                 if (ret2) {
345                         ret = ERR_PTR(ret2);
346                         goto error4;
347                 }
348         }
349
350         /* Add mad agent into port's agent list */
351         list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
352         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
353
354         spin_lock_init(&mad_agent_priv->lock);
355         INIT_LIST_HEAD(&mad_agent_priv->send_list);
356         INIT_LIST_HEAD(&mad_agent_priv->wait_list);
357         INIT_LIST_HEAD(&mad_agent_priv->done_list);
358         INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
359         INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
360         INIT_LIST_HEAD(&mad_agent_priv->local_list);
361         INIT_WORK(&mad_agent_priv->local_work, local_completions,
362                    mad_agent_priv);
363         atomic_set(&mad_agent_priv->refcount, 1);
364         init_completion(&mad_agent_priv->comp);
365
366         return &mad_agent_priv->agent;
367
368 error4:
369         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
370         kfree(reg_req);
371 error3:
372         ib_dereg_mr(mad_agent_priv->agent.mr);
373 error2:
374         kfree(mad_agent_priv);
375 error1:
376         return ret;
377 }
378 EXPORT_SYMBOL(ib_register_mad_agent);
379
380 static inline int is_snooping_sends(int mad_snoop_flags)
381 {
382         return (mad_snoop_flags &
383                 (/*IB_MAD_SNOOP_POSTED_SENDS |
384                  IB_MAD_SNOOP_RMPP_SENDS |*/
385                  IB_MAD_SNOOP_SEND_COMPLETIONS /*|
386                  IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
387 }
388
389 static inline int is_snooping_recvs(int mad_snoop_flags)
390 {
391         return (mad_snoop_flags &
392                 (IB_MAD_SNOOP_RECVS /*|
393                  IB_MAD_SNOOP_RMPP_RECVS*/));
394 }
395
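/*
 * Add a snoop agent to the QP's snoop table, growing the table by one
 * slot if no free entry exists.  Returns the table index on success or
 * -ENOMEM if the table could not be grown.
 */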
396 static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
397                                 struct ib_mad_snoop_private *mad_snoop_priv)
398 {
399         struct ib_mad_snoop_private **new_snoop_table;
400         unsigned long flags;
401         int i;
402
403         spin_lock_irqsave(&qp_info->snoop_lock, flags);
404         /* Check for empty slot in array. */
405         for (i = 0; i < qp_info->snoop_table_size; i++)
406                 if (!qp_info->snoop_table[i])
407                         break;
408
409         if (i == qp_info->snoop_table_size) {
410                 /* Grow table. */
411                 new_snoop_table = kmalloc(sizeof mad_snoop_priv *
412                                           (qp_info->snoop_table_size + 1),
413                                           GFP_ATOMIC);
414                 if (!new_snoop_table) {
415                         i = -ENOMEM;
416                         goto out;
417                 }
418                 if (qp_info->snoop_table) {
419                         memcpy(new_snoop_table, qp_info->snoop_table,
420                                sizeof mad_snoop_priv *
421                                qp_info->snoop_table_size);
422                         kfree(qp_info->snoop_table);
423                 }
424                 qp_info->snoop_table = new_snoop_table;
425                 qp_info->snoop_table_size++;
426         }
427         qp_info->snoop_table[i] = mad_snoop_priv;
428         atomic_inc(&qp_info->snoop_count);
429 out:
430         spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
431         return i;
432 }
433
434 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
435                                            u8 port_num,
436                                            enum ib_qp_type qp_type,
437                                            int mad_snoop_flags,
438                                            ib_mad_snoop_handler snoop_handler,
439                                            ib_mad_recv_handler recv_handler,
440                                            void *context)
441 {
442         struct ib_mad_port_private *port_priv;
443         struct ib_mad_agent *ret;
444         struct ib_mad_snoop_private *mad_snoop_priv;
445         int qpn;
446
447         /* Validate parameters */
448         if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
449             (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
450                 ret = ERR_PTR(-EINVAL);
451                 goto error1;
452         }
453         qpn = get_spl_qp_index(qp_type);
454         if (qpn == -1) {
455                 ret = ERR_PTR(-EINVAL);
456                 goto error1;
457         }
458         port_priv = ib_get_mad_port(device, port_num);
459         if (!port_priv) {
460                 ret = ERR_PTR(-ENODEV);
461                 goto error1;
462         }
463         /* Allocate structures */
464         mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
465         if (!mad_snoop_priv) {
466                 ret = ERR_PTR(-ENOMEM);
467                 goto error1;
468         }
469
470         /* Now, fill in the various structures */
471         mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
472         mad_snoop_priv->agent.device = device;
473         mad_snoop_priv->agent.recv_handler = recv_handler;
474         mad_snoop_priv->agent.snoop_handler = snoop_handler;
475         mad_snoop_priv->agent.context = context;
476         mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
477         mad_snoop_priv->agent.port_num = port_num;
478         mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
479         init_completion(&mad_snoop_priv->comp);
480         mad_snoop_priv->snoop_index = register_snoop_agent(
481                                                 &port_priv->qp_info[qpn],
482                                                 mad_snoop_priv);
483         if (mad_snoop_priv->snoop_index < 0) {
484                 ret = ERR_PTR(mad_snoop_priv->snoop_index);
485                 goto error2;
486         }
487
488         atomic_set(&mad_snoop_priv->refcount, 1);
489         return &mad_snoop_priv->agent;
490
491 error2:
492         kfree(mad_snoop_priv);
493 error1:
494         return ret;
495 }
496 EXPORT_SYMBOL(ib_register_mad_snoop);
497
498 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
499 {
500         if (atomic_dec_and_test(&mad_agent_priv->refcount))
501                 complete(&mad_agent_priv->comp);
502 }
503
504 static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
505 {
506         if (atomic_dec_and_test(&mad_snoop_priv->refcount))
507                 complete(&mad_snoop_priv->comp);
508 }
509
510 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
511 {
512         struct ib_mad_port_private *port_priv;
513         unsigned long flags;
514
515         /* Note that we could still be handling received MADs */
516
517         /*
518          * Canceling all sends results in dropping received response
519          * MADs, preventing us from queuing additional work
520          */
521         cancel_mads(mad_agent_priv);
522         port_priv = mad_agent_priv->qp_info->port_priv;
523         cancel_delayed_work(&mad_agent_priv->timed_work);
524
525         spin_lock_irqsave(&port_priv->reg_lock, flags);
526         remove_mad_reg_req(mad_agent_priv);
527         list_del(&mad_agent_priv->agent_list);
528         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
529
530         flush_workqueue(port_priv->wq);
531         ib_cancel_rmpp_recvs(mad_agent_priv);
532
533         deref_mad_agent(mad_agent_priv);
534         wait_for_completion(&mad_agent_priv->comp);
535
536         kfree(mad_agent_priv->reg_req);
537         ib_dereg_mr(mad_agent_priv->agent.mr);
538         kfree(mad_agent_priv);
539 }
540
541 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
542 {
543         struct ib_mad_qp_info *qp_info;
544         unsigned long flags;
545
546         qp_info = mad_snoop_priv->qp_info;
547         spin_lock_irqsave(&qp_info->snoop_lock, flags);
548         qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
549         atomic_dec(&qp_info->snoop_count);
550         spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
551
552         deref_snoop_agent(mad_snoop_priv);
553         wait_for_completion(&mad_snoop_priv->comp);
554
555         kfree(mad_snoop_priv);
556 }
557
558 /*
559  * ib_unregister_mad_agent - Unregisters a client from using MAD services
560  */
561 int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
562 {
563         struct ib_mad_agent_private *mad_agent_priv;
564         struct ib_mad_snoop_private *mad_snoop_priv;
565
566         /* If the TID is zero, the agent can only snoop. */
567         if (mad_agent->hi_tid) {
568                 mad_agent_priv = container_of(mad_agent,
569                                               struct ib_mad_agent_private,
570                                               agent);
571                 unregister_mad_agent(mad_agent_priv);
572         } else {
573                 mad_snoop_priv = container_of(mad_agent,
574                                               struct ib_mad_snoop_private,
575                                               agent);
576                 unregister_mad_snoop(mad_snoop_priv);
577         }
578         return 0;
579 }
580 EXPORT_SYMBOL(ib_unregister_mad_agent);
581
582 static void dequeue_mad(struct ib_mad_list_head *mad_list)
583 {
584         struct ib_mad_queue *mad_queue;
585         unsigned long flags;
586
587         BUG_ON(!mad_list->mad_queue);
588         mad_queue = mad_list->mad_queue;
589         spin_lock_irqsave(&mad_queue->lock, flags);
590         list_del(&mad_list->list);
591         mad_queue->count--;
592         spin_unlock_irqrestore(&mad_queue->lock, flags);
593 }
594
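/*
 * Pass a send completion to every snoop agent registered for the given
 * flags.  The snoop lock is dropped around each callback, with a
 * reference held on the agent so it cannot be freed underneath us.
 */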
595 static void snoop_send(struct ib_mad_qp_info *qp_info,
596                        struct ib_mad_send_buf *send_buf,
597                        struct ib_mad_send_wc *mad_send_wc,
598                        int mad_snoop_flags)
599 {
600         struct ib_mad_snoop_private *mad_snoop_priv;
601         unsigned long flags;
602         int i;
603
604         spin_lock_irqsave(&qp_info->snoop_lock, flags);
605         for (i = 0; i < qp_info->snoop_table_size; i++) {
606                 mad_snoop_priv = qp_info->snoop_table[i];
607                 if (!mad_snoop_priv ||
608                     !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
609                         continue;
610
611                 atomic_inc(&mad_snoop_priv->refcount);
612                 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
613                 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
614                                                     send_buf, mad_send_wc);
615                 deref_snoop_agent(mad_snoop_priv);
616                 spin_lock_irqsave(&qp_info->snoop_lock, flags);
617         }
618         spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
619 }
620
621 static void snoop_recv(struct ib_mad_qp_info *qp_info,
622                        struct ib_mad_recv_wc *mad_recv_wc,
623                        int mad_snoop_flags)
624 {
625         struct ib_mad_snoop_private *mad_snoop_priv;
626         unsigned long flags;
627         int i;
628
629         spin_lock_irqsave(&qp_info->snoop_lock, flags);
630         for (i = 0; i < qp_info->snoop_table_size; i++) {
631                 mad_snoop_priv = qp_info->snoop_table[i];
632                 if (!mad_snoop_priv ||
633                     !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
634                         continue;
635
636                 atomic_inc(&mad_snoop_priv->refcount);
637                 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
638                 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
639                                                    mad_recv_wc);
640                 deref_snoop_agent(mad_snoop_priv);
641                 spin_lock_irqsave(&qp_info->snoop_lock, flags);
642         }
643         spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
644 }
645
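/*
 * Fabricate a successful receive work completion so a directed route
 * SMP that is processed locally can be fed through the normal receive
 * path.
 */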
646 static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
647                          struct ib_wc *wc)
648 {
649         memset(wc, 0, sizeof *wc);
650         wc->wr_id = wr_id;
651         wc->status = IB_WC_SUCCESS;
652         wc->opcode = IB_WC_RECV;
653         wc->pkey_index = pkey_index;
654         wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
655         wc->src_qp = IB_QP0;
656         wc->qp_num = IB_QP0;
657         wc->slid = slid;
658         wc->sl = 0;
659         wc->dlid_path_bits = 0;
660         wc->port_num = port_num;
661 }
662
663 /*
664  * Return 0 if SMP is to be sent
665  * Return 1 if SMP was consumed locally (whether or not solicited)
666  * Return < 0 if error
667  */
668 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
669                                   struct ib_mad_send_wr_private *mad_send_wr)
670 {
671         int ret;
672         struct ib_smp *smp = mad_send_wr->send_buf.mad;
673         unsigned long flags;
674         struct ib_mad_local_private *local;
675         struct ib_mad_private *mad_priv;
676         struct ib_mad_port_private *port_priv;
677         struct ib_mad_agent_private *recv_mad_agent = NULL;
678         struct ib_device *device = mad_agent_priv->agent.device;
679         u8 port_num = mad_agent_priv->agent.port_num;
680         struct ib_wc mad_wc;
681         struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
682
683         /*
684          * Directed route handling starts if the initial LID routed part of
685          * a request or the ending LID routed part of a response is empty.
686          * If we are at the start of the LID routed part, don't update the
687          * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
688          */
689         if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
690              IB_LID_PERMISSIVE &&
691             !smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
692                 ret = -EINVAL;
693                 printk(KERN_ERR PFX "Invalid directed route\n");
694                 goto out;
695         }
696         /* Check to post send on QP or process locally */
697         ret = smi_check_local_smp(smp, device);
698         if (!ret)
699                 goto out;
700
701         local = kmalloc(sizeof *local, GFP_ATOMIC);
702         if (!local) {
703                 ret = -ENOMEM;
704                 printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
705                 goto out;
706         }
707         local->mad_priv = NULL;
708         local->recv_mad_agent = NULL;
709         mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
710         if (!mad_priv) {
711                 ret = -ENOMEM;
712                 printk(KERN_ERR PFX "No memory for local response MAD\n");
713                 kfree(local);
714                 goto out;
715         }
716
717         build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid),
718                      send_wr->wr.ud.pkey_index,
719                      send_wr->wr.ud.port_num, &mad_wc);
720
721         /* No GRH for DR SMP */
722         ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
723                                   (struct ib_mad *)smp,
724                                   (struct ib_mad *)&mad_priv->mad);
725         switch (ret)
726         {
727         case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
728                 if (ib_response_mad(&mad_priv->mad.mad) &&
729                     mad_agent_priv->agent.recv_handler) {
730                         local->mad_priv = mad_priv;
731                         local->recv_mad_agent = mad_agent_priv;
732                         /*
733                          * Reference MAD agent until receive
734                          * side of local completion handled
735                          */
736                         atomic_inc(&mad_agent_priv->refcount);
737                 } else
738                         kmem_cache_free(ib_mad_cache, mad_priv);
739                 break;
740         case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
741                 kmem_cache_free(ib_mad_cache, mad_priv);
742                 break;
743         case IB_MAD_RESULT_SUCCESS:
744                 /* Treat like an incoming receive MAD */
745                 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
746                                             mad_agent_priv->agent.port_num);
747                 if (port_priv) {
748                         mad_priv->mad.mad.mad_hdr.tid =
749                                 ((struct ib_mad *)smp)->mad_hdr.tid;
750                         recv_mad_agent = find_mad_agent(port_priv,
751                                                         &mad_priv->mad.mad);
752                 }
753                 if (!port_priv || !recv_mad_agent) {
754                         kmem_cache_free(ib_mad_cache, mad_priv);
755                         kfree(local);
756                         ret = 0;
757                         goto out;
758                 }
759                 local->mad_priv = mad_priv;
760                 local->recv_mad_agent = recv_mad_agent;
761                 break;
762         default:
763                 kmem_cache_free(ib_mad_cache, mad_priv);
764                 kfree(local);
765                 ret = -EINVAL;
766                 goto out;
767         }
768
769         local->mad_send_wr = mad_send_wr;
770         /* Reference MAD agent until send side of local completion handled */
771         atomic_inc(&mad_agent_priv->refcount);
772         /* Queue local completion to local list */
773         spin_lock_irqsave(&mad_agent_priv->lock, flags);
774         list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
775         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
776         queue_work(mad_agent_priv->qp_info->port_priv->wq,
777                    &mad_agent_priv->local_work);
778         ret = 1;
779 out:
780         return ret;
781 }
782
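/*
 * Number of pad bytes needed so the MAD data fills a whole number of
 * payload segments, where a segment is sizeof(struct ib_mad) minus the
 * class header.
 */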
783 static int get_pad_size(int hdr_len, int data_len)
784 {
785         int seg_size, pad;
786
787         seg_size = sizeof(struct ib_mad) - hdr_len;
788         if (data_len && seg_size) {
789                 pad = seg_size - data_len % seg_size;
790                 return pad == seg_size ? 0 : pad;
791         } else
792                 return seg_size;
793 }
794
795 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
796 {
797         struct ib_rmpp_segment *s, *t;
798
799         list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
800                 list_del(&s->list);
801                 kfree(s);
802         }
803 }
804
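/*
 * Allocate one ib_rmpp_segment per payload segment (including padding),
 * zero the trailing pad, and initialize the RMPP header for an active
 * DATA MAD.  The current and last-ACKed segment pointers start at the
 * first segment.
 */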
805 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
806                                 gfp_t gfp_mask)
807 {
808         struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
809         struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
810         struct ib_rmpp_segment *seg = NULL;
811         int left, seg_size, pad;
812
813         send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
814         seg_size = send_buf->seg_size;
815         pad = send_wr->pad;
816
817         /* Allocate data segments. */
818         for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
819                 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
820                 if (!seg) {
821                         printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
822                                "alloc failed for len %zd, gfp %#x\n",
823                                sizeof (*seg) + seg_size, gfp_mask);
824                         free_send_rmpp_list(send_wr);
825                         return -ENOMEM;
826                 }
827                 seg->num = ++send_buf->seg_count;
828                 list_add_tail(&seg->list, &send_wr->rmpp_list);
829         }
830
831         /* Zero any padding */
832         if (pad)
833                 memset(seg->data + seg_size - pad, 0, pad);
834
835         rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
836                                           agent.rmpp_version;
837         rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
838         ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
839
840         send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
841                                         struct ib_rmpp_segment, list);
842         send_wr->last_ack_seg = send_wr->cur_seg;
843         return 0;
844 }
845
846 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
847                                             u32 remote_qpn, u16 pkey_index,
848                                             int rmpp_active,
849                                             int hdr_len, int data_len,
850                                             gfp_t gfp_mask)
851 {
852         struct ib_mad_agent_private *mad_agent_priv;
853         struct ib_mad_send_wr_private *mad_send_wr;
854         int pad, message_size, ret, size;
855         void *buf;
856
857         mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
858                                       agent);
859         pad = get_pad_size(hdr_len, data_len);
860         message_size = hdr_len + data_len + pad;
861
862         if ((!mad_agent->rmpp_version &&
863              (rmpp_active || message_size > sizeof(struct ib_mad))) ||
864             (!rmpp_active && message_size > sizeof(struct ib_mad)))
865                 return ERR_PTR(-EINVAL);
866
867         size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
868         buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
869         if (!buf)
870                 return ERR_PTR(-ENOMEM);
871
872         mad_send_wr = buf + size;
873         INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
874         mad_send_wr->send_buf.mad = buf;
875         mad_send_wr->send_buf.hdr_len = hdr_len;
876         mad_send_wr->send_buf.data_len = data_len;
877         mad_send_wr->pad = pad;
878
879         mad_send_wr->mad_agent_priv = mad_agent_priv;
880         mad_send_wr->sg_list[0].length = hdr_len;
881         mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
882         mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
883         mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
884
885         mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
886         mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
887         mad_send_wr->send_wr.num_sge = 2;
888         mad_send_wr->send_wr.opcode = IB_WR_SEND;
889         mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
890         mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
891         mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
892         mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
893
894         if (rmpp_active) {
895                 ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
896                 if (ret) {
897                         kfree(buf);
898                         return ERR_PTR(ret);
899                 }
900         }
901
902         mad_send_wr->send_buf.mad_agent = mad_agent;
903         atomic_inc(&mad_agent_priv->refcount);
904         return &mad_send_wr->send_buf;
905 }
906 EXPORT_SYMBOL(ib_create_send_mad);
907
908 int ib_get_mad_data_offset(u8 mgmt_class)
909 {
910         if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
911                 return IB_MGMT_SA_HDR;
912         else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
913                  (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
914                  (mgmt_class == IB_MGMT_CLASS_BIS))
915                 return IB_MGMT_DEVICE_HDR;
916         else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
917                  (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
918                 return IB_MGMT_VENDOR_HDR;
919         else
920                 return IB_MGMT_MAD_HDR;
921 }
922 EXPORT_SYMBOL(ib_get_mad_data_offset);
923
924 int ib_is_mad_class_rmpp(u8 mgmt_class)
925 {
926         if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
927             (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
928             (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
929             (mgmt_class == IB_MGMT_CLASS_BIS) ||
930             ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
931              (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
932                 return 1;
933         return 0;
934 }
935 EXPORT_SYMBOL(ib_is_mad_class_rmpp);
936
937 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
938 {
939         struct ib_mad_send_wr_private *mad_send_wr;
940         struct list_head *list;
941
942         mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
943                                    send_buf);
944         list = &mad_send_wr->cur_seg->list;
945
946         if (mad_send_wr->cur_seg->num < seg_num) {
947                 list_for_each_entry(mad_send_wr->cur_seg, list, list)
948                         if (mad_send_wr->cur_seg->num == seg_num)
949                                 break;
950         } else if (mad_send_wr->cur_seg->num > seg_num) {
951                 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
952                         if (mad_send_wr->cur_seg->num == seg_num)
953                                 break;
954         }
955         return mad_send_wr->cur_seg->data;
956 }
957 EXPORT_SYMBOL(ib_get_rmpp_segment);
958
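/*
 * Return the payload buffer for the current send: the active RMPP
 * segment for segmented sends, or the data area following the header
 * for a single-MAD send.
 */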
959 static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
960 {
961         if (mad_send_wr->send_buf.seg_count)
962                 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
963                                            mad_send_wr->seg_num);
964         else
965                 return mad_send_wr->send_buf.mad +
966                        mad_send_wr->send_buf.hdr_len;
967 }
968
969 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
970 {
971         struct ib_mad_agent_private *mad_agent_priv;
972         struct ib_mad_send_wr_private *mad_send_wr;
973
974         mad_agent_priv = container_of(send_buf->mad_agent,
975                                       struct ib_mad_agent_private, agent);
976         mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
977                                    send_buf);
978
979         free_send_rmpp_list(mad_send_wr);
980         kfree(send_buf->mad);
981         deref_mad_agent(mad_agent_priv);
982 }
983 EXPORT_SYMBOL(ib_free_send_mad);
984
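/*
 * DMA map the header and payload and post the work request, or park it
 * on the overflow list if the send queue is already at its limit.  On a
 * failed post the DMA mappings are undone.
 */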
985 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
986 {
987         struct ib_mad_qp_info *qp_info;
988         struct list_head *list;
989         struct ib_send_wr *bad_send_wr;
990         struct ib_mad_agent *mad_agent;
991         struct ib_sge *sge;
992         unsigned long flags;
993         int ret;
994
995         /* Set WR ID to find mad_send_wr upon completion */
996         qp_info = mad_send_wr->mad_agent_priv->qp_info;
997         mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
998         mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
999
1000         mad_agent = mad_send_wr->send_buf.mad_agent;
1001         sge = mad_send_wr->sg_list;
1002         sge[0].addr = dma_map_single(mad_agent->device->dma_device,
1003                                      mad_send_wr->send_buf.mad,
1004                                      sge[0].length,
1005                                      DMA_TO_DEVICE);
1006         pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr);
1007
1008         sge[1].addr = dma_map_single(mad_agent->device->dma_device,
1009                                      ib_get_payload(mad_send_wr),
1010                                      sge[1].length,
1011                                      DMA_TO_DEVICE);
1012         pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr);
1013
1014         spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1015         if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1016                 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
1017                                    &bad_send_wr);
1018                 list = &qp_info->send_queue.list;
1019         } else {
1020                 ret = 0;
1021                 list = &qp_info->overflow_list;
1022         }
1023
1024         if (!ret) {
1025                 qp_info->send_queue.count++;
1026                 list_add_tail(&mad_send_wr->mad_list.list, list);
1027         }
1028         spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1029         if (ret) {
1030                 dma_unmap_single(mad_agent->device->dma_device,
1031                                  pci_unmap_addr(mad_send_wr, header_mapping),
1032                                  sge[0].length, DMA_TO_DEVICE);
1033                 dma_unmap_single(mad_agent->device->dma_device,
1034                                  pci_unmap_addr(mad_send_wr, payload_mapping),
1035                                  sge[1].length, DMA_TO_DEVICE);
1036         }
1037         return ret;
1038 }
1039
1040 /*
1041  * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1042  *  with the registered client
1043  */
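/*
 * Minimal send sketch (illustrative only; agent, ah, remote_qpn and
 * pkey_index are assumed to be set up by the caller, and error handling
 * is omitted):
 *
 *	send_buf = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 *				      GFP_KERNEL);
 *	... build the request in send_buf->mad ...
 *	send_buf->ah = ah;
 *	send_buf->timeout_ms = 1000;
 *	send_buf->retries = 1;
 *	ret = ib_post_send_mad(send_buf, NULL);
 */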
1044 int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1045                      struct ib_mad_send_buf **bad_send_buf)
1046 {
1047         struct ib_mad_agent_private *mad_agent_priv;
1048         struct ib_mad_send_buf *next_send_buf;
1049         struct ib_mad_send_wr_private *mad_send_wr;
1050         unsigned long flags;
1051         int ret = -EINVAL;
1052
1053         /* Walk list of send WRs and post each on send list */
1054         for (; send_buf; send_buf = next_send_buf) {
1055
1056                 mad_send_wr = container_of(send_buf,
1057                                            struct ib_mad_send_wr_private,
1058                                            send_buf);
1059                 mad_agent_priv = mad_send_wr->mad_agent_priv;
1060
1061                 if (!send_buf->mad_agent->send_handler ||
1062                     (send_buf->timeout_ms &&
1063                      !send_buf->mad_agent->recv_handler)) {
1064                         ret = -EINVAL;
1065                         goto error;
1066                 }
1067
1068                 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1069                         if (mad_agent_priv->agent.rmpp_version) {
1070                                 ret = -EINVAL;
1071                                 goto error;
1072                         }
1073                 }
1074
1075                 /*
1076                  * Save pointer to next work request to post in case the
1077                  * current one completes, and the user modifies the work
1078                  * request associated with the completion
1079                  */
1080                 next_send_buf = send_buf->next;
1081                 mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
1082
1083                 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1084                     IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1085                         ret = handle_outgoing_dr_smp(mad_agent_priv,
1086                                                      mad_send_wr);
1087                         if (ret < 0)            /* error */
1088                                 goto error;
1089                         else if (ret == 1)      /* locally consumed */
1090                                 continue;
1091                 }
1092
1093                 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1094                 /* Timeout will be updated after send completes */
1095                 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1096                 mad_send_wr->retries = send_buf->retries;
1097                 /* Reference for work request to QP + response */
1098                 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1099                 mad_send_wr->status = IB_WC_SUCCESS;
1100
1101                 /* Reference MAD agent until send completes */
1102                 atomic_inc(&mad_agent_priv->refcount);
1103                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1104                 list_add_tail(&mad_send_wr->agent_list,
1105                               &mad_agent_priv->send_list);
1106                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1107
1108                 if (mad_agent_priv->agent.rmpp_version) {
1109                         ret = ib_send_rmpp_mad(mad_send_wr);
1110                         if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1111                                 ret = ib_send_mad(mad_send_wr);
1112                 } else
1113                         ret = ib_send_mad(mad_send_wr);
1114                 if (ret < 0) {
1115                         /* Fail send request */
1116                         spin_lock_irqsave(&mad_agent_priv->lock, flags);
1117                         list_del(&mad_send_wr->agent_list);
1118                         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1119                         atomic_dec(&mad_agent_priv->refcount);
1120                         goto error;
1121                 }
1122         }
1123         return 0;
1124 error:
1125         if (bad_send_buf)
1126                 *bad_send_buf = send_buf;
1127         return ret;
1128 }
1129 EXPORT_SYMBOL(ib_post_send_mad);
1130
1131 /*
1132  * ib_free_recv_mad - Returns data buffers used to receive
1133  *  a MAD to the access layer
1134  */
1135 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1136 {
1137         struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1138         struct ib_mad_private_header *mad_priv_hdr;
1139         struct ib_mad_private *priv;
1140         struct list_head free_list;
1141
1142         INIT_LIST_HEAD(&free_list);
1143         list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1144
1145         list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1146                                         &free_list, list) {
1147                 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1148                                            recv_buf);
1149                 mad_priv_hdr = container_of(mad_recv_wc,
1150                                             struct ib_mad_private_header,
1151                                             recv_wc);
1152                 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1153                                     header);
1154                 kmem_cache_free(ib_mad_cache, priv);
1155         }
1156 }
1157 EXPORT_SYMBOL(ib_free_recv_mad);
1158
1159 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1160                                         u8 rmpp_version,
1161                                         ib_mad_send_handler send_handler,
1162                                         ib_mad_recv_handler recv_handler,
1163                                         void *context)
1164 {
1165         return ERR_PTR(-EINVAL);        /* XXX: for now */
1166 }
1167 EXPORT_SYMBOL(ib_redirect_mad_qp);
1168
1169 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1170                       struct ib_wc *wc)
1171 {
1172         printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
1173         return 0;
1174 }
1175 EXPORT_SYMBOL(ib_process_mad_wc);
1176
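/*
 * Return -EINVAL if any method set in mad_reg_req's method_mask already
 * has an agent registered in the given method table; 0 otherwise.
 */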
1177 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1178                          struct ib_mad_reg_req *mad_reg_req)
1179 {
1180         int i;
1181
1182         for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
1183              i < IB_MGMT_MAX_METHODS;
1184              i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1185                                1+i)) {
1186                 if ((*method)->agent[i]) {
1187                         printk(KERN_ERR PFX "Method %d already in use\n", i);
1188                         return -EINVAL;
1189                 }
1190         }
1191         return 0;
1192 }
1193
1194 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1195 {
1196         /* Allocate management method table */
1197         *method = kzalloc(sizeof **method, GFP_ATOMIC);
1198         if (!*method) {
1199                 printk(KERN_ERR PFX "No memory for "
1200                        "ib_mad_mgmt_method_table\n");
1201                 return -ENOMEM;
1202         }
1203
1204         return 0;
1205 }
1206
1207 /*
1208  * Check to see if there are any methods still in use
1209  */
1210 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1211 {
1212         int i;
1213
1214         for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1215                 if (method->agent[i])
1216                         return 1;
1217         return 0;
1218 }
1219
1220 /*
1221  * Check to see if there are any method tables for this class still in use
1222  */
1223 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1224 {
1225         int i;
1226
1227         for (i = 0; i < MAX_MGMT_CLASS; i++)
1228                 if (class->method_table[i])
1229                         return 1;
1230         return 0;
1231 }
1232
1233 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1234 {
1235         int i;
1236
1237         for (i = 0; i < MAX_MGMT_OUI; i++)
1238                 if (vendor_class->method_table[i])
1239                         return 1;
1240         return 0;
1241 }
1242
1243 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1244                            char *oui)
1245 {
1246         int i;
1247
1248         for (i = 0; i < MAX_MGMT_OUI; i++)
1249                 /* Is there a matching OUI for this vendor class? */
1250                 if (!memcmp(vendor_class->oui[i], oui, 3))
1251                         return i;
1252
1253         return -1;
1254 }
1255
1256 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1257 {
1258         int i;
1259
1260         for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1261                 if (vendor->vendor_class[i])
1262                         return 1;
1263
1264         return 0;
1265 }
1266
1267 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1268                                      struct ib_mad_agent_private *agent)
1269 {
1270         int i;
1271
1272         /* Remove any methods for this mad agent */
1273         for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1274                 if (method->agent[i] == agent) {
1275                         method->agent[i] = NULL;
1276                 }
1277         }
1278 }
1279
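/*
 * Record the requested methods in the per-version class and method
 * tables for this agent, allocating those tables on demand.
 */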
1280 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1281                               struct ib_mad_agent_private *agent_priv,
1282                               u8 mgmt_class)
1283 {
1284         struct ib_mad_port_private *port_priv;
1285         struct ib_mad_mgmt_class_table **class;
1286         struct ib_mad_mgmt_method_table **method;
1287         int i, ret;
1288
1289         port_priv = agent_priv->qp_info->port_priv;
1290         class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1291         if (!*class) {
1292                 /* Allocate management class table for "new" class version */
1293                 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1294                 if (!*class) {
1295                         printk(KERN_ERR PFX "No memory for "
1296                                "ib_mad_mgmt_class_table\n");
1297                         ret = -ENOMEM;
1298                         goto error1;
1299                 }
1300
1301                 /* Allocate method table for this management class */
1302                 method = &(*class)->method_table[mgmt_class];
1303                 if ((ret = allocate_method_table(method)))
1304                         goto error2;
1305         } else {
1306                 method = &(*class)->method_table[mgmt_class];
1307                 if (!*method) {
1308                         /* Allocate method table for this management class */
1309                         if ((ret = allocate_method_table(method)))
1310                                 goto error1;
1311                 }
1312         }
1313
1314         /* Now, make sure methods are not already in use */
1315         if (method_in_use(method, mad_reg_req))
1316                 goto error3;
1317
1318         /* Finally, add in methods being registered */
1319         for (i = find_first_bit(mad_reg_req->method_mask,
1320                                 IB_MGMT_MAX_METHODS);
1321              i < IB_MGMT_MAX_METHODS;
1322              i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1323                                1+i)) {
1324                 (*method)->agent[i] = agent_priv;
1325         }
1326         return 0;
1327
1328 error3:
1329         /* Remove any methods for this mad agent */
1330         remove_methods_mad_agent(*method, agent_priv);
1331         /* Now, check to see if there are any methods in use */
1332         if (!check_method_table(*method)) {
1333                 /* If not, release management method table */
1334                 kfree(*method);
1335                 *method = NULL;
1336         }
1337         ret = -EINVAL;
1338         goto error1;
1339 error2:
1340         kfree(*class);
1341         *class = NULL;
1342 error1:
1343         return ret;
1344 }
1345
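/*
 * As add_nonoui_reg_req(), but for the OUI-qualified vendor class
 * range: the method table is looked up (or allocated) via the vendor
 * class table and the request's OUI.
 */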
1346 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1347                            struct ib_mad_agent_private *agent_priv)
1348 {
1349         struct ib_mad_port_private *port_priv;
1350         struct ib_mad_mgmt_vendor_class_table **vendor_table;
1351         struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1352         struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1353         struct ib_mad_mgmt_method_table **method;
1354         int i, ret = -ENOMEM;
1355         u8 vclass;
1356
1357         /* "New" vendor (with OUI) class */
1358         vclass = vendor_class_index(mad_reg_req->mgmt_class);
1359         port_priv = agent_priv->qp_info->port_priv;
1360         vendor_table = &port_priv->version[
1361                                 mad_reg_req->mgmt_class_version].vendor;
1362         if (!*vendor_table) {
1363                 /* Allocate mgmt vendor class table for "new" class version */
1364                 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1365                 if (!vendor) {
1366                         printk(KERN_ERR PFX "No memory for "
1367                                "ib_mad_mgmt_vendor_class_table\n");
1368                         goto error1;
1369                 }
1370
1371                 *vendor_table = vendor;
1372         }
1373         if (!(*vendor_table)->vendor_class[vclass]) {
1374                 /* Allocate table for this management vendor class */
1375                 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1376                 if (!vendor_class) {
1377                         printk(KERN_ERR PFX "No memory for "
1378                                "ib_mad_mgmt_vendor_class\n");
1379                         goto error2;
1380                 }
1381
1382                 (*vendor_table)->vendor_class[vclass] = vendor_class;
1383         }
1384         for (i = 0; i < MAX_MGMT_OUI; i++) {
1385                 /* Is there a matching OUI for this vendor class? */
1386                 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1387                             mad_reg_req->oui, 3)) {
1388                         method = &(*vendor_table)->vendor_class[
1389                                                 vclass]->method_table[i];
1390                         BUG_ON(!*method);
1391                         goto check_in_use;
1392                 }
1393         }
1394         for (i = 0; i < MAX_MGMT_OUI; i++) {
1395                 /* Is this OUI slot available? */
1396                 if (!is_vendor_oui((*vendor_table)->vendor_class[
1397                                 vclass]->oui[i])) {
1398                         method = &(*vendor_table)->vendor_class[
1399                                 vclass]->method_table[i];
1400                         BUG_ON(*method);
1401                         /* Allocate method table for this OUI */
1402                         if ((ret = allocate_method_table(method)))
1403                                 goto error3;
1404                         memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1405                                mad_reg_req->oui, 3);
1406                         goto check_in_use;
1407                 }
1408         }
1409         printk(KERN_ERR PFX "All OUI slots in use\n");
1410         goto error3;
1411
1412 check_in_use:
1413         /* Now, make sure methods are not already in use */
1414         if (method_in_use(method, mad_reg_req))
1415                 goto error4;
1416
1417         /* Finally, add in methods being registered */
1418         for (i = find_first_bit(mad_reg_req->method_mask,
1419                                 IB_MGMT_MAX_METHODS);
1420              i < IB_MGMT_MAX_METHODS;
1421              i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1422                                1+i)) {
1423                 (*method)->agent[i] = agent_priv;
1424         }
1425         return 0;
1426
1427 error4:
1428         /* Remove any methods for this mad agent */
1429         remove_methods_mad_agent(*method, agent_priv);
1430         /* Now, check to see if there are any methods in use */
1431         if (!check_method_table(*method)) {
1432                 /* If not, release management method table */
1433                 kfree(*method);
1434                 *method = NULL;
1435         }
1436         ret = -EINVAL;
1437 error3:
1438         if (vendor_class) {
1439                 (*vendor_table)->vendor_class[vclass] = NULL;
1440                 kfree(vendor_class);
1441         }
1442 error2:
1443         if (vendor) {
1444                 *vendor_table = NULL;
1445                 kfree(vendor);
1446         }
1447 error1:
1448         return ret;
1449 }
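/*
 * add_oui_reg_req() covers vendor classes that carry an OUI: each such
 * class gets up to MAX_MGMT_OUI per-OUI method tables.  A slot whose
 * OUI already matches the request is reused; otherwise the first free
 * slot is claimed and the OUI copied into it.  If every slot is taken
 * the registration fails, and the error labels unwind only the tables
 * this call allocated.
 */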
1450
1451 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1452 {
1453         struct ib_mad_port_private *port_priv;
1454         struct ib_mad_mgmt_class_table *class;
1455         struct ib_mad_mgmt_method_table *method;
1456         struct ib_mad_mgmt_vendor_class_table *vendor;
1457         struct ib_mad_mgmt_vendor_class *vendor_class;
1458         int index;
1459         u8 mgmt_class;
1460
1461         /*
1462          * Was a MAD registration request supplied
1463          * with the original registration?
1464          */
1465         if (!agent_priv->reg_req) {
1466                 goto out;
1467         }
1468
1469         port_priv = agent_priv->qp_info->port_priv;
1470         mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1471         class = port_priv->version[
1472                         agent_priv->reg_req->mgmt_class_version].class;
1473         if (!class)
1474                 goto vendor_check;
1475
1476         method = class->method_table[mgmt_class];
1477         if (method) {
1478                 /* Remove any methods for this mad agent */
1479                 remove_methods_mad_agent(method, agent_priv);
1480                 /* Now, check to see if there are any methods still in use */
1481                 if (!check_method_table(method)) {
1482                         /* If not, release management method table */
1483                         kfree(method);
1484                         class->method_table[mgmt_class] = NULL;
1485                         /* Any management classes left? */
1486                         if (!check_class_table(class)) {
1487                                 /* If not, release management class table */
1488                                 kfree(class);
1489                                 port_priv->version[
1490                                         agent_priv->reg_req->
1491                                         mgmt_class_version].class = NULL;
1492                         }
1493                 }
1494         }
1495
1496 vendor_check:
1497         if (!is_vendor_class(mgmt_class))
1498                 goto out;
1499
1500         /* normalize mgmt_class to vendor range 2 */
1501         mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1502         vendor = port_priv->version[
1503                         agent_priv->reg_req->mgmt_class_version].vendor;
1504
1505         if (!vendor)
1506                 goto out;
1507
1508         vendor_class = vendor->vendor_class[mgmt_class];
1509         if (vendor_class) {
1510                 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1511                 if (index < 0)
1512                         goto out;
1513                 method = vendor_class->method_table[index];
1514                 if (method) {
1515                         /* Remove any methods for this mad agent */
1516                         remove_methods_mad_agent(method, agent_priv);
1517                         /*
1518                          * Now, check to see if there are
1519                          * any methods still in use
1520                          */
1521                         if (!check_method_table(method)) {
1522                                 /* If not, release management method table */
1523                                 kfree(method);
1524                                 vendor_class->method_table[index] = NULL;
1525                                 memset(vendor_class->oui[index], 0, 3);
1526                                 /* Any OUIs left? */
1527                                 if (!check_vendor_class(vendor_class)) {
1528                                         /* If not, release vendor class table */
1529                                         kfree(vendor_class);
1530                                         vendor->vendor_class[mgmt_class] = NULL;
1531                                         /* Any other vendor classes left? */
1532                                         if (!check_vendor_table(vendor)) {
1533                                                 kfree(vendor);
1534                                                 port_priv->version[
1535                                                         agent_priv->reg_req->
1536                                                         mgmt_class_version].
1537                                                         vendor = NULL;
1538                                         }
1539                                 }
1540                         }
1541                 }
1542         }
1543
1544 out:
1545         return;
1546 }
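/*
 * remove_mad_reg_req() undoes the add_*_reg_req() paths: it clears this
 * agent's method slots and then frees the method, class, vendor class
 * and vendor tables bottom-up, but only when the corresponding
 * check_*() helper reports that no other agent still uses them.
 */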
1547
1548 static struct ib_mad_agent_private *
1549 find_mad_agent(struct ib_mad_port_private *port_priv,
1550                struct ib_mad *mad)
1551 {
1552         struct ib_mad_agent_private *mad_agent = NULL;
1553         unsigned long flags;
1554
1555         spin_lock_irqsave(&port_priv->reg_lock, flags);
1556         if (ib_response_mad(mad)) {
1557                 u32 hi_tid;
1558                 struct ib_mad_agent_private *entry;
1559
1560                 /*
1561                  * Routing is based on high 32 bits of transaction ID
1562                  * of MAD.
1563                  */
1564                 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
1565                 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1566                         if (entry->agent.hi_tid == hi_tid) {
1567                                 mad_agent = entry;
1568                                 break;
1569                         }
1570                 }
1571         } else {
1572                 struct ib_mad_mgmt_class_table *class;
1573                 struct ib_mad_mgmt_method_table *method;
1574                 struct ib_mad_mgmt_vendor_class_table *vendor;
1575                 struct ib_mad_mgmt_vendor_class *vendor_class;
1576                 struct ib_vendor_mad *vendor_mad;
1577                 int index;
1578
1579                 /*
1580                  * Routing is based on version, class, and method
1581                  * For "newer" vendor MADs, also based on OUI
1582                  */
1583                 if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
1584                         goto out;
1585                 if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1586                         class = port_priv->version[
1587                                         mad->mad_hdr.class_version].class;
1588                         if (!class)
1589                                 goto out;
1590                         method = class->method_table[convert_mgmt_class(
1591                                                         mad->mad_hdr.mgmt_class)];
1592                         if (method)
1593                                 mad_agent = method->agent[mad->mad_hdr.method &
1594                                                           ~IB_MGMT_METHOD_RESP];
1595                 } else {
1596                         vendor = port_priv->version[
1597                                         mad->mad_hdr.class_version].vendor;
1598                         if (!vendor)
1599                                 goto out;
1600                         vendor_class = vendor->vendor_class[vendor_class_index(
1601                                                 mad->mad_hdr.mgmt_class)];
1602                         if (!vendor_class)
1603                                 goto out;
1604                         /* Find matching OUI */
1605                         vendor_mad = (struct ib_vendor_mad *)mad;
1606                         index = find_vendor_oui(vendor_class, vendor_mad->oui);
1607                         if (index == -1)
1608                                 goto out;
1609                         method = vendor_class->method_table[index];
1610                         if (method) {
1611                                 mad_agent = method->agent[mad->mad_hdr.method &
1612                                                           ~IB_MGMT_METHOD_RESP];
1613                         }
1614                 }
1615         }
1616
1617         if (mad_agent) {
1618                 if (mad_agent->agent.recv_handler)
1619                         atomic_inc(&mad_agent->refcount);
1620                 else {
1621                         printk(KERN_NOTICE PFX "No receive handler for client "
1622                                "%p on port %d\n",
1623                                &mad_agent->agent, port_priv->port_num);
1624                         mad_agent = NULL;
1625                 }
1626         }
1627 out:
1628         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1629
1630         return mad_agent;
1631 }
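/*
 * Dispatch rules implemented by find_mad_agent(): response MADs are
 * routed by the high 32 bits of the transaction ID back to the agent
 * that owns that hi_tid; request MADs are routed by class version,
 * management class and method (with the response bit masked off), and
 * vendor classes with an OUI additionally match on the OUI carried in
 * the MAD.  A matched agent without a recv_handler is treated as no
 * match.
 */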
1632
1633 static int validate_mad(struct ib_mad *mad, u32 qp_num)
1634 {
1635         int valid = 0;
1636
1637         /* Make sure MAD base version is understood */
1638         if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
1639                 printk(KERN_ERR PFX "MAD received with unsupported base "
1640                        "version %d\n", mad->mad_hdr.base_version);
1641                 goto out;
1642         }
1643
1644         /* Filter SMI packets sent to other than QP0 */
1645         if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1646             (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1647                 if (qp_num == 0)
1648                         valid = 1;
1649         } else {
1650                 /* Filter GSI packets sent to QP0 */
1651                 if (qp_num != 0)
1652                         valid = 1;
1653         }
1654
1655 out:
1656         return valid;
1657 }
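/*
 * validate_mad() enforces the QP/class pairing: the LID-routed and
 * directed-route subnet management classes are accepted only on QP0,
 * every other (GSI) class only on a non-zero QP, and the MAD base
 * version must be one this code understands.
 */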
1658
1659 static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
1660                        struct ib_mad_hdr *mad_hdr)
1661 {
1662         struct ib_rmpp_mad *rmpp_mad;
1663
1664         rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1665         return !mad_agent_priv->agent.rmpp_version ||
1666                 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1667                                     IB_MGMT_RMPP_FLAG_ACTIVE) ||
1668                 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1669 }
1670
1671 static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
1672                                      struct ib_mad_recv_wc *rwc)
1673 {
1674         return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
1675                 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1676 }
1677
1678 static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
1679                                    struct ib_mad_send_wr_private *wr,
1680                                    struct ib_mad_recv_wc *rwc)
1681 {
1682         struct ib_ah_attr attr;
1683         u8 send_resp, rcv_resp;
1684         union ib_gid sgid;
1685         struct ib_device *device = mad_agent_priv->agent.device;
1686         u8 port_num = mad_agent_priv->agent.port_num;
1687         u8 lmc;
1688
1689         send_resp = ((struct ib_mad *)(wr->send_buf.mad))->
1690                      mad_hdr.method & IB_MGMT_METHOD_RESP;
1691         rcv_resp = rwc->recv_buf.mad->mad_hdr.method & IB_MGMT_METHOD_RESP;
1692
1693         if (send_resp == rcv_resp)
1694                 /* both requests or both responses; treat GIDs as different */
1695                 return 0;
1696
1697         if (ib_query_ah(wr->send_buf.ah, &attr))
1698                 /* Assume not equal, to avoid false positives. */
1699                 return 0;
1700
1701         if (!!(attr.ah_flags & IB_AH_GRH) !=
1702             !!(rwc->wc->wc_flags & IB_WC_GRH))
1703                 /* one has GID, other does not.  Assume different */
1704                 return 0;
1705
1706         if (!send_resp && rcv_resp) {
1707                 /* sent a request, received a response */
1708                 if (!(attr.ah_flags & IB_AH_GRH)) {
1709                         if (ib_get_cached_lmc(device, port_num, &lmc))
1710                                 return 0;
1711                         return (!lmc || !((attr.src_path_bits ^
1712                                            rwc->wc->dlid_path_bits) &
1713                                           ((1 << lmc) - 1)));
1714                 } else {
1715                         if (ib_get_cached_gid(device, port_num,
1716                                               attr.grh.sgid_index, &sgid))
1717                                 return 0;
1718                         return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1719                                        16);
1720                 }
1721         }
1722
1723         if (!(attr.ah_flags & IB_AH_GRH))
1724                 return attr.dlid == rwc->wc->slid;
1725         else
1726                 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1727                                16);
1728 }
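/*
 * rcv_has_same_gid() decides whether a received MAD plausibly came from
 * the peer a request was sent to.  Both sides must agree on whether a
 * GRH is present; with a GRH the GIDs are compared, without one the
 * LIDs are compared (masking path bits by the port LMC in the
 * request/response case).  Whenever the needed address information
 * cannot be obtained, the function errs on the side of "not equal" to
 * avoid false matches.
 */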
1729
1730 static inline int is_direct(u8 class)
1731 {
1732         return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1733 }
1734
1735 struct ib_mad_send_wr_private*
1736 ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
1737                  struct ib_mad_recv_wc *wc)
1738 {
1739         struct ib_mad_send_wr_private *wr;
1740         struct ib_mad *mad;
1741
1742         mad = (struct ib_mad *)wc->recv_buf.mad;
1743
1744         list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1745                 if ((wr->tid == mad->mad_hdr.tid) &&
1746                     rcv_has_same_class(wr, wc) &&
1747                     /*
1748                      * Don't check GID for direct routed MADs.
1749                      * These might have permissive LIDs.
1750                      */
1751                     (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1752                      rcv_has_same_gid(mad_agent_priv, wr, wc)))
1753                         return wr;
1754         }
1755
1756         /*
1757          * It's possible to receive the response before we've
1758          * been notified that the send has completed
1759          */
1760         list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1761                 if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1762                     wr->tid == mad->mad_hdr.tid &&
1763                     wr->timeout &&
1764                     rcv_has_same_class(wr, wc) &&
1765                     /*
1766                      * Don't check GID for direct routed MADs.
1767                      * These might have permissive LIDs.
1768                      */
1769                     (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1770                      rcv_has_same_gid(mad_agent_priv, wr, wc)))
1771                         /* Verify request has not been canceled */
1772                         return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1773         }
1774         return NULL;
1775 }
1776
1777 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1778 {
1779         mad_send_wr->timeout = 0;
1780         if (mad_send_wr->refcount == 1)
1781                 list_move_tail(&mad_send_wr->agent_list,
1782                               &mad_send_wr->mad_agent_priv->done_list);
1783 }
1784
1785 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1786                                  struct ib_mad_recv_wc *mad_recv_wc)
1787 {
1788         struct ib_mad_send_wr_private *mad_send_wr;
1789         struct ib_mad_send_wc mad_send_wc;
1790         unsigned long flags;
1791
1792         INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1793         list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1794         if (mad_agent_priv->agent.rmpp_version) {
1795                 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1796                                                       mad_recv_wc);
1797                 if (!mad_recv_wc) {
1798                         deref_mad_agent(mad_agent_priv);
1799                         return;
1800                 }
1801         }
1802
1803         /* Complete corresponding request */
1804         if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
1805                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1806                 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1807                 if (!mad_send_wr) {
1808                         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1809                         ib_free_recv_mad(mad_recv_wc);
1810                         deref_mad_agent(mad_agent_priv);
1811                         return;
1812                 }
1813                 ib_mark_mad_done(mad_send_wr);
1814                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1815
1816                 /* Defined behavior is to complete response before request */
1817                 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
1818                 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1819                                                    mad_recv_wc);
1820                 atomic_dec(&mad_agent_priv->refcount);
1821
1822                 mad_send_wc.status = IB_WC_SUCCESS;
1823                 mad_send_wc.vendor_err = 0;
1824                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1825                 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1826         } else {
1827                 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1828                                                    mad_recv_wc);
1829                 deref_mad_agent(mad_agent_priv);
1830         }
1831 }
1832
1833 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1834                                      struct ib_wc *wc)
1835 {
1836         struct ib_mad_qp_info *qp_info;
1837         struct ib_mad_private_header *mad_priv_hdr;
1838         struct ib_mad_private *recv, *response;
1839         struct ib_mad_list_head *mad_list;
1840         struct ib_mad_agent_private *mad_agent;
1841
1842         response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1843         if (!response)
1844                 printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
1845                        "for response buffer\n");
1846
1847         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1848         qp_info = mad_list->mad_queue->qp_info;
1849         dequeue_mad(mad_list);
1850
1851         mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1852                                     mad_list);
1853         recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1854         dma_unmap_single(port_priv->device->dma_device,
1855                          pci_unmap_addr(&recv->header, mapping),
1856                          sizeof(struct ib_mad_private) -
1857                          sizeof(struct ib_mad_private_header),
1858                          DMA_FROM_DEVICE);
1859
1860         /* Set up MAD receive work completion from "normal" work completion */
1861         recv->header.wc = *wc;
1862         recv->header.recv_wc.wc = &recv->header.wc;
1863         recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1864         recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1865         recv->header.recv_wc.recv_buf.grh = &recv->grh;
1866
1867         if (atomic_read(&qp_info->snoop_count))
1868                 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
1869
1870         /* Validate MAD */
1871         if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
1872                 goto out;
1873
1874         if (recv->mad.mad.mad_hdr.mgmt_class ==
1875             IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1876                 if (!smi_handle_dr_smp_recv(&recv->mad.smp,
1877                                             port_priv->device->node_type,
1878                                             port_priv->port_num,
1879                                             port_priv->device->phys_port_cnt))
1880                         goto out;
1881                 if (!smi_check_forward_dr_smp(&recv->mad.smp))
1882                         goto local;
1883                 if (!smi_handle_dr_smp_send(&recv->mad.smp,
1884                                             port_priv->device->node_type,
1885                                             port_priv->port_num))
1886                         goto out;
1887                 if (!smi_check_local_smp(&recv->mad.smp, port_priv->device))
1888                         goto out;
1889         }
1890
1891 local:
1892         /* Give driver "right of first refusal" on incoming MAD */
1893         if (port_priv->device->process_mad) {
1894                 int ret;
1895
1896                 if (!response) {
1897                         printk(KERN_ERR PFX "No memory for response MAD\n");
1898                         /*
1899                          * Is it better to assume that
1900                          * it wouldn't be processed?
1901                          */
1902                         goto out;
1903                 }
1904
1905                 ret = port_priv->device->process_mad(port_priv->device, 0,
1906                                                      port_priv->port_num,
1907                                                      wc, &recv->grh,
1908                                                      &recv->mad.mad,
1909                                                      &response->mad.mad);
1910                 if (ret & IB_MAD_RESULT_SUCCESS) {
1911                         if (ret & IB_MAD_RESULT_CONSUMED)
1912                                 goto out;
1913                         if (ret & IB_MAD_RESULT_REPLY) {
1914                                 agent_send_response(&response->mad.mad,
1915                                                     &recv->grh, wc,
1916                                                     port_priv->device,
1917                                                     port_priv->port_num,
1918                                                     qp_info->qp->qp_num);
1919                                 goto out;
1920                         }
1921                 }
1922         }
1923
1924         mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1925         if (mad_agent) {
1926                 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
1927                 /*
1928                  * recv is freed in error cases by ib_mad_complete_recv()
1929                  * or handed to the client via the agent's recv_handler
1930                  */
1931                 recv = NULL;
1932         }
1933
1934 out:
1935         /* Post another receive request for this QP */
1936         if (response) {
1937                 ib_mad_post_receive_mads(qp_info, response);
1938                 if (recv)
1939                         kmem_cache_free(ib_mad_cache, recv);
1940         } else
1941                 ib_mad_post_receive_mads(qp_info, recv);
1942 }
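/*
 * Receive path summary for ib_mad_recv_done_handler(): the completed
 * buffer is unmapped and validated, directed-route SMPs go through the
 * SMI checks, the device's process_mad() hook gets first refusal (and
 * may consume the MAD or generate the reply itself), and only then is a
 * matching agent looked up and handed the receive work completion.  In
 * all cases a receive buffer is reposted so the receive queue stays
 * topped up.
 */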
1943
1944 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1945 {
1946         struct ib_mad_send_wr_private *mad_send_wr;
1947         unsigned long delay;
1948
1949         if (list_empty(&mad_agent_priv->wait_list)) {
1950                 cancel_delayed_work(&mad_agent_priv->timed_work);
1951         } else {
1952                 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
1953                                          struct ib_mad_send_wr_private,
1954                                          agent_list);
1955
1956                 if (time_after(mad_agent_priv->timeout,
1957                                mad_send_wr->timeout)) {
1958                         mad_agent_priv->timeout = mad_send_wr->timeout;
1959                         cancel_delayed_work(&mad_agent_priv->timed_work);
1960                         delay = mad_send_wr->timeout - jiffies;
1961                         if ((long)delay <= 0)
1962                                 delay = 1;
1963                         queue_delayed_work(mad_agent_priv->qp_info->
1964                                            port_priv->wq,
1965                                            &mad_agent_priv->timed_work, delay);
1966                 }
1967         }
1968 }
1969
1970 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
1971 {
1972         struct ib_mad_agent_private *mad_agent_priv;
1973         struct ib_mad_send_wr_private *temp_mad_send_wr;
1974         struct list_head *list_item;
1975         unsigned long delay;
1976
1977         mad_agent_priv = mad_send_wr->mad_agent_priv;
1978         list_del(&mad_send_wr->agent_list);
1979
1980         delay = mad_send_wr->timeout;
1981         mad_send_wr->timeout += jiffies;
1982
1983         if (delay) {
1984                 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
1985                         temp_mad_send_wr = list_entry(list_item,
1986                                                 struct ib_mad_send_wr_private,
1987                                                 agent_list);
1988                         if (time_after(mad_send_wr->timeout,
1989                                        temp_mad_send_wr->timeout))
1990                                 break;
1991                 }
1992         }
1993         else
1994                 list_item = &mad_agent_priv->wait_list;
1995         list_add(&mad_send_wr->agent_list, list_item);
1996
1997         /* Reschedule a work item if we have a shorter timeout */
1998         if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
1999                 cancel_delayed_work(&mad_agent_priv->timed_work);
2000                 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2001                                    &mad_agent_priv->timed_work, delay);
2002         }
2003 }
2004
2005 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2006                           int timeout_ms)
2007 {
2008         mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2009         wait_for_response(mad_send_wr);
2010 }
2011
2012 /*
2013  * Process a send work completion
2014  */
2015 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2016                              struct ib_mad_send_wc *mad_send_wc)
2017 {
2018         struct ib_mad_agent_private     *mad_agent_priv;
2019         unsigned long                   flags;
2020         int                             ret;
2021
2022         mad_agent_priv = mad_send_wr->mad_agent_priv;
2023         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2024         if (mad_agent_priv->agent.rmpp_version) {
2025                 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2026                 if (ret == IB_RMPP_RESULT_CONSUMED)
2027                         goto done;
2028         } else
2029                 ret = IB_RMPP_RESULT_UNHANDLED;
2030
2031         if (mad_send_wc->status != IB_WC_SUCCESS &&
2032             mad_send_wr->status == IB_WC_SUCCESS) {
2033                 mad_send_wr->status = mad_send_wc->status;
2034                 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2035         }
2036
2037         if (--mad_send_wr->refcount > 0) {
2038                 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2039                     mad_send_wr->status == IB_WC_SUCCESS) {
2040                         wait_for_response(mad_send_wr);
2041                 }
2042                 goto done;
2043         }
2044
2045         /* Remove send from MAD agent and notify client of completion */
2046         list_del(&mad_send_wr->agent_list);
2047         adjust_timeout(mad_agent_priv);
2048         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2049
2050         if (mad_send_wr->status != IB_WC_SUCCESS)
2051                 mad_send_wc->status = mad_send_wr->status;
2052         if (ret == IB_RMPP_RESULT_INTERNAL)
2053                 ib_rmpp_send_handler(mad_send_wc);
2054         else
2055                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2056                                                    mad_send_wc);
2057
2058         /* Release reference on agent taken when sending */
2059         deref_mad_agent(mad_agent_priv);
2060         return;
2061 done:
2062         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2063 }
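/*
 * Reference handling in ib_mad_complete_send_wr(): a failed or canceled
 * send drops the extra reference held for an awaited response
 * (refcount -= (timeout > 0)), and each completion drops one more.
 * With exactly one reference left and a timeout still set, the request
 * is parked on the wait list via wait_for_response(); once the count
 * reaches zero the client's send_handler (or the internal RMPP handler)
 * runs and the agent reference taken at send time is released.
 */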
2064
2065 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2066                                      struct ib_wc *wc)
2067 {
2068         struct ib_mad_send_wr_private   *mad_send_wr, *queued_send_wr;
2069         struct ib_mad_list_head         *mad_list;
2070         struct ib_mad_qp_info           *qp_info;
2071         struct ib_mad_queue             *send_queue;
2072         struct ib_send_wr               *bad_send_wr;
2073         struct ib_mad_send_wc           mad_send_wc;
2074         unsigned long flags;
2075         int ret;
2076
2077         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2078         mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2079                                    mad_list);
2080         send_queue = mad_list->mad_queue;
2081         qp_info = send_queue->qp_info;
2082
2083 retry:
2084         dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
2085                          pci_unmap_addr(mad_send_wr, header_mapping),
2086                          mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2087         dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
2088                          pci_unmap_addr(mad_send_wr, payload_mapping),
2089                          mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2090         queued_send_wr = NULL;
2091         spin_lock_irqsave(&send_queue->lock, flags);
2092         list_del(&mad_list->list);
2093
2094         /* Move queued send to the send queue */
2095         if (send_queue->count-- > send_queue->max_active) {
2096                 mad_list = container_of(qp_info->overflow_list.next,
2097                                         struct ib_mad_list_head, list);
2098                 queued_send_wr = container_of(mad_list,
2099                                         struct ib_mad_send_wr_private,
2100                                         mad_list);
2101                 list_move_tail(&mad_list->list, &send_queue->list);
2102         }
2103         spin_unlock_irqrestore(&send_queue->lock, flags);
2104
2105         mad_send_wc.send_buf = &mad_send_wr->send_buf;
2106         mad_send_wc.status = wc->status;
2107         mad_send_wc.vendor_err = wc->vendor_err;
2108         if (atomic_read(&qp_info->snoop_count))
2109                 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2110                            IB_MAD_SNOOP_SEND_COMPLETIONS);
2111         ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2112
2113         if (queued_send_wr) {
2114                 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
2115                                    &bad_send_wr);
2116                 if (ret) {
2117                         printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
2118                         mad_send_wr = queued_send_wr;
2119                         wc->status = IB_WC_LOC_QP_OP_ERR;
2120                         goto retry;
2121                 }
2122         }
2123 }
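/*
 * ib_mad_send_done_handler() also drains the overflow list: sends
 * beyond the queue's max_active wait on qp_info->overflow_list, and
 * each completion moves the next queued work request onto the hardware
 * send queue with ib_post_send().  A post failure is converted into an
 * IB_WC_LOC_QP_OP_ERR completion for that request and handled through
 * the same path.
 */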
2124
2125 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2126 {
2127         struct ib_mad_send_wr_private *mad_send_wr;
2128         struct ib_mad_list_head *mad_list;
2129         unsigned long flags;
2130
2131         spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2132         list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2133                 mad_send_wr = container_of(mad_list,
2134                                            struct ib_mad_send_wr_private,
2135                                            mad_list);
2136                 mad_send_wr->retry = 1;
2137         }
2138         spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2139 }
2140
2141 static void mad_error_handler(struct ib_mad_port_private *port_priv,
2142                               struct ib_wc *wc)
2143 {
2144         struct ib_mad_list_head *mad_list;
2145         struct ib_mad_qp_info *qp_info;
2146         struct ib_mad_send_wr_private *mad_send_wr;
2147         int ret;
2148
2149         /* Determine if failure was a send or receive */
2150         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2151         qp_info = mad_list->mad_queue->qp_info;
2152         if (mad_list->mad_queue == &qp_info->recv_queue)
2153                 /*
2154                  * Receive errors indicate that the QP has entered the error
2155                  * state - error handling/shutdown code will cleanup
2156                  */
2157                 return;
2158
2159         /*
2160          * Send errors will transition the QP to SQE - move
2161          * QP to RTS and repost flushed work requests
2162          */
2163         mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2164                                    mad_list);
2165         if (wc->status == IB_WC_WR_FLUSH_ERR) {
2166                 if (mad_send_wr->retry) {
2167                         /* Repost send */
2168                         struct ib_send_wr *bad_send_wr;
2169
2170                         mad_send_wr->retry = 0;
2171                         ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2172                                         &bad_send_wr);
2173                         if (ret)
2174                                 ib_mad_send_done_handler(port_priv, wc);
2175                 } else
2176                         ib_mad_send_done_handler(port_priv, wc);
2177         } else {
2178                 struct ib_qp_attr *attr;
2179
2180                 /* Transition QP to RTS and fail offending send */
2181                 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2182                 if (attr) {
2183                         attr->qp_state = IB_QPS_RTS;
2184                         attr->cur_qp_state = IB_QPS_SQE;
2185                         ret = ib_modify_qp(qp_info->qp, attr,
2186                                            IB_QP_STATE | IB_QP_CUR_STATE);
2187                         kfree(attr);
2188                         if (ret)
2189                                 printk(KERN_ERR PFX "mad_error_handler - "
2190                                        "ib_modify_qp to RTS : %d\n", ret);
2191                         else
2192                                 mark_sends_for_retry(qp_info);
2193                 }
2194                 ib_mad_send_done_handler(port_priv, wc);
2195         }
2196 }
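/*
 * mad_error_handler() recovery: receive errors are left to the QP
 * teardown path, flushed sends are reposted once if marked for retry
 * (or completed as failures otherwise), and any other send error means
 * the QP has dropped to SQE, so it is modified back to RTS and the
 * remaining queued sends are marked for retry before the offending send
 * is completed with its error status.
 */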
2197
2198 /*
2199  * IB MAD completion callback
2200  */
2201 static void ib_mad_completion_handler(void *data)
2202 {
2203         struct ib_mad_port_private *port_priv;
2204         struct ib_wc wc;
2205
2206         port_priv = (struct ib_mad_port_private *)data;
2207         ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2208
2209         while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2210                 if (wc.status == IB_WC_SUCCESS) {
2211                         switch (wc.opcode) {
2212                         case IB_WC_SEND:
2213                                 ib_mad_send_done_handler(port_priv, &wc);
2214                                 break;
2215                         case IB_WC_RECV:
2216                                 ib_mad_recv_done_handler(port_priv, &wc);
2217                                 break;
2218                         default:
2219                                 BUG_ON(1);
2220                                 break;
2221                         }
2222                 } else
2223                         mad_error_handler(port_priv, &wc);
2224         }
2225 }
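/*
 * ib_mad_completion_handler() drains the port CQ from workqueue
 * context.  The CQ is re-armed with ib_req_notify_cq() before the poll
 * loop, so completions arriving while the loop is still draining
 * generate another CQ event (and another work item) rather than being
 * lost.
 */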
2226
2227 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2228 {
2229         unsigned long flags;
2230         struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2231         struct ib_mad_send_wc mad_send_wc;
2232         struct list_head cancel_list;
2233
2234         INIT_LIST_HEAD(&cancel_list);
2235
2236         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2237         list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2238                                  &mad_agent_priv->send_list, agent_list) {
2239                 if (mad_send_wr->status == IB_WC_SUCCESS) {
2240                         mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2241                         mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2242                 }
2243         }
2244
2245         /* Empty wait list to prevent receives from finding a request */
2246         list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2247         /* Empty local completion list as well */
2248         list_splice_init(&mad_agent_priv->local_list, &cancel_list);
2249         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2250
2251         /* Report all cancelled requests */
2252         mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2253         mad_send_wc.vendor_err = 0;
2254
2255         list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2256                                  &cancel_list, agent_list) {
2257                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2258                 list_del(&mad_send_wr->agent_list);
2259                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2260                                                    &mad_send_wc);
2261                 atomic_dec(&mad_agent_priv->refcount);
2262         }
2263 }
2264
2265 static struct ib_mad_send_wr_private*
2266 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2267              struct ib_mad_send_buf *send_buf)
2268 {
2269         struct ib_mad_send_wr_private *mad_send_wr;
2270
2271         list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2272                             agent_list) {
2273                 if (&mad_send_wr->send_buf == send_buf)
2274                         return mad_send_wr;
2275         }
2276
2277         list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2278                             agent_list) {
2279                 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
2280                     &mad_send_wr->send_buf == send_buf)
2281                         return mad_send_wr;
2282         }
2283         return NULL;
2284 }
2285
2286 int ib_modify_mad(struct ib_mad_agent *mad_agent,
2287                   struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2288 {
2289         struct ib_mad_agent_private *mad_agent_priv;
2290         struct ib_mad_send_wr_private *mad_send_wr;
2291         unsigned long flags;
2292         int active;
2293
2294         mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2295                                       agent);
2296         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2297         mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2298         if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2299                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2300                 return -EINVAL;
2301         }
2302
2303         active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2304         if (!timeout_ms) {
2305                 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2306                 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2307         }
2308
2309         mad_send_wr->send_buf.timeout_ms = timeout_ms;
2310         if (active)
2311                 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2312         else
2313                 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2314
2315         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2316         return 0;
2317 }
2318 EXPORT_SYMBOL(ib_modify_mad);
2319
2320 void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2321                    struct ib_mad_send_buf *send_buf)
2322 {
2323         ib_modify_mad(mad_agent, send_buf, 0);
2324 }
2325 EXPORT_SYMBOL(ib_cancel_mad);
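/*
 * Illustrative usage (sketch, not part of this file); "agent" and
 * "send_buf" stand for the caller's ib_mad_agent and ib_mad_send_buf:
 *
 *	ib_modify_mad(agent, send_buf, 500);	-- reset timeout to 500 ms
 *	ib_cancel_mad(agent, send_buf);		-- same as a timeout of 0
 *
 * A canceled send is reported through the normal send_handler with
 * status IB_WC_WR_FLUSH_ERR.
 */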
2326
2327 static void local_completions(void *data)
2328 {
2329         struct ib_mad_agent_private *mad_agent_priv;
2330         struct ib_mad_local_private *local;
2331         struct ib_mad_agent_private *recv_mad_agent;
2332         unsigned long flags;
2333         int recv = 0;
2334         struct ib_wc wc;
2335         struct ib_mad_send_wc mad_send_wc;
2336
2337         mad_agent_priv = (struct ib_mad_agent_private *)data;
2338
2339         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2340         while (!list_empty(&mad_agent_priv->local_list)) {
2341                 local = list_entry(mad_agent_priv->local_list.next,
2342                                    struct ib_mad_local_private,
2343                                    completion_list);
2344                 list_del(&local->completion_list);
2345                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2346                 if (local->mad_priv) {
2347                         recv_mad_agent = local->recv_mad_agent;
2348                         if (!recv_mad_agent) {
2349                                 printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
2350                                 goto local_send_completion;
2351                         }
2352
2353                         recv = 1;
2354                         /*
2355                          * Defined behavior is to complete response
2356                          * before request
2357                          */
2358                         build_smp_wc((unsigned long) local->mad_send_wr,
2359                                      be16_to_cpu(IB_LID_PERMISSIVE),
2360                                      0, recv_mad_agent->agent.port_num, &wc);
2361
2362                         local->mad_priv->header.recv_wc.wc = &wc;
2363                         local->mad_priv->header.recv_wc.mad_len =
2364                                                 sizeof(struct ib_mad);
2365                         INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2366                         list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2367                                  &local->mad_priv->header.recv_wc.rmpp_list);
2368                         local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2369                         local->mad_priv->header.recv_wc.recv_buf.mad =
2370                                                 &local->mad_priv->mad.mad;
2371                         if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2372                                 snoop_recv(recv_mad_agent->qp_info,
2373                                           &local->mad_priv->header.recv_wc,
2374                                            IB_MAD_SNOOP_RECVS);
2375                         recv_mad_agent->agent.recv_handler(
2376                                                 &recv_mad_agent->agent,
2377                                                 &local->mad_priv->header.recv_wc);
2378                         spin_lock_irqsave(&recv_mad_agent->lock, flags);
2379                         atomic_dec(&recv_mad_agent->refcount);
2380                         spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2381                 }
2382
2383 local_send_completion:
2384                 /* Complete send */
2385                 mad_send_wc.status = IB_WC_SUCCESS;
2386                 mad_send_wc.vendor_err = 0;
2387                 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2388                 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2389                         snoop_send(mad_agent_priv->qp_info,
2390                                    &local->mad_send_wr->send_buf,
2391                                    &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2392                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2393                                                    &mad_send_wc);
2394
2395                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2396                 atomic_dec(&mad_agent_priv->refcount);
2397                 if (!recv)
2398                         kmem_cache_free(ib_mad_cache, local->mad_priv);
2399                 kfree(local);
2400         }
2401         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2402 }
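/*
 * local_completions() finishes MADs that were answered locally (for
 * example SMPs addressed to this port): the destination agent's
 * recv_handler is called first with a synthesized work completion, then
 * the sender's send_handler is called with IB_WC_SUCCESS, mirroring the
 * response-before-request ordering used on the wire path.
 */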
2403
2404 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2405 {
2406         int ret;
2407
2408         if (!mad_send_wr->retries--)
2409                 return -ETIMEDOUT;
2410
2411         mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2412
2413         if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
2414                 ret = ib_retry_rmpp(mad_send_wr);
2415                 switch (ret) {
2416                 case IB_RMPP_RESULT_UNHANDLED:
2417                         ret = ib_send_mad(mad_send_wr);
2418                         break;
2419                 case IB_RMPP_RESULT_CONSUMED:
2420                         ret = 0;
2421                         break;
2422                 default:
2423                         ret = -ECOMM;
2424                         break;
2425                 }
2426         } else
2427                 ret = ib_send_mad(mad_send_wr);
2428
2429         if (!ret) {
2430                 mad_send_wr->refcount++;
2431                 list_add_tail(&mad_send_wr->agent_list,
2432                               &mad_send_wr->mad_agent_priv->send_list);
2433         }
2434         return ret;
2435 }
2436
2437 static void timeout_sends(void *data)
2438 {
2439         struct ib_mad_agent_private *mad_agent_priv;
2440         struct ib_mad_send_wr_private *mad_send_wr;
2441         struct ib_mad_send_wc mad_send_wc;
2442         unsigned long flags, delay;
2443
2444         mad_agent_priv = (struct ib_mad_agent_private *)data;
2445         mad_send_wc.vendor_err = 0;
2446
2447         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2448         while (!list_empty(&mad_agent_priv->wait_list)) {
2449                 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2450                                          struct ib_mad_send_wr_private,
2451                                          agent_list);
2452
2453                 if (time_after(mad_send_wr->timeout, jiffies)) {
2454                         delay = mad_send_wr->timeout - jiffies;
2455                         if ((long)delay <= 0)
2456                                 delay = 1;
2457                         queue_delayed_work(mad_agent_priv->qp_info->
2458                                            port_priv->wq,
2459                                            &mad_agent_priv->timed_work, delay);
2460                         break;
2461                 }
2462
2463                 list_del(&mad_send_wr->agent_list);
2464                 if (mad_send_wr->status == IB_WC_SUCCESS &&
2465                     !retry_send(mad_send_wr))
2466                         continue;
2467
2468                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2469
2470                 if (mad_send_wr->status == IB_WC_SUCCESS)
2471                         mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2472                 else
2473                         mad_send_wc.status = mad_send_wr->status;
2474                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2475                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2476                                                    &mad_send_wc);
2477
2478                 atomic_dec(&mad_agent_priv->refcount);
2479                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2480         }
2481         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2482 }
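/*
 * timeout_sends() relies on the wait list being kept sorted by absolute
 * timeout (see wait_for_response()), so only the head entry needs to be
 * examined on each pass; an unexpired head simply reschedules the
 * delayed work.  Expired requests are retried while retries remain, and
 * otherwise reported with IB_WC_RESP_TIMEOUT_ERR (or the request's own
 * error status if it was already canceled or failed).
 */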
2483
2484 static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
2485 {
2486         struct ib_mad_port_private *port_priv = cq->cq_context;
2487         unsigned long flags;
2488
2489         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2490         if (!list_empty(&port_priv->port_list))
2491                 queue_work(port_priv->wq, &port_priv->work);
2492         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2493 }
2494
2495 /*
2496  * Allocate receive MADs and post receive WRs for them
2497  */
2498 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2499                                     struct ib_mad_private *mad)
2500 {
2501         unsigned long flags;
2502         int post, ret;
2503         struct ib_mad_private *mad_priv;
2504         struct ib_sge sg_list;
2505         struct ib_recv_wr recv_wr, *bad_recv_wr;
2506         struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2507
2508         /* Initialize common scatter list fields */
2509         sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2510         sg_list.lkey = qp_info->port_priv->mr->lkey;
2511
2512         /* Initialize common receive WR fields */
2513         recv_wr.next = NULL;
2514         recv_wr.sg_list = &sg_list;
2515         recv_wr.num_sge = 1;
2516
2517         do {
2518                 /* Allocate and map receive buffer */
2519                 if (mad) {
2520                         mad_priv = mad;
2521                         mad = NULL;
2522                 } else {
2523                         mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
2524                         if (!mad_priv) {
2525                                 printk(KERN_ERR PFX "No memory for receive buffer\n");
2526                                 ret = -ENOMEM;
2527                                 break;
2528                         }
2529                 }
2530                 sg_list.addr = dma_map_single(qp_info->port_priv->
2531                                                 device->dma_device,
2532                                               &mad_priv->grh,
2533                                               sizeof *mad_priv -
2534                                                 sizeof mad_priv->header,
2535                                               DMA_FROM_DEVICE);
2536                 pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
2537                 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2538                 mad_priv->header.mad_list.mad_queue = recv_queue;
2539
2540                 /* Post receive WR */
2541                 spin_lock_irqsave(&recv_queue->lock, flags);
2542                 post = (++recv_queue->count < recv_queue->max_active);
2543                 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2544                 spin_unlock_irqrestore(&recv_queue->lock, flags);
2545                 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2546                 if (ret) {
2547                         spin_lock_irqsave(&recv_queue->lock, flags);
2548                         list_del(&mad_priv->header.mad_list.list);
2549                         recv_queue->count--;
2550                         spin_unlock_irqrestore(&recv_queue->lock, flags);
2551                         dma_unmap_single(qp_info->port_priv->device->dma_device,
2552                                          pci_unmap_addr(&mad_priv->header,
2553                                                         mapping),
2554                                          sizeof *mad_priv -
2555                                            sizeof mad_priv->header,
2556                                          DMA_FROM_DEVICE);
2557                         kmem_cache_free(ib_mad_cache, mad_priv);
2558                         printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
2559                         break;
2560                 }
2561         } while (post);
2562
2563         return ret;
2564 }
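/*
 * ib_mad_post_receive_mads() keeps posting until the receive queue
 * reaches max_active, recycling the buffer passed in by the caller (if
 * any) before allocating fresh ones from ib_mad_cache.  Each buffer is
 * DMA-mapped starting at the GRH so a single scatter entry covers the
 * GRH plus the MAD itself.
 */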
2565
2566 /*
2567  * Return all the posted receive MADs
2568  */
2569 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2570 {
2571         struct ib_mad_private_header *mad_priv_hdr;
2572         struct ib_mad_private *recv;
2573         struct ib_mad_list_head *mad_list;
2574
2575         while (!list_empty(&qp_info->recv_queue.list)) {
2576
2577                 mad_list = list_entry(qp_info->recv_queue.list.next,
2578                                       struct ib_mad_list_head, list);
2579                 mad_priv_hdr = container_of(mad_list,
2580                                             struct ib_mad_private_header,
2581                                             mad_list);
2582                 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2583                                     header);
2584
2585                 /* Remove from posted receive MAD list */
2586                 list_del(&mad_list->list);
2587
2588                 dma_unmap_single(qp_info->port_priv->device->dma_device,
2589                                  pci_unmap_addr(&recv->header, mapping),
2590                                  sizeof(struct ib_mad_private) -
2591                                  sizeof(struct ib_mad_private_header),
2592                                  DMA_FROM_DEVICE);
2593                 kmem_cache_free(ib_mad_cache, recv);
2594         }
2595
2596         qp_info->recv_queue.count = 0;
2597 }
2598
2599 /*
2600  * Start the port
2601  */
2602 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2603 {
2604         int ret, i;
2605         struct ib_qp_attr *attr;
2606         struct ib_qp *qp;
2607
2608         attr = kmalloc(sizeof *attr, GFP_KERNEL);
2609         if (!attr) {
2610                 printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
2611                 return -ENOMEM;
2612         }
2613
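             /*
              * Walk both special QPs through the INIT -> RTR -> RTS
              * transitions, setting only the attributes each transition
              * requires.
              */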
2614         for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2615                 qp = port_priv->qp_info[i].qp;
2616                 /*
2617                  * PKey index for QP1 is irrelevant but
2618                  * one is needed for the Reset to Init transition
2619                  */
2620                 attr->qp_state = IB_QPS_INIT;
2621                 attr->pkey_index = 0;
2622                 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2623                 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2624                                              IB_QP_PKEY_INDEX | IB_QP_QKEY);
2625                 if (ret) {
2626                         printk(KERN_ERR PFX "Couldn't change QP%d state to "
2627                                "INIT: %d\n", i, ret);
2628                         goto out;
2629                 }
2630
2631                 attr->qp_state = IB_QPS_RTR;
2632                 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2633                 if (ret) {
2634                         printk(KERN_ERR PFX "Couldn't change QP%d state to "
2635                                "RTR: %d\n", i, ret);
2636                         goto out;
2637                 }
2638
2639                 attr->qp_state = IB_QPS_RTS;
2640                 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2641                 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2642                 if (ret) {
2643                         printk(KERN_ERR PFX "Couldn't change QP%d state to "
2644                                "RTS: %d\n", i, ret);
2645                         goto out;
2646                 }
2647         }
2648
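             /*
              * Arm the CQ before posting the initial receive buffers so
              * that the first completions generate a notification.
              */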
2649         ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2650         if (ret) {
2651                 printk(KERN_ERR PFX "Failed to request completion "
2652                        "notification: %d\n", ret);
2653                 goto out;
2654         }
2655
2656         for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2657                 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2658                 if (ret) {
2659                         printk(KERN_ERR PFX "Couldn't post receive WRs\n");
2660                         goto out;
2661                 }
2662         }
2663 out:
2664         kfree(attr);
2665         return ret;
2666 }
2667
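     /*
      * Asynchronous QP events land here.  There is no recovery path for a
      * fatal error on one of the special QPs; the event is only logged.
      */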
2668 static void qp_event_handler(struct ib_event *event, void *qp_context)
2669 {
2670         struct ib_mad_qp_info   *qp_info = qp_context;
2671
2672         /* It's worse than that! He's dead, Jim! */
2673         printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
2674                 event->event, qp_info->qp->qp_num);
2675 }
2676
2677 static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2678                            struct ib_mad_queue *mad_queue)
2679 {
2680         mad_queue->qp_info = qp_info;
2681         mad_queue->count = 0;
2682         spin_lock_init(&mad_queue->lock);
2683         INIT_LIST_HEAD(&mad_queue->list);
2684 }
2685
2686 static void init_mad_qp(struct ib_mad_port_private *port_priv,
2687                         struct ib_mad_qp_info *qp_info)
2688 {
2689         qp_info->port_priv = port_priv;
2690         init_mad_queue(qp_info, &qp_info->send_queue);
2691         init_mad_queue(qp_info, &qp_info->recv_queue);
2692         INIT_LIST_HEAD(&qp_info->overflow_list);
2693         spin_lock_init(&qp_info->snoop_lock);
2694         qp_info->snoop_table = NULL;
2695         qp_info->snoop_table_size = 0;
2696         atomic_set(&qp_info->snoop_count, 0);
2697 }
2698
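     /*
      * Both special QPs share the port's CQ for sends and receives and are
      * created with IB_SIGNAL_ALL_WR, so every send work request generates
      * a completion.
      */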
2699 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2700                          enum ib_qp_type qp_type)
2701 {
2702         struct ib_qp_init_attr  qp_init_attr;
2703         int ret;
2704
2705         memset(&qp_init_attr, 0, sizeof qp_init_attr);
2706         qp_init_attr.send_cq = qp_info->port_priv->cq;
2707         qp_init_attr.recv_cq = qp_info->port_priv->cq;
2708         qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2709         qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
2710         qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
2711         qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2712         qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2713         qp_init_attr.qp_type = qp_type;
2714         qp_init_attr.port_num = qp_info->port_priv->port_num;
2715         qp_init_attr.qp_context = qp_info;
2716         qp_init_attr.event_handler = qp_event_handler;
2717         qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2718         if (IS_ERR(qp_info->qp)) {
2719                 printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
2720                        get_spl_qp_index(qp_type));
2721                 ret = PTR_ERR(qp_info->qp);
2722                 goto error;
2723         }
2724         /* Use minimum queue sizes unless the CQ is resized */
2725         qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
2726         qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
2727         return 0;
2728
2729 error:
2730         return ret;
2731 }
2732
2733 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2734 {
2735         ib_destroy_qp(qp_info->qp);
2736         kfree(qp_info->snoop_table);
2737 }
2738
2739 /*
2740  * Open the port
2741  * Allocate the port info and create the CQ, PD, MR, and QPs
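      * On failure, the errorN labels unwind the allocations in reverse
      * order of creation.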
2742  */
2743 static int ib_mad_port_open(struct ib_device *device,
2744                             int port_num)
2745 {
2746         int ret, cq_size;
2747         struct ib_mad_port_private *port_priv;
2748         unsigned long flags;
2749         char name[sizeof "ib_mad123"];
2750
2751         /* Create new device info */
2752         port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
2753         if (!port_priv) {
2754                 printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
2755                 return -ENOMEM;
2756         }
2757
2758         port_priv->device = device;
2759         port_priv->port_num = port_num;
2760         spin_lock_init(&port_priv->reg_lock);
2761         INIT_LIST_HEAD(&port_priv->agent_list);
2762         init_mad_qp(port_priv, &port_priv->qp_info[0]);
2763         init_mad_qp(port_priv, &port_priv->qp_info[1]);
2764
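             /*
              * A single CQ handles send and receive completions for both
              * special QPs, so size it for all four queues.
              */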
2765         cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
2766         port_priv->cq = ib_create_cq(port_priv->device,
2767                                      ib_mad_thread_completion_handler,
2768                                      NULL, port_priv, cq_size);
2769         if (IS_ERR(port_priv->cq)) {
2770                 printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
2771                 ret = PTR_ERR(port_priv->cq);
2772                 goto error3;
2773         }
2774
2775         port_priv->pd = ib_alloc_pd(device);
2776         if (IS_ERR(port_priv->pd)) {
2777                 printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
2778                 ret = PTR_ERR(port_priv->pd);
2779                 goto error4;
2780         }
2781
2782         port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
2783         if (IS_ERR(port_priv->mr)) {
2784                 printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
2785                 ret = PTR_ERR(port_priv->mr);
2786                 goto error5;
2787         }
2788
2789         ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2790         if (ret)
2791                 goto error6;
2792         ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2793         if (ret)
2794                 goto error7;
2795
2796         snprintf(name, sizeof name, "ib_mad%d", port_num);
2797         port_priv->wq = create_singlethread_workqueue(name);
2798         if (!port_priv->wq) {
2799                 ret = -ENOMEM;
2800                 goto error8;
2801         }
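             /*
              * Completion work for this port runs on a dedicated
              * single-threaded workqueue (port_priv->wq).
              */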
2802         INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
2803
2804         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2805         list_add_tail(&port_priv->port_list, &ib_mad_port_list);
2806         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2807
2808         ret = ib_mad_port_start(port_priv);
2809         if (ret) {
2810                 printk(KERN_ERR PFX "Couldn't start port\n");
2811                 goto error9;
2812         }
2813
2814         return 0;
2815
2816 error9:
2817         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2818         list_del_init(&port_priv->port_list);
2819         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2820
2821         destroy_workqueue(port_priv->wq);
2822 error8:
2823         destroy_mad_qp(&port_priv->qp_info[1]);
2824 error7:
2825         destroy_mad_qp(&port_priv->qp_info[0]);
2826 error6:
2827         ib_dereg_mr(port_priv->mr);
2828 error5:
2829         ib_dealloc_pd(port_priv->pd);
2830 error4:
2831         ib_destroy_cq(port_priv->cq);
2832         cleanup_recv_queue(&port_priv->qp_info[1]);
2833         cleanup_recv_queue(&port_priv->qp_info[0]);
2834 error3:
2835         kfree(port_priv);
2836
2837         return ret;
2838 }
2839
2840 /*
2841  * Close the port
2842  * If there are no classes using the port, free the port
2843  * resources (CQ, MR, PD, QP) and remove the port's info structure
2844  */
2845 static int ib_mad_port_close(struct ib_device *device, int port_num)
2846 {
2847         struct ib_mad_port_private *port_priv;
2848         unsigned long flags;
2849
2850         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2851         port_priv = __ib_get_mad_port(device, port_num);
2852         if (port_priv == NULL) {
2853                 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2854                 printk(KERN_ERR PFX "Port %d not found\n", port_num);
2855                 return -ENODEV;
2856         }
2857         list_del_init(&port_priv->port_list);
2858         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2859
2860         destroy_workqueue(port_priv->wq);
2861         destroy_mad_qp(&port_priv->qp_info[1]);
2862         destroy_mad_qp(&port_priv->qp_info[0]);
2863         ib_dereg_mr(port_priv->mr);
2864         ib_dealloc_pd(port_priv->pd);
2865         ib_destroy_cq(port_priv->cq);
2866         cleanup_recv_queue(&port_priv->qp_info[1]);
2867         cleanup_recv_queue(&port_priv->qp_info[0]);
2868         /* XXX: Handle deallocation of MAD registration tables */
2869
2870         kfree(port_priv);
2871
2872         return 0;
2873 }
2874
2875 static void ib_mad_init_device(struct ib_device *device)
2876 {
2877         int start, end, i;
2878
2879         if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
2880                 return;
2881
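             /*
              * Switches provide MAD services only on port 0; CAs and
              * routers use ports 1 through phys_port_cnt.
              */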
2882         if (device->node_type == RDMA_NODE_IB_SWITCH) {
2883                 start = 0;
2884                 end   = 0;
2885         } else {
2886                 start = 1;
2887                 end   = device->phys_port_cnt;
2888         }
2889
2890         for (i = start; i <= end; i++) {
2891                 if (ib_mad_port_open(device, i)) {
2892                         printk(KERN_ERR PFX "Couldn't open %s port %d\n",
2893                                device->name, i);
2894                         goto error;
2895                 }
2896                 if (ib_agent_port_open(device, i)) {
2897                         printk(KERN_ERR PFX "Couldn't open %s port %d "
2898                                "for agents\n",
2899                                device->name, i);
2900                         goto error_agent;
2901                 }
2902         }
2903         return;
2904
2905 error_agent:
2906         if (ib_mad_port_close(device, i))
2907                 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2908                        device->name, i);
2909
2910 error:
2911         i--;
2912
2913         while (i >= start) {
2914                 if (ib_agent_port_close(device, i))
2915                         printk(KERN_ERR PFX "Couldn't close %s port %d "
2916                                "for agents\n",
2917                                device->name, i);
2918                 if (ib_mad_port_close(device, i))
2919                         printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2920                                device->name, i);
2921                 i--;
2922         }
2923 }
2924
2925 static void ib_mad_remove_device(struct ib_device *device)
2926 {
2927         int i, num_ports, cur_port;
2928
2929         if (device->node_type == RDMA_NODE_IB_SWITCH) {
2930                 num_ports = 1;
2931                 cur_port = 0;
2932         } else {
2933                 num_ports = device->phys_port_cnt;
2934                 cur_port = 1;
2935         }
2936         for (i = 0; i < num_ports; i++, cur_port++) {
2937                 if (ib_agent_port_close(device, cur_port))
2938                         printk(KERN_ERR PFX "Couldn't close %s port %d "
2939                                "for agents\n",
2940                                device->name, cur_port);
2941                 if (ib_mad_port_close(device, cur_port))
2942                         printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2943                                device->name, cur_port);
2944         }
2945 }
2946
2947 static struct ib_client mad_client = {
2948         .name   = "mad",
2949         .add = ib_mad_init_device,
2950         .remove = ib_mad_remove_device
2951 };
2952
2953 static int __init ib_mad_init_module(void)
2954 {
2955         int ret;
2956
2957         spin_lock_init(&ib_mad_port_list_lock);
2958
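             /*
              * The slab cache holds complete MAD buffers: the private
              * header followed by the GRH and MAD data that get
              * DMA-mapped for receives.
              */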
2959         ib_mad_cache = kmem_cache_create("ib_mad",
2960                                          sizeof(struct ib_mad_private),
2961                                          0,
2962                                          SLAB_HWCACHE_ALIGN,
2963                                          NULL,
2964                                          NULL);
2965         if (!ib_mad_cache) {
2966                 printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
2967                 ret = -ENOMEM;
2968                 goto error1;
2969         }
2970
2971         INIT_LIST_HEAD(&ib_mad_port_list);
2972
2973         if (ib_register_client(&mad_client)) {
2974                 printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
2975                 ret = -EINVAL;
2976                 goto error2;
2977         }
2978
2979         return 0;
2980
2981 error2:
2982         kmem_cache_destroy(ib_mad_cache);
2983 error1:
2984         return ret;
2985 }
2986
2987 static void __exit ib_mad_cleanup_module(void)
2988 {
2989         ib_unregister_client(&mad_client);
2990
2991         if (kmem_cache_destroy(ib_mad_cache)) {
2992                 printk(KERN_DEBUG PFX "Failed to destroy ib_mad cache\n");
2993         }
2994 }
2995
2996 module_init(ib_mad_init_module);
2997 module_exit(ib_mad_cleanup_module);
2998