/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dma-mapping.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");

static struct kmem_cache *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static spinlock_t ib_mad_port_list_lock;


/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
                         struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
                                        struct ib_mad_port_private *port_priv,
                                        struct ib_mad *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                              struct ib_mad_agent_private *agent_priv,
                              u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                           struct ib_mad_agent_private *agent_priv);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
        struct ib_mad_port_private *entry;

        list_for_each_entry(entry, &ib_mad_port_list, port_list) {
                if (entry->device == device && entry->port_num == port_num)
                        return entry;
        }
        return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
        struct ib_mad_port_private *entry;
        unsigned long flags;

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        entry = __ib_get_mad_port(device, port_num);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
        /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
        return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
                0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
        switch (qp_type) {
        case IB_QPT_SMI:
                return 0;
        case IB_QPT_GSI:
                return 1;
        default:
                return -1;
        }
}

static int vendor_class_index(u8 mgmt_class)
{
        return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
        if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
            (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
                return 0;
        return 1;
}

static int is_vendor_oui(char *oui)
{
        if (oui[0] || oui[1] || oui[2])
                return 1;
        return 0;
}

static int is_vendor_method_in_use(
                struct ib_mad_mgmt_vendor_class *vendor_class,
                struct ib_mad_reg_req *mad_reg_req)
{
        struct ib_mad_mgmt_method_table *method;
        int i;

        for (i = 0; i < MAX_MGMT_OUI; i++) {
                if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
                        method = vendor_class->method_table[i];
                        if (method) {
                                if (method_in_use(&method, mad_reg_req))
                                        return 1;
                                else
                                        break;
                        }
                }
        }
        return 0;
}

int ib_response_mad(struct ib_mad *mad)
{
        return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
                (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
                ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
                 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);
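
/*
 * Usage sketch (illustrative only, not part of this file): a client's
 * recv_handler can use ib_response_mad() to separate solicited responses
 * from unsolicited requests.  "my_recv_handler" is a hypothetical name.
 *
 *      static void my_recv_handler(struct ib_mad_agent *agent,
 *                                  struct ib_mad_recv_wc *mad_recv_wc)
 *      {
 *              if (ib_response_mad(mad_recv_wc->recv_buf.mad))
 *                      ;       // match against an outstanding request
 *              else
 *                      ;       // handle as a new request
 *              ib_free_recv_mad(mad_recv_wc);
 *      }
 */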

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
                                           u8 port_num,
                                           enum ib_qp_type qp_type,
                                           struct ib_mad_reg_req *mad_reg_req,
                                           u8 rmpp_version,
                                           ib_mad_send_handler send_handler,
                                           ib_mad_recv_handler recv_handler,
                                           void *context)
{
        struct ib_mad_port_private *port_priv;
        struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_reg_req *reg_req = NULL;
        struct ib_mad_mgmt_class_table *class;
        struct ib_mad_mgmt_vendor_class_table *vendor;
        struct ib_mad_mgmt_vendor_class *vendor_class;
        struct ib_mad_mgmt_method_table *method;
        int ret2, qpn;
        unsigned long flags;
        u8 mgmt_class, vclass;

        /* Validate parameters */
        qpn = get_spl_qp_index(qp_type);
        if (qpn == -1)
                goto error1;

        if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
                goto error1;

        /* Validate MAD registration request if supplied */
        if (mad_reg_req) {
                if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
                        goto error1;
                if (!recv_handler)
                        goto error1;
                if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
                        /*
                         * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
                         * one in this range currently allowed
                         */
                        if (mad_reg_req->mgmt_class !=
                            IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                                goto error1;
                } else if (mad_reg_req->mgmt_class == 0) {
                        /*
                         * Class 0 is reserved in IBA and is used for
                         * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
                         */
                        goto error1;
                } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
                        /*
                         * If class is in "new" vendor range,
                         * ensure supplied OUI is not zero
                         */
                        if (!is_vendor_oui(mad_reg_req->oui))
                                goto error1;
                }
                /* Make sure class supplied is consistent with RMPP */
                if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
                        if (rmpp_version)
                                goto error1;
                }
                /* Make sure class supplied is consistent with QP type */
                if (qp_type == IB_QPT_SMI) {
                        if ((mad_reg_req->mgmt_class !=
                                        IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
                            (mad_reg_req->mgmt_class !=
                                        IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
                                goto error1;
                } else {
                        if ((mad_reg_req->mgmt_class ==
                                        IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
                            (mad_reg_req->mgmt_class ==
                                        IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
                                goto error1;
                }
        } else {
                /* No registration request supplied */
                if (!send_handler)
                        goto error1;
        }

        /* Validate device and port */
        port_priv = ib_get_mad_port(device, port_num);
        if (!port_priv) {
                ret = ERR_PTR(-ENODEV);
                goto error1;
        }

        /* Allocate structures */
        mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
        if (!mad_agent_priv) {
                ret = ERR_PTR(-ENOMEM);
                goto error1;
        }

        mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
                                                 IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(mad_agent_priv->agent.mr)) {
                ret = ERR_PTR(-ENOMEM);
                goto error2;
        }

        if (mad_reg_req) {
                reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
                if (!reg_req) {
                        ret = ERR_PTR(-ENOMEM);
                        goto error3;
                }
                /* Make a copy of the MAD registration request */
                memcpy(reg_req, mad_reg_req, sizeof *reg_req);
        }

        /* Now, fill in the various structures */
        mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
        mad_agent_priv->reg_req = reg_req;
        mad_agent_priv->agent.rmpp_version = rmpp_version;
        mad_agent_priv->agent.device = device;
        mad_agent_priv->agent.recv_handler = recv_handler;
        mad_agent_priv->agent.send_handler = send_handler;
        mad_agent_priv->agent.context = context;
        mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
        mad_agent_priv->agent.port_num = port_num;

        spin_lock_irqsave(&port_priv->reg_lock, flags);
        mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

        /*
         * Make sure MAD registration (if supplied)
         * does not overlap with any existing ones
         */
        if (mad_reg_req) {
                mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
                if (!is_vendor_class(mgmt_class)) {
                        class = port_priv->version[mad_reg_req->
                                                   mgmt_class_version].class;
                        if (class) {
                                method = class->method_table[mgmt_class];
                                if (method) {
                                        if (method_in_use(&method,
                                                           mad_reg_req))
                                                goto error4;
                                }
                        }
                        ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
                                                  mgmt_class);
                } else {
                        /* "New" vendor class range */
                        vendor = port_priv->version[mad_reg_req->
                                                    mgmt_class_version].vendor;
                        if (vendor) {
                                vclass = vendor_class_index(mgmt_class);
                                vendor_class = vendor->vendor_class[vclass];
                                if (vendor_class) {
                                        if (is_vendor_method_in_use(
                                                        vendor_class,
                                                        mad_reg_req))
                                                goto error4;
                                }
                        }
                        ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
                }
                if (ret2) {
                        ret = ERR_PTR(ret2);
                        goto error4;
                }
        }

        /* Add mad agent into port's agent list */
        list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
        spin_unlock_irqrestore(&port_priv->reg_lock, flags);

        spin_lock_init(&mad_agent_priv->lock);
        INIT_LIST_HEAD(&mad_agent_priv->send_list);
        INIT_LIST_HEAD(&mad_agent_priv->wait_list);
        INIT_LIST_HEAD(&mad_agent_priv->done_list);
        INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
        INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
        INIT_LIST_HEAD(&mad_agent_priv->local_list);
        INIT_WORK(&mad_agent_priv->local_work, local_completions);
        atomic_set(&mad_agent_priv->refcount, 1);
        init_completion(&mad_agent_priv->comp);

        return &mad_agent_priv->agent;

error4:
        spin_unlock_irqrestore(&port_priv->reg_lock, flags);
        kfree(reg_req);
error3:
        ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
        kfree(mad_agent_priv);
error1:
        return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
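
/*
 * Usage sketch (illustrative only): registering a GSI agent for SA-class
 * Get/Set requests.  "device", "port_num", the handlers, and "my_context"
 * are assumed to exist in the caller.
 *
 *      struct ib_mad_reg_req req = {
 *              .mgmt_class         = IB_MGMT_CLASS_SUBN_ADM,
 *              .mgmt_class_version = 2,
 *      };
 *      struct ib_mad_agent *agent;
 *
 *      set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *      set_bit(IB_MGMT_METHOD_SET, req.method_mask);
 *      agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req, 0,
 *                                    my_send_handler, my_recv_handler,
 *                                    my_context);
 *      if (IS_ERR(agent))
 *              return PTR_ERR(agent);
 *      ...
 *      ib_unregister_mad_agent(agent);
 */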

static inline int is_snooping_sends(int mad_snoop_flags)
{
        return (mad_snoop_flags &
                (/*IB_MAD_SNOOP_POSTED_SENDS |
                 IB_MAD_SNOOP_RMPP_SENDS |*/
                 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
                 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
        return (mad_snoop_flags &
                (IB_MAD_SNOOP_RECVS /*|
                 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
                                struct ib_mad_snoop_private *mad_snoop_priv)
{
        struct ib_mad_snoop_private **new_snoop_table;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        /* Check for empty slot in array. */
        for (i = 0; i < qp_info->snoop_table_size; i++)
                if (!qp_info->snoop_table[i])
                        break;

        if (i == qp_info->snoop_table_size) {
                /* Grow table. */
                new_snoop_table = kmalloc(sizeof mad_snoop_priv *
                                          (qp_info->snoop_table_size + 1),
                                          GFP_ATOMIC);
                if (!new_snoop_table) {
                        i = -ENOMEM;
                        goto out;
                }
                if (qp_info->snoop_table) {
                        memcpy(new_snoop_table, qp_info->snoop_table,
                               sizeof mad_snoop_priv *
                               qp_info->snoop_table_size);
                        kfree(qp_info->snoop_table);
                }
                qp_info->snoop_table = new_snoop_table;
                qp_info->snoop_table_size++;
        }
        qp_info->snoop_table[i] = mad_snoop_priv;
        atomic_inc(&qp_info->snoop_count);
out:
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
        return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
                                           u8 port_num,
                                           enum ib_qp_type qp_type,
                                           int mad_snoop_flags,
                                           ib_mad_snoop_handler snoop_handler,
                                           ib_mad_recv_handler recv_handler,
                                           void *context)
{
        struct ib_mad_port_private *port_priv;
        struct ib_mad_agent *ret;
        struct ib_mad_snoop_private *mad_snoop_priv;
        int qpn;

        /* Validate parameters */
        if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
            (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
                ret = ERR_PTR(-EINVAL);
                goto error1;
        }
        qpn = get_spl_qp_index(qp_type);
        if (qpn == -1) {
                ret = ERR_PTR(-EINVAL);
                goto error1;
        }
        port_priv = ib_get_mad_port(device, port_num);
        if (!port_priv) {
                ret = ERR_PTR(-ENODEV);
                goto error1;
        }
        /* Allocate structures */
        mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
        if (!mad_snoop_priv) {
                ret = ERR_PTR(-ENOMEM);
                goto error1;
        }

        /* Now, fill in the various structures */
        mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
        mad_snoop_priv->agent.device = device;
        mad_snoop_priv->agent.recv_handler = recv_handler;
        mad_snoop_priv->agent.snoop_handler = snoop_handler;
        mad_snoop_priv->agent.context = context;
        mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
        mad_snoop_priv->agent.port_num = port_num;
        mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
        init_completion(&mad_snoop_priv->comp);
        mad_snoop_priv->snoop_index = register_snoop_agent(
                                                &port_priv->qp_info[qpn],
                                                mad_snoop_priv);
        if (mad_snoop_priv->snoop_index < 0) {
                ret = ERR_PTR(mad_snoop_priv->snoop_index);
                goto error2;
        }

        atomic_set(&mad_snoop_priv->refcount, 1);
        return &mad_snoop_priv->agent;

error2:
        kfree(mad_snoop_priv);
error1:
        return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);
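
/*
 * Usage sketch (illustrative only): attaching a snoop agent that is
 * called back for every receive completion on the GSI QP.  Handler names
 * and "my_context" are hypothetical.
 *
 *      struct ib_mad_agent *snoop;
 *
 *      snoop = ib_register_mad_snoop(device, port_num, IB_QPT_GSI,
 *                                    IB_MAD_SNOOP_RECVS, my_snoop_handler,
 *                                    my_recv_handler, my_context);
 *      if (IS_ERR(snoop))
 *              return PTR_ERR(snoop);
 */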

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
        if (atomic_dec_and_test(&mad_agent_priv->refcount))
                complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
        if (atomic_dec_and_test(&mad_snoop_priv->refcount))
                complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
        struct ib_mad_port_private *port_priv;
        unsigned long flags;

        /* Note that we could still be handling received MADs */

        /*
         * Canceling all sends results in dropping received response
         * MADs, preventing us from queuing additional work
         */
        cancel_mads(mad_agent_priv);
        port_priv = mad_agent_priv->qp_info->port_priv;
        cancel_delayed_work(&mad_agent_priv->timed_work);

        spin_lock_irqsave(&port_priv->reg_lock, flags);
        remove_mad_reg_req(mad_agent_priv);
        list_del(&mad_agent_priv->agent_list);
        spin_unlock_irqrestore(&port_priv->reg_lock, flags);

        flush_workqueue(port_priv->wq);
        ib_cancel_rmpp_recvs(mad_agent_priv);

        deref_mad_agent(mad_agent_priv);
        wait_for_completion(&mad_agent_priv->comp);

        kfree(mad_agent_priv->reg_req);
        ib_dereg_mr(mad_agent_priv->agent.mr);
        kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
        struct ib_mad_qp_info *qp_info;
        unsigned long flags;

        qp_info = mad_snoop_priv->qp_info;
        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
        atomic_dec(&qp_info->snoop_count);
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

        deref_snoop_agent(mad_snoop_priv);
        wait_for_completion(&mad_snoop_priv->comp);

        kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_snoop_private *mad_snoop_priv;

        /* If the TID is zero, the agent can only snoop. */
        if (mad_agent->hi_tid) {
                mad_agent_priv = container_of(mad_agent,
                                              struct ib_mad_agent_private,
                                              agent);
                unregister_mad_agent(mad_agent_priv);
        } else {
                mad_snoop_priv = container_of(mad_agent,
                                              struct ib_mad_snoop_private,
                                              agent);
                unregister_mad_snoop(mad_snoop_priv);
        }
        return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
        struct ib_mad_queue *mad_queue;
        unsigned long flags;

        BUG_ON(!mad_list->mad_queue);
        mad_queue = mad_list->mad_queue;
        spin_lock_irqsave(&mad_queue->lock, flags);
        list_del(&mad_list->list);
        mad_queue->count--;
        spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
                       struct ib_mad_send_buf *send_buf,
                       struct ib_mad_send_wc *mad_send_wc,
                       int mad_snoop_flags)
{
        struct ib_mad_snoop_private *mad_snoop_priv;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        for (i = 0; i < qp_info->snoop_table_size; i++) {
                mad_snoop_priv = qp_info->snoop_table[i];
                if (!mad_snoop_priv ||
                    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
                        continue;

                atomic_inc(&mad_snoop_priv->refcount);
                spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
                mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
                                                    send_buf, mad_send_wc);
                deref_snoop_agent(mad_snoop_priv);
                spin_lock_irqsave(&qp_info->snoop_lock, flags);
        }
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
                       struct ib_mad_recv_wc *mad_recv_wc,
                       int mad_snoop_flags)
{
        struct ib_mad_snoop_private *mad_snoop_priv;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        for (i = 0; i < qp_info->snoop_table_size; i++) {
                mad_snoop_priv = qp_info->snoop_table[i];
                if (!mad_snoop_priv ||
                    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
                        continue;

                atomic_inc(&mad_snoop_priv->refcount);
                spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
                mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
                                                   mad_recv_wc);
                deref_snoop_agent(mad_snoop_priv);
                spin_lock_irqsave(&qp_info->snoop_lock, flags);
        }
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void build_smp_wc(struct ib_qp *qp,
                         u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
                         struct ib_wc *wc)
{
        memset(wc, 0, sizeof *wc);
        wc->wr_id = wr_id;
        wc->status = IB_WC_SUCCESS;
        wc->opcode = IB_WC_RECV;
        wc->pkey_index = pkey_index;
        wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
        wc->src_qp = IB_QP0;
        wc->qp = qp;
        wc->slid = slid;
        wc->sl = 0;
        wc->dlid_path_bits = 0;
        wc->port_num = port_num;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
                                  struct ib_mad_send_wr_private *mad_send_wr)
{
        int ret = 0;
        struct ib_smp *smp = mad_send_wr->send_buf.mad;
        unsigned long flags;
        struct ib_mad_local_private *local;
        struct ib_mad_private *mad_priv;
        struct ib_mad_port_private *port_priv;
        struct ib_mad_agent_private *recv_mad_agent = NULL;
        struct ib_device *device = mad_agent_priv->agent.device;
        u8 port_num;
        struct ib_wc mad_wc;
        struct ib_send_wr *send_wr = &mad_send_wr->send_wr;

        if (device->node_type == RDMA_NODE_IB_SWITCH &&
            smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                port_num = send_wr->wr.ud.port_num;
        else
                port_num = mad_agent_priv->agent.port_num;

        /*
         * Directed route handling starts if the initial LID routed part of
         * a request or the ending LID routed part of a response is empty.
         * If we are at the start of the LID routed part, don't update the
         * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
         */
        if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
             IB_LID_PERMISSIVE &&
             smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
             IB_SMI_DISCARD) {
                ret = -EINVAL;
                printk(KERN_ERR PFX "Invalid directed route\n");
                goto out;
        }

        /* Check to post send on QP or process locally */
        if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
            smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
                goto out;

        local = kmalloc(sizeof *local, GFP_ATOMIC);
        if (!local) {
                ret = -ENOMEM;
                printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
                goto out;
        }
        local->mad_priv = NULL;
        local->recv_mad_agent = NULL;
        mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
        if (!mad_priv) {
                ret = -ENOMEM;
                printk(KERN_ERR PFX "No memory for local response MAD\n");
                kfree(local);
                goto out;
        }

        build_smp_wc(mad_agent_priv->agent.qp,
                     send_wr->wr_id, be16_to_cpu(smp->dr_slid),
                     send_wr->wr.ud.pkey_index,
                     send_wr->wr.ud.port_num, &mad_wc);

        /* No GRH for DR SMP */
        ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
                                  (struct ib_mad *)smp,
                                  (struct ib_mad *)&mad_priv->mad);
        switch (ret) {
        case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
                if (ib_response_mad(&mad_priv->mad.mad) &&
                    mad_agent_priv->agent.recv_handler) {
                        local->mad_priv = mad_priv;
                        local->recv_mad_agent = mad_agent_priv;
                        /*
                         * Reference MAD agent until receive
                         * side of local completion handled
                         */
                        atomic_inc(&mad_agent_priv->refcount);
                } else
                        kmem_cache_free(ib_mad_cache, mad_priv);
                break;
        case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
                kmem_cache_free(ib_mad_cache, mad_priv);
                kfree(local);
                ret = 1;
                goto out;
        case IB_MAD_RESULT_SUCCESS:
                /* Treat like an incoming receive MAD */
                port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
                                            mad_agent_priv->agent.port_num);
                if (port_priv) {
                        memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
                        recv_mad_agent = find_mad_agent(port_priv,
                                                        &mad_priv->mad.mad);
                }
                if (!port_priv || !recv_mad_agent) {
                        kmem_cache_free(ib_mad_cache, mad_priv);
                        kfree(local);
                        ret = 0;
                        goto out;
                }
                local->mad_priv = mad_priv;
                local->recv_mad_agent = recv_mad_agent;
                break;
        default:
                kmem_cache_free(ib_mad_cache, mad_priv);
                kfree(local);
                ret = -EINVAL;
                goto out;
        }

        local->mad_send_wr = mad_send_wr;
        /* Reference MAD agent until send side of local completion handled */
        atomic_inc(&mad_agent_priv->refcount);
        /* Queue local completion to local list */
        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
        queue_work(mad_agent_priv->qp_info->port_priv->wq,
                   &mad_agent_priv->local_work);
        ret = 1;
out:
        return ret;
}

static int get_pad_size(int hdr_len, int data_len)
{
        int seg_size, pad;

        seg_size = sizeof(struct ib_mad) - hdr_len;
        if (data_len && seg_size) {
                pad = seg_size - data_len % seg_size;
                return pad == seg_size ? 0 : pad;
        } else
                return seg_size;
}
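
/*
 * Worked example: an SA MAD has hdr_len = IB_MGMT_SA_HDR (56), so each
 * RMPP segment carries seg_size = 256 - 56 = 200 payload bytes.  For
 * data_len = 500, pad = 200 - (500 % 200) = 100 fills out the last
 * segment; for data_len = 400 the division is exact and the pad is 0.
 */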

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_rmpp_segment *s, *t;

        list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
                list_del(&s->list);
                kfree(s);
        }
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
                                gfp_t gfp_mask)
{
        struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
        struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
        struct ib_rmpp_segment *seg = NULL;
        int left, seg_size, pad;

        send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
        seg_size = send_buf->seg_size;
        pad = send_wr->pad;

        /* Allocate data segments. */
        for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
                seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
                if (!seg) {
                        printk(KERN_ERR "alloc_send_rmpp_list: RMPP mem "
                               "alloc failed for len %zd, gfp %#x\n",
                               sizeof (*seg) + seg_size, gfp_mask);
                        free_send_rmpp_list(send_wr);
                        return -ENOMEM;
                }
                seg->num = ++send_buf->seg_count;
                list_add_tail(&seg->list, &send_wr->rmpp_list);
        }

        /* Zero any padding */
        if (pad)
                memset(seg->data + seg_size - pad, 0, pad);

        rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
                                          agent.rmpp_version;
        rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
        ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

        send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
                                        struct ib_rmpp_segment, list);
        send_wr->last_ack_seg = send_wr->cur_seg;
        return 0;
}

struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
                                           u32 remote_qpn, u16 pkey_index,
                                           int rmpp_active,
                                           int hdr_len, int data_len,
                                           gfp_t gfp_mask)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;
        int pad, message_size, ret, size;
        void *buf;

        mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
                                      agent);
        pad = get_pad_size(hdr_len, data_len);
        message_size = hdr_len + data_len + pad;

        if ((!mad_agent->rmpp_version &&
             (rmpp_active || message_size > sizeof(struct ib_mad))) ||
            (!rmpp_active && message_size > sizeof(struct ib_mad)))
                return ERR_PTR(-EINVAL);

        size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
        buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        mad_send_wr = buf + size;
        INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
        mad_send_wr->send_buf.mad = buf;
        mad_send_wr->send_buf.hdr_len = hdr_len;
        mad_send_wr->send_buf.data_len = data_len;
        mad_send_wr->pad = pad;

        mad_send_wr->mad_agent_priv = mad_agent_priv;
        mad_send_wr->sg_list[0].length = hdr_len;
        mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
        mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
        mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;

        mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
        mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
        mad_send_wr->send_wr.num_sge = 2;
        mad_send_wr->send_wr.opcode = IB_WR_SEND;
        mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
        mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
        mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
        mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

        if (rmpp_active) {
                ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
                if (ret) {
                        kfree(buf);
                        return ERR_PTR(ret);
                }
        }

        mad_send_wr->send_buf.mad_agent = mad_agent;
        atomic_inc(&mad_agent_priv->refcount);
        return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
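
/*
 * Usage sketch (illustrative only): building a single (non-RMPP) MAD on
 * an agent registered as above; "agent", "remote_qpn" and "pkey_index"
 * are assumed to exist in the caller.
 *
 *      struct ib_mad_send_buf *msg;
 *
 *      msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *                               IB_MGMT_MAD_HDR,
 *                               sizeof(struct ib_mad) - IB_MGMT_MAD_HDR,
 *                               GFP_KERNEL);
 *      if (IS_ERR(msg))
 *              return PTR_ERR(msg);
 *      ((struct ib_mad_hdr *) msg->mad)->method = IB_MGMT_METHOD_GET;
 */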

int ib_get_mad_data_offset(u8 mgmt_class)
{
        if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
                return IB_MGMT_SA_HDR;
        else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
                 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
                 (mgmt_class == IB_MGMT_CLASS_BIS))
                return IB_MGMT_DEVICE_HDR;
        else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
                 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
                return IB_MGMT_VENDOR_HDR;
        else
                return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);
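
/*
 * Example: for an SA-class MAD the payload therefore starts
 * IB_MGMT_SA_HDR bytes into the buffer, e.g. (sketch only; "mad" is a
 * hypothetical struct ib_mad pointer):
 *
 *      void *data = (u8 *) mad +
 *                   ib_get_mad_data_offset(IB_MGMT_CLASS_SUBN_ADM);
 */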

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
        if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
            (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
            (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
            (mgmt_class == IB_MGMT_CLASS_BIS) ||
            ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
             (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
                return 1;
        return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
        struct ib_mad_send_wr_private *mad_send_wr;
        struct list_head *list;

        mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
                                   send_buf);
        list = &mad_send_wr->cur_seg->list;

        if (mad_send_wr->cur_seg->num < seg_num) {
                list_for_each_entry(mad_send_wr->cur_seg, list, list)
                        if (mad_send_wr->cur_seg->num == seg_num)
                                break;
        } else if (mad_send_wr->cur_seg->num > seg_num) {
                list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
                        if (mad_send_wr->cur_seg->num == seg_num)
                                break;
        }
        return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);
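
/*
 * Usage sketch (illustrative only): a sender copies an RMPP payload into
 * the send buffer segment by segment; segment numbers are 1-based, as
 * assigned in alloc_send_rmpp_list().  "src" is hypothetical.
 *
 *      for (i = 1, left = msg->data_len; left > 0; i++, left -= copy) {
 *              copy = min(left, msg->seg_size);
 *              memcpy(ib_get_rmpp_segment(msg, i), src, copy);
 *              src += copy;
 *      }
 */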

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
        if (mad_send_wr->send_buf.seg_count)
                return ib_get_rmpp_segment(&mad_send_wr->send_buf,
                                           mad_send_wr->seg_num);
        else
                return mad_send_wr->send_buf.mad +
                       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;

        mad_agent_priv = container_of(send_buf->mad_agent,
                                      struct ib_mad_agent_private, agent);
        mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
                                   send_buf);

        free_send_rmpp_list(mad_send_wr);
        kfree(send_buf->mad);
        deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);
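
/*
 * Usage sketch (illustrative only): a minimal send_handler releases the
 * send buffer once the MAD layer is finished with it.  "my_send_handler"
 * is a hypothetical name.
 *
 *      static void my_send_handler(struct ib_mad_agent *agent,
 *                                  struct ib_mad_send_wc *mad_send_wc)
 *      {
 *              // inspect mad_send_wc->status here
 *              ib_free_send_mad(mad_send_wc->send_buf);
 *      }
 */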

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_mad_qp_info *qp_info;
        struct list_head *list;
        struct ib_send_wr *bad_send_wr;
        struct ib_mad_agent *mad_agent;
        struct ib_sge *sge;
        unsigned long flags;
        int ret;

        /* Set WR ID to find mad_send_wr upon completion */
        qp_info = mad_send_wr->mad_agent_priv->qp_info;
        mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
        mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

        mad_agent = mad_send_wr->send_buf.mad_agent;
        sge = mad_send_wr->sg_list;
        sge[0].addr = ib_dma_map_single(mad_agent->device,
                                        mad_send_wr->send_buf.mad,
                                        sge[0].length,
                                        DMA_TO_DEVICE);
        mad_send_wr->header_mapping = sge[0].addr;

        sge[1].addr = ib_dma_map_single(mad_agent->device,
                                        ib_get_payload(mad_send_wr),
                                        sge[1].length,
                                        DMA_TO_DEVICE);
        mad_send_wr->payload_mapping = sge[1].addr;

        spin_lock_irqsave(&qp_info->send_queue.lock, flags);
        if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
                ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
                                   &bad_send_wr);
                list = &qp_info->send_queue.list;
        } else {
                ret = 0;
                list = &qp_info->overflow_list;
        }

        if (!ret) {
                qp_info->send_queue.count++;
                list_add_tail(&mad_send_wr->mad_list.list, list);
        }
        spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
        if (ret) {
                ib_dma_unmap_single(mad_agent->device,
                                    mad_send_wr->header_mapping,
                                    sge[0].length, DMA_TO_DEVICE);
                ib_dma_unmap_single(mad_agent->device,
                                    mad_send_wr->payload_mapping,
                                    sge[1].length, DMA_TO_DEVICE);
        }
        return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
                     struct ib_mad_send_buf **bad_send_buf)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_buf *next_send_buf;
        struct ib_mad_send_wr_private *mad_send_wr;
        unsigned long flags;
        int ret = -EINVAL;

        /* Walk list of send WRs and post each on send list */
        for (; send_buf; send_buf = next_send_buf) {

                mad_send_wr = container_of(send_buf,
                                           struct ib_mad_send_wr_private,
                                           send_buf);
                mad_agent_priv = mad_send_wr->mad_agent_priv;

                if (!send_buf->mad_agent->send_handler ||
                    (send_buf->timeout_ms &&
                     !send_buf->mad_agent->recv_handler)) {
                        ret = -EINVAL;
                        goto error;
                }

                if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
                        if (mad_agent_priv->agent.rmpp_version) {
                                ret = -EINVAL;
                                goto error;
                        }
                }

                /*
                 * Save pointer to next work request to post in case the
                 * current one completes, and the user modifies the work
                 * request associated with the completion
                 */
                next_send_buf = send_buf->next;
                mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;

                if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
                    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                        ret = handle_outgoing_dr_smp(mad_agent_priv,
                                                     mad_send_wr);
                        if (ret < 0)            /* error */
                                goto error;
                        else if (ret == 1)      /* locally consumed */
                                continue;
                }

                mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
                /* Timeout will be updated after send completes */
                mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
                mad_send_wr->max_retries = send_buf->retries;
                mad_send_wr->retries_left = send_buf->retries;
                send_buf->retries = 0;
                /* Reference for work request to QP + response */
                mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
                mad_send_wr->status = IB_WC_SUCCESS;

                /* Reference MAD agent until send completes */
                atomic_inc(&mad_agent_priv->refcount);
                spin_lock_irqsave(&mad_agent_priv->lock, flags);
                list_add_tail(&mad_send_wr->agent_list,
                              &mad_agent_priv->send_list);
                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

                if (mad_agent_priv->agent.rmpp_version) {
                        ret = ib_send_rmpp_mad(mad_send_wr);
                        if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
                                ret = ib_send_mad(mad_send_wr);
                } else
                        ret = ib_send_mad(mad_send_wr);
                if (ret < 0) {
                        /* Fail send request */
                        spin_lock_irqsave(&mad_agent_priv->lock, flags);
                        list_del(&mad_send_wr->agent_list);
                        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
                        atomic_dec(&mad_agent_priv->refcount);
                        goto error;
                }
        }
        return 0;
error:
        if (bad_send_buf)
                *bad_send_buf = send_buf;
        return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
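
/*
 * Usage sketch (illustrative only): posting the buffer built above.  The
 * caller supplies the address handle; "ah" is hypothetical.
 *
 *      msg->ah = ah;
 *      msg->timeout_ms = 2000;
 *      msg->retries = 3;
 *      ret = ib_post_send_mad(msg, NULL);
 *      if (ret)
 *              ib_free_send_mad(msg);  // never accepted by the MAD layer
 */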

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
        struct ib_mad_private_header *mad_priv_hdr;
        struct ib_mad_private *priv;
        struct list_head free_list;

        INIT_LIST_HEAD(&free_list);
        list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

        list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
                                        &free_list, list) {
                mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
                                           recv_buf);
                mad_priv_hdr = container_of(mad_recv_wc,
                                            struct ib_mad_private_header,
                                            recv_wc);
                priv = container_of(mad_priv_hdr, struct ib_mad_private,
                                    header);
                kmem_cache_free(ib_mad_cache, priv);
        }
}
EXPORT_SYMBOL(ib_free_recv_mad);
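
/*
 * Example: a recv_handler such as my_recv_handler() above must hand every
 * receive work completion back with ib_free_recv_mad(mad_recv_wc) once it
 * is done with the buffers, including any RMPP list entries.
 */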
1169
1170 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1171                                         u8 rmpp_version,
1172                                         ib_mad_send_handler send_handler,
1173                                         ib_mad_recv_handler recv_handler,
1174                                         void *context)
1175 {
1176         return ERR_PTR(-EINVAL);        /* XXX: for now */
1177 }
1178 EXPORT_SYMBOL(ib_redirect_mad_qp);
1179
1180 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1181                       struct ib_wc *wc)
1182 {
1183         printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
1184         return 0;
1185 }
1186 EXPORT_SYMBOL(ib_process_mad_wc);
1187
1188 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1189                          struct ib_mad_reg_req *mad_reg_req)
1190 {
1191         int i;
1192
1193         for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
1194              i < IB_MGMT_MAX_METHODS;
1195              i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1196                                1+i)) {
1197                 if ((*method)->agent[i]) {
1198                         printk(KERN_ERR PFX "Method %d already in use\n", i);
1199                         return -EINVAL;
1200                 }
1201         }
1202         return 0;
1203 }
1204
1205 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1206 {
1207         /* Allocate management method table */
1208         *method = kzalloc(sizeof **method, GFP_ATOMIC);
1209         if (!*method) {
1210                 printk(KERN_ERR PFX "No memory for "
1211                        "ib_mad_mgmt_method_table\n");
1212                 return -ENOMEM;
1213         }
1214
1215         return 0;
1216 }
1217
1218 /*
1219  * Check to see if there are any methods still in use
1220  */
1221 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1222 {
1223         int i;
1224
1225         for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1226                 if (method->agent[i])
1227                         return 1;
1228         return 0;
1229 }
1230
1231 /*
1232  * Check to see if there are any method tables for this class still in use
1233  */
1234 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1235 {
1236         int i;
1237
1238         for (i = 0; i < MAX_MGMT_CLASS; i++)
1239                 if (class->method_table[i])
1240                         return 1;
1241         return 0;
1242 }
1243
1244 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1245 {
1246         int i;
1247
1248         for (i = 0; i < MAX_MGMT_OUI; i++)
1249                 if (vendor_class->method_table[i])
1250                         return 1;
1251         return 0;
1252 }
1253
1254 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1255                            char *oui)
1256 {
1257         int i;
1258
1259         for (i = 0; i < MAX_MGMT_OUI; i++)
1260                 /* Is there matching OUI for this vendor class ? */
1261                 if (!memcmp(vendor_class->oui[i], oui, 3))
1262                         return i;
1263
1264         return -1;
1265 }
1266
1267 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1268 {
1269         int i;
1270
1271         for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1272                 if (vendor->vendor_class[i])
1273                         return 1;
1274
1275         return 0;
1276 }
1277
1278 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1279                                      struct ib_mad_agent_private *agent)
1280 {
1281         int i;
1282
1283         /* Remove any methods for this mad agent */
1284         for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1285                 if (method->agent[i] == agent) {
1286                         method->agent[i] = NULL;
1287                 }
1288         }
1289 }
1290
1291 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1292                               struct ib_mad_agent_private *agent_priv,
1293                               u8 mgmt_class)
1294 {
1295         struct ib_mad_port_private *port_priv;
1296         struct ib_mad_mgmt_class_table **class;
1297         struct ib_mad_mgmt_method_table **method;
1298         int i, ret;
1299
1300         port_priv = agent_priv->qp_info->port_priv;
1301         class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1302         if (!*class) {
1303                 /* Allocate management class table for "new" class version */
1304                 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1305                 if (!*class) {
1306                         printk(KERN_ERR PFX "No memory for "
1307                                "ib_mad_mgmt_class_table\n");
1308                         ret = -ENOMEM;
1309                         goto error1;
1310                 }
1311
1312                 /* Allocate method table for this management class */
1313                 method = &(*class)->method_table[mgmt_class];
1314                 if ((ret = allocate_method_table(method)))
1315                         goto error2;
1316         } else {
1317                 method = &(*class)->method_table[mgmt_class];
1318                 if (!*method) {
1319                         /* Allocate method table for this management class */
1320                         if ((ret = allocate_method_table(method)))
1321                                 goto error1;
1322                 }
1323         }
1324
1325         /* Now, make sure methods are not already in use */
1326         if (method_in_use(method, mad_reg_req))
1327                 goto error3;
1328
1329         /* Finally, add in methods being registered */
1330         for (i = find_first_bit(mad_reg_req->method_mask,
1331                                 IB_MGMT_MAX_METHODS);
1332              i < IB_MGMT_MAX_METHODS;
1333              i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1334                                1+i)) {
1335                 (*method)->agent[i] = agent_priv;
1336         }
1337         return 0;
1338
1339 error3:
1340         /* Remove any methods for this mad agent */
1341         remove_methods_mad_agent(*method, agent_priv);
1342         /* Now, check to see if there are any methods in use */
1343         if (!check_method_table(*method)) {
1344                 /* If not, release management method table */
1345                 kfree(*method);
1346                 *method = NULL;
1347         }
1348         ret = -EINVAL;
1349         goto error1;
1350 error2:
1351         kfree(*class);
1352         *class = NULL;
1353 error1:
1354         return ret;
1355 }
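
/*
 * Usage sketch (illustrative): callers reach add_nonoui_reg_req() by
 * passing a registration request to ib_register_mad_agent().  A client
 * wanting unsolicited SA-class Get requests delivered to it might fill
 * in the request roughly as follows:
 *
 *	struct ib_mad_reg_req reg_req;
 *
 *	memset(&reg_req, 0, sizeof reg_req);
 *	reg_req.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
 *	reg_req.mgmt_class_version = 2;
 *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *
 * Each bit set in method_mask claims one slot in the per-port method
 * table built above; method_in_use() rejects bits already claimed.
 */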
1356
1357 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1358                            struct ib_mad_agent_private *agent_priv)
1359 {
1360         struct ib_mad_port_private *port_priv;
1361         struct ib_mad_mgmt_vendor_class_table **vendor_table;
1362         struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1363         struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1364         struct ib_mad_mgmt_method_table **method;
1365         int i, ret = -ENOMEM;
1366         u8 vclass;
1367
1368         /* "New" vendor (with OUI) class */
1369         vclass = vendor_class_index(mad_reg_req->mgmt_class);
1370         port_priv = agent_priv->qp_info->port_priv;
1371         vendor_table = &port_priv->version[
1372                                 mad_reg_req->mgmt_class_version].vendor;
1373         if (!*vendor_table) {
1374                 /* Allocate mgmt vendor class table for "new" class version */
1375                 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1376                 if (!vendor) {
1377                         printk(KERN_ERR PFX "No memory for "
1378                                "ib_mad_mgmt_vendor_class_table\n");
1379                         goto error1;
1380                 }
1381
1382                 *vendor_table = vendor;
1383         }
1384         if (!(*vendor_table)->vendor_class[vclass]) {
1385                 /* Allocate table for this management vendor class */
1386                 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1387                 if (!vendor_class) {
1388                         printk(KERN_ERR PFX "No memory for "
1389                                "ib_mad_mgmt_vendor_class\n");
1390                         goto error2;
1391                 }
1392
1393                 (*vendor_table)->vendor_class[vclass] = vendor_class;
1394         }
1395         for (i = 0; i < MAX_MGMT_OUI; i++) {
1396                 /* Is there a matching OUI for this vendor class? */
1397                 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1398                             mad_reg_req->oui, 3)) {
1399                         method = &(*vendor_table)->vendor_class[
1400                                                 vclass]->method_table[i];
1401                         BUG_ON(!*method);
1402                         goto check_in_use;
1403                 }
1404         }
1405         for (i = 0; i < MAX_MGMT_OUI; i++) {
1406                 /* OUI slot available? */
1407                 if (!is_vendor_oui((*vendor_table)->vendor_class[
1408                                 vclass]->oui[i])) {
1409                         method = &(*vendor_table)->vendor_class[
1410                                 vclass]->method_table[i];
1411                         BUG_ON(*method);
1412                         /* Allocate method table for this OUI */
1413                         if ((ret = allocate_method_table(method)))
1414                                 goto error3;
1415                         memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1416                                mad_reg_req->oui, 3);
1417                         goto check_in_use;
1418                 }
1419         }
1420         printk(KERN_ERR PFX "All OUI slots in use\n");
1421         goto error3;
1422
1423 check_in_use:
1424         /* Now, make sure methods are not already in use */
1425         if (method_in_use(method, mad_reg_req))
1426                 goto error4;
1427
1428         /* Finally, add in methods being registered */
1429         for (i = find_first_bit(mad_reg_req->method_mask,
1430                                 IB_MGMT_MAX_METHODS);
1431              i < IB_MGMT_MAX_METHODS;
1432              i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1433                                1+i)) {
1434                 (*method)->agent[i] = agent_priv;
1435         }
1436         return 0;
1437
1438 error4:
1439         /* Remove any methods for this mad agent */
1440         remove_methods_mad_agent(*method, agent_priv);
1441         /* Now, check to see if there are any methods in use */
1442         if (!check_method_table(*method)) {
1443                 /* If not, release management method table */
1444                 kfree(*method);
1445                 *method = NULL;
1446         }
1447         ret = -EINVAL;
1448 error3:
1449         if (vendor_class) {
1450                 (*vendor_table)->vendor_class[vclass] = NULL;
1451                 kfree(vendor_class);
1452         }
1453 error2:
1454         if (vendor) {
1455                 *vendor_table = NULL;
1456                 kfree(vendor);
1457         }
1458 error1:
1459         return ret;
1460 }
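
/*
 * Vendor registration sketch (illustrative): classes in the range
 * IB_MGMT_CLASS_VENDOR_RANGE2_START..IB_MGMT_CLASS_VENDOR_RANGE2_END
 * (0x30-0x4F) take this path, and the registrant must also supply its
 * 3-byte OUI:
 *
 *	reg_req.mgmt_class = IB_MGMT_CLASS_VENDOR_RANGE2_START;
 *	memcpy(reg_req.oui, oui, 3);	(oui: the caller's IEEE OUI)
 *
 * Up to MAX_MGMT_OUI OUIs may share one vendor class per port, each
 * with its own method table, as the two loops above arrange.
 */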
1461
1462 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1463 {
1464         struct ib_mad_port_private *port_priv;
1465         struct ib_mad_mgmt_class_table *class;
1466         struct ib_mad_mgmt_method_table *method;
1467         struct ib_mad_mgmt_vendor_class_table *vendor;
1468         struct ib_mad_mgmt_vendor_class *vendor_class;
1469         int index;
1470         u8 mgmt_class;
1471
1472         /*
1473          * Was a MAD registration request supplied
1474          * with the original registration?
1475          */
1476         if (!agent_priv->reg_req)
1477                 goto out;
1479
1480         port_priv = agent_priv->qp_info->port_priv;
1481         mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1482         class = port_priv->version[
1483                         agent_priv->reg_req->mgmt_class_version].class;
1484         if (!class)
1485                 goto vendor_check;
1486
1487         method = class->method_table[mgmt_class];
1488         if (method) {
1489                 /* Remove any methods for this mad agent */
1490                 remove_methods_mad_agent(method, agent_priv);
1491                 /* Now, check to see if there are any methods still in use */
1492                 if (!check_method_table(method)) {
1493                         /* If not, release management method table */
1494                         kfree(method);
1495                         class->method_table[mgmt_class] = NULL;
1496                         /* Any management classes left? */
1497                         if (!check_class_table(class)) {
1498                                 /* If not, release management class table */
1499                                 kfree(class);
1500                                 port_priv->version[
1501                                         agent_priv->reg_req->
1502                                         mgmt_class_version].class = NULL;
1503                         }
1504                 }
1505         }
1506
1507 vendor_check:
1508         if (!is_vendor_class(mgmt_class))
1509                 goto out;
1510
1511         /* normalize mgmt_class to vendor range 2 */
1512         mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1513         vendor = port_priv->version[
1514                         agent_priv->reg_req->mgmt_class_version].vendor;
1515
1516         if (!vendor)
1517                 goto out;
1518
1519         vendor_class = vendor->vendor_class[mgmt_class];
1520         if (vendor_class) {
1521                 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1522                 if (index < 0)
1523                         goto out;
1524                 method = vendor_class->method_table[index];
1525                 if (method) {
1526                         /* Remove any methods for this mad agent */
1527                         remove_methods_mad_agent(method, agent_priv);
1528                         /*
1529                          * Now, check to see if there are
1530                          * any methods still in use
1531                          */
1532                         if (!check_method_table(method)) {
1533                                 /* If not, release management method table */
1534                                 kfree(method);
1535                                 vendor_class->method_table[index] = NULL;
1536                                 memset(vendor_class->oui[index], 0, 3);
1537                                 /* Any OUIs left? */
1538                                 if (!check_vendor_class(vendor_class)) {
1539                                         /* If not, release vendor class table */
1540                                         kfree(vendor_class);
1541                                         vendor->vendor_class[mgmt_class] = NULL;
1542                                         /* Any other vendor classes left? */
1543                                         if (!check_vendor_table(vendor)) {
1544                                                 kfree(vendor);
1545                                                 port_priv->version[
1546                                                         agent_priv->reg_req->
1547                                                         mgmt_class_version].
1548                                                         vendor = NULL;
1549                                         }
1550                                 }
1551                         }
1552                 }
1553         }
1554
1555 out:
1556         return;
1557 }
1558
1559 static struct ib_mad_agent_private *
1560 find_mad_agent(struct ib_mad_port_private *port_priv,
1561                struct ib_mad *mad)
1562 {
1563         struct ib_mad_agent_private *mad_agent = NULL;
1564         unsigned long flags;
1565
1566         spin_lock_irqsave(&port_priv->reg_lock, flags);
1567         if (ib_response_mad(mad)) {
1568                 u32 hi_tid;
1569                 struct ib_mad_agent_private *entry;
1570
1571                 /*
1572                  * Routing is based on high 32 bits of transaction ID
1573                  * of MAD.
1574                  */
1575                 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
1576                 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1577                         if (entry->agent.hi_tid == hi_tid) {
1578                                 mad_agent = entry;
1579                                 break;
1580                         }
1581                 }
1582         } else {
1583                 struct ib_mad_mgmt_class_table *class;
1584                 struct ib_mad_mgmt_method_table *method;
1585                 struct ib_mad_mgmt_vendor_class_table *vendor;
1586                 struct ib_mad_mgmt_vendor_class *vendor_class;
1587                 struct ib_vendor_mad *vendor_mad;
1588                 int index;
1589
1590                 /*
1591                  * Routing is based on version, class, and method
1592                  * For "newer" vendor MADs, also based on OUI
1593                  */
1594                 if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
1595                         goto out;
1596                 if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1597                         class = port_priv->version[
1598                                         mad->mad_hdr.class_version].class;
1599                         if (!class)
1600                                 goto out;
1601                         method = class->method_table[convert_mgmt_class(
1602                                                         mad->mad_hdr.mgmt_class)];
1603                         if (method)
1604                                 mad_agent = method->agent[mad->mad_hdr.method &
1605                                                           ~IB_MGMT_METHOD_RESP];
1606                 } else {
1607                         vendor = port_priv->version[
1608                                         mad->mad_hdr.class_version].vendor;
1609                         if (!vendor)
1610                                 goto out;
1611                         vendor_class = vendor->vendor_class[vendor_class_index(
1612                                                 mad->mad_hdr.mgmt_class)];
1613                         if (!vendor_class)
1614                                 goto out;
1615                         /* Find matching OUI */
1616                         vendor_mad = (struct ib_vendor_mad *)mad;
1617                         index = find_vendor_oui(vendor_class, vendor_mad->oui);
1618                         if (index == -1)
1619                                 goto out;
1620                         method = vendor_class->method_table[index];
1621                         if (method) {
1622                                 mad_agent = method->agent[mad->mad_hdr.method &
1623                                                           ~IB_MGMT_METHOD_RESP];
1624                         }
1625                 }
1626         }
1627
1628         if (mad_agent) {
1629                 if (mad_agent->agent.recv_handler)
1630                         atomic_inc(&mad_agent->refcount);
1631                 else {
1632                         printk(KERN_NOTICE PFX "No receive handler for client "
1633                                "%p on port %d\n",
1634                                &mad_agent->agent, port_priv->port_num);
1635                         mad_agent = NULL;
1636                 }
1637         }
1638 out:
1639         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1640
1641         return mad_agent;
1642 }
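
/*
 * TID routing note (illustrative): clients build request TIDs as
 *
 *	tid = cpu_to_be64(((u64) agent->hi_tid) << 32 | user_tid);
 *
 * and responders echo the TID back unchanged, so the upper 32 bits
 * recovered above are enough to route a response to the agent that
 * sent the matching request.
 */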
1643
1644 static int validate_mad(struct ib_mad *mad, u32 qp_num)
1645 {
1646         int valid = 0;
1647
1648         /* Make sure MAD base version is understood */
1649         if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
1650                 printk(KERN_ERR PFX "MAD received with unsupported base "
1651                        "version %d\n", mad->mad_hdr.base_version);
1652                 goto out;
1653         }
1654
1655         /* Filter SMI packets sent to a QP other than QP0 */
1656         if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1657             (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1658                 if (qp_num == 0)
1659                         valid = 1;
1660         } else {
1661                 /* Filter GSI packets sent to QP0 */
1662                 if (qp_num != 0)
1663                         valid = 1;
1664         }
1665
1666 out:
1667         return valid;
1668 }
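
/*
 * In other words: SMI classes 0x01 (LID-routed) and 0x81 (directed
 * route) are accepted only on QP0, while every other management class
 * is accepted only on a non-SMI QP (in practice QP1, the GSI).
 */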
1669
1670 static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
1671                        struct ib_mad_hdr *mad_hdr)
1672 {
1673         struct ib_rmpp_mad *rmpp_mad;
1674
1675         rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1676         return !mad_agent_priv->agent.rmpp_version ||
1677                 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1678                                     IB_MGMT_RMPP_FLAG_ACTIVE) ||
1679                 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1680 }
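
/*
 * Put differently: a MAD counts as "data" here unless the agent speaks
 * RMPP, the segment has the RMPP active flag set, and its type is not
 * DATA (i.e. an ACK, STOP or ABORT), since such control segments never
 * correspond to an outstanding request of their own.
 */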
1681
1682 static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
1683                                      struct ib_mad_recv_wc *rwc)
1684 {
1685         return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
1686                 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1687 }
1688
1689 static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
1690                                    struct ib_mad_send_wr_private *wr,
1691                                    struct ib_mad_recv_wc *rwc)
1692 {
1693         struct ib_ah_attr attr;
1694         u8 send_resp, rcv_resp;
1695         union ib_gid sgid;
1696         struct ib_device *device = mad_agent_priv->agent.device;
1697         u8 port_num = mad_agent_priv->agent.port_num;
1698         u8 lmc;
1699
1700         send_resp = ((struct ib_mad *)(wr->send_buf.mad))->
1701                      mad_hdr.method & IB_MGMT_METHOD_RESP;
1702         rcv_resp = rwc->recv_buf.mad->mad_hdr.method & IB_MGMT_METHOD_RESP;
1703
1704         if (send_resp == rcv_resp)
1705                 /* both requests or both responses: assume GIDs differ */
1706                 return 0;
1707
1708         if (ib_query_ah(wr->send_buf.ah, &attr))
1709                 /* Assume not equal, to avoid false positives. */
1710                 return 0;
1711
1712         if (!!(attr.ah_flags & IB_AH_GRH) !=
1713             !!(rwc->wc->wc_flags & IB_WC_GRH))
1714                 /* one has a GRH, the other does not: assume different */
1715                 return 0;
1716
1717         if (!send_resp && rcv_resp) {
1718                 /* sent a request, received a response */
1719                 if (!(attr.ah_flags & IB_AH_GRH)) {
1720                         if (ib_get_cached_lmc(device, port_num, &lmc))
1721                                 return 0;
1722                         return (!lmc || !((attr.src_path_bits ^
1723                                            rwc->wc->dlid_path_bits) &
1724                                           ((1 << lmc) - 1)));
1725                 } else {
1726                         if (ib_get_cached_gid(device, port_num,
1727                                               attr.grh.sgid_index, &sgid))
1728                                 return 0;
1729                         return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1730                                        16);
1731                 }
1732         }
1733
1734         if (!(attr.ah_flags & IB_AH_GRH))
1735                 return attr.dlid == rwc->wc->slid;
1736         else
1737                 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1738                                16);
1739 }
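
/*
 * Worked example for the LMC check above: with lmc == 2 a port answers
 * to four LIDs, so only the low two path bits are significant.  For
 * src_path_bits == 5 and dlid_path_bits == 7,
 *
 *	(5 ^ 7) & ((1 << 2) - 1) = 2 & 3 = 2,
 *
 * which is non-zero, so the two addresses are treated as different.
 */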
1740
1741 static inline int is_direct(u8 class)
1742 {
1743         return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1744 }
1745
1746 struct ib_mad_send_wr_private *
1747 ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
1748                  struct ib_mad_recv_wc *wc)
1749 {
1750         struct ib_mad_send_wr_private *wr;
1751         struct ib_mad *mad;
1752
1753         mad = (struct ib_mad *)wc->recv_buf.mad;
1754
1755         list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1756                 if ((wr->tid == mad->mad_hdr.tid) &&
1757                     rcv_has_same_class(wr, wc) &&
1758                     /*
1759                      * Don't check GID for direct routed MADs.
1760                      * These might have permissive LIDs.
1761                      */
1762                     (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1763                      rcv_has_same_gid(mad_agent_priv, wr, wc)))
1764                         return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1765         }
1766
1767         /*
1768          * It's possible to receive the response before we've
1769          * been notified that the send has completed
1770          */
1771         list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1772                 if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1773                     wr->tid == mad->mad_hdr.tid &&
1774                     wr->timeout &&
1775                     rcv_has_same_class(wr, wc) &&
1776                     /*
1777                      * Don't check GID for direct routed MADs.
1778                      * These might have permissive LIDs.
1779                      */
1780                     (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1781                      rcv_has_same_gid(mad_agent_priv, wr, wc)))
1782                         /* Verify request has not been canceled */
1783                         return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1784         }
1785         return NULL;
1786 }
1787
1788 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1789 {
1790         mad_send_wr->timeout = 0;
1791         if (mad_send_wr->refcount == 1)
1792                 list_move_tail(&mad_send_wr->agent_list,
1793                               &mad_send_wr->mad_agent_priv->done_list);
1794 }
1795
1796 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1797                                  struct ib_mad_recv_wc *mad_recv_wc)
1798 {
1799         struct ib_mad_send_wr_private *mad_send_wr;
1800         struct ib_mad_send_wc mad_send_wc;
1801         unsigned long flags;
1802
1803         INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1804         list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1805         if (mad_agent_priv->agent.rmpp_version) {
1806                 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1807                                                       mad_recv_wc);
1808                 if (!mad_recv_wc) {
1809                         deref_mad_agent(mad_agent_priv);
1810                         return;
1811                 }
1812         }
1813
1814         /* Complete corresponding request */
1815         if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
1816                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1817                 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1818                 if (!mad_send_wr) {
1819                         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1820                         ib_free_recv_mad(mad_recv_wc);
1821                         deref_mad_agent(mad_agent_priv);
1822                         return;
1823                 }
1824                 ib_mark_mad_done(mad_send_wr);
1825                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1826
1827                 /* Defined behavior is to complete response before request */
1828                 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
1829                 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1830                                                    mad_recv_wc);
1831                 atomic_dec(&mad_agent_priv->refcount);
1832
1833                 mad_send_wc.status = IB_WC_SUCCESS;
1834                 mad_send_wc.vendor_err = 0;
1835                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1836                 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1837         } else {
1838                 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1839                                                    mad_recv_wc);
1840                 deref_mad_agent(mad_agent_priv);
1841         }
1842 }
1843
1844 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1845                                      struct ib_wc *wc)
1846 {
1847         struct ib_mad_qp_info *qp_info;
1848         struct ib_mad_private_header *mad_priv_hdr;
1849         struct ib_mad_private *recv, *response = NULL;
1850         struct ib_mad_list_head *mad_list;
1851         struct ib_mad_agent_private *mad_agent;
1852         int port_num;
1853
1854         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1855         qp_info = mad_list->mad_queue->qp_info;
1856         dequeue_mad(mad_list);
1857
1858         mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1859                                     mad_list);
1860         recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1861         ib_dma_unmap_single(port_priv->device,
1862                             recv->header.mapping,
1863                             sizeof(struct ib_mad_private) -
1864                               sizeof(struct ib_mad_private_header),
1865                             DMA_FROM_DEVICE);
1866
1867         /* Set up MAD receive work completion from "normal" work completion */
1868         recv->header.wc = *wc;
1869         recv->header.recv_wc.wc = &recv->header.wc;
1870         recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1871         recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1872         recv->header.recv_wc.recv_buf.grh = &recv->grh;
1873
1874         if (atomic_read(&qp_info->snoop_count))
1875                 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
1876
1877         /* Validate MAD */
1878         if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
1879                 goto out;
1880
1881         response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1882         if (!response) {
1883                 printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
1884                        "for response buffer\n");
1885                 goto out;
1886         }
1887
1888         if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
1889                 port_num = wc->port_num;
1890         else
1891                 port_num = port_priv->port_num;
1892
1893         if (recv->mad.mad.mad_hdr.mgmt_class ==
1894             IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1895                 enum smi_forward_action retsmi;
1896
1897                 if (smi_handle_dr_smp_recv(&recv->mad.smp,
1898                                            port_priv->device->node_type,
1899                                            port_num,
1900                                            port_priv->device->phys_port_cnt) ==
1901                                            IB_SMI_DISCARD)
1902                         goto out;
1903
1904                 retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
1905                 if (retsmi == IB_SMI_LOCAL)
1906                         goto local;
1907
1908                 if (retsmi == IB_SMI_SEND) { /* don't forward */
1909                         if (smi_handle_dr_smp_send(&recv->mad.smp,
1910                                                    port_priv->device->node_type,
1911                                                    port_num) == IB_SMI_DISCARD)
1912                                 goto out;
1913
1914                         if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
1915                                 goto out;
1916                 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
1917                         /* forward case for switches */
1918                         memcpy(response, recv, sizeof(*response));
1919                         response->header.recv_wc.wc = &response->header.wc;
1920                         response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1921                         response->header.recv_wc.recv_buf.grh = &response->grh;
1922
1923                         agent_send_response(&response->mad.mad,
1924                                             &response->grh, wc,
1925                                             port_priv->device,
1926                                             smi_get_fwd_port(&recv->mad.smp),
1927                                             qp_info->qp->qp_num);
1928
1929                         goto out;
1930                 }
1931         }
1932
1933 local:
1934         /* Give driver "right of first refusal" on incoming MAD */
1935         if (port_priv->device->process_mad) {
1936                 int ret;
1937
1938                 ret = port_priv->device->process_mad(port_priv->device, 0,
1939                                                      port_priv->port_num,
1940                                                      wc, &recv->grh,
1941                                                      &recv->mad.mad,
1942                                                      &response->mad.mad);
1943                 if (ret & IB_MAD_RESULT_SUCCESS) {
1944                         if (ret & IB_MAD_RESULT_CONSUMED)
1945                                 goto out;
1946                         if (ret & IB_MAD_RESULT_REPLY) {
1947                                 agent_send_response(&response->mad.mad,
1948                                                     &recv->grh, wc,
1949                                                     port_priv->device,
1950                                                     port_num,
1951                                                     qp_info->qp->qp_num);
1952                                 goto out;
1953                         }
1954                 }
1955         }
1956
1957         mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1958         if (mad_agent) {
1959                 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
1960                 /*
1961                  * recv is freed by ib_mad_complete_recv(), either on
1962                  * its error paths or via the client's recv_handler
1963                  */
1964                 recv = NULL;
1965         }
1966
1967 out:
1968         /* Post another receive request for this QP */
1969         if (response) {
1970                 ib_mad_post_receive_mads(qp_info, response);
1971                 if (recv)
1972                         kmem_cache_free(ib_mad_cache, recv);
1973         } else
1974                 ib_mad_post_receive_mads(qp_info, recv);
1975 }
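
/*
 * DR SMP recap (informal): smi_handle_dr_smp_recv() validates and
 * advances the directed-route hop state; smi_check_forward_dr_smp()
 * then yields IB_SMI_LOCAL (handle on this port), IB_SMI_SEND (do the
 * send-side hop handling here rather than forwarding), or a forward
 * indication, which on a switch resends the SMP out the port returned
 * by smi_get_fwd_port().
 */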
1976
1977 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1978 {
1979         struct ib_mad_send_wr_private *mad_send_wr;
1980         unsigned long delay;
1981
1982         if (list_empty(&mad_agent_priv->wait_list)) {
1983                 cancel_delayed_work(&mad_agent_priv->timed_work);
1984         } else {
1985                 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
1986                                          struct ib_mad_send_wr_private,
1987                                          agent_list);
1988
1989                 if (time_after(mad_agent_priv->timeout,
1990                                mad_send_wr->timeout)) {
1991                         mad_agent_priv->timeout = mad_send_wr->timeout;
1992                         cancel_delayed_work(&mad_agent_priv->timed_work);
1993                         delay = mad_send_wr->timeout - jiffies;
1994                         if ((long)delay <= 0)
1995                                 delay = 1;
1996                         queue_delayed_work(mad_agent_priv->qp_info->
1997                                            port_priv->wq,
1998                                            &mad_agent_priv->timed_work, delay);
1999                 }
2000         }
2001 }
2002
2003 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2004 {
2005         struct ib_mad_agent_private *mad_agent_priv;
2006         struct ib_mad_send_wr_private *temp_mad_send_wr;
2007         struct list_head *list_item;
2008         unsigned long delay;
2009
2010         mad_agent_priv = mad_send_wr->mad_agent_priv;
2011         list_del(&mad_send_wr->agent_list);
2012
2013         delay = mad_send_wr->timeout;
2014         mad_send_wr->timeout += jiffies;
2015
2016         if (delay) {
2017                 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2018                         temp_mad_send_wr = list_entry(list_item,
2019                                                 struct ib_mad_send_wr_private,
2020                                                 agent_list);
2021                         if (time_after(mad_send_wr->timeout,
2022                                        temp_mad_send_wr->timeout))
2023                                 break;
2024                 }
2025         } else
2027                 list_item = &mad_agent_priv->wait_list;
2028         list_add(&mad_send_wr->agent_list, list_item);
2029
2030         /* Reschedule a work item if we have a shorter timeout */
2031         if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
2032                 cancel_delayed_work(&mad_agent_priv->timed_work);
2033                 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2034                                    &mad_agent_priv->timed_work, delay);
2035         }
2036 }
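
/*
 * The backwards walk above keeps wait_list sorted by ascending
 * absolute timeout: the new entry is inserted behind the first entry
 * (scanning from the tail) that expires earlier, so the list head is
 * always the next send due to time out.
 */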
2037
2038 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2039                           int timeout_ms)
2040 {
2041         mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2042         wait_for_response(mad_send_wr);
2043 }
2044
2045 /*
2046  * Process a send work completion
2047  */
2048 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2049                              struct ib_mad_send_wc *mad_send_wc)
2050 {
2051         struct ib_mad_agent_private     *mad_agent_priv;
2052         unsigned long                   flags;
2053         int                             ret;
2054
2055         mad_agent_priv = mad_send_wr->mad_agent_priv;
2056         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2057         if (mad_agent_priv->agent.rmpp_version) {
2058                 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2059                 if (ret == IB_RMPP_RESULT_CONSUMED)
2060                         goto done;
2061         } else
2062                 ret = IB_RMPP_RESULT_UNHANDLED;
2063
2064         if (mad_send_wc->status != IB_WC_SUCCESS &&
2065             mad_send_wr->status == IB_WC_SUCCESS) {
2066                 mad_send_wr->status = mad_send_wc->status;
2067                 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2068         }
2069
2070         if (--mad_send_wr->refcount > 0) {
2071                 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2072                     mad_send_wr->status == IB_WC_SUCCESS) {
2073                         wait_for_response(mad_send_wr);
2074                 }
2075                 goto done;
2076         }
2077
2078         /* Remove send from MAD agent and notify client of completion */
2079         list_del(&mad_send_wr->agent_list);
2080         adjust_timeout(mad_agent_priv);
2081         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2082
2083         if (mad_send_wr->status != IB_WC_SUCCESS)
2084                 mad_send_wc->status = mad_send_wr->status;
2085         if (ret == IB_RMPP_RESULT_INTERNAL)
2086                 ib_rmpp_send_handler(mad_send_wc);
2087         else
2088                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2089                                                    mad_send_wc);
2090
2091         /* Release reference on agent taken when sending */
2092         deref_mad_agent(mad_agent_priv);
2093         return;
2094 done:
2095         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2096 }
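
/*
 * Refcount recap (informal): ib_post_send_mad() starts a send at
 * refcount 1 + (timeout > 0), i.e. 2 when a response is expected, so
 * the decrement above reaches zero only after both the send completion
 * and the response (or its timeout) have been seen.
 */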
2097
2098 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2099                                      struct ib_wc *wc)
2100 {
2101         struct ib_mad_send_wr_private   *mad_send_wr, *queued_send_wr;
2102         struct ib_mad_list_head         *mad_list;
2103         struct ib_mad_qp_info           *qp_info;
2104         struct ib_mad_queue             *send_queue;
2105         struct ib_send_wr               *bad_send_wr;
2106         struct ib_mad_send_wc           mad_send_wc;
2107         unsigned long flags;
2108         int ret;
2109
2110         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2111         mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2112                                    mad_list);
2113         send_queue = mad_list->mad_queue;
2114         qp_info = send_queue->qp_info;
2115
2116 retry:
2117         ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2118                             mad_send_wr->header_mapping,
2119                             mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2120         ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2121                             mad_send_wr->payload_mapping,
2122                             mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2123         queued_send_wr = NULL;
2124         spin_lock_irqsave(&send_queue->lock, flags);
2125         list_del(&mad_list->list);
2126
2127         /* Move queued send to the send queue */
2128         if (send_queue->count-- > send_queue->max_active) {
2129                 mad_list = container_of(qp_info->overflow_list.next,
2130                                         struct ib_mad_list_head, list);
2131                 queued_send_wr = container_of(mad_list,
2132                                         struct ib_mad_send_wr_private,
2133                                         mad_list);
2134                 list_move_tail(&mad_list->list, &send_queue->list);
2135         }
2136         spin_unlock_irqrestore(&send_queue->lock, flags);
2137
2138         mad_send_wc.send_buf = &mad_send_wr->send_buf;
2139         mad_send_wc.status = wc->status;
2140         mad_send_wc.vendor_err = wc->vendor_err;
2141         if (atomic_read(&qp_info->snoop_count))
2142                 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2143                            IB_MAD_SNOOP_SEND_COMPLETIONS);
2144         ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2145
2146         if (queued_send_wr) {
2147                 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
2148                                    &bad_send_wr);
2149                 if (ret) {
2150                         printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
2151                         mad_send_wr = queued_send_wr;
2152                         wc->status = IB_WC_LOC_QP_OP_ERR;
2153                         goto retry;
2154                 }
2155         }
2156 }
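
/*
 * Queueing recap (informal): ib_send_mad() parks sends beyond the QP's
 * max_active limit on qp_info->overflow_list; the completion path
 * above drains that list one entry per completion, so the hardware
 * send queue never holds more than max_active work requests.
 */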
2157
2158 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2159 {
2160         struct ib_mad_send_wr_private *mad_send_wr;
2161         struct ib_mad_list_head *mad_list;
2162         unsigned long flags;
2163
2164         spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2165         list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2166                 mad_send_wr = container_of(mad_list,
2167                                            struct ib_mad_send_wr_private,
2168                                            mad_list);
2169                 mad_send_wr->retry = 1;
2170         }
2171         spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2172 }
2173
2174 static void mad_error_handler(struct ib_mad_port_private *port_priv,
2175                               struct ib_wc *wc)
2176 {
2177         struct ib_mad_list_head *mad_list;
2178         struct ib_mad_qp_info *qp_info;
2179         struct ib_mad_send_wr_private *mad_send_wr;
2180         int ret;
2181
2182         /* Determine if failure was a send or receive */
2183         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2184         qp_info = mad_list->mad_queue->qp_info;
2185         if (mad_list->mad_queue == &qp_info->recv_queue)
2186                 /*
2187                  * Receive errors indicate that the QP has entered the error
2188                  * state - error handling/shutdown code will clean up
2189                  */
2190                 return;
2191
2192         /*
2193          * Send errors will transition the QP to SQE - move
2194          * QP to RTS and repost flushed work requests
2195          */
2196         mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2197                                    mad_list);
2198         if (wc->status == IB_WC_WR_FLUSH_ERR) {
2199                 if (mad_send_wr->retry) {
2200                         /* Repost send */
2201                         struct ib_send_wr *bad_send_wr;
2202
2203                         mad_send_wr->retry = 0;
2204                         ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2205                                         &bad_send_wr);
2206                         if (ret)
2207                                 ib_mad_send_done_handler(port_priv, wc);
2208                 } else
2209                         ib_mad_send_done_handler(port_priv, wc);
2210         } else {
2211                 struct ib_qp_attr *attr;
2212
2213                 /* Transition QP to RTS and fail offending send */
2214                 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2215                 if (attr) {
2216                         attr->qp_state = IB_QPS_RTS;
2217                         attr->cur_qp_state = IB_QPS_SQE;
2218                         ret = ib_modify_qp(qp_info->qp, attr,
2219                                            IB_QP_STATE | IB_QP_CUR_STATE);
2220                         kfree(attr);
2221                         if (ret)
2222                                 printk(KERN_ERR PFX "mad_error_handler - "
2223                                        "ib_modify_qp to RTS: %d\n", ret);
2224                         else
2225                                 mark_sends_for_retry(qp_info);
2226                 }
2227                 ib_mad_send_done_handler(port_priv, wc);
2228         }
2229 }
2230
2231 /*
2232  * IB MAD completion callback
2233  */
2234 static void ib_mad_completion_handler(struct work_struct *work)
2235 {
2236         struct ib_mad_port_private *port_priv;
2237         struct ib_wc wc;
2238
2239         port_priv = container_of(work, struct ib_mad_port_private, work);
2240         ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2241
2242         while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2243                 if (wc.status == IB_WC_SUCCESS) {
2244                         switch (wc.opcode) {
2245                         case IB_WC_SEND:
2246                                 ib_mad_send_done_handler(port_priv, &wc);
2247                                 break;
2248                         case IB_WC_RECV:
2249                                 ib_mad_recv_done_handler(port_priv, &wc);
2250                                 break;
2251                         default:
2252                                 BUG_ON(1);
2253                                 break;
2254                         }
2255                 } else
2256                         mad_error_handler(port_priv, &wc);
2257         }
2258 }
2259
2260 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2261 {
2262         unsigned long flags;
2263         struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2264         struct ib_mad_send_wc mad_send_wc;
2265         struct list_head cancel_list;
2266
2267         INIT_LIST_HEAD(&cancel_list);
2268
2269         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2270         list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2271                                  &mad_agent_priv->send_list, agent_list) {
2272                 if (mad_send_wr->status == IB_WC_SUCCESS) {
2273                         mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2274                         mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2275                 }
2276         }
2277
2278         /* Empty wait list to prevent receives from finding a request */
2279         list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2280         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2281
2282         /* Report all cancelled requests */
2283         mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2284         mad_send_wc.vendor_err = 0;
2285
2286         list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2287                                  &cancel_list, agent_list) {
2288                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2289                 list_del(&mad_send_wr->agent_list);
2290                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2291                                                    &mad_send_wc);
2292                 atomic_dec(&mad_agent_priv->refcount);
2293         }
2294 }
2295
2296 static struct ib_mad_send_wr_private *
2297 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2298              struct ib_mad_send_buf *send_buf)
2299 {
2300         struct ib_mad_send_wr_private *mad_send_wr;
2301
2302         list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2303                             agent_list) {
2304                 if (&mad_send_wr->send_buf == send_buf)
2305                         return mad_send_wr;
2306         }
2307
2308         list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2309                             agent_list) {
2310                 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
2311                     &mad_send_wr->send_buf == send_buf)
2312                         return mad_send_wr;
2313         }
2314         return NULL;
2315 }
2316
2317 int ib_modify_mad(struct ib_mad_agent *mad_agent,
2318                   struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2319 {
2320         struct ib_mad_agent_private *mad_agent_priv;
2321         struct ib_mad_send_wr_private *mad_send_wr;
2322         unsigned long flags;
2323         int active;
2324
2325         mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2326                                       agent);
2327         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2328         mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2329         if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2330                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2331                 return -EINVAL;
2332         }
2333
2334         active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2335         if (!timeout_ms) {
2336                 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2337                 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2338         }
2339
2340         mad_send_wr->send_buf.timeout_ms = timeout_ms;
2341         if (active)
2342                 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2343         else
2344                 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2345
2346         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2347         return 0;
2348 }
2349 EXPORT_SYMBOL(ib_modify_mad);
2350
2351 void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2352                    struct ib_mad_send_buf *send_buf)
2353 {
2354         ib_modify_mad(mad_agent, send_buf, 0);
2355 }
2356 EXPORT_SYMBOL(ib_cancel_mad);
2357
2358 static void local_completions(struct work_struct *work)
2359 {
2360         struct ib_mad_agent_private *mad_agent_priv;
2361         struct ib_mad_local_private *local;
2362         struct ib_mad_agent_private *recv_mad_agent;
2363         unsigned long flags;
2364         int recv = 0;
2365         struct ib_wc wc;
2366         struct ib_mad_send_wc mad_send_wc;
2367
2368         mad_agent_priv =
2369                 container_of(work, struct ib_mad_agent_private, local_work);
2370
2371         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2372         while (!list_empty(&mad_agent_priv->local_list)) {
2373                 local = list_entry(mad_agent_priv->local_list.next,
2374                                    struct ib_mad_local_private,
2375                                    completion_list);
2376                 list_del(&local->completion_list);
2377                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2378                 if (local->mad_priv) {
2379                         recv_mad_agent = local->recv_mad_agent;
2380                         if (!recv_mad_agent) {
2381                                 printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
2382                                 goto local_send_completion;
2383                         }
2384
2385                         recv = 1;
2386                         /*
2387                          * Defined behavior is to complete response
2388                          * before request
2389                          */
2390                         build_smp_wc(recv_mad_agent->agent.qp,
2391                                      (unsigned long) local->mad_send_wr,
2392                                      be16_to_cpu(IB_LID_PERMISSIVE),
2393                                      0, recv_mad_agent->agent.port_num, &wc);
2394
2395                         local->mad_priv->header.recv_wc.wc = &wc;
2396                         local->mad_priv->header.recv_wc.mad_len =
2397                                                 sizeof(struct ib_mad);
2398                         INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2399                         list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2400                                  &local->mad_priv->header.recv_wc.rmpp_list);
2401                         local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2402                         local->mad_priv->header.recv_wc.recv_buf.mad =
2403                                                 &local->mad_priv->mad.mad;
2404                         if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2405                                 snoop_recv(recv_mad_agent->qp_info,
2406                                            &local->mad_priv->header.recv_wc,
2407                                            IB_MAD_SNOOP_RECVS);
2408                         recv_mad_agent->agent.recv_handler(
2409                                                 &recv_mad_agent->agent,
2410                                                 &local->mad_priv->header.recv_wc);
2411                         spin_lock_irqsave(&recv_mad_agent->lock, flags);
2412                         atomic_dec(&recv_mad_agent->refcount);
2413                         spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2414                 }
2415
2416 local_send_completion:
2417                 /* Complete send */
2418                 mad_send_wc.status = IB_WC_SUCCESS;
2419                 mad_send_wc.vendor_err = 0;
2420                 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2421                 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2422                         snoop_send(mad_agent_priv->qp_info,
2423                                    &local->mad_send_wr->send_buf,
2424                                    &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2425                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2426                                                    &mad_send_wc);
2427
2428                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2429                 atomic_dec(&mad_agent_priv->refcount);
2430                 if (!recv)
2431                         kmem_cache_free(ib_mad_cache, local->mad_priv);
2432                 kfree(local);
2433         }
2434         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2435 }
2436
2437 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2438 {
2439         int ret;
2440
2441         if (!mad_send_wr->retries_left)
2442                 return -ETIMEDOUT;
2443
2444         mad_send_wr->retries_left--;
2445         mad_send_wr->send_buf.retries++;
2446
2447         mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2448
2449         if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
2450                 ret = ib_retry_rmpp(mad_send_wr);
2451                 switch (ret) {
2452                 case IB_RMPP_RESULT_UNHANDLED:
2453                         ret = ib_send_mad(mad_send_wr);
2454                         break;
2455                 case IB_RMPP_RESULT_CONSUMED:
2456                         ret = 0;
2457                         break;
2458                 default:
2459                         ret = -ECOMM;
2460                         break;
2461                 }
2462         } else
2463                 ret = ib_send_mad(mad_send_wr);
2464
2465         if (!ret) {
2466                 mad_send_wr->refcount++;
2467                 list_add_tail(&mad_send_wr->agent_list,
2468                               &mad_send_wr->mad_agent_priv->send_list);
2469         }
2470         return ret;
2471 }
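
/*
 * Retry recap (informal): retries_left is seeded from the
 * client-supplied send_buf.retries when the MAD is first posted, after
 * which send_buf.retries is reused to count the resends actually
 * performed, as the increment above shows.
 */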
2472
2473 static void timeout_sends(struct work_struct *work)
2474 {
2475         struct ib_mad_agent_private *mad_agent_priv;
2476         struct ib_mad_send_wr_private *mad_send_wr;
2477         struct ib_mad_send_wc mad_send_wc;
2478         unsigned long flags, delay;
2479
2480         mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2481                                       timed_work.work);
2482         mad_send_wc.vendor_err = 0;
2483
2484         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2485         while (!list_empty(&mad_agent_priv->wait_list)) {
2486                 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2487                                          struct ib_mad_send_wr_private,
2488                                          agent_list);
2489
2490                 if (time_after(mad_send_wr->timeout, jiffies)) {
2491                         delay = mad_send_wr->timeout - jiffies;
2492                         if ((long)delay <= 0)
2493                                 delay = 1;
2494                         queue_delayed_work(mad_agent_priv->qp_info->
2495                                            port_priv->wq,
2496                                            &mad_agent_priv->timed_work, delay);
2497                         break;
2498                 }
2499
2500                 list_del(&mad_send_wr->agent_list);
2501                 if (mad_send_wr->status == IB_WC_SUCCESS &&
2502                     !retry_send(mad_send_wr))
2503                         continue;
2504
2505                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2506
2507                 if (mad_send_wr->status == IB_WC_SUCCESS)
2508                         mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2509                 else
2510                         mad_send_wc.status = mad_send_wr->status;
2511                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2512                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2513                                                    &mad_send_wc);
2514
2515                 atomic_dec(&mad_agent_priv->refcount);
2516                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2517         }
2518         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2519 }
2520
2521 static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
2522 {
2523         struct ib_mad_port_private *port_priv = cq->cq_context;
2524         unsigned long flags;
2525
2526         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2527         if (!list_empty(&port_priv->port_list))
2528                 queue_work(port_priv->wq, &port_priv->work);
2529         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2530 }
2531
2532 /*
2533  * Allocate receive MADs and post receive WRs for them
2534  */
2535 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2536                                     struct ib_mad_private *mad)
2537 {
2538         unsigned long flags;
2539         int post, ret;
2540         struct ib_mad_private *mad_priv;
2541         struct ib_sge sg_list;
2542         struct ib_recv_wr recv_wr, *bad_recv_wr;
2543         struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2544
2545         /* Initialize common scatter list fields */
2546         sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2547         sg_list.lkey = qp_info->port_priv->mr->lkey;
2548
2549         /* Initialize common receive WR fields */
2550         recv_wr.next = NULL;
2551         recv_wr.sg_list = &sg_list;
2552         recv_wr.num_sge = 1;
2553
2554         do {
2555                 /* Allocate and map receive buffer */
2556                 if (mad) {
2557                         mad_priv = mad;
2558                         mad = NULL;
2559                 } else {
2560                         mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
2561                         if (!mad_priv) {
2562                                 printk(KERN_ERR PFX "No memory for receive buffer\n");
2563                                 ret = -ENOMEM;
2564                                 break;
2565                         }
2566                 }
2567                 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2568                                                  &mad_priv->grh,
2569                                                  sizeof *mad_priv -
2570                                                    sizeof mad_priv->header,
2571                                                  DMA_FROM_DEVICE);
                mad_priv->header.mapping = sg_list.addr;
                recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
                mad_priv->header.mad_list.mad_queue = recv_queue;

                /* Post receive WR */
                spin_lock_irqsave(&recv_queue->lock, flags);
                post = (++recv_queue->count < recv_queue->max_active);
                list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
                spin_unlock_irqrestore(&recv_queue->lock, flags);
                ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
                if (ret) {
                        spin_lock_irqsave(&recv_queue->lock, flags);
                        list_del(&mad_priv->header.mad_list.list);
                        recv_queue->count--;
                        spin_unlock_irqrestore(&recv_queue->lock, flags);
                        ib_dma_unmap_single(qp_info->port_priv->device,
                                            mad_priv->header.mapping,
                                            sizeof *mad_priv -
                                              sizeof mad_priv->header,
                                            DMA_FROM_DEVICE);
                        kmem_cache_free(ib_mad_cache, mad_priv);
                        printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
                        break;
                }
        } while (post);

        return ret;
}

/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
        struct ib_mad_private_header *mad_priv_hdr;
        struct ib_mad_private *recv;
        struct ib_mad_list_head *mad_list;

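        /*
         * No queue lock is needed here: this runs only after the QP has
         * been destroyed, so no receive completions can race with the
         * list walk.
         */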
        while (!list_empty(&qp_info->recv_queue.list)) {

                mad_list = list_entry(qp_info->recv_queue.list.next,
                                      struct ib_mad_list_head, list);
                mad_priv_hdr = container_of(mad_list,
                                            struct ib_mad_private_header,
                                            mad_list);
                recv = container_of(mad_priv_hdr, struct ib_mad_private,
                                    header);

                /* Remove from posted receive MAD list */
                list_del(&mad_list->list);

                ib_dma_unmap_single(qp_info->port_priv->device,
                                    recv->header.mapping,
                                    sizeof(struct ib_mad_private) -
                                      sizeof(struct ib_mad_private_header),
                                    DMA_FROM_DEVICE);
                kmem_cache_free(ib_mad_cache, recv);
        }

        qp_info->recv_queue.count = 0;
}

/*
 * Start the port
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
        int ret, i;
        struct ib_qp_attr *attr;
        struct ib_qp *qp;

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr) {
                printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
                return -ENOMEM;
        }

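        /*
         * Bring each special QP through INIT -> RTR -> RTS.  Special QPs
         * need only the state itself, a PKey index, the well-known QKey,
         * and a send queue PSN.
         */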
        for (i = 0; i < IB_MAD_QPS_CORE; i++) {
                qp = port_priv->qp_info[i].qp;
                /*
                 * The PKey index is irrelevant for QP1, but one must be
                 * supplied for the Reset-to-Init transition.
                 */
                attr->qp_state = IB_QPS_INIT;
                attr->pkey_index = 0;
                attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
                ret = ib_modify_qp(qp, attr, IB_QP_STATE |
                                             IB_QP_PKEY_INDEX | IB_QP_QKEY);
                if (ret) {
                        printk(KERN_ERR PFX "Couldn't change QP%d state to "
                               "INIT: %d\n", i, ret);
                        goto out;
                }

                attr->qp_state = IB_QPS_RTR;
                ret = ib_modify_qp(qp, attr, IB_QP_STATE);
                if (ret) {
                        printk(KERN_ERR PFX "Couldn't change QP%d state to "
                               "RTR: %d\n", i, ret);
                        goto out;
                }

                attr->qp_state = IB_QPS_RTS;
                attr->sq_psn = IB_MAD_SEND_Q_PSN;
                ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
                if (ret) {
                        printk(KERN_ERR PFX "Couldn't change QP%d state to "
                               "RTS: %d\n", i, ret);
                        goto out;
                }
        }

        ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
        if (ret) {
                printk(KERN_ERR PFX "Failed to request completion "
                       "notification: %d\n", ret);
                goto out;
        }

        for (i = 0; i < IB_MAD_QPS_CORE; i++) {
                ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
                if (ret) {
                        printk(KERN_ERR PFX "Couldn't post receive WRs\n");
                        goto out;
                }
        }
out:
        kfree(attr);
        return ret;
}

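/*
 * QP async event handler: fatal errors on a MAD QP are only logged;
 * no recovery is attempted.
 */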
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
        struct ib_mad_qp_info *qp_info = qp_context;

        /* It's worse than that! He's dead, Jim! */
        printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
                event->event, qp_info->qp->qp_num);
}

static void init_mad_queue(struct ib_mad_qp_info *qp_info,
                           struct ib_mad_queue *mad_queue)
{
        mad_queue->qp_info = qp_info;
        mad_queue->count = 0;
        spin_lock_init(&mad_queue->lock);
        INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
                        struct ib_mad_qp_info *qp_info)
{
        qp_info->port_priv = port_priv;
        init_mad_queue(qp_info, &qp_info->send_queue);
        init_mad_queue(qp_info, &qp_info->recv_queue);
        INIT_LIST_HEAD(&qp_info->overflow_list);
        spin_lock_init(&qp_info->snoop_lock);
        qp_info->snoop_table = NULL;
        qp_info->snoop_table_size = 0;
        atomic_set(&qp_info->snoop_count, 0);
}

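/*
 * Create one of the two special QPs.  Both share the port's single CQ,
 * and every send WR is signaled, so each transmit generates a completion.
 */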
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
                         enum ib_qp_type qp_type)
{
        struct ib_qp_init_attr qp_init_attr;

        memset(&qp_init_attr, 0, sizeof qp_init_attr);
        qp_init_attr.send_cq = qp_info->port_priv->cq;
        qp_init_attr.recv_cq = qp_info->port_priv->cq;
        qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
        qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
        qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
        qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
        qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
        qp_init_attr.qp_type = qp_type;
        qp_init_attr.port_num = qp_info->port_priv->port_num;
        qp_init_attr.qp_context = qp_info;
        qp_init_attr.event_handler = qp_event_handler;
        qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
        if (IS_ERR(qp_info->qp)) {
                printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
                       get_spl_qp_index(qp_type));
                return PTR_ERR(qp_info->qp);
        }
        /* Use minimum queue sizes unless the CQ is resized */
        qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
        qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
        return 0;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
        ib_destroy_qp(qp_info->qp);
        kfree(qp_info->snoop_table);
}

/*
 * Open the port
 * Allocate the port structure and create its CQ, PD, MR, and the two
 * special QPs
 */
static int ib_mad_port_open(struct ib_device *device,
                            int port_num)
{
        int ret, cq_size;
        struct ib_mad_port_private *port_priv;
        unsigned long flags;
        char name[sizeof "ib_mad123"];

        /* Create new device info */
        port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
        if (!port_priv) {
                printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
                return -ENOMEM;
        }

        port_priv->device = device;
        port_priv->port_num = port_num;
        spin_lock_init(&port_priv->reg_lock);
        INIT_LIST_HEAD(&port_priv->agent_list);
        init_mad_qp(port_priv, &port_priv->qp_info[0]);
        init_mad_qp(port_priv, &port_priv->qp_info[1]);

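        /*
         * One CQ is shared by the send and receive queues of both
         * special QPs, so size it for all four.
         */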
        cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
        port_priv->cq = ib_create_cq(port_priv->device,
                                     ib_mad_thread_completion_handler,
                                     NULL, port_priv, cq_size, 0);
        if (IS_ERR(port_priv->cq)) {
                printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
                ret = PTR_ERR(port_priv->cq);
                goto error3;
        }

        port_priv->pd = ib_alloc_pd(device);
        if (IS_ERR(port_priv->pd)) {
                printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
                ret = PTR_ERR(port_priv->pd);
                goto error4;
        }

        port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(port_priv->mr)) {
                printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
                ret = PTR_ERR(port_priv->mr);
                goto error5;
        }

        ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
        if (ret)
                goto error6;
        ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
        if (ret)
                goto error7;

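        /*
         * A single-threaded workqueue per port serializes all completion
         * processing for that port.
         */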
        snprintf(name, sizeof name, "ib_mad%d", port_num);
        port_priv->wq = create_singlethread_workqueue(name);
        if (!port_priv->wq) {
                ret = -ENOMEM;
                goto error8;
        }
        INIT_WORK(&port_priv->work, ib_mad_completion_handler);

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        list_add_tail(&port_priv->port_list, &ib_mad_port_list);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        ret = ib_mad_port_start(port_priv);
        if (ret) {
                printk(KERN_ERR PFX "Couldn't start port\n");
                goto error9;
        }

        return 0;

error9:
        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        list_del_init(&port_priv->port_list);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        destroy_workqueue(port_priv->wq);
error8:
        destroy_mad_qp(&port_priv->qp_info[1]);
error7:
        destroy_mad_qp(&port_priv->qp_info[0]);
error6:
        ib_dereg_mr(port_priv->mr);
error5:
        ib_dealloc_pd(port_priv->pd);
error4:
        ib_destroy_cq(port_priv->cq);
        cleanup_recv_queue(&port_priv->qp_info[1]);
        cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
        kfree(port_priv);

        return ret;
}

/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
        struct ib_mad_port_private *port_priv;
        unsigned long flags;

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        port_priv = __ib_get_mad_port(device, port_num);
        if (port_priv == NULL) {
                spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
                printk(KERN_ERR PFX "Port %d not found\n", port_num);
                return -ENODEV;
        }
        list_del_init(&port_priv->port_list);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        destroy_workqueue(port_priv->wq);
        destroy_mad_qp(&port_priv->qp_info[1]);
        destroy_mad_qp(&port_priv->qp_info[0]);
        ib_dereg_mr(port_priv->mr);
        ib_dealloc_pd(port_priv->pd);
        ib_destroy_cq(port_priv->cq);
        cleanup_recv_queue(&port_priv->qp_info[1]);
        cleanup_recv_queue(&port_priv->qp_info[0]);
        /* XXX: Handle deallocation of MAD registration tables */

        kfree(port_priv);

        return 0;
}

static void ib_mad_init_device(struct ib_device *device)
{
        int start, end, i;

        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                return;

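        /*
         * Switches expose management traffic only through port 0; CAs
         * and routers use ports 1..phys_port_cnt.
         */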
        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                start = 0;
                end   = 0;
        } else {
                start = 1;
                end   = device->phys_port_cnt;
        }

        for (i = start; i <= end; i++) {
                if (ib_mad_port_open(device, i)) {
                        printk(KERN_ERR PFX "Couldn't open %s port %d\n",
                               device->name, i);
                        goto error;
                }
                if (ib_agent_port_open(device, i)) {
                        printk(KERN_ERR PFX "Couldn't open %s port %d "
                               "for agents\n",
                               device->name, i);
                        goto error_agent;
                }
        }
        return;

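/*
 * Unwind on failure: error_agent first closes the MAD port that was
 * just opened, then falls through to close agents and MAD services on
 * every earlier port.
 */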
error_agent:
        if (ib_mad_port_close(device, i))
                printk(KERN_ERR PFX "Couldn't close %s port %d\n",
                       device->name, i);

error:
        i--;

        while (i >= start) {
                if (ib_agent_port_close(device, i))
                        printk(KERN_ERR PFX "Couldn't close %s port %d "
                               "for agents\n",
                               device->name, i);
                if (ib_mad_port_close(device, i))
                        printk(KERN_ERR PFX "Couldn't close %s port %d\n",
                               device->name, i);
                i--;
        }
}

static void ib_mad_remove_device(struct ib_device *device)
{
        int i, num_ports, cur_port;

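        /*
         * Note that, unlike ib_mad_init_device(), there is no transport
         * check here; for a non-IB device ib_mad_port_close() simply
         * fails with -ENODEV and logs an error for each port.
         */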
        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                num_ports = 1;
                cur_port = 0;
        } else {
                num_ports = device->phys_port_cnt;
                cur_port = 1;
        }
        for (i = 0; i < num_ports; i++, cur_port++) {
                if (ib_agent_port_close(device, cur_port))
                        printk(KERN_ERR PFX "Couldn't close %s port %d "
                               "for agents\n",
                               device->name, cur_port);
                if (ib_mad_port_close(device, cur_port))
                        printk(KERN_ERR PFX "Couldn't close %s port %d\n",
                               device->name, cur_port);
        }
}

static struct ib_client mad_client = {
        .name   = "mad",
        .add = ib_mad_init_device,
        .remove = ib_mad_remove_device
};

static int __init ib_mad_init_module(void)
{
        int ret;

        spin_lock_init(&ib_mad_port_list_lock);

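        /*
         * The slab cache backs every receive buffer: an ib_mad_private
         * holds the driver-private header followed by the on-the-wire
         * data (GRH plus a 256-byte MAD).
         */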
        ib_mad_cache = kmem_cache_create("ib_mad",
                                         sizeof(struct ib_mad_private),
                                         0,
                                         SLAB_HWCACHE_ALIGN,
                                         NULL);
        if (!ib_mad_cache) {
                printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
                ret = -ENOMEM;
                goto error1;
        }

        INIT_LIST_HEAD(&ib_mad_port_list);

        if (ib_register_client(&mad_client)) {
                printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
                ret = -EINVAL;
                goto error2;
        }

        return 0;

error2:
        kmem_cache_destroy(ib_mad_cache);
error1:
        return ret;
}

static void __exit ib_mad_cleanup_module(void)
{
        ib_unregister_client(&mad_client);
        kmem_cache_destroy(ib_mad_cache);
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);