/*
 * Copyright (c) 2004, 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004, 2005 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: agent.c 1389 2004-12-27 22:56:47Z roland $
 */
#include <linux/dma-mapping.h>

#include <asm/bug.h>

#include <rdma/ib_smi.h>

#include "smi.h"
#include "agent_priv.h"
#include "mad_priv.h"
#include "agent.h"

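/*
 * One entry per port opened via ib_agent_port_open(); lookups and list
 * updates are serialized by ib_agent_port_list_lock.
 */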
spinlock_t ib_agent_port_list_lock;
static LIST_HEAD(ib_agent_port_list);

/*
 * Caller must hold ib_agent_port_list_lock
 */
static inline struct ib_agent_port_private *
__ib_get_agent_port(struct ib_device *device, int port_num,
                    struct ib_mad_agent *mad_agent)
{
        struct ib_agent_port_private *entry;

        BUG_ON(!(!!device ^ !!mad_agent));  /* Exactly one MUST be (!NULL) */
        if (device) {
                list_for_each_entry(entry, &ib_agent_port_list, port_list) {
                        if (entry->smp_agent->device == device &&
                            entry->port_num == port_num)
                                return entry;
                }
        } else {
                list_for_each_entry(entry, &ib_agent_port_list, port_list) {
                        if ((entry->smp_agent == mad_agent) ||
                            (entry->perf_mgmt_agent == mad_agent))
                                return entry;
                }
        }
        return NULL;
}

static inline struct ib_agent_port_private *
ib_get_agent_port(struct ib_device *device, int port_num,
                  struct ib_mad_agent *mad_agent)
{
        struct ib_agent_port_private *entry;
        unsigned long flags;

        spin_lock_irqsave(&ib_agent_port_list_lock, flags);
        entry = __ib_get_agent_port(device, port_num, mad_agent);
        spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
        return entry;
}

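/*
 * Check whether a directed route SMP should be handled by the local
 * SM agent.  Non-DR classes and unopened ports need no check and
 * return 1 immediately.
 */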
int smi_check_local_dr_smp(struct ib_smp *smp,
                           struct ib_device *device,
                           int port_num)
{
        struct ib_agent_port_private *port_priv;

        if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                return 1;
        port_priv = ib_get_agent_port(device, port_num, NULL);
        if (!port_priv) {
                printk(KERN_DEBUG SPFX "smi_check_local_dr_smp %s port %d "
                       "not open\n", device->name, port_num);
                return 1;
        }
        return smi_check_local_smp(port_priv->smp_agent, smp);
}

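/*
 * Build an address handle from the incoming work completion and post
 * the MAD as a UD send on the agent's QP.  The work request is tracked
 * on port_priv->send_posted_list until its completion is reported to
 * agent_send_handler().  Returns 0 on success, 1 on any failure.
 */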
static int agent_mad_send(struct ib_mad_agent *mad_agent,
                          struct ib_agent_port_private *port_priv,
                          struct ib_mad_private *mad_priv,
                          struct ib_grh *grh,
                          struct ib_wc *wc)
{
        struct ib_agent_send_wr *agent_send_wr;
        struct ib_sge gather_list;
        struct ib_send_wr send_wr;
        struct ib_send_wr *bad_send_wr;
        struct ib_ah_attr ah_attr;
        unsigned long flags;
        int ret = 1;

        agent_send_wr = kmalloc(sizeof(*agent_send_wr), GFP_KERNEL);
        if (!agent_send_wr)
                goto out;
        agent_send_wr->mad = mad_priv;

        /* Map the MAD for DMA and describe it with a single gather entry */
        gather_list.addr = dma_map_single(mad_agent->device->dma_device,
                                          &mad_priv->mad,
                                          sizeof(mad_priv->mad),
                                          DMA_TO_DEVICE);
        gather_list.length = sizeof(mad_priv->mad);
        gather_list.lkey = mad_agent->mr->lkey;

        send_wr.next = NULL;
        send_wr.opcode = IB_WR_SEND;
        send_wr.sg_list = &gather_list;
        send_wr.num_sge = 1;
        send_wr.wr.ud.remote_qpn = wc->src_qp; /* DQPN */
        send_wr.wr.ud.timeout_ms = 0;
        send_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;

        /* Route the send back to the requester described by the WC */
        ah_attr.dlid = wc->slid;
        ah_attr.port_num = mad_agent->port_num;
        ah_attr.src_path_bits = wc->dlid_path_bits;
        ah_attr.sl = wc->sl;
        ah_attr.static_rate = 0;
        ah_attr.ah_flags = 0; /* No GRH */
        if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
                if (wc->wc_flags & IB_WC_GRH) {
                        ah_attr.ah_flags = IB_AH_GRH;
                        /* Should sgid be looked up ? */
                        ah_attr.grh.sgid_index = 0;
                        ah_attr.grh.hop_limit = grh->hop_limit;
                        ah_attr.grh.flow_label = be32_to_cpu(
                                grh->version_tclass_flow) & 0xfffff;
                        ah_attr.grh.traffic_class = (be32_to_cpu(
                                grh->version_tclass_flow) >> 20) & 0xff;
                        /* The sender's SGID becomes our DGID */
                        memcpy(ah_attr.grh.dgid.raw,
                               grh->sgid.raw,
                               sizeof(ah_attr.grh.dgid));
                }
        }

        agent_send_wr->ah = ib_create_ah(mad_agent->qp->pd, &ah_attr);
        if (IS_ERR(agent_send_wr->ah)) {
                printk(KERN_ERR SPFX "No memory for address handle\n");
                kfree(agent_send_wr);
                goto out;
        }

        send_wr.wr.ud.ah = agent_send_wr->ah;
        if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
                send_wr.wr.ud.pkey_index = wc->pkey_index;
                send_wr.wr.ud.remote_qkey = IB_QP1_QKEY;
        } else {        /* for SMPs */
                send_wr.wr.ud.pkey_index = 0;
                send_wr.wr.ud.remote_qkey = 0;
        }
        send_wr.wr.ud.mad_hdr = &mad_priv->mad.mad.mad_hdr;
        send_wr.wr_id = (unsigned long)agent_send_wr;

        pci_unmap_addr_set(agent_send_wr, mapping, gather_list.addr);

        /* Send */
        spin_lock_irqsave(&port_priv->send_list_lock, flags);
        if (ib_post_send_mad(mad_agent, &send_wr, &bad_send_wr)) {
                spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
                dma_unmap_single(mad_agent->device->dma_device,
                                 pci_unmap_addr(agent_send_wr, mapping),
                                 sizeof(mad_priv->mad),
                                 DMA_TO_DEVICE);
                ib_destroy_ah(agent_send_wr->ah);
                kfree(agent_send_wr);
        } else {
                list_add_tail(&agent_send_wr->send_list,
                              &port_priv->send_posted_list);
                spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
                ret = 0;
        }

out:
        return ret;
}

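/*
 * Entry point for sending a locally generated MAD: pick the SMI or GSI
 * agent based on the management class in the MAD header, then hand off
 * to agent_mad_send().
 */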
int agent_send(struct ib_mad_private *mad,
               struct ib_grh *grh,
               struct ib_wc *wc,
               struct ib_device *device,
               int port_num)
{
        struct ib_agent_port_private *port_priv;
        struct ib_mad_agent *mad_agent;

        port_priv = ib_get_agent_port(device, port_num, NULL);
        if (!port_priv) {
                printk(KERN_DEBUG SPFX "agent_send %s port %d not open\n",
                       device->name, port_num);
                return 1;
        }

        /* Get mad agent based on mgmt_class in MAD */
        switch (mad->mad.mad.mad_hdr.mgmt_class) {
        case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
        case IB_MGMT_CLASS_SUBN_LID_ROUTED:
                mad_agent = port_priv->smp_agent;
                break;
        case IB_MGMT_CLASS_PERF_MGMT:
                mad_agent = port_priv->perf_mgmt_agent;
                break;
        default:
                return 1;
        }
        return agent_mad_send(mad_agent, port_priv, mad, grh, wc);
}

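/*
 * Send completion handler: recover the tracking structure stashed in
 * wr_id, unlink it from the posted send list, then undo the DMA mapping
 * and release the AH and MAD buffer taken in agent_mad_send().
 */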
static void agent_send_handler(struct ib_mad_agent *mad_agent,
                               struct ib_mad_send_wc *mad_send_wc)
{
        struct ib_agent_port_private *port_priv;
        struct ib_agent_send_wr *agent_send_wr;
        unsigned long flags;

        /* Find matching MAD agent */
        port_priv = ib_get_agent_port(NULL, 0, mad_agent);
        if (!port_priv) {
                printk(KERN_ERR SPFX "agent_send_handler: no matching MAD "
                       "agent %p\n", mad_agent);
                return;
        }

        agent_send_wr = (struct ib_agent_send_wr *)(unsigned long)mad_send_wc->wr_id;
        spin_lock_irqsave(&port_priv->send_list_lock, flags);
        /* Remove completed send from posted send MAD list */
        list_del(&agent_send_wr->send_list);
        spin_unlock_irqrestore(&port_priv->send_list_lock, flags);

        dma_unmap_single(mad_agent->device->dma_device,
                         pci_unmap_addr(agent_send_wr, mapping),
                         sizeof(agent_send_wr->mad->mad),
                         DMA_TO_DEVICE);

        ib_destroy_ah(agent_send_wr->ah);

        /* Release allocated memory */
        kmem_cache_free(ib_mad_cache, agent_send_wr->mad);
        kfree(agent_send_wr);
}

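/*
 * Open a port for the agents: register a send-only MAD agent on each of
 * the SMI (QP0) and GSI (QP1) QPs, then add the port to the global list.
 */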
int ib_agent_port_open(struct ib_device *device, int port_num)
{
        int ret;
        struct ib_agent_port_private *port_priv;
        unsigned long flags;

        /* First, check if port already open for SMI */
        port_priv = ib_get_agent_port(device, port_num, NULL);
        if (port_priv) {
                printk(KERN_DEBUG SPFX "%s port %d already open\n",
                       device->name, port_num);
                return 0;
        }

        /* Create new device info */
        port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
        if (!port_priv) {
                printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n");
                ret = -ENOMEM;
                goto error1;
        }
        memset(port_priv, 0, sizeof *port_priv);
        port_priv->port_num = port_num;
        spin_lock_init(&port_priv->send_list_lock);
        INIT_LIST_HEAD(&port_priv->send_posted_list);

        /* Obtain send only MAD agent for SM class (SMI QP) */
        port_priv->smp_agent = ib_register_mad_agent(device, port_num,
                                                     IB_QPT_SMI, NULL, 0,
                                                     &agent_send_handler,
                                                     NULL, NULL);
        if (IS_ERR(port_priv->smp_agent)) {
                ret = PTR_ERR(port_priv->smp_agent);
                goto error2;
        }

        /* Obtain send only MAD agent for PerfMgmt class (GSI QP) */
        port_priv->perf_mgmt_agent = ib_register_mad_agent(device, port_num,
                                                           IB_QPT_GSI, NULL, 0,
                                                           &agent_send_handler,
                                                           NULL, NULL);
        if (IS_ERR(port_priv->perf_mgmt_agent)) {
                ret = PTR_ERR(port_priv->perf_mgmt_agent);
                goto error3;
        }

        spin_lock_irqsave(&ib_agent_port_list_lock, flags);
        list_add_tail(&port_priv->port_list, &ib_agent_port_list);
        spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
        return 0;

error3:
        ib_unregister_mad_agent(port_priv->smp_agent);
error2:
        kfree(port_priv);
error1:
        return ret;
}

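/*
 * Tear down a port: unlink it from the global list under the lock, then
 * unregister both MAD agents and free the per-port state.
 */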
int ib_agent_port_close(struct ib_device *device, int port_num)
{
        struct ib_agent_port_private *port_priv;
        unsigned long flags;

        spin_lock_irqsave(&ib_agent_port_list_lock, flags);
        port_priv = __ib_get_agent_port(device, port_num, NULL);
        if (port_priv == NULL) {
                spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
                printk(KERN_ERR SPFX "Port %d not found\n", port_num);
                return -ENODEV;
        }
        list_del(&port_priv->port_list);
        spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);

        ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
        ib_unregister_mad_agent(port_priv->smp_agent);
        kfree(port_priv);
        return 0;
}