drivers/infiniband/hw/ehca/ehca_hca.c
/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  HCA query functions
 *
 *  Authors: Heiko J Schick <schickhj@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "ehca_tools.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"

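/*
 * Clamp a 32-bit firmware value to INT_MAX; the corresponding fields in
 * struct ib_device_attr are signed ints.
 */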
static unsigned int limit_uint(unsigned int value)
{
        return min_t(unsigned int, value, INT_MAX);
}

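/*
 * ehca_query_device() - report HCA attributes to the IB core.
 * Reads a firmware query control block via hipz_h_query_hca() and
 * translates it into struct ib_device_attr; device capability flags are
 * derived from cap_mapping, a table of (IB flag, HCA capability bit) pairs.
 */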
int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
{
        int i, ret = 0;
        struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
                                              ib_device);
        struct hipz_query_hca *rblock;

        static const u32 cap_mapping[] = {
                IB_DEVICE_RESIZE_MAX_WR,      HCA_CAP_WQE_RESIZE,
                IB_DEVICE_BAD_PKEY_CNTR,      HCA_CAP_BAD_P_KEY_CTR,
                IB_DEVICE_BAD_QKEY_CNTR,      HCA_CAP_Q_KEY_VIOL_CTR,
                IB_DEVICE_RAW_MULTI,          HCA_CAP_RAW_PACKET_MCAST,
                IB_DEVICE_AUTO_PATH_MIG,      HCA_CAP_AUTO_PATH_MIG,
                IB_DEVICE_CHANGE_PHY_PORT,    HCA_CAP_SQD_RTS_PORT_CHANGE,
                IB_DEVICE_UD_AV_PORT_ENFORCE, HCA_CAP_AH_PORT_NR_CHECK,
                IB_DEVICE_CURR_QP_STATE_MOD,  HCA_CAP_CUR_QP_STATE_MOD,
                IB_DEVICE_SHUTDOWN_PORT,      HCA_CAP_SHUTDOWN_PORT,
                IB_DEVICE_INIT_TYPE,          HCA_CAP_INIT_TYPE,
                IB_DEVICE_PORT_ACTIVE_EVENT,  HCA_CAP_PORT_ACTIVE_EVENT,
        };

        rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!rblock) {
                ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                return -ENOMEM;
        }

        if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
                ehca_err(&shca->ib_device, "Can't query device properties");
                ret = -EINVAL;
                goto query_device1;
        }

        memset(props, 0, sizeof(struct ib_device_attr));
        props->page_size_cap   = shca->hca_cap_mr_pgsize;
        props->fw_ver          = rblock->hw_ver;
        props->max_mr_size     = rblock->max_mr_size;
        props->vendor_id       = rblock->vendor_id >> 8;
        props->vendor_part_id  = rblock->vendor_part_id >> 16;
        props->hw_ver          = rblock->hw_ver;
        props->max_qp          = limit_uint(rblock->max_qp);
        props->max_qp_wr       = limit_uint(rblock->max_wqes_wq);
        props->max_sge         = limit_uint(rblock->max_sge);
        props->max_sge_rd      = limit_uint(rblock->max_sge_rd);
        props->max_cq          = limit_uint(rblock->max_cq);
        props->max_cqe         = limit_uint(rblock->max_cqe);
        props->max_mr          = limit_uint(rblock->max_mr);
        props->max_mw          = limit_uint(rblock->max_mw);
        props->max_pd          = limit_uint(rblock->max_pd);
        props->max_ah          = limit_uint(rblock->max_ah);
        props->max_ee          = limit_uint(rblock->max_rd_ee_context);
        props->max_rdd         = limit_uint(rblock->max_rd_domain);
        props->max_fmr         = limit_uint(rblock->max_mr);
        props->max_qp_rd_atom  = limit_uint(rblock->max_rr_qp);
        props->max_ee_rd_atom  = limit_uint(rblock->max_rr_ee_context);
        props->max_res_rd_atom = limit_uint(rblock->max_rr_hca);
        props->max_qp_init_rd_atom = limit_uint(rblock->max_act_wqs_qp);
        props->max_ee_init_rd_atom = limit_uint(rblock->max_act_wqs_ee_context);

        if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
                props->max_srq         = limit_uint(props->max_qp);
                props->max_srq_wr      = limit_uint(props->max_qp_wr);
                props->max_srq_sge     = 3;
        }

        props->max_pkeys           = 16;
        props->local_ca_ack_delay  = min_t(u8, rblock->local_ca_ack_delay, 255);
        props->max_raw_ipv6_qp     = limit_uint(rblock->max_raw_ipv6_qp);
        props->max_raw_ethy_qp     = limit_uint(rblock->max_raw_ethy_qp);
        props->max_mcast_grp       = limit_uint(rblock->max_mcast_grp);
        props->max_mcast_qp_attach = limit_uint(rblock->max_mcast_qp_attach);
        props->max_total_mcast_qp_attach
                = limit_uint(rblock->max_total_mcast_qp_attach);

        /* translate device capabilities */
        props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID |
                IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_N_NOTIFY_CQ;
        for (i = 0; i < ARRAY_SIZE(cap_mapping); i += 2)
                if (rblock->hca_cap_indicators & cap_mapping[i + 1])
                        props->device_cap_flags |= cap_mapping[i];

query_device1:
        ehca_free_fw_ctrlblock(rblock);

        return ret;
}

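/* Translate the firmware MTU encoding into the corresponding enum ib_mtu. */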
static enum ib_mtu map_mtu(struct ehca_shca *shca, u32 fw_mtu)
{
        switch (fw_mtu) {
        case 0x1:
                return IB_MTU_256;
        case 0x2:
                return IB_MTU_512;
        case 0x3:
                return IB_MTU_1024;
        case 0x4:
                return IB_MTU_2048;
        case 0x5:
                return IB_MTU_4096;
        default:
                ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
                         fw_mtu);
                return 0;
        }
}

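/* Translate the firmware VL capability encoding into a number of VLs. */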
static u8 map_number_of_vls(struct ehca_shca *shca, u32 vl_cap)
{
        switch (vl_cap) {
        case 0x1:
                return 1;
        case 0x2:
                return 2;
        case 0x3:
                return 4;
        case 0x4:
                return 8;
        case 0x5:
                return 15;
        default:
                ehca_err(&shca->ib_device, "Invalid VL capability: %x.",
                         vl_cap);
                return 0;
        }
}

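/*
 * ehca_query_port() - report port attributes to the IB core.
 * Reads a firmware port control block via hipz_h_query_port() and
 * translates it into struct ib_port_attr.
 */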
int ehca_query_port(struct ib_device *ibdev,
                    u8 port, struct ib_port_attr *props)
{
        int ret = 0;
        u64 h_ret;
        struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
                                              ib_device);
        struct hipz_query_port *rblock;

        rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!rblock) {
                ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                return -ENOMEM;
        }

        h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "Can't query port properties");
                ret = -EINVAL;
                goto query_port1;
        }

        memset(props, 0, sizeof(struct ib_port_attr));

        props->active_mtu = props->max_mtu = map_mtu(shca, rblock->max_mtu);
        props->port_cap_flags  = rblock->capability_mask;
        props->gid_tbl_len     = rblock->gid_tbl_len;
        if (rblock->max_msg_sz)
                props->max_msg_sz      = rblock->max_msg_sz;
        else
                props->max_msg_sz      = 0x1U << 31;
        props->bad_pkey_cntr   = rblock->bad_pkey_cntr;
        props->qkey_viol_cntr  = rblock->qkey_viol_cntr;
        props->pkey_tbl_len    = rblock->pkey_tbl_len;
        props->lid             = rblock->lid;
        props->sm_lid          = rblock->sm_lid;
        props->lmc             = rblock->lmc;
        props->sm_sl           = rblock->sm_sl;
        props->subnet_timeout  = rblock->subnet_timeout;
        props->init_type_reply = rblock->init_type_reply;
        props->max_vl_num      = map_number_of_vls(shca, rblock->vl_cap);

        if (rblock->state && rblock->phys_width) {
                props->phys_state      = rblock->phys_pstate;
                props->state           = rblock->phys_state;
                props->active_width    = rblock->phys_width;
                props->active_speed    = rblock->phys_speed;
        } else {
                /* old firmware releases don't report physical
                 * port info, so use default values
                 */
                props->phys_state      = 5;
                props->state           = rblock->state;
                props->active_width    = IB_WIDTH_12X;
                props->active_speed    = 0x1;
        }

query_port1:
        ehca_free_fw_ctrlblock(rblock);

        return ret;
}

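/*
 * Snapshot the SMA-relevant port attributes (LID, LMC, SM LID/SL and the
 * P_Key table). The control block is allocated with GFP_ATOMIC so the
 * function can be called from contexts that must not sleep.
 */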
int ehca_query_sma_attr(struct ehca_shca *shca,
                        u8 port, struct ehca_sma_attr *attr)
{
        int ret = 0;
        u64 h_ret;
        struct hipz_query_port *rblock;

        rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
        if (!rblock) {
                ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                return -ENOMEM;
        }

        h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "Can't query port properties");
                ret = -EINVAL;
                goto query_sma_attr1;
        }

        memset(attr, 0, sizeof(struct ehca_sma_attr));

        attr->lid    = rblock->lid;
        attr->lmc    = rblock->lmc;
        attr->sm_sl  = rblock->sm_sl;
        attr->sm_lid = rblock->sm_lid;

        attr->pkey_tbl_len = rblock->pkey_tbl_len;
        memcpy(attr->pkeys, rblock->pkey_entries, sizeof(attr->pkeys));

query_sma_attr1:
        ehca_free_fw_ctrlblock(rblock);

        return ret;
}

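/* Look up one entry of the port's 16-entry P_Key table via a firmware port query. */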
int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
        int ret = 0;
        u64 h_ret;
        struct ehca_shca *shca;
        struct hipz_query_port *rblock;

        shca = container_of(ibdev, struct ehca_shca, ib_device);
        if (index > 15) {
                ehca_err(&shca->ib_device, "Invalid index: %x.", index);
                return -EINVAL;
        }

        rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!rblock) {
                ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                return -ENOMEM;
        }

        h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "Can't query port properties");
                ret = -EINVAL;
                goto query_pkey1;
        }

        memcpy(pkey, &rblock->pkey_entries[index], sizeof(u16));

query_pkey1:
        ehca_free_fw_ctrlblock(rblock);

        return ret;
}

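/*
 * Build a GID from the port's GID prefix and the GUID entry at the given
 * table index, both taken from a firmware port query.
 */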
int ehca_query_gid(struct ib_device *ibdev, u8 port,
                   int index, union ib_gid *gid)
{
        int ret = 0;
        u64 h_ret;
        struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
                                              ib_device);
        struct hipz_query_port *rblock;

        if (index > 255) {
                ehca_err(&shca->ib_device, "Invalid index: %x.", index);
                return -EINVAL;
        }

        rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!rblock) {
                ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                return -ENOMEM;
        }

        h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "Can't query port properties");
                ret = -EINVAL;
                goto query_gid1;
        }

        memcpy(&gid->raw[0], &rblock->gid_prefix, sizeof(u64));
        memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64));

query_gid1:
        ehca_free_fw_ctrlblock(rblock);

        return ret;
}

static const u32 allowed_port_caps = (
        IB_PORT_SM | IB_PORT_LED_INFO_SUP | IB_PORT_CM_SUP |
        IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_DEVICE_MGMT_SUP |
        IB_PORT_VENDOR_CLASS_SUP);

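/*
 * ehca_modify_port() - update the port capability mask.
 * Only bits in allowed_port_caps may be set or cleared; the current mask is
 * read from firmware, combined with the set/clear masks and written back via
 * hipz_h_modify_port(), serialized by shca->modify_mutex.
 */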
int ehca_modify_port(struct ib_device *ibdev,
                     u8 port, int port_modify_mask,
                     struct ib_port_modify *props)
{
        int ret = 0;
        struct ehca_shca *shca;
        struct hipz_query_port *rblock;
        u32 cap;
        u64 hret;

        shca = container_of(ibdev, struct ehca_shca, ib_device);
        if ((props->set_port_cap_mask | props->clr_port_cap_mask)
            & ~allowed_port_caps) {
                ehca_err(&shca->ib_device, "Non-changeable bits set in masks  "
                         "set=%x  clr=%x  allowed=%x", props->set_port_cap_mask,
                         props->clr_port_cap_mask, allowed_port_caps);
                return -EINVAL;
        }

        if (mutex_lock_interruptible(&shca->modify_mutex))
                return -ERESTARTSYS;

        rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!rblock) {
                ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                ret = -ENOMEM;
                goto modify_port1;
        }

        hret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
        if (hret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "Can't query port properties");
                ret = -EINVAL;
                goto modify_port2;
        }

        cap = (rblock->capability_mask | props->set_port_cap_mask)
                & ~props->clr_port_cap_mask;

        hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
                                  cap, props->init_type, port_modify_mask);
        if (hret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "Modify port failed  h_ret=%li",
                         hret);
                ret = -EINVAL;
        }

modify_port2:
        ehca_free_fw_ctrlblock(rblock);

modify_port1:
        mutex_unlock(&shca->modify_mutex);

        return ret;
}