/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: sysfs.c 1349 2004-12-16 21:09:43Z roland $
 */

#include "core_priv.h"

#include <linux/slab.h>
#include <linux/string.h>

#include <rdma/ib_mad.h>

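/*
 * This file builds the InfiniBand core sysfs tree.  Each registered
 * ib_device appears as a class device under /sys/class/infiniband/<dev>/
 * with node-level attributes (node_type, node_guid, sys_image_guid,
 * node_desc) and a "ports" directory holding one kobject per physical
 * port.  Each port directory exposes link state attributes along with
 * the gids, pkeys and counter tables created further below.
 */
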
struct ib_port {
	struct kobject         kobj;
	struct ib_device      *ibdev;
	struct attribute_group gid_group;
	struct attribute_group pkey_group;
	u8                     port_num;
};

struct port_attribute {
	struct attribute attr;
	ssize_t (*show)(struct ib_port *, struct port_attribute *, char *buf);
	ssize_t (*store)(struct ib_port *, struct port_attribute *,
			 const char *buf, size_t count);
};

#define PORT_ATTR(_name, _mode, _show, _store) \
struct port_attribute port_attr_##_name = __ATTR(_name, _mode, _show, _store)

#define PORT_ATTR_RO(_name) \
struct port_attribute port_attr_##_name = __ATTR_RO(_name)

struct port_table_attribute {
	struct port_attribute	attr;
	char			name[8];
	int			index;
};

static inline int ibdev_is_alive(const struct ib_device *dev)
{
	return dev->reg_state == IB_DEV_REGISTERED;
}

static ssize_t port_attr_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct port_attribute *port_attr =
		container_of(attr, struct port_attribute, attr);
	struct ib_port *p = container_of(kobj, struct ib_port, kobj);

	if (!port_attr->show)
		return -EIO;
	if (!ibdev_is_alive(p->ibdev))
		return -ENODEV;

	return port_attr->show(p, port_attr, buf);
}

static struct sysfs_ops port_sysfs_ops = {
	.show = port_attr_show
};

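/*
 * Every read of a per-port attribute goes through port_attr_show()
 * above: it recovers the ib_port from the kobject, rejects the access
 * with -ENODEV once the owning device is no longer registered, and
 * then dispatches to the attribute's own show method.  The show
 * methods that follow each issue a fresh ib_query_port() so the
 * reported values reflect the current link state.
 */
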
static ssize_t state_show(struct ib_port *p, struct port_attribute *unused,
			  char *buf)
{
	struct ib_port_attr attr;
	ssize_t ret;

	static const char *state_name[] = {
		[IB_PORT_NOP]		= "NOP",
		[IB_PORT_DOWN]		= "DOWN",
		[IB_PORT_INIT]		= "INIT",
		[IB_PORT_ARMED]		= "ARMED",
		[IB_PORT_ACTIVE]	= "ACTIVE",
		[IB_PORT_ACTIVE_DEFER]	= "ACTIVE_DEFER"
	};

	ret = ib_query_port(p->ibdev, p->port_num, &attr);
	if (ret)
		return ret;

	return sprintf(buf, "%d: %s\n", attr.state,
		       attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
		       state_name[attr.state] : "UNKNOWN");
}

static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
			char *buf)
{
	struct ib_port_attr attr;
	ssize_t ret;

	ret = ib_query_port(p->ibdev, p->port_num, &attr);
	if (ret)
		return ret;

	return sprintf(buf, "0x%x\n", attr.lid);
}

static ssize_t lid_mask_count_show(struct ib_port *p,
				   struct port_attribute *unused,
				   char *buf)
{
	struct ib_port_attr attr;
	ssize_t ret;

	ret = ib_query_port(p->ibdev, p->port_num, &attr);
	if (ret)
		return ret;

	return sprintf(buf, "%d\n", attr.lmc);
}

static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
			   char *buf)
{
	struct ib_port_attr attr;
	ssize_t ret;

	ret = ib_query_port(p->ibdev, p->port_num, &attr);
	if (ret)
		return ret;

	return sprintf(buf, "0x%x\n", attr.sm_lid);
}

static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
			  char *buf)
{
	struct ib_port_attr attr;
	ssize_t ret;

	ret = ib_query_port(p->ibdev, p->port_num, &attr);
	if (ret)
		return ret;

	return sprintf(buf, "%d\n", attr.sm_sl);
}

static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
			     char *buf)
{
	struct ib_port_attr attr;
	ssize_t ret;

	ret = ib_query_port(p->ibdev, p->port_num, &attr);
	if (ret)
		return ret;

	return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
}

static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
			 char *buf)
{
	struct ib_port_attr attr;
	char *speed = "";
	int rate;
	ssize_t ret;

	ret = ib_query_port(p->ibdev, p->port_num, &attr);
	if (ret)
		return ret;

	switch (attr.active_speed) {
	case 2: speed = " DDR"; break;
	case 4: speed = " QDR"; break;
	}

	/* Base SDR rate is 2.5 Gb/sec, i.e. 25 in tenths of a Gb/sec. */
	rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
	if (rate < 0)
		return -EINVAL;

	return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
		       rate / 10, rate % 10 ? ".5" : "",
		       ib_width_enum_to_int(attr.active_width), speed);
}

static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
			       char *buf)
{
	struct ib_port_attr attr;
	ssize_t ret;

	ret = ib_query_port(p->ibdev, p->port_num, &attr);
	if (ret)
		return ret;

	switch (attr.phys_state) {
	case 1:  return sprintf(buf, "1: Sleep\n");
	case 2:  return sprintf(buf, "2: Polling\n");
	case 3:  return sprintf(buf, "3: Disabled\n");
	case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
	case 5:  return sprintf(buf, "5: LinkUp\n");
	case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
	case 7:  return sprintf(buf, "7: Phy Test\n");
	default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
	}
}

static PORT_ATTR_RO(state);
static PORT_ATTR_RO(lid);
static PORT_ATTR_RO(lid_mask_count);
static PORT_ATTR_RO(sm_lid);
static PORT_ATTR_RO(sm_sl);
static PORT_ATTR_RO(cap_mask);
static PORT_ATTR_RO(rate);
static PORT_ATTR_RO(phys_state);

static struct attribute *port_default_attrs[] = {
	&port_attr_state.attr,
	&port_attr_lid.attr,
	&port_attr_lid_mask_count.attr,
	&port_attr_sm_lid.attr,
	&port_attr_sm_sl.attr,
	&port_attr_cap_mask.attr,
	&port_attr_rate.attr,
	&port_attr_phys_state.attr,
	NULL
};

static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr,
			     char *buf)
{
	struct port_table_attribute *tab_attr =
		container_of(attr, struct port_table_attribute, attr);
	union ib_gid gid;
	ssize_t ret;

	ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
	if (ret)
		return ret;

	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
		       be16_to_cpu(((__be16 *) gid.raw)[0]),
		       be16_to_cpu(((__be16 *) gid.raw)[1]),
		       be16_to_cpu(((__be16 *) gid.raw)[2]),
		       be16_to_cpu(((__be16 *) gid.raw)[3]),
		       be16_to_cpu(((__be16 *) gid.raw)[4]),
		       be16_to_cpu(((__be16 *) gid.raw)[5]),
		       be16_to_cpu(((__be16 *) gid.raw)[6]),
		       be16_to_cpu(((__be16 *) gid.raw)[7]));
}

static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
			      char *buf)
{
	struct port_table_attribute *tab_attr =
		container_of(attr, struct port_table_attribute, attr);
	u16 pkey;
	ssize_t ret;

	ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
	if (ret)
		return ret;

	return sprintf(buf, "0x%04x\n", pkey);
}

#define PORT_PMA_ATTR(_name, _counter, _width, _offset)		\
struct port_table_attribute port_pma_attr_##_name = {			\
	.attr  = __ATTR(_name, S_IRUGO, show_pma_counter, NULL),	\
	.index = (_offset) | ((_width) << 16) | ((_counter) << 24)	\
}

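/*
 * PORT_PMA_ATTR packs everything show_pma_counter() needs into the
 * single "index" word: bits 0-15 hold the counter's bit offset within
 * the PortCounters attribute, bits 16-23 hold its width in bits, and
 * bits 24 and up hold the counter number.  show_pma_counter() unpacks
 * the offset and width to locate and format the value in the MAD reply.
 */
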
static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
				char *buf)
{
	struct port_table_attribute *tab_attr =
		container_of(attr, struct port_table_attribute, attr);
	int offset = tab_attr->index & 0xffff;
	int width  = (tab_attr->index >> 16) & 0xff;
	struct ib_mad *in_mad  = NULL;
	struct ib_mad *out_mad = NULL;
	ssize_t ret;

	if (!p->ibdev->process_mad)
		return sprintf(buf, "N/A (no PMA)\n");

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad) {
		ret = -ENOMEM;
		goto out;
	}

	in_mad->mad_hdr.base_version  = 1;
	in_mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_PERF_MGMT;
	in_mad->mad_hdr.class_version = 1;
	in_mad->mad_hdr.method        = IB_MGMT_METHOD_GET;
	in_mad->mad_hdr.attr_id       = cpu_to_be16(0x12); /* PortCounters */

	in_mad->data[41] = p->port_num;	/* PortSelect field */

	if ((p->ibdev->process_mad(p->ibdev, IB_MAD_IGNORE_MKEY,
		 p->port_num, NULL, NULL, in_mad, out_mad) &
	     (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) !=
	    (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) {
		ret = -EINVAL;
		goto out;
	}

	switch (width) {
	case 4:
		ret = sprintf(buf, "%u\n", (out_mad->data[40 + offset / 8] >>
					    (4 - (offset % 8))) & 0xf);
		break;
	case 8:
		ret = sprintf(buf, "%u\n", out_mad->data[40 + offset / 8]);
		break;
	case 16:
		ret = sprintf(buf, "%u\n",
			      be16_to_cpup((__be16 *)(out_mad->data + 40 + offset / 8)));
		break;
	case 32:
		ret = sprintf(buf, "%u\n",
			      be32_to_cpup((__be32 *)(out_mad->data + 40 + offset / 8)));
		break;
	default:
		ret = 0;
	}
out:
	kfree(in_mad);
	kfree(out_mad);
	return ret;
}

static PORT_PMA_ATTR(symbol_error		     ,  0, 16,  32);
static PORT_PMA_ATTR(link_error_recovery	     ,  1,  8,  48);
static PORT_PMA_ATTR(link_downed		     ,  2,  8,  56);
static PORT_PMA_ATTR(port_rcv_errors		     ,  3, 16,  64);
static PORT_PMA_ATTR(port_rcv_remote_physical_errors,  4, 16,  80);
static PORT_PMA_ATTR(port_rcv_switch_relay_errors   ,  5, 16,  96);
static PORT_PMA_ATTR(port_xmit_discards		     ,  6, 16, 112);
static PORT_PMA_ATTR(port_xmit_constraint_errors     ,  7,  8, 128);
static PORT_PMA_ATTR(port_rcv_constraint_errors      ,  8,  8, 136);
static PORT_PMA_ATTR(local_link_integrity_errors     ,  9,  4, 152);
static PORT_PMA_ATTR(excessive_buffer_overrun_errors, 10,  4, 156);
static PORT_PMA_ATTR(VL15_dropped		     , 11, 16, 176);
static PORT_PMA_ATTR(port_xmit_data		     , 12, 32, 192);
static PORT_PMA_ATTR(port_rcv_data		     , 13, 32, 224);
static PORT_PMA_ATTR(port_xmit_packets		     , 14, 32, 256);
static PORT_PMA_ATTR(port_rcv_packets		     , 15, 32, 288);

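/*
 * The (counter, width, offset) triples above mirror the field layout of
 * the PortCounters attribute (attr_id 0x12) of the performance
 * management class: each offset is the counter's bit position within
 * the attribute, which show_pma_counter() converts into a byte position
 * relative to out_mad->data + 40 when it extracts the value.
 */
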
static struct attribute *pma_attrs[] = {
	&port_pma_attr_symbol_error.attr.attr,
	&port_pma_attr_link_error_recovery.attr.attr,
	&port_pma_attr_link_downed.attr.attr,
	&port_pma_attr_port_rcv_errors.attr.attr,
	&port_pma_attr_port_rcv_remote_physical_errors.attr.attr,
	&port_pma_attr_port_rcv_switch_relay_errors.attr.attr,
	&port_pma_attr_port_xmit_discards.attr.attr,
	&port_pma_attr_port_xmit_constraint_errors.attr.attr,
	&port_pma_attr_port_rcv_constraint_errors.attr.attr,
	&port_pma_attr_local_link_integrity_errors.attr.attr,
	&port_pma_attr_excessive_buffer_overrun_errors.attr.attr,
	&port_pma_attr_VL15_dropped.attr.attr,
	&port_pma_attr_port_xmit_data.attr.attr,
	&port_pma_attr_port_rcv_data.attr.attr,
	&port_pma_attr_port_xmit_packets.attr.attr,
	&port_pma_attr_port_rcv_packets.attr.attr,
	NULL
};

static struct attribute_group pma_group = {
	.name  = "counters",
	.attrs = pma_attrs
};

static void ib_port_release(struct kobject *kobj)
{
	struct ib_port *p = container_of(kobj, struct ib_port, kobj);
	struct attribute *a;
	int i;

	for (i = 0; (a = p->gid_group.attrs[i]); ++i)
		kfree(a);

	kfree(p->gid_group.attrs);

	for (i = 0; (a = p->pkey_group.attrs[i]); ++i)
		kfree(a);

	kfree(p->pkey_group.attrs);

	kfree(p);
}

static struct kobj_type port_type = {
	.release       = ib_port_release,
	.sysfs_ops     = &port_sysfs_ops,
	.default_attrs = port_default_attrs
};

static void ib_device_release(struct device *device)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	kfree(dev);
}

static int ib_device_uevent(struct device *device,
			    struct kobj_uevent_env *env)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	if (add_uevent_var(env, "NAME=%s", dev->name))
		return -ENOMEM;

	/*
	 * It would be nice to pass the node GUID with the event...
	 */

	return 0;
}

static struct attribute **
alloc_group_attrs(ssize_t (*show)(struct ib_port *,
				  struct port_attribute *, char *buf),
		  int len)
{
	struct attribute **tab_attr;
	struct port_table_attribute *element;
	int i;

	tab_attr = kcalloc(1 + len, sizeof(struct attribute *), GFP_KERNEL);
	if (!tab_attr)
		return NULL;

	for (i = 0; i < len; i++) {
		element = kzalloc(sizeof(struct port_table_attribute),
				  GFP_KERNEL);
		if (!element)
			goto err;

		if (snprintf(element->name, sizeof(element->name),
			     "%d", i) >= sizeof(element->name)) {
			kfree(element);
			goto err;
		}

		element->attr.attr.name = element->name;
		element->attr.attr.mode = S_IRUGO;
		element->attr.show      = show;
		element->index          = i;

		tab_attr[i] = &element->attr.attr;
	}

	return tab_attr;

err:
	while (--i >= 0)
		kfree(tab_attr[i]);
	kfree(tab_attr);
	return NULL;
}

static int add_port(struct ib_device *device, int port_num)
{
	struct ib_port *p;
	struct ib_port_attr attr;
	int i;
	int ret;

	ret = ib_query_port(device, port_num, &attr);
	if (ret)
		return ret;

	p = kzalloc(sizeof *p, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->ibdev    = device;
	p->port_num = port_num;

	ret = kobject_init_and_add(&p->kobj, &port_type,
				   kobject_get(device->ports_parent),
				   "%d", port_num);
	if (ret)
		goto err_put;

	ret = sysfs_create_group(&p->kobj, &pma_group);
	if (ret)
		goto err_put;

	p->gid_group.name  = "gids";
	p->gid_group.attrs = alloc_group_attrs(show_port_gid, attr.gid_tbl_len);
	if (!p->gid_group.attrs) {
		ret = -ENOMEM;
		goto err_remove_pma;
	}

	ret = sysfs_create_group(&p->kobj, &p->gid_group);
	if (ret)
		goto err_free_gid;

	p->pkey_group.name  = "pkeys";
	p->pkey_group.attrs = alloc_group_attrs(show_port_pkey,
						attr.pkey_tbl_len);
	if (!p->pkey_group.attrs) {
		ret = -ENOMEM;
		goto err_remove_gid;
	}

	ret = sysfs_create_group(&p->kobj, &p->pkey_group);
	if (ret)
		goto err_free_pkey;

	list_add_tail(&p->kobj.entry, &device->port_list);

	kobject_uevent(&p->kobj, KOBJ_ADD);
	return 0;

err_free_pkey:
	for (i = 0; i < attr.pkey_tbl_len; ++i)
		kfree(p->pkey_group.attrs[i]);

	kfree(p->pkey_group.attrs);

err_remove_gid:
	sysfs_remove_group(&p->kobj, &p->gid_group);

err_free_gid:
	for (i = 0; i < attr.gid_tbl_len; ++i)
		kfree(p->gid_group.attrs[i]);

	kfree(p->gid_group.attrs);

err_remove_pma:
	sysfs_remove_group(&p->kobj, &pma_group);

err_put:
	kobject_put(device->ports_parent);
	kfree(p);
	return ret;
}

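/*
 * Device-level attributes.  These show/store methods back the files
 * created directly in the device's sysfs directory (node_type,
 * sys_image_guid, node_guid, node_desc); they are registered from
 * ib_class_attributes[] below.
 */
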
static ssize_t show_node_type(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	if (!ibdev_is_alive(dev))
		return -ENODEV;

	switch (dev->node_type) {
	case RDMA_NODE_IB_CA:	  return sprintf(buf, "%d: CA\n", dev->node_type);
	case RDMA_NODE_RNIC:	  return sprintf(buf, "%d: RNIC\n", dev->node_type);
	case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
	case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type);
	default:		  return sprintf(buf, "%d: <unknown>\n", dev->node_type);
	}
}

static ssize_t show_sys_image_guid(struct device *device,
				   struct device_attribute *dev_attr, char *buf)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);
	struct ib_device_attr attr;
	ssize_t ret;

	if (!ibdev_is_alive(dev))
		return -ENODEV;

	ret = ib_query_device(dev, &attr);
	if (ret)
		return ret;

	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
}

static ssize_t show_node_guid(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	if (!ibdev_is_alive(dev))
		return -ENODEV;

	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
		       be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
		       be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
		       be16_to_cpu(((__be16 *) &dev->node_guid)[2]),
		       be16_to_cpu(((__be16 *) &dev->node_guid)[3]));
}

static ssize_t show_node_desc(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	return sprintf(buf, "%.64s\n", dev->node_desc);
}

static ssize_t set_node_desc(struct device *device,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);
	struct ib_device_modify desc = {};
	int ret;

	if (!dev->modify_device)
		return -EIO;

	memcpy(desc.node_desc, buf, min_t(int, count, 64));
	ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
	if (ret)
		return ret;

	return count;
}

static DEVICE_ATTR(node_type,      S_IRUGO, show_node_type, NULL);
static DEVICE_ATTR(sys_image_guid, S_IRUGO, show_sys_image_guid, NULL);
static DEVICE_ATTR(node_guid,      S_IRUGO, show_node_guid, NULL);
static DEVICE_ATTR(node_desc,      S_IRUGO | S_IWUSR, show_node_desc, set_node_desc);

static struct device_attribute *ib_class_attributes[] = {
	&dev_attr_node_type,
	&dev_attr_sys_image_guid,
	&dev_attr_node_guid,
	&dev_attr_node_desc
};

static struct class ib_class = {
	.name        = "infiniband",
	.dev_release = ib_device_release,
	.dev_uevent  = ib_device_uevent,
};

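/*
 * ib_device_register_sysfs() builds the whole tree for one device: it
 * registers the class device, creates the device-level attribute files,
 * allocates the "ports" kobject and then adds one port directory per
 * physical port.  Switches are the exception: they expose only port 0,
 * the switch management port.
 */
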
int ib_device_register_sysfs(struct ib_device *device)
{
	struct device *class_dev = &device->dev;
	int ret;
	int i;

	class_dev->class       = &ib_class;
	class_dev->driver_data = device;
	class_dev->parent      = device->dma_device;
	strlcpy(class_dev->bus_id, device->name, BUS_ID_SIZE);

	INIT_LIST_HEAD(&device->port_list);

	ret = device_register(class_dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i) {
		ret = device_create_file(class_dev, ib_class_attributes[i]);
		if (ret)
			goto err_unregister;
	}

	device->ports_parent = kobject_create_and_add("ports",
						kobject_get(&class_dev->kobj));
	if (!device->ports_parent) {
		ret = -ENOMEM;
		goto err_put;
	}

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		ret = add_port(device, 0);
		if (ret)
			goto err_put;
	} else {
		for (i = 1; i <= device->phys_port_cnt; ++i) {
			ret = add_port(device, i);
			if (ret)
				goto err_put;
		}
	}

	return 0;

err_put:
	{
		struct kobject *p, *t;
		struct ib_port *port;

		list_for_each_entry_safe(p, t, &device->port_list, entry) {
			list_del(&p->entry);
			port = container_of(p, struct ib_port, kobj);
			sysfs_remove_group(p, &pma_group);
			sysfs_remove_group(p, &port->pkey_group);
			sysfs_remove_group(p, &port->gid_group);
			kobject_put(p);
		}
	}

	kobject_put(&class_dev->kobj);

err_unregister:
	device_unregister(class_dev);

err:
	return ret;
}

void ib_device_unregister_sysfs(struct ib_device *device)
{
	struct kobject *p, *t;
	struct ib_port *port;

	list_for_each_entry_safe(p, t, &device->port_list, entry) {
		list_del(&p->entry);
		port = container_of(p, struct ib_port, kobj);
		sysfs_remove_group(p, &pma_group);
		sysfs_remove_group(p, &port->pkey_group);
		sysfs_remove_group(p, &port->gid_group);
		kobject_put(p);
	}

	kobject_put(device->ports_parent);
	device_unregister(&device->dev);
}

int ib_sysfs_setup(void)
{
	return class_register(&ib_class);
}

void ib_sysfs_cleanup(void)
{
	class_unregister(&ib_class);
}