4 * Linux device interface for the IPMI message handler.
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
10 * Copyright 2002 MontaVista Software Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 #include <linux/module.h>
35 #include <linux/moduleparam.h>
36 #include <linux/errno.h>
37 #include <asm/system.h>
38 #include <linux/sched.h>
39 #include <linux/poll.h>
40 #include <linux/spinlock.h>
41 #include <linux/slab.h>
42 #include <linux/ipmi.h>
43 #include <linux/mutex.h>
44 #include <linux/init.h>
45 #include <linux/device.h>
46 #include <linux/compat.h>
/*
 * Per-open-file state for /dev/ipmiN.
 * NOTE(review): this chunk is elided -- the opening brace and some
 * members (e.g. the ipmi user handle and default_retries, which are
 * referenced elsewhere in this file) are missing from view.
 */
48 struct ipmi_file_private
/* Protects recv_msgs; taken with irqsave since the receive callback
   may run in interrupt context. */
51 spinlock_t recv_msg_lock;
/* Messages received from the interface, waiting for userspace to
   collect them via IPMICTL_RECEIVE_MSG{,_TRUNC}. */
52 struct list_head recv_msgs;
/* SIGIO subscribers, signalled when a message arrives. */
54 struct fasync_struct *fasync_queue;
/* Readers and poll()ers sleep here until recv_msgs is non-empty. */
55 wait_queue_head_t wait;
/* Serializes dequeue + copy-to-user so concurrent readers cannot
   reorder messages (see the RECEIVE_MSG path in ipmi_ioctl). */
56 struct mutex recv_mutex;
/* Default retry interval in ms for sends; 0 = low-level default. */
58 unsigned int default_retry_time_ms;
/*
 * Receive callback registered with the IPMI message handler via
 * ipmi_hndlrs below.  Queues the incoming message on the per-file list
 * and wakes any waiting readers.
 * NOTE(review): elided chunk -- the locals ('flags', 'was_empty') and
 * the conditional that presumably guards the wakeups are missing.
 */
61 static void file_receive_handler(struct ipmi_recv_msg *msg,
64 struct ipmi_file_private *priv = handler_data;
/* irqsave: this can be called from interrupt context. */
68 spin_lock_irqsave(&(priv->recv_msg_lock), flags);
/* Record the empty->non-empty transition; the test that uses it is
   not visible in this chunk -- TODO confirm it gates the wakeups. */
70 was_empty = list_empty(&(priv->recv_msgs));
71 list_add_tail(&(msg->link), &(priv->recv_msgs));
/* Wake blocking readers / poll() waiters... */
74 wake_up_interruptible(&priv->wait);
/* ...and notify async (SIGIO) subscribers. */
75 kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
78 spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
/*
 * poll/select support: reports readable (POLLIN | POLLRDNORM) when at
 * least one received message is queued.  Never reports writability.
 * NOTE(review): elided chunk -- the 'flags' declaration and the final
 * 'return mask;' are missing from view.
 */
81 static unsigned int ipmi_poll(struct file *file, poll_table *wait)
83 struct ipmi_file_private *priv = file->private_data;
84 unsigned int mask = 0;
/* Register our wait queue so the poller is woken by
   file_receive_handler's wake_up_interruptible(). */
87 poll_wait(file, &priv->wait, wait);
89 spin_lock_irqsave(&priv->recv_msg_lock, flags);
91 if (!list_empty(&(priv->recv_msgs)))
92 mask |= (POLLIN | POLLRDNORM);
94 spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
/*
 * fasync file operation: (un)subscribe this file from SIGIO delivery.
 * Delegates entirely to fasync_helper(), which manages the
 * fasync_queue list used by kill_fasync() in file_receive_handler.
 */
99 static int ipmi_fasync(int fd, struct file *file, int on)
101 struct ipmi_file_private *priv = file->private_data;
104 result = fasync_helper(fd, file, on, &priv->fasync_queue);
/* Callbacks handed to ipmi_create_user() in ipmi_open(); only the
   receive hook is populated here. */
109 static struct ipmi_user_hndl ipmi_hndlrs =
111 .ipmi_recv_hndl = file_receive_handler,
/*
 * open() for /dev/ipmiN: the minor number selects the IPMI interface.
 * Allocates per-file state, creates an IPMI user bound to
 * file_receive_handler, and initializes the receive queue machinery.
 * NOTE(review): elided chunk -- the kmalloc NULL check, the rest of
 * the ipmi_create_user() argument list and its error path, and the
 * final 'return 0;' are missing from view.
 */
114 static int ipmi_open(struct inode *inode, struct file *file)
116 int if_num = iminor(inode);
118 struct ipmi_file_private *priv;
121 priv = kmalloc(sizeof(*priv), GFP_KERNEL);
/* Bind this open file to interface 'if_num'; priv is presumably the
   handler_data seen in file_receive_handler -- remaining args not
   visible here. */
127 rv = ipmi_create_user(if_num,
136 file->private_data = priv;
138 spin_lock_init(&(priv->recv_msg_lock));
139 INIT_LIST_HEAD(&(priv->recv_msgs));
140 init_waitqueue_head(&priv->wait);
141 priv->fasync_queue = NULL;
142 mutex_init(&priv->recv_mutex);
144 /* Use the low-level defaults. */
/* -1 retries / 0 ms means "let the lower layer pick"; overridable via
   IPMICTL_SET_TIMING_PARMS_CMD. */
145 priv->default_retries = -1;
146 priv->default_retry_time_ms = 0;
/*
 * release() for /dev/ipmiN: tears down the IPMI user and async
 * notification state.  Per the FIXME below, any messages still queued
 * on priv->recv_msgs are NOT freed here and therefore leak.
 * NOTE(review): elided chunk -- the error check on rv and the freeing
 * of priv itself are missing from view.
 */
151 static int ipmi_release(struct inode *inode, struct file *file)
153 struct ipmi_file_private *priv = file->private_data;
156 rv = ipmi_destroy_user(priv->user);
/* Drop any SIGIO subscription for this file. */
160 ipmi_fasync (-1, file, 0);
162 /* FIXME - free the messages in the list. */
/*
 * Common send path for IPMICTL_SEND_COMMAND{,_SETTIME}, native and
 * compat: validates and copies the user's address and message, then
 * submits via ipmi_request_settime() with the given retry parameters.
 * Returns 0 or a negative errno.
 * NOTE(review): elided chunk -- error returns, the kmalloc NULL
 * check, the 'out:' cleanup label that frees msg.data, and the tail
 * of the ipmi_request_settime() call are missing from view.
 */
168 static int handle_send_req(ipmi_user_t user,
169 struct ipmi_req *req,
171 unsigned int retry_time_ms)
174 struct ipmi_addr addr;
175 struct kernel_ipmi_msg msg;
/* Reject addresses larger than the kernel-side container before
   copying -- prevents a stack buffer overrun. */
177 if (req->addr_len > sizeof(struct ipmi_addr))
180 if (copy_from_user(&addr, req->addr, req->addr_len))
183 msg.netfn = req->msg.netfn;
184 msg.cmd = req->msg.cmd;
185 msg.data_len = req->msg.data_len;
/* Always allocate the maximum payload size; freed at "out". */
186 msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
190 /* From here out we cannot return, we must jump to "out" for
191 error exits to free msgdata. */
193 rv = ipmi_validate_addr(&addr, req->addr_len);
197 if (req->msg.data != NULL) {
/* Bound the user-supplied length before copying into msg.data. */
198 if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
203 if (copy_from_user(msg.data,
/* Submit with caller-specified retries/interval (remaining
   arguments not visible in this chunk). */
214 rv = ipmi_request_settime(user,
/*
 * Main ioctl dispatcher for /dev/ipmiN.  'arg' is a user-space
 * pointer whose interpretation depends on 'cmd'; returns 0 or a
 * negative errno (via 'rv', set per-case).
 * NOTE(review): heavily elided chunk -- the signature tail, switch
 * statement, case braces/breaks, several locals and error returns
 * are missing from view.  Comments describe only what is visible.
 */
227 static int ipmi_ioctl(struct inode *inode,
233 struct ipmi_file_private *priv = file->private_data;
234 void __user *arg = (void __user *)data;
/* Send using this file's default retry parameters. */
238 case IPMICTL_SEND_COMMAND:
242 if (copy_from_user(&req, arg, sizeof(req))) {
247 rv = handle_send_req(priv->user,
249 priv->default_retries,
250 priv->default_retry_time_ms);
/* Send with caller-supplied retry count/interval. */
254 case IPMICTL_SEND_COMMAND_SETTIME:
256 struct ipmi_req_settime req;
258 if (copy_from_user(&req, arg, sizeof(req))) {
263 rv = handle_send_req(priv->user,
/* Dequeue one received message; the _TRUNC variant truncates an
   oversized payload instead of failing. */
270 case IPMICTL_RECEIVE_MSG:
271 case IPMICTL_RECEIVE_MSG_TRUNC:
273 struct ipmi_recv rsp;
275 struct list_head *entry;
276 struct ipmi_recv_msg *msg;
281 if (copy_from_user(&rsp, arg, sizeof(rsp))) {
286 /* We claim a mutex because we don't want two
287 users getting something from the queue at a time.
288 Since we have to release the spinlock before we can
289 copy the data to the user, it's possible another
290 user will grab something from the queue, too. Then
291 the messages might get out of order if something
292 fails and the message gets put back onto the
293 queue. This mutex prevents that problem. */
294 mutex_lock(&priv->recv_mutex);
296 /* Grab the message off the list. */
297 spin_lock_irqsave(&(priv->recv_msg_lock), flags);
298 if (list_empty(&(priv->recv_msgs))) {
299 spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
/* Take the head but don't free it yet: on error it is re-queued
   at recv_putback_on_err below. */
303 entry = priv->recv_msgs.next;
304 msg = list_entry(entry, struct ipmi_recv_msg, link);
306 spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
/* The caller's address buffer must fit this address type. */
308 addr_len = ipmi_addr_length(msg->addr.addr_type);
309 if (rsp.addr_len < addr_len)
312 goto recv_putback_on_err;
315 if (copy_to_user(rsp.addr, &(msg->addr), addr_len)) {
317 goto recv_putback_on_err;
319 rsp.addr_len = addr_len;
321 rsp.recv_type = msg->recv_type;
322 rsp.msgid = msg->msgid;
323 rsp.msg.netfn = msg->msg.netfn;
324 rsp.msg.cmd = msg->msg.cmd;
326 if (msg->msg.data_len > 0) {
/* Payload larger than the user buffer: truncate for _TRUNC,
   otherwise put the message back and fail. */
327 if (rsp.msg.data_len < msg->msg.data_len) {
329 if (cmd == IPMICTL_RECEIVE_MSG_TRUNC) {
330 msg->msg.data_len = rsp.msg.data_len;
332 goto recv_putback_on_err;
336 if (copy_to_user(rsp.msg.data,
341 goto recv_putback_on_err;
343 rsp.msg.data_len = msg->msg.data_len;
345 rsp.msg.data_len = 0;
348 if (copy_to_user(arg, &rsp, sizeof(rsp))) {
350 goto recv_putback_on_err;
/* Success: the message is consumed and freed. */
353 mutex_unlock(&priv->recv_mutex);
354 ipmi_free_recv_msg(msg);
358 /* If we got an error, put the message back onto
359 the head of the queue. */
360 spin_lock_irqsave(&(priv->recv_msg_lock), flags);
361 list_add(entry, &(priv->recv_msgs));
362 spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
363 mutex_unlock(&priv->recv_mutex);
367 mutex_unlock(&priv->recv_mutex);
/* Register interest in a netfn/cmd pair (any channel). */
371 case IPMICTL_REGISTER_FOR_CMD:
373 struct ipmi_cmdspec val;
375 if (copy_from_user(&val, arg, sizeof(val))) {
380 rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
385 case IPMICTL_UNREGISTER_FOR_CMD:
387 struct ipmi_cmdspec val;
389 if (copy_from_user(&val, arg, sizeof(val))) {
394 rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
/* Channel-masked variants of command (un)registration. */
399 case IPMICTL_REGISTER_FOR_CMD_CHANS:
401 struct ipmi_cmdspec_chans val;
403 if (copy_from_user(&val, arg, sizeof(val))) {
408 rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
413 case IPMICTL_UNREGISTER_FOR_CMD_CHANS:
415 struct ipmi_cmdspec_chans val;
417 if (copy_from_user(&val, arg, sizeof(val))) {
422 rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
/* Enable/disable delivery of asynchronous events to this user. */
427 case IPMICTL_SET_GETS_EVENTS_CMD:
431 if (copy_from_user(&val, arg, sizeof(val))) {
436 rv = ipmi_set_gets_events(priv->user, val);
440 /* The next four are legacy, not per-channel. */
441 case IPMICTL_SET_MY_ADDRESS_CMD:
445 if (copy_from_user(&val, arg, sizeof(val))) {
/* Channel 0 hard-coded: legacy single-channel semantics. */
450 rv = ipmi_set_my_address(priv->user, 0, val);
454 case IPMICTL_GET_MY_ADDRESS_CMD:
459 rv = ipmi_get_my_address(priv->user, 0, &rval);
465 if (copy_to_user(arg, &val, sizeof(val))) {
472 case IPMICTL_SET_MY_LUN_CMD:
476 if (copy_from_user(&val, arg, sizeof(val))) {
481 rv = ipmi_set_my_LUN(priv->user, 0, val);
485 case IPMICTL_GET_MY_LUN_CMD:
490 rv = ipmi_get_my_LUN(priv->user, 0, &rval);
496 if (copy_to_user(arg, &val, sizeof(val))) {
/* Per-channel address/LUN get and set. */
503 case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
505 struct ipmi_channel_lun_address_set val;
507 if (copy_from_user(&val, arg, sizeof(val))) {
512 return ipmi_set_my_address(priv->user, val.channel, val.value);
516 case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
518 struct ipmi_channel_lun_address_set val;
520 if (copy_from_user(&val, arg, sizeof(val))) {
525 rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
529 if (copy_to_user(arg, &val, sizeof(val))) {
536 case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
538 struct ipmi_channel_lun_address_set val;
540 if (copy_from_user(&val, arg, sizeof(val))) {
545 rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
549 case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
551 struct ipmi_channel_lun_address_set val;
553 if (copy_from_user(&val, arg, sizeof(val))) {
558 rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
562 if (copy_to_user(arg, &val, sizeof(val))) {
/* Set/get the per-file default retry parameters that
   IPMICTL_SEND_COMMAND uses. */
569 case IPMICTL_SET_TIMING_PARMS_CMD:
571 struct ipmi_timing_parms parms;
573 if (copy_from_user(&parms, arg, sizeof(parms))) {
578 priv->default_retries = parms.retries;
579 priv->default_retry_time_ms = parms.retry_time_ms;
584 case IPMICTL_GET_TIMING_PARMS_CMD:
586 struct ipmi_timing_parms parms;
588 parms.retries = priv->default_retries;
589 parms.retry_time_ms = priv->default_retry_time_ms;
591 if (copy_to_user(arg, &parms, sizeof(parms))) {
607 * The following code contains code for supporting 32-bit compatible
608 * ioctls on 64-bit kernels. This allows running 32-bit apps on the
/* The compat ioctl numbers differ from the native IPMICTL_* values
   because the ioctl encoding includes sizeof(argument struct), and
   the compat struct layouts below are smaller (32-bit pointers). */
611 #define COMPAT_IPMICTL_SEND_COMMAND \
612 _IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
613 #define COMPAT_IPMICTL_SEND_COMMAND_SETTIME \
614 _IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
615 #define COMPAT_IPMICTL_RECEIVE_MSG \
616 _IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
617 #define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC \
618 _IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)
/*
 * 32-bit mirror layouts of struct ipmi_msg / ipmi_req / ipmi_recv /
 * ipmi_req_settime, using compat_* types so pointers are 32 bits.
 * NOTE(review): elided chunk -- several members (e.g. the compat
 * pointer fields read via __get_user in the helpers below) and the
 * closing braces are missing from view.
 */
620 struct compat_ipmi_msg {
627 struct compat_ipmi_req {
629 compat_uint_t addr_len;
631 struct compat_ipmi_msg msg;
634 struct compat_ipmi_recv {
635 compat_int_t recv_type;
637 compat_uint_t addr_len;
639 struct compat_ipmi_msg msg;
642 struct compat_ipmi_req_settime {
643 struct compat_ipmi_req req;
644 compat_int_t retries;
645 compat_uint_t retry_time_ms;
649 * Define some helper functions for copying IPMI data
/*
 * Copy a 32-bit struct compat_ipmi_msg from userspace into the native
 * 64-bit struct ipmi_msg, widening the data pointer via compat_ptr().
 * NOTE(review): elided chunk -- the declaration of 'tmp' (presumably
 * compat_uptr_t) and the -EFAULT / 0 returns are missing from view.
 */
651 static long get_compat_ipmi_msg(struct ipmi_msg *p64,
652 struct compat_ipmi_msg __user *p32)
/* One access_ok() up front lets the cheaper __get_user() be used for
   each field. */
656 if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
657 __get_user(p64->netfn, &p32->netfn) ||
658 __get_user(p64->cmd, &p32->cmd) ||
659 __get_user(p64->data_len, &p32->data_len) ||
660 __get_user(tmp, &p32->data))
662 p64->data = compat_ptr(tmp);
/*
 * Copy the scalar fields of a native struct ipmi_msg back out to a
 * 32-bit struct compat_ipmi_msg.  The data pointer itself is not
 * written back -- the caller's 32-bit pointer is left untouched.
 * NOTE(review): elided chunk -- the -EFAULT / 0 returns are missing.
 */
666 static long put_compat_ipmi_msg(struct ipmi_msg *p64,
667 struct compat_ipmi_msg __user *p32)
669 if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
670 __put_user(p64->netfn, &p32->netfn) ||
671 __put_user(p64->cmd, &p32->cmd) ||
672 __put_user(p64->data_len, &p32->data_len))
/*
 * Copy a 32-bit struct compat_ipmi_req from userspace into a native
 * struct ipmi_req, widening the address pointer and delegating the
 * embedded message to get_compat_ipmi_msg().
 * NOTE(review): elided chunk -- the 'tmp' declaration and the
 * -EFAULT / 0 returns are missing from view.
 */
677 static long get_compat_ipmi_req(struct ipmi_req *p64,
678 struct compat_ipmi_req __user *p32)
683 if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
684 __get_user(tmp, &p32->addr) ||
685 __get_user(p64->addr_len, &p32->addr_len) ||
686 __get_user(p64->msgid, &p32->msgid) ||
687 get_compat_ipmi_msg(&p64->msg, &p32->msg))
689 p64->addr = compat_ptr(tmp);
/*
 * Copy a 32-bit struct compat_ipmi_req_settime from userspace: the
 * embedded request via get_compat_ipmi_req(), plus the retry fields.
 * NOTE(review): elided chunk -- the -EFAULT / 0 returns are missing.
 */
693 static long get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
694 struct compat_ipmi_req_settime __user *p32)
696 if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
697 get_compat_ipmi_req(&p64->req, &p32->req) ||
698 __get_user(p64->retries, &p32->retries) ||
699 __get_user(p64->retry_time_ms, &p32->retry_time_ms))
/*
 * Copy a 32-bit struct compat_ipmi_recv from userspace into a native
 * struct ipmi_recv, widening the address pointer and delegating the
 * embedded message to get_compat_ipmi_msg().
 * NOTE(review): elided chunk -- the 'tmp' declaration and the
 * -EFAULT / 0 returns are missing from view.
 */
704 static long get_compat_ipmi_recv(struct ipmi_recv *p64,
705 struct compat_ipmi_recv __user *p32)
709 if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
710 __get_user(p64->recv_type, &p32->recv_type) ||
711 __get_user(tmp, &p32->addr) ||
712 __get_user(p64->addr_len, &p32->addr_len) ||
713 __get_user(p64->msgid, &p32->msgid) ||
714 get_compat_ipmi_msg(&p64->msg, &p32->msg))
716 p64->addr = compat_ptr(tmp);
/*
 * Write the results of a native struct ipmi_recv back to the 32-bit
 * userspace struct: scalar fields here, message fields via
 * put_compat_ipmi_msg().  Pointers are not written back.
 * NOTE(review): elided chunk -- the -EFAULT / 0 returns are missing.
 */
720 static long put_compat_ipmi_recv(struct ipmi_recv *p64,
721 struct compat_ipmi_recv __user *p32)
723 if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
724 __put_user(p64->recv_type, &p32->recv_type) ||
725 __put_user(p64->addr_len, &p32->addr_len) ||
726 __put_user(p64->msgid, &p32->msgid) ||
727 put_compat_ipmi_msg(&p64->msg, &p32->msg))
733 * Handle compatibility ioctls
/*
 * compat_ioctl entry point: translates 32-bit ioctl layouts to native
 * ones, then either calls the shared helpers directly (send paths) or
 * bounces through ipmi_ioctl() via a native struct staged in the
 * compat user-space area (receive path).  Unrecognized commands fall
 * through to ipmi_ioctl() unchanged.
 * NOTE(review): elided chunk -- the switch statement, some locals,
 * error returns and case braces are missing from view.
 */
735 static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
739 struct ipmi_file_private *priv = filep->private_data;
742 case COMPAT_IPMICTL_SEND_COMMAND:
746 if (get_compat_ipmi_req(&rp, compat_ptr(arg)))
/* Once translated, sending is identical to the native path. */
749 return handle_send_req(priv->user, &rp,
750 priv->default_retries,
751 priv->default_retry_time_ms);
753 case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
755 struct ipmi_req_settime sp;
757 if (get_compat_ipmi_req_settime(&sp, compat_ptr(arg)))
760 return handle_send_req(priv->user, &sp.req,
761 sp.retries, sp.retry_time_ms);
763 case COMPAT_IPMICTL_RECEIVE_MSG:
764 case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
766 struct ipmi_recv __user *precv64;
767 struct ipmi_recv recv64;
769 if (get_compat_ipmi_recv(&recv64, compat_ptr(arg)))
/* Stage a native struct ipmi_recv in user-accessible memory so the
   native ioctl handler can operate on it directly. */
772 precv64 = compat_alloc_user_space(sizeof(recv64));
773 if (copy_to_user(precv64, &recv64, sizeof(recv64)))
776 rc = ipmi_ioctl(filep->f_dentry->d_inode, filep,
777 ((cmd == COMPAT_IPMICTL_RECEIVE_MSG)
778 ? IPMICTL_RECEIVE_MSG
779 : IPMICTL_RECEIVE_MSG_TRUNC),
780 (unsigned long) precv64);
/* Copy the native result back and translate it into the caller's
   32-bit struct. */
784 if (copy_from_user(&recv64, precv64, sizeof(recv64)))
787 if (put_compat_ipmi_recv(&recv64, compat_ptr(arg)))
/* Default: command needs no translation; use the native handler. */
793 return ipmi_ioctl(filep->f_dentry->d_inode, filep, cmd, arg);
/*
 * File operations for /dev/ipmiN.
 * NOTE(review): elided chunk -- the .ioctl, .open and .poll entries
 * that the functions above imply are missing from view.
 */
798 static const struct file_operations ipmi_fops = {
799 .owner = THIS_MODULE,
802 .compat_ioctl = compat_ipmi_ioctl,
805 .release = ipmi_release,
806 .fasync = ipmi_fasync,
810 #define DEVICE_NAME "ipmidev"
/* Char-device major; 0 = dynamically allocated by register_chrdev(). */
812 static int ipmi_major = 0;
813 module_param(ipmi_major, int, 0);
814 MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By"
815 " default, or if you set it to zero, it will choose the next"
816 " available device. Setting it to -1 will disable the"
817 " interface. Other values will set the major device number"
820 /* Keep track of the devices that are registered. */
/* One entry per created class device, so cleanup_ipmi() can destroy
   any that ipmi_smi_gone() never removed. */
821 struct ipmi_reg_list {
823 struct list_head link;
825 static LIST_HEAD(reg_list);
/* Protects reg_list and serializes class device create/destroy. */
826 static DEFINE_MUTEX(reg_list_mutex);
828 static struct class *ipmi_class;
/*
 * SMI watcher callback: a new IPMI interface appeared.  Create its
 * /sys class device node and remember it on reg_list for teardown.
 * Fix(review): restored '&reg_list'/'&reg_list_mutex' where a
 * mis-encoding had turned the '&reg' prefix into the '®' character.
 * NOTE(review): elided chunk -- the kmalloc NULL test around the
 * printk, 'entry->dev = dev;' and closing braces are missing.
 */
830 static void ipmi_new_smi(int if_num, struct device *device)
832 dev_t dev = MKDEV(ipmi_major, if_num);
833 struct ipmi_reg_list *entry;
835 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
/* Allocation failure is non-fatal: the interface still works, only
   the sysfs link is missing. */
837 printk(KERN_ERR "ipmi_devintf: Unable to create the"
838 " ipmi class device link\n");
843 mutex_lock(&reg_list_mutex);
844 class_device_create(ipmi_class, NULL, dev, device, "ipmi%d", if_num);
845 list_add(&entry->link, &reg_list);
846 mutex_unlock(&reg_list_mutex);
/*
 * SMI watcher callback: an IPMI interface went away.  Drop its entry
 * from reg_list and destroy the corresponding class device.
 * Fix(review): restored '&reg_list'/'&reg_list_mutex' where a
 * mis-encoding had turned the '&reg' prefix into the '®' character.
 * NOTE(review): elided chunk -- the kfree(entry)/break inside the
 * match branch and the closing braces are missing from view.
 */
849 static void ipmi_smi_gone(int if_num)
851 dev_t dev = MKDEV(ipmi_major, if_num);
852 struct ipmi_reg_list *entry;
854 mutex_lock(&reg_list_mutex);
855 list_for_each_entry(entry, &reg_list, link) {
856 if (entry->dev == dev) {
857 list_del(&entry->link);
/* Destroy unconditionally: safe even if no list entry matched. */
862 class_device_destroy(ipmi_class, dev);
863 mutex_unlock(&reg_list_mutex);
/* Registered in init_ipmi_devintf() so we hear about IPMI interfaces
   coming and going and can manage their /sys class devices. */
866 static struct ipmi_smi_watcher smi_watcher =
868 .owner = THIS_MODULE,
869 .new_smi = ipmi_new_smi,
870 .smi_gone = ipmi_smi_gone,
/*
 * Module init: create the "ipmi" class, register the char device
 * (dynamic major if ipmi_major == 0), then register the SMI watcher.
 * Each failure path unwinds everything registered before it.
 * NOTE(review): elided chunk -- 'int rv', the ipmi_major < 0 guard,
 * several braces/returns and the dynamic-major assignment inside the
 * 'if (ipmi_major == 0)' branch are missing from view.
 */
873 static __init int init_ipmi_devintf(void)
880 printk(KERN_INFO "ipmi device interface\n");
882 ipmi_class = class_create(THIS_MODULE, "ipmi");
883 if (IS_ERR(ipmi_class)) {
884 printk(KERN_ERR "ipmi: can't register device class\n");
885 return PTR_ERR(ipmi_class);
888 rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
/* chrdev registration failed: undo the class creation. */
890 class_destroy(ipmi_class);
891 printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
/* With a dynamic major, register_chrdev() returned the allocated
   major -- presumably stored into ipmi_major in the elided body. */
895 if (ipmi_major == 0) {
899 rv = ipmi_smi_watcher_register(&smi_watcher);
/* Watcher registration failed: undo chrdev and class. */
901 unregister_chrdev(ipmi_major, DEVICE_NAME);
902 class_destroy(ipmi_class);
903 printk(KERN_WARNING "ipmi: can't register smi watcher\n");
909 module_init(init_ipmi_devintf);
/*
 * Module exit: destroy any remaining per-interface class devices,
 * then unwind init_ipmi_devintf() in reverse order.
 * Fix(review): restored '&reg_list'/'&reg_list_mutex' where a
 * mis-encoding had turned the '&reg' prefix into the '®' character.
 * NOTE(review): elided chunk -- the loop's closing brace is missing;
 * entries removed here are never kfree()d in the visible code, so
 * each remaining ipmi_reg_list allocation leaks -- confirm against
 * the full source.
 */
911 static __exit void cleanup_ipmi(void)
913 struct ipmi_reg_list *entry, *entry2;
914 mutex_lock(&reg_list_mutex);
/* _safe iteration: entries are unlinked while walking the list. */
915 list_for_each_entry_safe(entry, entry2, &reg_list, link) {
916 list_del(&entry->link);
917 class_device_destroy(ipmi_class, entry->dev);
920 mutex_unlock(&reg_list_mutex);
921 class_destroy(ipmi_class);
922 ipmi_smi_watcher_unregister(&smi_watcher);
923 unregister_chrdev(ipmi_major, DEVICE_NAME);
925 module_exit(cleanup_ipmi);
927 MODULE_LICENSE("GPL");
928 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
929 MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");