4 * Incoming and outgoing message routing for an IPMI interface.
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
10 * Copyright 2002 MontaVista Software Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 #include <linux/config.h>
35 #include <linux/module.h>
36 #include <linux/errno.h>
37 #include <asm/system.h>
38 #include <linux/sched.h>
39 #include <linux/poll.h>
40 #include <linux/spinlock.h>
41 #include <linux/slab.h>
42 #include <linux/ipmi.h>
43 #include <linux/ipmi_smi.h>
44 #include <linux/notifier.h>
45 #include <linux/init.h>
46 #include <linux/proc_fs.h>
47 #include <linux/rcupdate.h>
49 #define PFX "IPMI message handler: "
51 #define IPMI_DRIVER_VERSION "38.0"
53 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
54 static int ipmi_init_msghandler(void);
56 static int initialized = 0;
59 struct proc_dir_entry *proc_ipmi_root = NULL;
60 #endif /* CONFIG_PROC_FS */
62 #define MAX_EVENTS_IN_QUEUE 25
64 /* Don't let a message sit in a queue forever, always time it with at least
65 the max message timer. This is in milliseconds. */
66 #define MAX_MSG_TIMEOUT 60000
70 * The main "user" data structure.
74 struct list_head link;
76 /* Set to "0" when the user is destroyed. */
81 /* The upper layer that handles receive messages. */
82 struct ipmi_user_hndl *handler;
85 /* The interface this user is bound to. */
88 /* Does this interface receive IPMI events? */
94 struct list_head link;
101 * This is used to form a linked list during mass deletion.
102 * Since this is in an RCU list, we cannot use the link above
103 * or change any data until the RCU period completes. So we
104 * use this next variable during mass deletion so we can have
105 * a list and don't have to wait and restart the search on
106 * every individual deletion of a command. */
107 struct cmd_rcvr *next;
112 unsigned int inuse : 1;
113 unsigned int broadcast : 1;
115 unsigned long timeout;
116 unsigned long orig_timeout;
117 unsigned int retries_left;
119 /* To verify on an incoming send message response that this is
120 the message that the response is for, we keep a sequence id
121 and increment it every time we send a message. */
124 /* This is held so we can properly respond to the message on a
125 timeout, and it is used to hold the temporary data for
126 retransmission, too. */
127 struct ipmi_recv_msg *recv_msg;
130 /* Store the information in a msgid (long) to allow us to find a
131 sequence table entry from the msgid. */
/* Store the information in a msgid (long) to allow us to find a
   sequence table entry from the msgid.  The sequence number occupies
   the top 6 bits (IPMI_IPMB_NUM_SEQ == 64) and the per-slot seqid
   the low 26 bits.  The three macros below must agree on those field
   widths: the original masked seq with 0xff (8 bits shifted into a
   6-bit field) and used 22-bit masks in GET/NEXT but a 26-bit mask in
   STORE, so a stored seqid >= 2^22 could never match on retrieval. */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
	do {								\
		seq = (((msgid) >> 26) & 0x3f);				\
		seqid = ((msgid) & 0x3ffffff);				\
	} while (0)

/* Advance a slot's seqid, wrapping within the same 26-bit field. */
#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
144 unsigned char medium;
145 unsigned char protocol;
147 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
148 but may be changed by the user. */
149 unsigned char address;
151 /* My LUN. This should generally stay the SMS LUN, but just in
156 #ifdef CONFIG_PROC_FS
157 struct ipmi_proc_entry
160 struct ipmi_proc_entry *next;
164 #define IPMI_IPMB_NUM_SEQ 64
165 #define IPMI_MAX_CHANNELS 16
168 /* What interface number are we? */
171 struct kref refcount;
173 /* The list of upper layers that are using me. seq_lock
175 struct list_head users;
177 /* Used for wake ups at startup. */
178 wait_queue_head_t waitq;
180 /* The IPMI version of the BMC on the other end. */
181 unsigned char version_major;
182 unsigned char version_minor;
184 /* This is the lower-layer's sender routine. */
185 struct ipmi_smi_handlers *handlers;
188 #ifdef CONFIG_PROC_FS
189 /* A list of proc entries for this interface. This does not
190 need a lock, only one thread creates it and only one thread
192 spinlock_t proc_entry_lock;
193 struct ipmi_proc_entry *proc_entries;
196 /* A table of sequence numbers for this interface. We use the
197 sequence numbers for IPMB messages that go out of the
198 interface to match them up with their responses. A routine
199 is called periodically to time the items in this list. */
201 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
204 /* Messages that were delayed for some reason (out of memory,
205 for instance), will go in here to be processed later in a
206 periodic timer interrupt. */
207 spinlock_t waiting_msgs_lock;
208 struct list_head waiting_msgs;
210 /* The list of command receivers that are registered for commands
211 on this interface. */
212 struct semaphore cmd_rcvrs_lock;
213 struct list_head cmd_rcvrs;
215 /* Events that were queued because no one was there to receive
217 spinlock_t events_lock; /* For dealing with event stuff. */
218 struct list_head waiting_events;
219 unsigned int waiting_events_count; /* How many events in queue? */
221 /* The event receiver for my BMC, only really used at panic
222 shutdown as a place to store this. */
223 unsigned char event_receiver;
224 unsigned char event_receiver_lun;
225 unsigned char local_sel_device;
226 unsigned char local_event_generator;
228 /* A cheap hack, if this is non-null and a message to an
229 interface comes in with a NULL user, call this routine with
230 it. Note that the message will still be freed by the
231 caller. This only works on the system interface. */
232 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
234 /* When we are scanning the channels for an SMI, this will
235 tell which channel we are scanning. */
238 /* Channel information */
239 struct ipmi_channel channels[IPMI_MAX_CHANNELS];
242 struct proc_dir_entry *proc_dir;
243 char proc_dir_name[10];
245 spinlock_t counter_lock; /* For making counters atomic. */
247 /* Commands we got that were invalid. */
248 unsigned int sent_invalid_commands;
250 /* Commands we sent to the MC. */
251 unsigned int sent_local_commands;
252 /* Responses from the MC that were delivered to a user. */
253 unsigned int handled_local_responses;
254 /* Responses from the MC that were not delivered to a user. */
255 unsigned int unhandled_local_responses;
257 /* Commands we sent out to the IPMB bus. */
258 unsigned int sent_ipmb_commands;
259 /* Commands sent on the IPMB that had errors on the SEND CMD */
260 unsigned int sent_ipmb_command_errs;
261 /* Each retransmit increments this count. */
262 unsigned int retransmitted_ipmb_commands;
263 /* When a message times out (runs out of retransmits) this is
265 unsigned int timed_out_ipmb_commands;
267 /* This is like above, but for broadcasts. Broadcasts are
268 *not* included in the above count (they are expected to
270 unsigned int timed_out_ipmb_broadcasts;
272 /* Responses I have sent to the IPMB bus. */
273 unsigned int sent_ipmb_responses;
275 /* The response was delivered to the user. */
276 unsigned int handled_ipmb_responses;
277 /* The response had invalid data in it. */
278 unsigned int invalid_ipmb_responses;
279 /* The response didn't have anyone waiting for it. */
280 unsigned int unhandled_ipmb_responses;
282 /* Commands we sent out to the LAN bus. */
283 unsigned int sent_lan_commands;
284 /* Commands sent on the LAN that had errors on the SEND CMD */
285 unsigned int sent_lan_command_errs;
286 /* Each retransmit increments this count. */
287 unsigned int retransmitted_lan_commands;
288 /* When a message times out (runs out of retransmits) this is
290 unsigned int timed_out_lan_commands;
292 /* Responses I have sent to the LAN bus. */
293 unsigned int sent_lan_responses;
295 /* The response was delivered to the user. */
296 unsigned int handled_lan_responses;
297 /* The response had invalid data in it. */
298 unsigned int invalid_lan_responses;
299 /* The response didn't have anyone waiting for it. */
300 unsigned int unhandled_lan_responses;
302 /* The command was delivered to the user. */
303 unsigned int handled_commands;
304 /* The command had invalid data in it. */
305 unsigned int invalid_commands;
306 /* The command didn't have anyone waiting for it. */
307 unsigned int unhandled_commands;
309 /* Invalid data in an event. */
310 unsigned int invalid_events;
311 /* Events that were received with the proper format. */
315 /* Used to mark an interface entry that cannot be used but is not a
316 * free entry, either, primarily used at creation and deletion time so
317 * a slot doesn't get reused too quickly. */
318 #define IPMI_INVALID_INTERFACE_ENTRY ((ipmi_smi_t) ((long) 1))
319 #define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \
320 || (i == IPMI_INVALID_INTERFACE_ENTRY))
322 #define MAX_IPMI_INTERFACES 4
323 static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
325 /* Directly protects the ipmi_interfaces data structure. */
326 static DEFINE_SPINLOCK(interfaces_lock);
328 /* List of watchers that want to know when smi's are added and
330 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
331 static DECLARE_RWSEM(smi_watchers_sem);
/* Drain a list of queued receive messages, freeing every entry.
 * NOTE(review): braces/blank lines are elided in this extract; code
 * lines are preserved exactly as found. */
334 static void free_recv_msg_list(struct list_head *q)
336 struct ipmi_recv_msg *msg, *msg2;
/* _safe variant: entries are unlinked while the list is walked. */
338 list_for_each_entry_safe(msg, msg2, q, link) {
339 list_del(&msg->link);
340 ipmi_free_recv_msg(msg);
/* Release everything an interface owns: queued messages, queued
 * events, registered command receivers, and any in-flight sequence
 * table entries.  Called when the last reference to the interface
 * goes away (see intf_free). */
344 static void clean_up_interface_data(ipmi_smi_t intf)
347 struct cmd_rcvr *rcvr, *rcvr2;
348 struct list_head list;
350 free_recv_msg_list(&intf->waiting_msgs);
351 free_recv_msg_list(&intf->waiting_events);
353 /* Wholesale remove all the entries from the list in the
354 * interface and wait for RCU to know that none are in use. */
355 down(&intf->cmd_rcvrs_lock);
/* Splice trick: insert our local head then unlink the old head, so
 * the whole chain moves to 'list' in O(1) under the lock. */
356 list_add_rcu(&list, &intf->cmd_rcvrs);
357 list_del_rcu(&intf->cmd_rcvrs);
358 up(&intf->cmd_rcvrs_lock);
361 list_for_each_entry_safe(rcvr, rcvr2, &list, link)
/* Free receive messages still parked in the sequence table. */
364 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
365 if ((intf->seq_table[i].inuse)
366 && (intf->seq_table[i].recv_msg))
368 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
/* kref release callback for an interface: recover the containing
 * struct ipmi_smi from its refcount member and tear it down. */
373 static void intf_free(struct kref *ref)
375 ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
377 clean_up_interface_data(intf);
/* Register a watcher to be told when SMI interfaces come and go,
 * then immediately replay "new interface" callbacks for every
 * interface that already exists.  NOTE(review): the lock is dropped
 * around each callback (elided here) so the watcher may sleep;
 * confirm against the full source. */
381 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
386 down_write(&smi_watchers_sem);
387 list_add(&(watcher->link), &smi_watchers);
388 up_write(&smi_watchers_sem);
389 spin_lock_irqsave(&interfaces_lock, flags);
390 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
391 ipmi_smi_t intf = ipmi_interfaces[i];
/* Skip NULL slots and the "being created/deleted" placeholder. */
392 if (IPMI_INVALID_INTERFACE(intf))
394 spin_unlock_irqrestore(&interfaces_lock, flags);
396 spin_lock_irqsave(&interfaces_lock, flags);
398 spin_unlock_irqrestore(&interfaces_lock, flags);
/* Remove a previously-registered SMI watcher; no callbacks will be
 * delivered once the semaphore is released. */
402 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
404 down_write(&smi_watchers_sem);
405 list_del(&(watcher->link));
406 up_write(&smi_watchers_sem);
/* Notify every registered watcher about interface number 'i',
 * pinning each watcher's module across its callback so it cannot be
 * unloaded mid-call. */
411 call_smi_watchers(int i)
413 struct ipmi_smi_watcher *w;
415 down_read(&smi_watchers_sem);
416 list_for_each_entry(w, &smi_watchers, link) {
417 if (try_module_get(w->owner)) {
419 module_put(w->owner);
422 up_read(&smi_watchers_sem);
/* Compare two IPMI addresses for equality.  Type and channel must
 * match first; then the type-specific fields are compared (LUN for
 * the system interface; slave addr + LUN for IPMB; SWIDs, session
 * handle and LUN for LAN). */
426 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
428 if (addr1->addr_type != addr2->addr_type)
431 if (addr1->channel != addr2->channel)
434 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
435 struct ipmi_system_interface_addr *smi_addr1
436 = (struct ipmi_system_interface_addr *) addr1;
437 struct ipmi_system_interface_addr *smi_addr2
438 = (struct ipmi_system_interface_addr *) addr2;
439 return (smi_addr1->lun == smi_addr2->lun);
442 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
443 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
445 struct ipmi_ipmb_addr *ipmb_addr1
446 = (struct ipmi_ipmb_addr *) addr1;
447 struct ipmi_ipmb_addr *ipmb_addr2
448 = (struct ipmi_ipmb_addr *) addr2;
450 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
451 && (ipmb_addr1->lun == ipmb_addr2->lun));
454 if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
455 struct ipmi_lan_addr *lan_addr1
456 = (struct ipmi_lan_addr *) addr1;
457 struct ipmi_lan_addr *lan_addr2
458 = (struct ipmi_lan_addr *) addr2;
460 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
461 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
462 && (lan_addr1->session_handle
463 == lan_addr2->session_handle)
464 && (lan_addr1->lun == lan_addr2->lun));
/* Validate a user-supplied address: the buffer must be large enough
 * for its declared type, and the channel must be sane for that type
 * (only IPMI_BMC_CHANNEL is legal on the system interface; real
 * channels must be in [0, IPMI_NUM_CHANNELS)).  Error return paths
 * are elided in this extract — presumably -EINVAL. */
470 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
472 if (len < sizeof(struct ipmi_system_interface_addr)) {
476 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
477 if (addr->channel != IPMI_BMC_CHANNEL)
482 if ((addr->channel == IPMI_BMC_CHANNEL)
483 || (addr->channel >= IPMI_NUM_CHANNELS)
484 || (addr->channel < 0))
487 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
488 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
490 if (len < sizeof(struct ipmi_ipmb_addr)) {
496 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
497 if (len < sizeof(struct ipmi_lan_addr)) {
/* Return the size in bytes of the address structure for the given
 * address type (system interface, IPMB/IPMB-broadcast, or LAN).
 * The fallthrough for unknown types is elided in this extract. */
506 unsigned int ipmi_addr_length(int addr_type)
508 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
509 return sizeof(struct ipmi_system_interface_addr);
511 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
512 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
514 return sizeof(struct ipmi_ipmb_addr);
517 if (addr_type == IPMI_LAN_ADDR_TYPE)
518 return sizeof(struct ipmi_lan_addr);
/* Hand a completed receive message to its owner.  Messages with no
 * user go to the interface's null_user_handler if one is set
 * (counted as handled), otherwise they are dropped (counted as
 * unhandled) — either way the message is freed here.  Messages with
 * a user are passed to the user's receive handler, which then owns
 * the message.  NOTE(review): the "if (!msg->user)" guard separating
 * the two paths is elided in this extract. */
523 static void deliver_response(struct ipmi_recv_msg *msg)
526 ipmi_smi_t intf = msg->user_msg_data;
529 /* Special handling for NULL users. */
530 if (intf->null_user_handler) {
531 intf->null_user_handler(intf, msg);
532 spin_lock_irqsave(&intf->counter_lock, flags);
533 intf->handled_local_responses++;
534 spin_unlock_irqrestore(&intf->counter_lock, flags);
536 /* No handler, so give up. */
537 spin_lock_irqsave(&intf->counter_lock, flags);
538 intf->unhandled_local_responses++;
539 spin_unlock_irqrestore(&intf->counter_lock, flags);
541 ipmi_free_recv_msg(msg);
543 ipmi_user_t user = msg->user;
544 user->handler->ipmi_recv_hndl(msg, user->handler_data);
548 /* Find the next sequence number not being used and add the given
549 message with the given timeout to the sequence table. This must be
550 called with the interface's seq_lock held. */
551 static int intf_next_seq(ipmi_smi_t intf,
552 struct ipmi_recv_msg *recv_msg,
553 unsigned long timeout,
/* Circular scan starting at curr_seq: stop one short of a full lap
 * or as soon as a free slot is found (early exit elided here). */
562 for (i = intf->curr_seq;
563 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
564 i = (i+1)%IPMI_IPMB_NUM_SEQ)
566 if (! intf->seq_table[i].inuse)
/* Re-check: if the scan wrapped without finding a free slot, the
 * table is full and (elided) an error is returned instead. */
570 if (! intf->seq_table[i].inuse) {
571 intf->seq_table[i].recv_msg = recv_msg;
573 /* Start with the maximum timeout, when the send response
574 comes in we will start the real timer. */
575 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
576 intf->seq_table[i].orig_timeout = timeout;
577 intf->seq_table[i].retries_left = retries;
578 intf->seq_table[i].broadcast = broadcast;
579 intf->seq_table[i].inuse = 1;
/* Bump the slot's generation id so stale responses can't match. */
580 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
582 *seqid = intf->seq_table[i].seqid;
583 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
591 /* Return the receive message for the given sequence number and
592 release the sequence number so it can be reused. Some other data
593 is passed in to be sure the message matches up correctly (to help
594 guard against message coming in after their timeout and the
595 sequence number being reused). */
596 static int intf_find_seq(ipmi_smi_t intf,
601 struct ipmi_addr *addr,
602 struct ipmi_recv_msg **recv_msg)
/* Bounds-check the caller-supplied sequence number first. */
607 if (seq >= IPMI_IPMB_NUM_SEQ)
610 spin_lock_irqsave(&(intf->seq_lock), flags);
611 if (intf->seq_table[seq].inuse) {
612 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
/* Match channel, cmd, netfn AND full address before releasing the
 * slot — guards against a stale response reusing the seq number. */
614 if ((msg->addr.channel == channel)
615 && (msg->msg.cmd == cmd)
616 && (msg->msg.netfn == netfn)
617 && (ipmi_addr_equal(addr, &(msg->addr))))
620 intf->seq_table[seq].inuse = 0;
624 spin_unlock_irqrestore(&(intf->seq_lock), flags);
630 /* Start the timer for a specific sequence table entry. */
631 static int intf_start_seq_timer(ipmi_smi_t intf,
/* Unpack seq + generation id from the msgid the lower layer echoes. */
640 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
642 spin_lock_irqsave(&(intf->seq_lock), flags);
643 /* We do this verification because the user can be deleted
644 while a message is outstanding. */
645 if ((intf->seq_table[seq].inuse)
646 && (intf->seq_table[seq].seqid == seqid))
648 struct seq_table *ent = &(intf->seq_table[seq]);
/* Switch from the provisional MAX_MSG_TIMEOUT to the real timeout. */
649 ent->timeout = ent->orig_timeout;
652 spin_unlock_irqrestore(&(intf->seq_lock), flags);
657 /* Got an error for the send message for a specific sequence number. */
658 static int intf_err_seq(ipmi_smi_t intf,
666 struct ipmi_recv_msg *msg = NULL;
669 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
671 spin_lock_irqsave(&(intf->seq_lock), flags);
672 /* We do this verification because the user can be deleted
673 while a message is outstanding. */
674 if ((intf->seq_table[seq].inuse)
675 && (intf->seq_table[seq].seqid == seqid))
677 struct seq_table *ent = &(intf->seq_table[seq]);
683 spin_unlock_irqrestore(&(intf->seq_lock), flags);
/* Fabricate a one-byte error response and deliver it to the user
 * (delivery happens after the lock is dropped). */
686 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
687 msg->msg_data[0] = err;
688 msg->msg.netfn |= 1; /* Convert to a response. */
689 msg->msg.data_len = 1;
690 msg->msg.data = msg->msg_data;
691 deliver_response(msg);
/* Create a new user handle bound to interface 'if_num'.  Allocates
 * the user, takes a reference on the interface and pins the lower
 * layer's module, then links the user into the interface's RCU user
 * list.  Error unwinding paths (freeing new_user, dropping the
 * refcount) are partially elided in this extract. */
698 int ipmi_create_user(unsigned int if_num,
699 struct ipmi_user_hndl *handler,
704 ipmi_user_t new_user;
708 /* There is no module usecount here, because it's not
709 required. Since this can only be used by and called from
710 other modules, they will implicitly use this module, and
711 thus this can't be removed unless the other modules are
717 /* Make sure the driver is actually initialized, this handles
718 problems with initialization order. */
720 rv = ipmi_init_msghandler();
724 /* The init code doesn't return an error if it was turned
725 off, but it won't initialize. Check that. */
730 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
734 spin_lock_irqsave(&interfaces_lock, flags);
735 intf = ipmi_interfaces[if_num];
/* NOTE(review): if_num indexes the array before the bounds check on
 * the next line — the read of ipmi_interfaces[if_num] should follow
 * the range test; confirm against the full source/upstream fix. */
736 if ((if_num >= MAX_IPMI_INTERFACES) || IPMI_INVALID_INTERFACE(intf)) {
737 spin_unlock_irqrestore(&interfaces_lock, flags);
741 /* Note that each existing user holds a refcount to the interface. */
742 kref_get(&intf->refcount);
743 spin_unlock_irqrestore(&interfaces_lock, flags);
745 kref_init(&new_user->refcount);
746 new_user->handler = handler;
747 new_user->handler_data = handler_data;
748 new_user->intf = intf;
749 new_user->gets_events = 0;
/* Pin the lower-layer driver so it cannot unload under us. */
751 if (!try_module_get(intf->handlers->owner)) {
756 if (intf->handlers->inc_usecount) {
757 rv = intf->handlers->inc_usecount(intf->send_info);
759 module_put(intf->handlers->owner);
/* seq_lock also protects the interface's user list. */
765 spin_lock_irqsave(&intf->seq_lock, flags);
766 list_add_rcu(&new_user->link, &intf->users);
767 spin_unlock_irqrestore(&intf->seq_lock, flags);
/* Error path: drop the interface reference taken above. */
773 kref_put(&intf->refcount, intf_free);
/* kref release callback for a user: recover the containing
 * struct ipmi_user and free it. */
777 static void free_user(struct kref *ref)
779 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
/* Tear down a user: unlink it from the interface, invalidate its
 * outstanding sequence-table entries, unregister its command
 * receivers (RCU-safely, via a private 'rcvrs' chain), release the
 * lower-layer module/usecount, and drop the interface and user
 * references. */
783 int ipmi_destroy_user(ipmi_user_t user)
786 ipmi_smi_t intf = user->intf;
789 struct cmd_rcvr *rcvr;
790 struct cmd_rcvr *rcvrs = NULL;
794 /* Remove the user from the interface's sequence table. */
795 spin_lock_irqsave(&intf->seq_lock, flags);
796 list_del_rcu(&user->link);
798 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
799 if (intf->seq_table[i].inuse
800 && (intf->seq_table[i].recv_msg->user == user))
802 intf->seq_table[i].inuse = 0;
805 spin_unlock_irqrestore(&intf->seq_lock, flags);
808 * Remove the user from the command receiver's table. First
809 * we build a list of everything (not using the standard link,
810 * since other things may be using it till we do
811 * synchronize_rcu()) then free everything in that list.
813 down(&intf->cmd_rcvrs_lock);
814 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
815 if (rcvr->user == user) {
816 list_del_rcu(&rcvr->link);
821 up(&intf->cmd_rcvrs_lock);
/* Release the module/usecount pins taken in ipmi_create_user. */
829 module_put(intf->handlers->owner);
830 if (intf->handlers->dec_usecount)
831 intf->handlers->dec_usecount(intf->send_info);
833 kref_put(&intf->refcount, intf_free);
835 kref_put(&user->refcount, free_user);
/* Report the IPMI spec version of the BMC behind this user's
 * interface (as cached at interface-discovery time). */
840 void ipmi_get_version(ipmi_user_t user,
841 unsigned char *major,
842 unsigned char *minor)
844 *major = user->intf->version_major;
845 *minor = user->intf->version_minor;
/* Per-channel slave-address and LUN accessors.  Each validates the
 * channel index; the -EINVAL returns on the guard lines are elided
 * in this extract. */

/* Set our slave address on the given channel. */
848 int ipmi_set_my_address(ipmi_user_t user,
849 unsigned int channel,
850 unsigned char address)
852 if (channel >= IPMI_MAX_CHANNELS)
854 user->intf->channels[channel].address = address;

/* Read back our slave address on the given channel. */
858 int ipmi_get_my_address(ipmi_user_t user,
859 unsigned int channel,
860 unsigned char *address)
862 if (channel >= IPMI_MAX_CHANNELS)
864 *address = user->intf->channels[channel].address;

/* Set our LUN on the given channel; only the low 2 bits are kept. */
868 int ipmi_set_my_LUN(ipmi_user_t user,
869 unsigned int channel,
872 if (channel >= IPMI_MAX_CHANNELS)
874 user->intf->channels[channel].lun = LUN & 0x3;

/* Read back our LUN on the given channel.  NOTE(review): the out
 * parameter is named 'address' but carries the LUN. */
878 int ipmi_get_my_LUN(ipmi_user_t user,
879 unsigned int channel,
880 unsigned char *address)
882 if (channel >= IPMI_MAX_CHANNELS)
884 *address = user->intf->channels[channel].lun;
/* Enable or disable event delivery for this user.  When enabling,
 * any events queued on the interface are moved to a local list and
 * delivered immediately, still under events_lock so ordering with
 * newly arriving events is preserved. */
888 int ipmi_set_gets_events(ipmi_user_t user, int val)
891 ipmi_smi_t intf = user->intf;
892 struct ipmi_recv_msg *msg, *msg2;
893 struct list_head msgs;
895 INIT_LIST_HEAD(&msgs);
897 spin_lock_irqsave(&intf->events_lock, flags);
898 user->gets_events = val;
901 /* Deliver any queued events. */
902 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) {
903 list_del(&msg->link);
904 list_add_tail(&msg->link, &msgs);
908 /* Hold the events lock while doing this to preserve order. */
909 list_for_each_entry_safe(msg, msg2, &msgs, link) {
/* Each delivered event carries a user reference (dropped by the
 * receiver when it frees the message). */
911 kref_get(&user->refcount);
912 deliver_response(msg);
915 spin_unlock_irqrestore(&intf->events_lock, flags);
/* Look up the command receiver registered for (netfn, cmd) on this
 * interface.  Walks the RCU-protected list; caller must be in an
 * RCU read section or hold cmd_rcvrs_lock. */
920 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
924 struct cmd_rcvr *rcvr;
926 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
927 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd))
/* Register this user to receive incoming commands matching
 * (netfn, cmd).  Allocation happens before taking the lock; the
 * duplicate-registration error path (freeing rcvr, returning EBUSY)
 * is elided in this extract. */
933 int ipmi_register_for_cmd(ipmi_user_t user,
937 ipmi_smi_t intf = user->intf;
938 struct cmd_rcvr *rcvr;
939 struct cmd_rcvr *entry;
943 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
950 down(&intf->cmd_rcvrs_lock);
951 /* Make sure the command/netfn is not already registered. */
952 entry = find_cmd_rcvr(intf, netfn, cmd);
958 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
961 up(&intf->cmd_rcvrs_lock);
/* Unregister this user's (netfn, cmd) receiver.  The entry is only
 * removed if it belongs to the calling user; the actual kfree after
 * synchronize_rcu() is elided in this extract. */
968 int ipmi_unregister_for_cmd(ipmi_user_t user,
972 ipmi_smi_t intf = user->intf;
973 struct cmd_rcvr *rcvr;
975 down(&intf->cmd_rcvrs_lock);
976 /* Make sure the command/netfn is not already registered. */
977 rcvr = find_cmd_rcvr(intf, netfn, cmd);
978 if ((rcvr) && (rcvr->user == user)) {
979 list_del_rcu(&rcvr->link);
980 up(&intf->cmd_rcvrs_lock);
/* Not found / not owned by this user: drop the lock and fail. */
985 up(&intf->cmd_rcvrs_lock);
/* Pass run-to-completion (polled, no-interrupt) mode straight down
 * to the lower-layer driver; used around panic/shutdown paths. */
990 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
992 ipmi_smi_t intf = user->intf;
993 intf->handlers->set_run_to_completion(intf->send_info, val);
/* Compute the IPMB 2's-complement checksum over 'size' bytes: sum
 * the bytes, then (elided here) return the negated sum so that
 * sum + checksum == 0 mod 256. */
997 ipmb_checksum(unsigned char *data, int size)
999 unsigned char csum = 0;
1001 for (; size > 0; size--, data++)
/* Wrap a kernel IPMI message in a Send Message command carrying an
 * IPMB frame: header, two IPMB checksums, then the payload.  The
 * offset 'i' is 0 normally and (in the elided broadcast branch) 1,
 * with data[3] = 0 as the broadcast slave address slot. */
1007 static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
1008 struct kernel_ipmi_msg *msg,
1009 struct ipmi_ipmb_addr *ipmb_addr,
1011 unsigned char ipmb_seq,
1013 unsigned char source_address,
1014 unsigned char source_lun)
1018 /* Format the IPMB header data. */
1019 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1020 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1021 smi_msg->data[2] = ipmb_addr->channel;
/* Broadcast case (guard elided): shift the frame by one byte. */
1023 smi_msg->data[3] = 0;
1024 smi_msg->data[i+3] = ipmb_addr->slave_addr;
1025 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
/* First checksum covers destination address + netfn/LUN byte. */
1026 smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
1027 smi_msg->data[i+6] = source_address;
1028 smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1029 smi_msg->data[i+8] = msg->cmd;
1031 /* Now tack on the data to the message. */
1032 if (msg->data_len > 0)
1033 memcpy(&(smi_msg->data[i+9]), msg->data,
1035 smi_msg->data_size = msg->data_len + 9;
1037 /* Now calculate the checksum and tack it on. */
1038 smi_msg->data[i+smi_msg->data_size]
1039 = ipmb_checksum(&(smi_msg->data[i+6]),
1040 smi_msg->data_size-6);
1042 /* Add on the checksum size and the offset from the
1044 smi_msg->data_size += 1 + i;
1046 smi_msg->msgid = msgid;
/* Wrap a kernel IPMI message in a Send Message command carrying a
 * LAN-bridged frame.  Same structure as format_ipmb_msg but with a
 * session handle and SWIDs instead of slave addresses, and no
 * broadcast offset. */
1049 static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1050 struct kernel_ipmi_msg *msg,
1051 struct ipmi_lan_addr *lan_addr,
1053 unsigned char ipmb_seq,
1054 unsigned char source_lun)
1056 /* Format the IPMB header data. */
1057 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1058 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1059 smi_msg->data[2] = lan_addr->channel;
1060 smi_msg->data[3] = lan_addr->session_handle;
1061 smi_msg->data[4] = lan_addr->remote_SWID;
1062 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
/* First checksum covers remote SWID + netfn/LUN byte. */
1063 smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
1064 smi_msg->data[7] = lan_addr->local_SWID;
1065 smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1066 smi_msg->data[9] = msg->cmd;
1068 /* Now tack on the data to the message. */
1069 if (msg->data_len > 0)
1070 memcpy(&(smi_msg->data[10]), msg->data,
1072 smi_msg->data_size = msg->data_len + 10;
1074 /* Now calculate the checksum and tack it on. */
1075 smi_msg->data[smi_msg->data_size]
1076 = ipmb_checksum(&(smi_msg->data[7]),
1077 smi_msg->data_size-7);
1079 /* Add on the checksum size and the offset from the
1081 smi_msg->data_size += 1;
1083 smi_msg->msgid = msgid;
1086 /* Separate from ipmi_request so that the user does not have to be
1087 supplied in certain circumstances (mainly at panic time). If
1088 messages are supplied, they will be freed, even if an error
1090 static int i_ipmi_request(ipmi_user_t user,
1092 struct ipmi_addr *addr,
1094 struct kernel_ipmi_msg *msg,
1095 void *user_msg_data,
1097 struct ipmi_recv_msg *supplied_recv,
1099 unsigned char source_address,
1100 unsigned char source_lun,
1102 unsigned int retry_time_ms)
1105 struct ipmi_smi_msg *smi_msg;
1106 struct ipmi_recv_msg *recv_msg;
1107 unsigned long flags;
1110 if (supplied_recv) {
1111 recv_msg = supplied_recv;
1113 recv_msg = ipmi_alloc_recv_msg();
1114 if (recv_msg == NULL) {
1118 recv_msg->user_msg_data = user_msg_data;
1121 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1123 smi_msg = ipmi_alloc_smi_msg();
1124 if (smi_msg == NULL) {
1125 ipmi_free_recv_msg(recv_msg);
1130 recv_msg->user = user;
1132 kref_get(&user->refcount);
1133 recv_msg->msgid = msgid;
1134 /* Store the message to send in the receive message so timeout
1135 responses can get the proper response data. */
1136 recv_msg->msg = *msg;
1138 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1139 struct ipmi_system_interface_addr *smi_addr;
1141 if (msg->netfn & 1) {
1142 /* Responses are not allowed to the SMI. */
1147 smi_addr = (struct ipmi_system_interface_addr *) addr;
1148 if (smi_addr->lun > 3) {
1149 spin_lock_irqsave(&intf->counter_lock, flags);
1150 intf->sent_invalid_commands++;
1151 spin_unlock_irqrestore(&intf->counter_lock, flags);
1156 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1158 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1159 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1160 || (msg->cmd == IPMI_GET_MSG_CMD)
1161 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
1163 /* We don't let the user do these, since we manage
1164 the sequence numbers. */
1165 spin_lock_irqsave(&intf->counter_lock, flags);
1166 intf->sent_invalid_commands++;
1167 spin_unlock_irqrestore(&intf->counter_lock, flags);
1172 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1173 spin_lock_irqsave(&intf->counter_lock, flags);
1174 intf->sent_invalid_commands++;
1175 spin_unlock_irqrestore(&intf->counter_lock, flags);
1180 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1181 smi_msg->data[1] = msg->cmd;
1182 smi_msg->msgid = msgid;
1183 smi_msg->user_data = recv_msg;
1184 if (msg->data_len > 0)
1185 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1186 smi_msg->data_size = msg->data_len + 2;
1187 spin_lock_irqsave(&intf->counter_lock, flags);
1188 intf->sent_local_commands++;
1189 spin_unlock_irqrestore(&intf->counter_lock, flags);
1190 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1191 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
1193 struct ipmi_ipmb_addr *ipmb_addr;
1194 unsigned char ipmb_seq;
1198 if (addr->channel >= IPMI_MAX_CHANNELS) {
1199 spin_lock_irqsave(&intf->counter_lock, flags);
1200 intf->sent_invalid_commands++;
1201 spin_unlock_irqrestore(&intf->counter_lock, flags);
1206 if (intf->channels[addr->channel].medium
1207 != IPMI_CHANNEL_MEDIUM_IPMB)
1209 spin_lock_irqsave(&intf->counter_lock, flags);
1210 intf->sent_invalid_commands++;
1211 spin_unlock_irqrestore(&intf->counter_lock, flags);
1217 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1218 retries = 0; /* Don't retry broadcasts. */
1222 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1223 /* Broadcasts add a zero at the beginning of the
1224 message, but otherwise is the same as an IPMB
1226 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1231 /* Default to 1 second retries. */
1232 if (retry_time_ms == 0)
1233 retry_time_ms = 1000;
1235 /* 9 for the header and 1 for the checksum, plus
1236 possibly one for the broadcast. */
1237 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1238 spin_lock_irqsave(&intf->counter_lock, flags);
1239 intf->sent_invalid_commands++;
1240 spin_unlock_irqrestore(&intf->counter_lock, flags);
1245 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1246 if (ipmb_addr->lun > 3) {
1247 spin_lock_irqsave(&intf->counter_lock, flags);
1248 intf->sent_invalid_commands++;
1249 spin_unlock_irqrestore(&intf->counter_lock, flags);
1254 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1256 if (recv_msg->msg.netfn & 0x1) {
1257 /* It's a response, so use the user's sequence
1259 spin_lock_irqsave(&intf->counter_lock, flags);
1260 intf->sent_ipmb_responses++;
1261 spin_unlock_irqrestore(&intf->counter_lock, flags);
1262 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1264 source_address, source_lun);
1266 /* Save the receive message so we can use it
1267 to deliver the response. */
1268 smi_msg->user_data = recv_msg;
1270 /* It's a command, so get a sequence for it. */
1272 spin_lock_irqsave(&(intf->seq_lock), flags);
1274 spin_lock(&intf->counter_lock);
1275 intf->sent_ipmb_commands++;
1276 spin_unlock(&intf->counter_lock);
1278 /* Create a sequence number with a 1 second
1279 timeout and 4 retries. */
1280 rv = intf_next_seq(intf,
1288 /* We have used up all the sequence numbers,
1289 probably, so abort. */
1290 spin_unlock_irqrestore(&(intf->seq_lock),
1295 /* Store the sequence number in the message,
1296 so that when the send message response
1297 comes back we can start the timer. */
1298 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1299 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1300 ipmb_seq, broadcast,
1301 source_address, source_lun);
1303 /* Copy the message into the recv message data, so we
1304 can retransmit it later if necessary. */
1305 memcpy(recv_msg->msg_data, smi_msg->data,
1306 smi_msg->data_size);
1307 recv_msg->msg.data = recv_msg->msg_data;
1308 recv_msg->msg.data_len = smi_msg->data_size;
1310 /* We don't unlock until here, because we need
1311 to copy the completed message into the
1312 recv_msg before we release the lock.
1313 Otherwise, race conditions may bite us. I
1314 know that's pretty paranoid, but I prefer
1316 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1318 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
1319 struct ipmi_lan_addr *lan_addr;
1320 unsigned char ipmb_seq;
1323 if (addr->channel >= IPMI_NUM_CHANNELS) {
1324 spin_lock_irqsave(&intf->counter_lock, flags);
1325 intf->sent_invalid_commands++;
1326 spin_unlock_irqrestore(&intf->counter_lock, flags);
1331 if ((intf->channels[addr->channel].medium
1332 != IPMI_CHANNEL_MEDIUM_8023LAN)
1333 && (intf->channels[addr->channel].medium
1334 != IPMI_CHANNEL_MEDIUM_ASYNC))
1336 spin_lock_irqsave(&intf->counter_lock, flags);
1337 intf->sent_invalid_commands++;
1338 spin_unlock_irqrestore(&intf->counter_lock, flags);
1345 /* Default to 1 second retries. */
1346 if (retry_time_ms == 0)
1347 retry_time_ms = 1000;
1349 /* 11 for the header and 1 for the checksum. */
1350 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1351 spin_lock_irqsave(&intf->counter_lock, flags);
1352 intf->sent_invalid_commands++;
1353 spin_unlock_irqrestore(&intf->counter_lock, flags);
1358 lan_addr = (struct ipmi_lan_addr *) addr;
1359 if (lan_addr->lun > 3) {
1360 spin_lock_irqsave(&intf->counter_lock, flags);
1361 intf->sent_invalid_commands++;
1362 spin_unlock_irqrestore(&intf->counter_lock, flags);
1367 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1369 if (recv_msg->msg.netfn & 0x1) {
1370 /* It's a response, so use the user's sequence
1372 spin_lock_irqsave(&intf->counter_lock, flags);
1373 intf->sent_lan_responses++;
1374 spin_unlock_irqrestore(&intf->counter_lock, flags);
1375 format_lan_msg(smi_msg, msg, lan_addr, msgid,
1378 /* Save the receive message so we can use it
1379 to deliver the response. */
1380 smi_msg->user_data = recv_msg;
1382 /* It's a command, so get a sequence for it. */
1384 spin_lock_irqsave(&(intf->seq_lock), flags);
1386 spin_lock(&intf->counter_lock);
1387 intf->sent_lan_commands++;
1388 spin_unlock(&intf->counter_lock);
1390 /* Create a sequence number with a 1 second
1391 timeout and 4 retries. */
1392 rv = intf_next_seq(intf,
1400 /* We have used up all the sequence numbers,
1401 probably, so abort. */
1402 spin_unlock_irqrestore(&(intf->seq_lock),
1407 /* Store the sequence number in the message,
1408 so that when the send message response
1409 comes back we can start the timer. */
1410 format_lan_msg(smi_msg, msg, lan_addr,
1411 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1412 ipmb_seq, source_lun);
1414 /* Copy the message into the recv message data, so we
1415 can retransmit it later if necessary. */
1416 memcpy(recv_msg->msg_data, smi_msg->data,
1417 smi_msg->data_size);
1418 recv_msg->msg.data = recv_msg->msg_data;
1419 recv_msg->msg.data_len = smi_msg->data_size;
1421 /* We don't unlock until here, because we need
1422 to copy the completed message into the
1423 recv_msg before we release the lock.
1424 Otherwise, race conditions may bite us. I
1425 know that's pretty paranoid, but I prefer
1427 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1430 /* Unknown address type. */
1431 spin_lock_irqsave(&intf->counter_lock, flags);
1432 intf->sent_invalid_commands++;
1433 spin_unlock_irqrestore(&intf->counter_lock, flags);
1441 for (m = 0; m < smi_msg->data_size; m++)
1442 printk(" %2.2x", smi_msg->data[m]);
1446 intf->handlers->sender(intf->send_info, smi_msg, priority);
1451 ipmi_free_smi_msg(smi_msg);
1452 ipmi_free_recv_msg(recv_msg);
1456 static int check_addr(ipmi_smi_t intf,
1457 struct ipmi_addr *addr,
1458 unsigned char *saddr,
1461 if (addr->channel >= IPMI_MAX_CHANNELS)
1463 *lun = intf->channels[addr->channel].lun;
1464 *saddr = intf->channels[addr->channel].address;
/*
 * Send an IPMI message with a caller-specified retry count and retry
 * interval.  The per-channel default slave address and LUN are looked
 * up via check_addr() before delegating to i_ipmi_request().
 * (Excerpt: the error-return after check_addr() and the full argument
 * list of the i_ipmi_request() call are not visible in this view.)
 */
int ipmi_request_settime(ipmi_user_t user,
			 struct ipmi_addr *addr,
			 struct kernel_ipmi_msg *msg,
			 void *user_msg_data,
			 unsigned int retry_time_ms)
	unsigned char saddr, lun;

	/* Fill in the channel's default source address and LUN. */
	rv = check_addr(user->intf, addr, &saddr, &lun);
	return i_ipmi_request(user,
/*
 * Send an IPMI message using caller-supplied SMI and receive message
 * buffers (@supplied_recv), avoiding allocation in the message path.
 * Address resolution mirrors ipmi_request_settime().
 * (Excerpt: the error-return after check_addr() and the full argument
 * list of the i_ipmi_request() call are not visible in this view.)
 */
int ipmi_request_supply_msgs(ipmi_user_t user,
			     struct ipmi_addr *addr,
			     struct kernel_ipmi_msg *msg,
			     void *user_msg_data,
			     struct ipmi_recv_msg *supplied_recv,
	unsigned char saddr, lun;

	/* Fill in the channel's default source address and LUN. */
	rv = check_addr(user->intf, addr, &saddr, &lun);
	return i_ipmi_request(user,
/*
 * /proc read handler: print the IPMB slave address of every channel
 * in hex, space-separated, newline-terminated.  The off/count/eof
 * arguments of the legacy read_proc interface are unused here.
 */
static int ipmb_file_read_proc(char *page, char **start, off_t off,
			       int count, int *eof, void *data)
	char *out = (char *) page;
	ipmi_smi_t intf = data;

	for (i = 0; i < IPMI_MAX_CHANNELS; i++)
		rv += sprintf(out+rv, "%x ", intf->channels[i].address);
	out[rv-1] = '\n'; /* Replace the final space with a newline */
1546 static int version_file_read_proc(char *page, char **start, off_t off,
1547 int count, int *eof, void *data)
1549 char *out = (char *) page;
1550 ipmi_smi_t intf = data;
1552 return sprintf(out, "%d.%d\n",
1553 intf->version_major, intf->version_minor);
/*
 * /proc read handler: dump every interface statistics counter, one
 * "name: value" pair per line.  Counters are read without taking
 * counter_lock; a momentarily stale value is acceptable for
 * diagnostic output.  Returns the number of bytes written.
 * (Excerpt: the argument of the final "events" sprintf is not
 * visible in this view.)
 */
static int stat_file_read_proc(char *page, char **start, off_t off,
			       int count, int *eof, void *data)
	char *out = (char *) page;
	ipmi_smi_t intf = data;

	out += sprintf(out, "sent_invalid_commands: %d\n",
		       intf->sent_invalid_commands);
	out += sprintf(out, "sent_local_commands: %d\n",
		       intf->sent_local_commands);
	out += sprintf(out, "handled_local_responses: %d\n",
		       intf->handled_local_responses);
	out += sprintf(out, "unhandled_local_responses: %d\n",
		       intf->unhandled_local_responses);
	out += sprintf(out, "sent_ipmb_commands: %d\n",
		       intf->sent_ipmb_commands);
	out += sprintf(out, "sent_ipmb_command_errs: %d\n",
		       intf->sent_ipmb_command_errs);
	out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
		       intf->retransmitted_ipmb_commands);
	out += sprintf(out, "timed_out_ipmb_commands: %d\n",
		       intf->timed_out_ipmb_commands);
	out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n",
		       intf->timed_out_ipmb_broadcasts);
	out += sprintf(out, "sent_ipmb_responses: %d\n",
		       intf->sent_ipmb_responses);
	out += sprintf(out, "handled_ipmb_responses: %d\n",
		       intf->handled_ipmb_responses);
	out += sprintf(out, "invalid_ipmb_responses: %d\n",
		       intf->invalid_ipmb_responses);
	out += sprintf(out, "unhandled_ipmb_responses: %d\n",
		       intf->unhandled_ipmb_responses);
	out += sprintf(out, "sent_lan_commands: %d\n",
		       intf->sent_lan_commands);
	out += sprintf(out, "sent_lan_command_errs: %d\n",
		       intf->sent_lan_command_errs);
	out += sprintf(out, "retransmitted_lan_commands: %d\n",
		       intf->retransmitted_lan_commands);
	out += sprintf(out, "timed_out_lan_commands: %d\n",
		       intf->timed_out_lan_commands);
	out += sprintf(out, "sent_lan_responses: %d\n",
		       intf->sent_lan_responses);
	out += sprintf(out, "handled_lan_responses: %d\n",
		       intf->handled_lan_responses);
	out += sprintf(out, "invalid_lan_responses: %d\n",
		       intf->invalid_lan_responses);
	out += sprintf(out, "unhandled_lan_responses: %d\n",
		       intf->unhandled_lan_responses);
	out += sprintf(out, "handled_commands: %d\n",
		       intf->handled_commands);
	out += sprintf(out, "invalid_commands: %d\n",
		       intf->invalid_commands);
	out += sprintf(out, "unhandled_commands: %d\n",
		       intf->unhandled_commands);
	out += sprintf(out, "invalid_events: %d\n",
		       intf->invalid_events);
	out += sprintf(out, "events: %d\n",
	/* Length written is the distance from the start of the page. */
	return (out - ((char *) page));
/*
 * Create a /proc file under the SMI's per-interface directory and
 * record it on smi->proc_entries so remove_proc_entries() can tear it
 * down later.  The entry name is duplicated because remove_proc_entry()
 * needs it after the caller's string may be gone.
 * (Excerpt: the kmalloc/create_proc_entry failure paths and the
 * final return are not visible in this view.)
 */
int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
			    read_proc_t *read_proc, write_proc_t *write_proc,
			    void *data, struct module *owner)
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *file;
	struct ipmi_proc_entry *entry;

	/* Create a list element. */
	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
	strcpy(entry->name, name);
	file = create_proc_entry(name, 0, smi->proc_dir);
	file->read_proc = read_proc;
	file->write_proc = write_proc;
	file->owner = owner;
	spin_lock(&smi->proc_entry_lock);
	/* Stick it on the list. */
	entry->next = smi->proc_entries;
	smi->proc_entries = entry;
	spin_unlock(&smi->proc_entry_lock);
#endif /* CONFIG_PROC_FS */
/*
 * Create the /proc/ipmi/<num> directory for an interface and populate
 * it with the "stats", "ipmb" and "version" read-only files.
 * (Excerpt: the rv checks between the three registrations and the
 * proc_mkdir failure path are not visible in this view.)
 */
static int add_proc_entries(ipmi_smi_t smi, int num)
#ifdef CONFIG_PROC_FS
	/* Directory is named after the interface number. */
	sprintf(smi->proc_dir_name, "%d", num);
	smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
	smi->proc_dir->owner = THIS_MODULE;
	rv = ipmi_smi_add_proc_entry(smi, "stats",
				     stat_file_read_proc, NULL,
	rv = ipmi_smi_add_proc_entry(smi, "ipmb",
				     ipmb_file_read_proc, NULL,
	rv = ipmi_smi_add_proc_entry(smi, "version",
				     version_file_read_proc, NULL,
#endif /* CONFIG_PROC_FS */
/*
 * Tear down everything add_proc_entries()/ipmi_smi_add_proc_entry()
 * created: unlink each recorded /proc file, then remove the
 * per-interface directory itself.
 * (Excerpt: the kfree of entry->name and entry is not visible in
 * this view.)
 */
static void remove_proc_entries(ipmi_smi_t smi)
#ifdef CONFIG_PROC_FS
	struct ipmi_proc_entry *entry;

	spin_lock(&smi->proc_entry_lock);
	/* Pop and remove entries until the list is empty. */
	while (smi->proc_entries) {
		entry = smi->proc_entries;
		smi->proc_entries = entry->next;
		remove_proc_entry(entry->name, smi->proc_dir);
	spin_unlock(&smi->proc_entry_lock);
	remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
#endif /* CONFIG_PROC_FS */
/*
 * Issue a Get Channel Info command for channel @chan to the local BMC
 * through the system interface, using the internal (NULL-user) request
 * path; channel_handler() consumes the response.
 * (Excerpt: the return-type line, si.lun/data setup, and most
 * i_ipmi_request() arguments are not visible in this view.)
 */
send_channel_info_cmd(ipmi_smi_t intf, int chan)
	struct kernel_ipmi_msg msg;
	unsigned char data[1];
	struct ipmi_system_interface_addr si;

	/* Target the BMC's own system interface. */
	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
	return i_ipmi_request(NULL,
			      (struct ipmi_addr *) &si,
			      intf->channels[0].address,
			      intf->channels[0].lun,
/*
 * Null-user handler that walks the BMC's channels at registration
 * time: each Get Channel Info response records the channel's medium
 * and protocol, then the next channel is queried.  Scanning ends
 * (and intf->waitq is woken) when curr_channel reaches
 * IPMI_MAX_CHANNELS or an unrecoverable error occurs.
 */
channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
	/* Only react to the Get Channel Info response we requested. */
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
		/* It's the one we want */
		if (msg->msg.data[0] != 0) {
			/* Got an error from the channel, just go on. */
			if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
				/* If the MC does not support this
				   command, that is legal. We just
				   assume it has one IPMB at channel
				intf->channels[0].medium
					= IPMI_CHANNEL_MEDIUM_IPMB;
				intf->channels[0].protocol
					= IPMI_CHANNEL_PROTOCOL_IPMB;
			/* End the scan and release the registration wait. */
			intf->curr_channel = IPMI_MAX_CHANNELS;
			wake_up(&intf->waitq);
		if (msg->msg.data_len < 4) {
			/* Message not big enough, just go on. */
		/* Record this channel's medium/protocol from the response. */
		chan = intf->curr_channel;
		intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
		intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
		/* Advance to the next channel, or finish the scan. */
		intf->curr_channel++;
		if (intf->curr_channel >= IPMI_MAX_CHANNELS)
			wake_up(&intf->waitq);
		rv = send_channel_info_cmd(intf, intf->curr_channel);
			/* Got an error somehow, just give up. */
			intf->curr_channel = IPMI_MAX_CHANNELS;
			wake_up(&intf->waitq);
			printk(KERN_WARNING PFX
			       "Error sending channel information: %d\n",
/*
 * Register a new lower-layer SMI with the message handler: allocate
 * and initialize the per-interface structure, reserve a slot in
 * ipmi_interfaces[], scan the channels (IPMI >= 1.5) or assume a
 * single IPMB channel at 0, create /proc entries, publish the
 * interface and notify watchers.  On failure the reserved slot and
 * proc entries are rolled back.
 * (Excerpt: several error-check/cleanup lines and the send_info
 * parameter line are not visible in this view.)
 */
int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
		      unsigned char version_major,
		      unsigned char version_minor,
		      unsigned char slave_addr,
		      ipmi_smi_t *new_intf)
	unsigned long flags;

	/* Make sure the driver is actually initialized, this handles
	   problems with initialization order. */
	rv = ipmi_init_msghandler();
	/* The init code doesn't return an error if it was turned
	   off, but it won't initialize.  Check that. */
	intf = kmalloc(sizeof(*intf), GFP_KERNEL);
	memset(intf, 0, sizeof(*intf));
	intf->intf_num = -1;
	kref_init(&intf->refcount);
	intf->version_major = version_major;
	intf->version_minor = version_minor;
	/* Default every channel to the BMC slave address, LUN 2. */
	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
		intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
		intf->channels[j].lun = 2;
	if (slave_addr != 0)
		intf->channels[0].address = slave_addr;
	INIT_LIST_HEAD(&intf->users);
	intf->handlers = handlers;
	intf->send_info = send_info;
	spin_lock_init(&intf->seq_lock);
	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
		intf->seq_table[j].inuse = 0;
		intf->seq_table[j].seqid = 0;
#ifdef CONFIG_PROC_FS
	spin_lock_init(&intf->proc_entry_lock);
	spin_lock_init(&intf->waiting_msgs_lock);
	INIT_LIST_HEAD(&intf->waiting_msgs);
	spin_lock_init(&intf->events_lock);
	INIT_LIST_HEAD(&intf->waiting_events);
	intf->waiting_events_count = 0;
	init_MUTEX(&intf->cmd_rcvrs_lock);
	INIT_LIST_HEAD(&intf->cmd_rcvrs);
	init_waitqueue_head(&intf->waitq);
	spin_lock_init(&intf->counter_lock);
	intf->proc_dir = NULL;
	/* Find a free slot and reserve it under the lock. */
	spin_lock_irqsave(&interfaces_lock, flags);
	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
		if (ipmi_interfaces[i] == NULL) {
			/* Reserve the entry till we are done. */
			ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
	spin_unlock_irqrestore(&interfaces_lock, flags);
	/* FIXME - this is an ugly kludge, this sets the intf for the
	   caller before sending any messages with it. */
	/* Get Channel Info exists only on IPMI 1.5 and later. */
	if ((version_major > 1)
	    || ((version_major == 1) && (version_minor >= 5)))
		/* Start scanning the channels to see what is
		intf->null_user_handler = channel_handler;
		intf->curr_channel = 0;
		rv = send_channel_info_cmd(intf, 0);
		/* Wait for the channel info to be read. */
		wait_event(intf->waitq,
			   intf->curr_channel >= IPMI_MAX_CHANNELS);
		/* Assume a single IPMB channel at zero. */
		intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
		intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
	rv = add_proc_entries(intf, i);
	/* Failure path: undo proc entries, drop ref, release slot. */
	remove_proc_entries(intf);
	kref_put(&intf->refcount, intf_free);
	if (i < MAX_IPMI_INTERFACES) {
		spin_lock_irqsave(&interfaces_lock, flags);
		ipmi_interfaces[i] = NULL;
		spin_unlock_irqrestore(&interfaces_lock, flags);
	/* Success: publish the interface and tell the watchers. */
	spin_lock_irqsave(&interfaces_lock, flags);
	ipmi_interfaces[i] = intf;
	spin_unlock_irqrestore(&interfaces_lock, flags);
	call_smi_watchers(i);
/*
 * Unregister a lower-layer SMI: mark its ipmi_interfaces[] slot
 * reserved (so the number isn't reused while tearing down), remove
 * /proc entries, notify watchers of the removal, free the slot, and
 * drop the registration reference.
 */
int ipmi_unregister_smi(ipmi_smi_t intf)
	struct ipmi_smi_watcher *w;
	unsigned long flags;

	spin_lock_irqsave(&interfaces_lock, flags);
	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
		if (ipmi_interfaces[i] == intf) {
			/* Set the interface number reserved until we
			ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
			intf->intf_num = -1;
	spin_unlock_irqrestore(&interfaces_lock,flags);
	/* Not found: loop ran to completion. */
	if (i == MAX_IPMI_INTERFACES)
	remove_proc_entries(intf);
	/* Call all the watcher interfaces to tell them that
	   an interface is gone. */
	down_read(&smi_watchers_sem);
	list_for_each_entry(w, &smi_watchers, link)
	up_read(&smi_watchers_sem);
	/* Allow the entry to be reused now. */
	spin_lock_irqsave(&interfaces_lock, flags);
	ipmi_interfaces[i] = NULL;
	spin_unlock_irqrestore(&interfaces_lock,flags);
	kref_put(&intf->refcount, intf_free);
/*
 * Handle a Get Message response that carries an IPMB reply from a
 * remote entity: rebuild the remote address from the raw bytes, match
 * it to a pending sequence-table entry, and deliver the reply to the
 * original requester.  Returns via the caller's requeue convention.
 */
static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
				   struct ipmi_smi_msg *msg)
	struct ipmi_ipmb_addr ipmb_addr;
	struct ipmi_recv_msg *recv_msg;
	unsigned long flags;

	/* This is 11, not 10, because the response must contain a
	 * completion code. */
	if (msg->rsp_size < 11) {
		/* Message not big enough, just ignore it. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_ipmb_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
	/* Reconstruct the responder's IPMB address from the raw bytes. */
	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
	ipmb_addr.slave_addr = msg->rsp[6];
	ipmb_addr.channel = msg->rsp[3] & 0x0f;
	ipmb_addr.lun = msg->rsp[7] & 3;
	/* It's a response from a remote entity.  Look up the sequence
	   number and handle the response. */
	if (intf_find_seq(intf,
			  (msg->rsp[4] >> 2) & (~1),
			  (struct ipmi_addr *) &(ipmb_addr),
		/* We were unable to find the sequence number,
		   so just nuke the message. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->unhandled_ipmb_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
	memcpy(recv_msg->msg_data,
	/* The other fields matched, so no need to set them, except
	   for netfn, which needs to be the response that was
	   returned, not the request value. */
	recv_msg->msg.netfn = msg->rsp[4] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 10;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	spin_lock_irqsave(&intf->counter_lock, flags);
	intf->handled_ipmb_responses++;
	spin_unlock_irqrestore(&intf->counter_lock, flags);
	deliver_response(recv_msg);
/*
 * Handle an incoming IPMB command addressed to us.  If a registered
 * command receiver matches the (netfn, cmd) pair, the command is
 * copied into a recv message and delivered to that user; otherwise an
 * "invalid command" IPMB response is built in place over the SMI
 * message and sent back through the interface.
 */
static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
				   struct ipmi_smi_msg *msg)
	struct cmd_rcvr *rcvr;
	unsigned char netfn;
	ipmi_user_t user = NULL;
	struct ipmi_ipmb_addr *ipmb_addr;
	struct ipmi_recv_msg *recv_msg;
	unsigned long flags;

	if (msg->rsp_size < 10) {
		/* Message not big enough, just ignore it. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
	netfn = msg->rsp[4] >> 2;
	/* Look up a registered receiver for this (netfn, cmd). */
	rcvr = find_cmd_rcvr(intf, netfn, cmd);
		kref_get(&user->refcount);
		/* We didn't find a user, deliver an error response. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->unhandled_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		/* Build an IPMB "invalid command" reply in place:
		   Send Message wrapper, then the IPMB response frame
		   with both header and data checksums. */
		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg->data[1] = IPMI_SEND_MSG_CMD;
		msg->data[2] = msg->rsp[3];
		msg->data[3] = msg->rsp[6];
		msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
		msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
		msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
		msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
		msg->data[8] = msg->rsp[8]; /* cmd */
		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
		msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
		msg->data_size = 11;
		/* NOTE(review): printk without a KERN_ level; likely
		   debug-only output — confirm against the #ifdef guard
		   (not visible in this excerpt). */
		printk("Invalid command:");
		for (m = 0; m < msg->data_size; m++)
			printk(" %2.2x", msg->data[m]);
		intf->handlers->sender(intf->send_info, msg, 0);
		rv = -1; /* We used the message, so return the value that
			    causes it to not be freed or queued. */
		/* Deliver the message to the user. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->handled_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		recv_msg = ipmi_alloc_recv_msg();
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			kref_put(&user->refcount, free_user);
			/* Extract the source address from the data. */
			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
			ipmb_addr->slave_addr = msg->rsp[6];
			ipmb_addr->lun = msg->rsp[7] & 3;
			ipmb_addr->channel = msg->rsp[3] & 0xf;
			/* Extract the rest of the message information
			   from the IPMB header.*/
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[7] >> 2;
			recv_msg->msg.netfn = msg->rsp[4] >> 2;
			recv_msg->msg.cmd = msg->rsp[8];
			recv_msg->msg.data = recv_msg->msg_data;
			/* We chop off 10, not 9 bytes because the checksum
			   at the end also needs to be removed. */
			recv_msg->msg.data_len = msg->rsp_size - 10;
			memcpy(recv_msg->msg_data,
			       msg->rsp_size - 10);
			deliver_response(recv_msg);
/*
 * Handle a Get Message response carrying a LAN-bridged reply: rebuild
 * the LAN source address (session handle, SWIDs, privilege) from the
 * raw bytes, match it to a pending sequence-table entry, and deliver
 * the reply to the original requester.
 */
static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
				  struct ipmi_smi_msg *msg)
	struct ipmi_lan_addr lan_addr;
	struct ipmi_recv_msg *recv_msg;
	unsigned long flags;

	/* This is 13, not 12, because the response must contain a
	 * completion code. */
	if (msg->rsp_size < 13) {
		/* Message not big enough, just ignore it. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_lan_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
	/* Reconstruct the responder's LAN address from the raw bytes. */
	lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
	lan_addr.session_handle = msg->rsp[4];
	lan_addr.remote_SWID = msg->rsp[8];
	lan_addr.local_SWID = msg->rsp[5];
	lan_addr.channel = msg->rsp[3] & 0x0f;
	lan_addr.privilege = msg->rsp[3] >> 4;
	lan_addr.lun = msg->rsp[9] & 3;
	/* It's a response from a remote entity.  Look up the sequence
	   number and handle the response. */
	if (intf_find_seq(intf,
			  (msg->rsp[6] >> 2) & (~1),
			  (struct ipmi_addr *) &(lan_addr),
		/* We were unable to find the sequence number,
		   so just nuke the message. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->unhandled_lan_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
	memcpy(recv_msg->msg_data,
	       msg->rsp_size - 11);
	/* The other fields matched, so no need to set them, except
	   for netfn, which needs to be the response that was
	   returned, not the request value. */
	recv_msg->msg.netfn = msg->rsp[6] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 12;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	spin_lock_irqsave(&intf->counter_lock, flags);
	intf->handled_lan_responses++;
	spin_unlock_irqrestore(&intf->counter_lock, flags);
	deliver_response(recv_msg);
/*
 * Handle an incoming LAN-bridged command addressed to us.  If a
 * registered command receiver matches (netfn, cmd), the command is
 * copied into a recv message and delivered to that user; unlike the
 * IPMB path, an unmatched LAN command is simply dropped (no error
 * response is generated).
 */
static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
				  struct ipmi_smi_msg *msg)
	struct cmd_rcvr *rcvr;
	unsigned char netfn;
	ipmi_user_t user = NULL;
	struct ipmi_lan_addr *lan_addr;
	struct ipmi_recv_msg *recv_msg;
	unsigned long flags;

	if (msg->rsp_size < 12) {
		/* Message not big enough, just ignore it. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
	netfn = msg->rsp[6] >> 2;
	/* Look up a registered receiver for this (netfn, cmd). */
	rcvr = find_cmd_rcvr(intf, netfn, cmd);
		kref_get(&user->refcount);
		/* We didn't find a user, just give up. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->unhandled_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		rv = 0; /* Don't do anything with these messages, just
			   allow them to be freed. */
		/* Deliver the message to the user. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->handled_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		recv_msg = ipmi_alloc_recv_msg();
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			kref_put(&user->refcount, free_user);
			/* Extract the source address from the data. */
			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
			lan_addr->session_handle = msg->rsp[4];
			lan_addr->remote_SWID = msg->rsp[8];
			lan_addr->local_SWID = msg->rsp[5];
			lan_addr->lun = msg->rsp[9] & 3;
			lan_addr->channel = msg->rsp[3] & 0xf;
			lan_addr->privilege = msg->rsp[3] >> 4;
			/* Extract the rest of the message information
			   from the IPMB header.*/
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[9] >> 2;
			recv_msg->msg.netfn = msg->rsp[6] >> 2;
			recv_msg->msg.cmd = msg->rsp[10];
			recv_msg->msg.data = recv_msg->msg_data;
			/* We chop off 12, not 11 bytes because the checksum
			   at the end also needs to be removed. */
			recv_msg->msg.data_len = msg->rsp_size - 12;
			memcpy(recv_msg->msg_data,
			       msg->rsp_size - 12);
			deliver_response(recv_msg);
2301 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
2302 struct ipmi_smi_msg *msg)
2304 struct ipmi_system_interface_addr *smi_addr;
2306 recv_msg->msgid = 0;
2307 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
2308 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2309 smi_addr->channel = IPMI_BMC_CHANNEL;
2310 smi_addr->lun = msg->rsp[0] & 3;
2311 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
2312 recv_msg->msg.netfn = msg->rsp[0] >> 2;
2313 recv_msg->msg.cmd = msg->rsp[1];
2314 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
2315 recv_msg->msg.data = recv_msg->msg_data;
2316 recv_msg->msg.data_len = msg->rsp_size - 3;
/*
 * Handle a Read Event Message Buffer response (an asynchronous
 * event).  One copy of the event is built and delivered to every
 * user with gets_events set; if no one wants it, it is parked on
 * intf->waiting_events (bounded by MAX_EVENTS_IN_QUEUE) for a future
 * subscriber.  Allocation failure requeues the SMI message.
 */
static int handle_read_event_rsp(ipmi_smi_t intf,
				 struct ipmi_smi_msg *msg)
	struct ipmi_recv_msg *recv_msg, *recv_msg2;
	struct list_head msgs;
	int deliver_count = 0;
	unsigned long flags;

	if (msg->rsp_size < 19) {
		/* Message is too small to be an IPMB event. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_events++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
	if (msg->rsp[2] != 0) {
		/* An error getting the event, just ignore it. */
	INIT_LIST_HEAD(&msgs);
	spin_lock_irqsave(&intf->events_lock, flags);
	spin_lock(&intf->counter_lock);
	spin_unlock(&intf->counter_lock);
	/* Allocate and fill in one message for every user that is getting
	list_for_each_entry_rcu(user, &intf->users, link) {
		if (! user->gets_events)
		recv_msg = ipmi_alloc_recv_msg();
			/* Allocation failed: unwind every copy built so far. */
			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
				list_del(&recv_msg->link);
				ipmi_free_recv_msg(recv_msg);
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
		copy_event_into_recv_msg(recv_msg, msg);
		recv_msg->user = user;
		kref_get(&user->refcount);
		list_add_tail(&(recv_msg->link), &msgs);
	if (deliver_count) {
		/* Now deliver all the messages. */
		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
			list_del(&recv_msg->link);
			deliver_response(recv_msg);
	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
		/* No one to receive the message, put it in queue if there's
		   not already too many things in the queue. */
		recv_msg = ipmi_alloc_recv_msg();
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
		copy_event_into_recv_msg(recv_msg, msg);
		list_add_tail(&(recv_msg->link), &(intf->waiting_events));
		/* There's too many things in the queue, discard this
		printk(KERN_WARNING PFX "Event queue full, discarding an"
		       " incoming event\n");
	spin_unlock_irqrestore(&(intf->events_lock), flags);
/*
 * Handle a response coming directly from the local BMC.  The owning
 * recv message was stashed in msg->user_data when the request was
 * sent; fill it in with the BMC's system-interface address and the
 * response bytes (netfn/cmd stripped) and deliver it, unless the
 * owning user has since gone away.
 */
static int handle_bmc_rsp(ipmi_smi_t intf,
			  struct ipmi_smi_msg *msg)
	struct ipmi_recv_msg *recv_msg;
	unsigned long flags;
	struct ipmi_user *user;

	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
	if (recv_msg == NULL)
		/* NOTE(review): "vender" should read "vendor" — this is a
		   runtime string, so fixing it is a code change. */
		printk(KERN_WARNING"IPMI message received with no owner. This\n"
		       "could be because of a malformed message, or\n"
		       "because of a hardware error.  Contact your\n"
		       "hardware vender for assistance\n");
	user = recv_msg->user;
	/* Make sure the user still exists. */
	if (user && !user->valid) {
		/* The user for the message went away, so give up. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->unhandled_local_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		ipmi_free_recv_msg(recv_msg);
		struct ipmi_system_interface_addr *smi_addr;

		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->handled_local_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
		recv_msg->msgid = msg->msgid;
		smi_addr = ((struct ipmi_system_interface_addr *)
		smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		smi_addr->channel = IPMI_BMC_CHANNEL;
		smi_addr->lun = msg->rsp[0] & 3;
		recv_msg->msg.netfn = msg->rsp[0] >> 2;
		recv_msg->msg.cmd = msg->rsp[1];
		/* Data starts after the netfn/cmd bytes. */
		memcpy(recv_msg->msg_data,
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = msg->rsp_size - 2;
		deliver_response(recv_msg);
/* Handle a new message.  Return 1 if the message should be requeued,
   0 if the message should be freed, or -1 if the message should not
   be freed or requeued. */
static int handle_new_recv_msg(ipmi_smi_t intf,
			       struct ipmi_smi_msg *msg)
	for (m = 0; m < msg->rsp_size; m++)
		printk(" %2.2x", msg->rsp[m]);
	/* Sanity-check the response against the request it answers;
	   on mismatch, synthesize an "unspecified error" response so
	   the requester still gets an answer. */
	if (msg->rsp_size < 2) {
		/* Message is too small to be correct. */
		/* NOTE(review): "to small" should read "too small a" —
		   runtime string, so fixing it is a code change. */
		printk(KERN_WARNING PFX "BMC returned to small a message"
		       " for netfn %x cmd %x, got %d bytes\n",
		       (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
		   || (msg->rsp[1] != msg->data[1])) /* Command */
		/* The response is not even marginally correct. */
		printk(KERN_WARNING PFX "BMC returned incorrect response,"
		       " expected netfn %x cmd %x, got netfn %x cmd %x\n",
		       (msg->data[0] >> 2) | 1, msg->data[1],
		       msg->rsp[0] >> 2, msg->rsp[1]);
		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
	/* Dispatch on what kind of response this is. */
	if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
	    && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data != NULL))
		/* It's a response to a response we sent.  For this we
		   deliver a send message response to the user. */
		struct ipmi_recv_msg *recv_msg = msg->user_data;

		if (msg->rsp_size < 2)
			/* Message is too small to be correct. */
		chan = msg->data[2] & 0x0f;
		if (chan >= IPMI_MAX_CHANNELS)
			/* Invalid channel number */
		/* Make sure the user still exists. */
		if (!recv_msg->user || !recv_msg->user->valid)
		/* Deliver just the completion code of the send. */
		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = 1;
		recv_msg->msg_data[0] = msg->rsp[2];
		deliver_response(recv_msg);
	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_GET_MSG_CMD))
		/* It's from the receive queue. */
		chan = msg->rsp[3] & 0xf;
		if (chan >= IPMI_MAX_CHANNELS) {
			/* Invalid channel number */
		/* Route by the channel's medium: IPMB frames and
		   LAN-bridged frames have different layouts. */
		switch (intf->channels[chan].medium) {
		case IPMI_CHANNEL_MEDIUM_IPMB:
			if (msg->rsp[4] & 0x04) {
				/* It's a response, so find the
				   requesting message and send it up. */
				requeue = handle_ipmb_get_msg_rsp(intf, msg);
				/* It's a command to the SMS from some other
				   entity.  Handle that. */
				requeue = handle_ipmb_get_msg_cmd(intf, msg);
		case IPMI_CHANNEL_MEDIUM_8023LAN:
		case IPMI_CHANNEL_MEDIUM_ASYNC:
			if (msg->rsp[6] & 0x04) {
				/* It's a response, so find the
				   requesting message and send it up. */
				requeue = handle_lan_get_msg_rsp(intf, msg);
				/* It's a command to the SMS from some other
				   entity.  Handle that. */
				requeue = handle_lan_get_msg_cmd(intf, msg);
			/* We don't handle the channel type, so just
			 * free the message. */
	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
		/* It's an asyncronous event. */
		requeue = handle_read_event_rsp(intf, msg);
		/* It's a response from the local BMC. */
		requeue = handle_bmc_rsp(intf, msg);
/* Handle a new message from the lower layer. */
void ipmi_smi_msg_received(ipmi_smi_t intf,
			   struct ipmi_smi_msg *msg)
	unsigned long flags;

	/* Intercept the local response to a Send Message command we
	   originated: it only tells us whether the send worked, so we
	   start (or error out) the retry timer rather than delivering
	   it as a message. */
	if ((msg->data_size >= 2)
	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data == NULL))
		/* This is the local response to a command send, start
		   the timer for these.  The user_data will not be
		   NULL if this is a response send, and we will let
		   response sends just go through. */
		/* Check for errors, if we get certain errors (ones
		   that mean basically we can try again later), we
		   ignore them and start the timer.  Otherwise we
		   report the error immediately. */
		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR))
			int chan = msg->rsp[3] & 0xf;

			/* Got an error sending the message, handle it. */
			spin_lock_irqsave(&intf->counter_lock, flags);
			if (chan >= IPMI_MAX_CHANNELS)
				; /* This shouldn't happen */
			else if ((intf->channels[chan].medium
				  == IPMI_CHANNEL_MEDIUM_8023LAN)
				 || (intf->channels[chan].medium
				     == IPMI_CHANNEL_MEDIUM_ASYNC))
				intf->sent_lan_command_errs++;
				intf->sent_ipmb_command_errs++;
			spin_unlock_irqrestore(&intf->counter_lock, flags);
			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
			/* The message was sent, start the timer. */
			intf_start_seq_timer(intf, msg->msgid);
		ipmi_free_smi_msg(msg);
	/* To preserve message order, if the list is not empty, we
	   tack this message onto the end of the list. */
	spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
	if (!list_empty(&intf->waiting_msgs)) {
		list_add_tail(&msg->link, &intf->waiting_msgs);
		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
	spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
	rv = handle_new_recv_msg(intf, msg);
	/* rv == 1: could not handle now; rv == 0: handled, free it;
	   rv < 0: handler consumed the message. */
	if (rv > 0) {
		/* Could not handle the message now, just add it to a
		   list to handle later. */
		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
		list_add_tail(&msg->link, &intf->waiting_msgs);
		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
	} else if (rv == 0) {
		ipmi_free_smi_msg(msg);
/*
 * ipmi_smi_watchdog_pretimeout - notify all registered users on this
 * interface that a watchdog pre-timeout occurred.  Users that did not
 * supply an ipmi_watchdog_pretimeout handler are skipped.
 * NOTE(review): list_for_each_entry_rcu implies this walk is under an
 * RCU read-side critical section — the lock/unlock lines are not
 * visible here; confirm against the full function.
 */
2669 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
2674 list_for_each_entry_rcu(user, &intf->users, link) {
2675 if (! user->handler->ipmi_watchdog_pretimeout)
2678 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
/*
 * handle_msg_timeout - turn a timed-out request into a synthetic error
 * response and deliver it to the requesting user.  The netfn response
 * bit is set and the single payload byte is the IPMI timeout
 * completion code, so the caller sees a normal (failed) response.
 */
2684 handle_msg_timeout(struct ipmi_recv_msg *msg)
2686 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2687 msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE;
2688 msg->msg.netfn |= 1; /* Convert to a response. */
2689 msg->msg.data_len = 1;
2690 msg->msg.data = msg->msg_data;
2691 deliver_response(msg);
/*
 * smi_from_recv_msg - build a fresh SMI message for retransmitting a
 * pending request, copying the payload from the saved recv_msg and
 * re-encoding the sequence number/id into msgid.  Returns NULL on
 * allocation failure (the caller simply relies on a later retry).
 */
2694 static struct ipmi_smi_msg *
2695 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
2696 unsigned char seq, long seqid)
2698 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
2700 /* If we can't allocate the message, then just return, we
2701 get 4 retries, so this should be ok. */
2704 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
2705 smi_msg->data_size = recv_msg->msg.data_len;
2706 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
/* Debug dump of the outgoing bytes (guarded by an elided debug
 * conditional in the full source — confirm before relying on it). */
2712 for (m = 0; m < smi_msg->data_size; m++)
2713 printk(" %2.2x", smi_msg->data[m]);
/*
 * check_msg_timeout - age one sequence-table entry by timeout_period.
 * If the entry expires with no retries left, its recv_msg is moved to
 * the caller's timeouts list (the caller delivers the timeout errors
 * after dropping locks).  If retries remain, the request is resent via
 * the SMI driver and the retry budget is decremented.
 *
 * Called with intf->seq_lock held via *flags; the lock is dropped
 * around the sender() call and re-acquired afterwards.
 */
2720 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
2721 struct list_head *timeouts, long timeout_period,
2722 int slot, unsigned long *flags)
2724 struct ipmi_recv_msg *msg;
2729 ent->timeout -= timeout_period;
2730 if (ent->timeout > 0)
2733 if (ent->retries_left == 0) {
2734 /* The message has used all its retries. */
2736 msg = ent->recv_msg;
2737 list_add_tail(&msg->link, timeouts);
/* Update timeout statistics by address type. */
2738 spin_lock(&intf->counter_lock);
2740 intf->timed_out_ipmb_broadcasts++;
2741 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
2742 intf->timed_out_lan_commands++;
2744 intf->timed_out_ipmb_commands++;
2745 spin_unlock(&intf->counter_lock);
2747 struct ipmi_smi_msg *smi_msg;
2748 /* More retries, send again. */
2750 /* Start with the max timer, set to normal
2751 timer after the message is sent. */
2752 ent->timeout = MAX_MSG_TIMEOUT;
2753 ent->retries_left--;
2754 spin_lock(&intf->counter_lock);
2755 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
2756 intf->retransmitted_lan_commands++;
2758 intf->retransmitted_ipmb_commands++;
2759 spin_unlock(&intf->counter_lock);
/* Rebuild the SMI message from the saved request; slot is the
 * sequence number re-encoded into the new msgid. */
2761 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
/* Must not hold seq_lock across the driver's sender() callback. */
2766 spin_unlock_irqrestore(&intf->seq_lock, *flags);
2767 /* Send the new message. We send with a zero
2768 * priority. It timed out, I doubt time is
2769 * that critical now, and high priority
2770 * messages are really only for messages to the
2771 * local MC, which don't get resent. */
2772 intf->handlers->sender(intf->send_info,
2774 spin_lock_irqsave(&intf->seq_lock, *flags);
/*
 * ipmi_timeout_handler - periodic work for every registered interface:
 * (1) drain as many queued waiting_msgs as can currently be handled
 * (stopping at the first failure to preserve ordering), then
 * (2) age every sequence-table entry by timeout_period and deliver a
 * timeout error for each expired request.
 *
 * Each interface is pinned with kref_get while interfaces_lock is
 * dropped to do the per-interface work, then released with kref_put.
 */
2778 static void ipmi_timeout_handler(long timeout_period)
2781 struct list_head timeouts;
2782 struct ipmi_recv_msg *msg, *msg2;
2783 struct ipmi_smi_msg *smi_msg, *smi_msg2;
2784 unsigned long flags;
2787 INIT_LIST_HEAD(&timeouts);
2789 spin_lock(&interfaces_lock);
2790 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2791 intf = ipmi_interfaces[i];
2792 if (IPMI_INVALID_INTERFACE(intf))
2794 kref_get(&intf->refcount);
2795 spin_unlock(&interfaces_lock);
2797 /* See if any waiting messages need to be processed. */
2798 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
2799 list_for_each_entry_safe(smi_msg, smi_msg2, &intf->waiting_msgs, link) {
2800 if (! handle_new_recv_msg(intf, smi_msg)) {
2801 list_del(&smi_msg->link);
2802 ipmi_free_smi_msg(smi_msg);
2804 /* To preserve message order, quit if we
2805 can't handle a message. */
2809 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
2811 /* Go through the seq table and find any messages that
2812 have timed out, putting them in the timeouts
2814 spin_lock_irqsave(&intf->seq_lock, flags);
2815 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++)
2816 check_msg_timeout(intf, &(intf->seq_table[j]),
2817 &timeouts, timeout_period, j,
2819 spin_unlock_irqrestore(&intf->seq_lock, flags);
/* Deliver the timeout errors outside of seq_lock. */
2821 list_for_each_entry_safe(msg, msg2, &timeouts, link)
2822 handle_msg_timeout(msg);
2824 kref_put(&intf->refcount, intf_free);
2825 spin_lock(&interfaces_lock);
2827 spin_unlock(&interfaces_lock);
/*
 * ipmi_request_event - ask every valid registered interface's driver
 * to fetch pending events from the BMC's event queue.  Called
 * periodically from the timer (see ipmi_timeout).
 */
2830 static void ipmi_request_event(void)
2835 spin_lock(&interfaces_lock);
2836 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2837 intf = ipmi_interfaces[i];
2838 if (IPMI_INVALID_INTERFACE(intf))
2841 intf->handlers->request_events(intf->send_info);
2843 spin_unlock(&interfaces_lock);
/* Periodic housekeeping timer and its tuning constants. */
2846 static struct timer_list ipmi_timer;
2848 /* Call every ~100 ms. */
2849 #define IPMI_TIMEOUT_TIME 100
2851 /* How many jiffies does it take to get to the timeout time. */
2852 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
2854 /* Request events from the queue every second (this is the number of
2855 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
2856 future, IPMI will add a way to know immediately if an event is in
2857 the queue and this silliness can go away. */
2858 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
/* Set non-zero at module cleanup to stop the timer re-arming itself. */
2860 static atomic_t stop_operation;
/* Countdown of timer ticks until the next event-queue poll. */
2861 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
/*
 * ipmi_timeout - timer callback run every IPMI_TIMEOUT_TIME ms.
 * Periodically polls the BMC event queue (once per second), runs the
 * per-interface timeout handling, and re-arms itself unless
 * stop_operation has been raised by module cleanup.
 */
2863 static void ipmi_timeout(unsigned long data)
2865 if (atomic_read(&stop_operation))
2869 if (ticks_to_req_ev == 0) {
2870 ipmi_request_event();
2871 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
2874 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
2876 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
/* Outstanding-message counters, used for the leak check at exit. */
2880 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
2881 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
2883 /* FIXME - convert these to slabs. */
/* Default done() handler for SMI messages from ipmi_alloc_smi_msg(). */
2884 static void free_smi_msg(struct ipmi_smi_msg *msg)
2886 atomic_dec(&smi_msg_inuse_count);
/*
 * ipmi_alloc_smi_msg - allocate an SMI message (GFP_ATOMIC, safe from
 * interrupt context), wiring done to free_smi_msg and tracking the
 * allocation in smi_msg_inuse_count.  Returns NULL on failure.
 */
2890 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
2892 struct ipmi_smi_msg *rv;
2893 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
2895 rv->done = free_smi_msg;
2896 rv->user_data = NULL;
2897 atomic_inc(&smi_msg_inuse_count);
/* Default done() handler for messages from ipmi_alloc_recv_msg(). */
2902 static void free_recv_msg(struct ipmi_recv_msg *msg)
2904 atomic_dec(&recv_msg_inuse_count);
/*
 * ipmi_alloc_recv_msg - allocate a receive message (GFP_ATOMIC),
 * wiring done to free_recv_msg and tracking the allocation in
 * recv_msg_inuse_count.  Returns NULL on failure.
 */
2908 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
2910 struct ipmi_recv_msg *rv;
2912 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
2914 rv->done = free_recv_msg;
2915 atomic_inc(&recv_msg_inuse_count);
/*
 * ipmi_free_recv_msg - release a receive message, dropping the
 * reference it holds on its owning user (if any) before invoking the
 * message's done() handler (elided here) to free the storage.
 */
2920 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
2923 kref_put(&msg->user->refcount, free_user);
2927 #ifdef CONFIG_IPMI_PANIC_EVENT
/* No-op done() handlers for the statically-allocated messages used in
 * send_panic_events() — nothing may be freed at panic time. */
2929 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
2933 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
2937 #ifdef CONFIG_IPMI_PANIC_STRING
/*
 * event_receiver_fetcher - null_user_handler callback that captures
 * the Get Event Receiver response: the receiver's slave address and
 * LUN are saved on the interface for send_panic_events() to use.
 */
2938 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2940 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2941 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
2942 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
2943 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
2945 /* A get event receiver command, save it. */
2946 intf->event_receiver = msg->msg.data[1];
2947 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
/*
 * device_id_fetcher - null_user_handler callback that captures the Get
 * Device ID response, recording whether the local MC is an SEL device
 * and/or an event generator (bits 2 and 5 of the support byte).
 */
2951 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2953 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2954 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2955 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
2956 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
2958 /* A get device id command, save if we are an event
2959 receiver or generator. */
2960 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
2961 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
/*
 * send_panic_events - called from the panic notifier to record the
 * crash via IPMI.  First an "OS Critical Stop" platform event is sent
 * on every registered interface; then (CONFIG_IPMI_PANIC_STRING) the
 * panic string is chopped into 11-byte chunks and written as OEM SEL
 * entries, addressed either to a discovered event receiver on IPMB or
 * to the local SEL device.  All messages are statically allocated and
 * use dummy done handlers — no allocation may happen during a panic,
 * and the interface is forced into run-to-completion (polled) mode.
 */
2966 static void send_panic_events(char *str)
2968 struct kernel_ipmi_msg msg;
2970 unsigned char data[16];
2972 struct ipmi_system_interface_addr *si;
2973 struct ipmi_addr addr;
2974 struct ipmi_smi_msg smi_msg;
2975 struct ipmi_recv_msg recv_msg;
2977 si = (struct ipmi_system_interface_addr *) &addr;
2978 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2979 si->channel = IPMI_BMC_CHANNEL;
2982 /* Fill in an event telling that we have failed. */
2983 msg.netfn = 0x04; /* Sensor or Event. */
2984 msg.cmd = 2; /* Platform event command. */
2987 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
2988 data[1] = 0x03; /* This is for IPMI 1.0. */
2989 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
2990 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
2991 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
2993 /* Put a few breadcrumbs in. Hopefully later we can add more things
2994 to make the panic events more useful. */
/* Static messages with no-op done handlers: nothing to free at
 * panic time. */
3001 smi_msg.done = dummy_smi_done_handler;
3002 recv_msg.done = dummy_recv_done_handler;
3004 /* For every registered interface, send the event. */
3005 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3006 intf = ipmi_interfaces[i];
3007 if (IPMI_INVALID_INTERFACE(intf))
3010 /* Send the event announcing the panic. */
3011 intf->handlers->set_run_to_completion(intf->send_info, 1);
3012 i_ipmi_request(NULL,
3021 intf->channels[0].address,
3022 intf->channels[0].lun,
3023 0, 1); /* Don't retry, and don't wait. */
3026 #ifdef CONFIG_IPMI_PANIC_STRING
3027 /* On every interface, dump a bunch of OEM event holding the
3032 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3034 struct ipmi_ipmb_addr *ipmb;
3037 intf = ipmi_interfaces[i];
3038 if (IPMI_INVALID_INTERFACE(intf))
3041 /* First job here is to figure out where to send the
3042 OEM events. There's no way in IPMI to send OEM
3043 events using an event send command, so we have to
3044 find the SEL to put them in and stick them in
3047 /* Get capabilities from the get device id. */
3048 intf->local_sel_device = 0;
3049 intf->local_event_generator = 0;
3050 intf->event_receiver = 0;
3052 /* Request the device info from the local MC. */
3053 msg.netfn = IPMI_NETFN_APP_REQUEST;
3054 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
/* device_id_fetcher fills in local_sel_device /
 * local_event_generator from the response. */
3057 intf->null_user_handler = device_id_fetcher;
3058 i_ipmi_request(NULL,
3067 intf->channels[0].address,
3068 intf->channels[0].lun,
3069 0, 1); /* Don't retry, and don't wait. */
3071 if (intf->local_event_generator) {
3072 /* Request the event receiver from the local MC. */
3073 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
3074 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
3077 intf->null_user_handler = event_receiver_fetcher;
3078 i_ipmi_request(NULL,
3087 intf->channels[0].address,
3088 intf->channels[0].lun,
3089 0, 1); /* no retry, and no wait. */
3091 intf->null_user_handler = NULL;
3093 /* Validate the event receiver. The low bit must not
3094 be 1 (it must be a valid IPMB address), it cannot
3095 be zero, and it must not be my address. */
3096 if (((intf->event_receiver & 1) == 0)
3097 && (intf->event_receiver != 0)
3098 && (intf->event_receiver != intf->channels[0].address))
3100 /* The event receiver is valid, send an IPMB
3102 ipmb = (struct ipmi_ipmb_addr *) &addr;
3103 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
3104 ipmb->channel = 0; /* FIXME - is this right? */
3105 ipmb->lun = intf->event_receiver_lun;
3106 ipmb->slave_addr = intf->event_receiver;
3107 } else if (intf->local_sel_device) {
3108 /* The event receiver was not valid (or was
3109 me), but I am an SEL device, just dump it
3111 si = (struct ipmi_system_interface_addr *) &addr;
3112 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3113 si->channel = IPMI_BMC_CHANNEL;
3116 continue; /* Nowhere to send the event. */
3119 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
3120 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
/* Walk the panic string in 11-byte chunks, one SEL record each. */
3126 int size = strlen(p);
3132 data[2] = 0xf0; /* OEM event without timestamp. */
3133 data[3] = intf->channels[0].address;
3134 data[4] = j++; /* sequence # */
3135 /* Always give 11 bytes, so strncpy will fill
3136 it with zeroes for me. */
3137 strncpy(data+5, p, 11);
3140 i_ipmi_request(NULL,
3149 intf->channels[0].address,
3150 intf->channels[0].lun,
3151 0, 1); /* no retry, and no wait. */
3154 #endif /* CONFIG_IPMI_PANIC_STRING */
3156 #endif /* CONFIG_IPMI_PANIC_EVENT */
/* Guards against running the panic path twice (elided check below). */
3158 static int has_paniced = 0;
3160 static int panic_event(struct notifier_block *this,
3161 unsigned long event,
/*
 * panic_event - panic notifier callback.  Forces every registered
 * interface into run-to-completion (polled) mode so messages can go
 * out with interrupts dead, then optionally logs the panic via
 * send_panic_events(); ptr is the panic string.
 */
3171 /* For every registered interface, set it to run to completion. */
3172 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3173 intf = ipmi_interfaces[i];
3174 if (IPMI_INVALID_INTERFACE(intf))
3177 intf->handlers->set_run_to_completion(intf->send_info, 1);
3180 #ifdef CONFIG_IPMI_PANIC_EVENT
3181 send_panic_events(ptr);
/* Registered on panic_notifier_list at init; high priority so IPMI
 * logging runs before most other panic handlers. */
3187 static struct notifier_block panic_block = {
3188 .notifier_call = panic_event,
3190 .priority = 200 /* priority: INT_MAX >= x >= 0 */
/*
 * ipmi_init_msghandler - one-time initialization of the message
 * handler: clears the interface table, creates the /proc/ipmi
 * directory, starts the periodic housekeeping timer, and registers
 * the panic notifier.  Returns 0 on success, negative errno on
 * failure (e.g. proc dir creation).
 */
3193 static int ipmi_init_msghandler(void)
3200 printk(KERN_INFO "ipmi message handler version "
3201 IPMI_DRIVER_VERSION "\n");
3203 for (i = 0; i < MAX_IPMI_INTERFACES; i++)
3204 ipmi_interfaces[i] = NULL;
3206 #ifdef CONFIG_PROC_FS
3207 proc_ipmi_root = proc_mkdir("ipmi", NULL);
3208 if (!proc_ipmi_root) {
3209 printk(KERN_ERR PFX "Unable to create IPMI proc dir");
3213 proc_ipmi_root->owner = THIS_MODULE;
3214 #endif /* CONFIG_PROC_FS */
/* Arm the ~100ms housekeeping timer (see ipmi_timeout). */
3216 init_timer(&ipmi_timer);
3217 ipmi_timer.data = 0;
3218 ipmi_timer.function = ipmi_timeout;
3219 ipmi_timer.expires = jiffies + IPMI_TIMEOUT_JIFFIES;
3220 add_timer(&ipmi_timer);
3222 notifier_chain_register(&panic_notifier_list, &panic_block);
/* Module entry point: just delegates to ipmi_init_msghandler(). */
3229 static __init int ipmi_init_msghandler_mod(void)
3231 ipmi_init_msghandler();
/*
 * cleanup_ipmi - module exit: unregister the panic notifier, stop the
 * housekeeping timer safely (raise stop_operation before
 * del_timer_sync so a concurrently-running callback will not re-arm
 * it), remove the /proc/ipmi directory, and warn about any leaked
 * message buffers via the inuse counters.
 */
3235 static __exit void cleanup_ipmi(void)
3242 notifier_chain_unregister(&panic_notifier_list, &panic_block);
3244 /* This can't be called if any interfaces exist, so no worry about
3245 shutting down the interfaces. */
3247 /* Tell the timer to stop, then wait for it to stop. This avoids
3248 problems with race conditions removing the timer here. */
3249 atomic_inc(&stop_operation);
3250 del_timer_sync(&ipmi_timer);
3252 #ifdef CONFIG_PROC_FS
3253 remove_proc_entry(proc_ipmi_root->name, &proc_root);
3254 #endif /* CONFIG_PROC_FS */
3258 /* Check for buffer leaks. */
3259 count = atomic_read(&smi_msg_inuse_count);
3261 printk(KERN_WARNING PFX "SMI message count %d at exit\n",
3263 count = atomic_read(&recv_msg_inuse_count);
3265 printk(KERN_WARNING PFX "recv message count %d at exit\n",
/* Module wiring and the public API exported to SMI drivers and IPMI
 * users (watchdog, poweroff, the /dev/ipmi device interface, etc.). */
3268 module_exit(cleanup_ipmi);
3270 module_init(ipmi_init_msghandler_mod);
3271 MODULE_LICENSE("GPL");
3272 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3273 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
3274 MODULE_VERSION(IPMI_DRIVER_VERSION);
3276 EXPORT_SYMBOL(ipmi_create_user);
3277 EXPORT_SYMBOL(ipmi_destroy_user);
3278 EXPORT_SYMBOL(ipmi_get_version);
3279 EXPORT_SYMBOL(ipmi_request_settime);
3280 EXPORT_SYMBOL(ipmi_request_supply_msgs);
3281 EXPORT_SYMBOL(ipmi_register_smi);
3282 EXPORT_SYMBOL(ipmi_unregister_smi);
3283 EXPORT_SYMBOL(ipmi_register_for_cmd);
3284 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
3285 EXPORT_SYMBOL(ipmi_smi_msg_received);
3286 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
3287 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
3288 EXPORT_SYMBOL(ipmi_addr_length);
3289 EXPORT_SYMBOL(ipmi_validate_addr);
3290 EXPORT_SYMBOL(ipmi_set_gets_events);
3291 EXPORT_SYMBOL(ipmi_smi_watcher_register);
3292 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
3293 EXPORT_SYMBOL(ipmi_set_my_address);
3294 EXPORT_SYMBOL(ipmi_get_my_address);
3295 EXPORT_SYMBOL(ipmi_set_my_LUN);
3296 EXPORT_SYMBOL(ipmi_get_my_LUN);
3297 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
3298 EXPORT_SYMBOL(proc_ipmi_root);
3299 EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
3300 EXPORT_SYMBOL(ipmi_free_recv_msg);