4 * Incoming and outgoing message routing for an IPMI interface.
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
10 * Copyright 2002 MontaVista Software Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 #include <linux/config.h>
35 #include <linux/module.h>
36 #include <linux/errno.h>
37 #include <asm/system.h>
38 #include <linux/sched.h>
39 #include <linux/poll.h>
40 #include <linux/spinlock.h>
41 #include <linux/slab.h>
42 #include <linux/ipmi.h>
43 #include <linux/ipmi_smi.h>
44 #include <linux/notifier.h>
45 #include <linux/init.h>
46 #include <linux/proc_fs.h>
47 #include <linux/rcupdate.h>
49 #define PFX "IPMI message handler: "
51 #define IPMI_DRIVER_VERSION "38.0"
53 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
54 static int ipmi_init_msghandler(void);
56 static int initialized = 0;
59 struct proc_dir_entry *proc_ipmi_root = NULL;
60 EXPORT_SYMBOL(proc_ipmi_root);
61 #endif /* CONFIG_PROC_FS */
63 #define MAX_EVENTS_IN_QUEUE 25
65 /* Don't let a message sit in a queue forever; always time it with at least
66 the max message timer. This is in milliseconds. */
67 #define MAX_MSG_TIMEOUT 60000
71 * The main "user" data structure.
75 struct list_head link;
77 /* Set to "0" when the user is destroyed. */
82 /* The upper layer that handles receive messages. */
83 struct ipmi_user_hndl *handler;
86 /* The interface this user is bound to. */
89 /* Does this user receive IPMI events? */
95 struct list_head link;
102 * This is used to form a linked list during mass deletion.
103 * Since this is in an RCU list, we cannot use the link above
104 * or change any data until the RCU period completes. So we
105 * use this next variable during mass deletion so we can have
106 * a list and don't have to wait and restart the search on
107 * every individual deletion of a command. */
108 struct cmd_rcvr *next;
113 unsigned int inuse : 1;
114 unsigned int broadcast : 1;
116 unsigned long timeout;
117 unsigned long orig_timeout;
118 unsigned int retries_left;
120 /* To verify on an incoming send message response that this is
121 the message that the response is for, we keep a sequence id
122 and increment it every time we send a message. */
125 /* This is held so we can properly respond to the message on a
126 timeout, and it is used to hold the temporary data for
127 retransmission, too. */
128 struct ipmi_recv_msg *recv_msg;
131 /* Store the information in a msgid (long) to allow us to find a
132 sequence table entry from the msgid. */
133 #define STORE_SEQ_IN_MSGID(seq, seqid) ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
135 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
137 seq = (((msgid) >> 26) & 0x3f); \
138 seqid = ((msgid) & 0x3ffffff); \
141 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
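/*
 * For illustration: with seq = 5 and seqid = 0x123, STORE_SEQ_IN_MSGID()
 * yields (5 << 26) | 0x123, and GET_SEQ_FROM_MSGID() on that msgid
 * recovers seq = 5 and seqid = 0x123.  seq needs only 6 bits
 * (IPMI_IPMB_NUM_SEQ is 64); seqid gets the remaining 26 bits.
 */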
145 unsigned char medium;
146 unsigned char protocol;
148 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
149 but may be changed by the user. */
150 unsigned char address;
152 /* My LUN. This should generally stay the SMS LUN, but just in case... */
157 #ifdef CONFIG_PROC_FS
158 struct ipmi_proc_entry
161 struct ipmi_proc_entry *next;
165 #define IPMI_IPMB_NUM_SEQ 64
166 #define IPMI_MAX_CHANNELS 16
169 /* What interface number are we? */
172 struct kref refcount;
174 /* The list of upper layers that are using me. seq_lock protects this. */
176 struct list_head users;
178 /* Used for wake ups at startup. */
179 wait_queue_head_t waitq;
181 /* The IPMI version of the BMC on the other end. */
182 unsigned char version_major;
183 unsigned char version_minor;
185 /* This is the lower-layer's sender routine. */
186 struct ipmi_smi_handlers *handlers;
189 #ifdef CONFIG_PROC_FS
190 /* A list of proc entries for this interface, protected by
191 proc_entry_lock. */
193 spinlock_t proc_entry_lock;
194 struct ipmi_proc_entry *proc_entries;
197 /* A table of sequence numbers for this interface. We use the
198 sequence numbers for IPMB messages that go out of the
199 interface to match them up with their responses. A routine
200 is called periodically to time the items in this list. */
202 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
205 /* Messages that were delayed for some reason (out of memory,
206 for instance), will go in here to be processed later in a
207 periodic timer interrupt. */
208 spinlock_t waiting_msgs_lock;
209 struct list_head waiting_msgs;
211 /* The list of command receivers that are registered for commands
212 on this interface. */
213 struct semaphore cmd_rcvrs_lock;
214 struct list_head cmd_rcvrs;
216 /* Events that were queued because no one was there to receive them. */
218 spinlock_t events_lock; /* For dealing with event stuff. */
219 struct list_head waiting_events;
220 unsigned int waiting_events_count; /* How many events in queue? */
222 /* The event receiver for my BMC, only really used at panic
223 shutdown as a place to store this. */
224 unsigned char event_receiver;
225 unsigned char event_receiver_lun;
226 unsigned char local_sel_device;
227 unsigned char local_event_generator;
229 /* A cheap hack, if this is non-null and a message to an
230 interface comes in with a NULL user, call this routine with
231 it. Note that the message will still be freed by the
232 caller. This only works on the system interface. */
233 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
235 /* When we are scanning the channels for an SMI, this will
236 tell which channel we are scanning. */
239 /* Channel information */
240 struct ipmi_channel channels[IPMI_MAX_CHANNELS];
243 struct proc_dir_entry *proc_dir;
244 char proc_dir_name[10];
246 spinlock_t counter_lock; /* For making counters atomic. */
248 /* Commands we were asked to send that were invalid. */
249 unsigned int sent_invalid_commands;
251 /* Commands we sent to the MC. */
252 unsigned int sent_local_commands;
253 /* Responses from the MC that were delivered to a user. */
254 unsigned int handled_local_responses;
255 /* Responses from the MC that were not delivered to a user. */
256 unsigned int unhandled_local_responses;
258 /* Commands we sent out to the IPMB bus. */
259 unsigned int sent_ipmb_commands;
260 /* Commands sent on the IPMB that had errors on the SEND CMD */
261 unsigned int sent_ipmb_command_errs;
262 /* Each retransmit increments this count. */
263 unsigned int retransmitted_ipmb_commands;
264 /* When a message times out (runs out of retransmits) this is incremented. */
266 unsigned int timed_out_ipmb_commands;
268 /* This is like above, but for broadcasts. Broadcasts are
269 *not* included in the above count (they are expected to time out). */
271 unsigned int timed_out_ipmb_broadcasts;
273 /* Responses I have sent to the IPMB bus. */
274 unsigned int sent_ipmb_responses;
276 /* The response was delivered to the user. */
277 unsigned int handled_ipmb_responses;
278 /* The response had invalid data in it. */
279 unsigned int invalid_ipmb_responses;
280 /* The response didn't have anyone waiting for it. */
281 unsigned int unhandled_ipmb_responses;
283 /* Commands we sent out on a LAN channel. */
284 unsigned int sent_lan_commands;
285 /* Commands sent on a LAN channel that had errors on the SEND CMD */
286 unsigned int sent_lan_command_errs;
287 /* Each retransmit increments this count. */
288 unsigned int retransmitted_lan_commands;
289 /* When a message times out (runs out of retransmits) this is incremented. */
291 unsigned int timed_out_lan_commands;
293 /* Responses I have sent out on a LAN channel. */
294 unsigned int sent_lan_responses;
296 /* The response was delivered to the user. */
297 unsigned int handled_lan_responses;
298 /* The response had invalid data in it. */
299 unsigned int invalid_lan_responses;
300 /* The response didn't have anyone waiting for it. */
301 unsigned int unhandled_lan_responses;
303 /* The command was delivered to the user. */
304 unsigned int handled_commands;
305 /* The command had invalid data in it. */
306 unsigned int invalid_commands;
307 /* The command didn't have anyone waiting for it. */
308 unsigned int unhandled_commands;
310 /* Invalid data in an event. */
311 unsigned int invalid_events;
312 /* Events that were received with the proper format. */
316 /* Used to mark an interface entry that cannot be used but is not a
317 * free entry, either, primarily used at creation and deletion time so
318 * a slot doesn't get reused too quickly. */
319 #define IPMI_INVALID_INTERFACE_ENTRY ((ipmi_smi_t) ((long) 1))
320 #define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \
321 || (i == IPMI_INVALID_INTERFACE_ENTRY))
323 #define MAX_IPMI_INTERFACES 4
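/*
 * The interfaces themselves, indexed by interface number.  A slot is
 * NULL when free, IPMI_INVALID_INTERFACE_ENTRY while an interface is
 * being created or torn down, and a real pointer once the interface
 * is usable.
 */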
324 static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
326 /* Directly protects the ipmi_interfaces data structure. */
327 static DEFINE_SPINLOCK(interfaces_lock);
329 /* List of watchers that want to know when smi's are added and deleted. */
331 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
332 static DECLARE_RWSEM(smi_watchers_sem);
335 static void free_recv_msg_list(struct list_head *q)
337 struct ipmi_recv_msg *msg, *msg2;
339 list_for_each_entry_safe(msg, msg2, q, link) {
340 list_del(&msg->link);
341 ipmi_free_recv_msg(msg);
345 static void clean_up_interface_data(ipmi_smi_t intf)
348 struct cmd_rcvr *rcvr, *rcvr2;
349 struct list_head list;
351 free_recv_msg_list(&intf->waiting_msgs);
352 free_recv_msg_list(&intf->waiting_events);
354 /* Wholesale remove all the entries from the list in the
355 * interface and wait for RCU to know that none are in use. */
356 down(&intf->cmd_rcvrs_lock);
357 list_add_rcu(&list, &intf->cmd_rcvrs);
358 list_del_rcu(&intf->cmd_rcvrs);
359 up(&intf->cmd_rcvrs_lock);
362 list_for_each_entry_safe(rcvr, rcvr2, &list, link)
365 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
366 if ((intf->seq_table[i].inuse)
367 && (intf->seq_table[i].recv_msg))
369 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
374 static void intf_free(struct kref *ref)
376 ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
378 clean_up_interface_data(intf);
382 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
387 down_write(&smi_watchers_sem);
388 list_add(&(watcher->link), &smi_watchers);
389 up_write(&smi_watchers_sem);
390 spin_lock_irqsave(&interfaces_lock, flags);
391 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
392 ipmi_smi_t intf = ipmi_interfaces[i];
393 if (IPMI_INVALID_INTERFACE(intf))
395 spin_unlock_irqrestore(&interfaces_lock, flags);
397 spin_lock_irqsave(&interfaces_lock, flags);
399 spin_unlock_irqrestore(&interfaces_lock, flags);
403 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
405 down_write(&smi_watchers_sem);
406 list_del(&(watcher->link));
407 up_write(&smi_watchers_sem);
412 call_smi_watchers(int i)
414 struct ipmi_smi_watcher *w;
416 down_read(&smi_watchers_sem);
417 list_for_each_entry(w, &smi_watchers, link) {
418 if (try_module_get(w->owner)) {
420 module_put(w->owner);
423 up_read(&smi_watchers_sem);
427 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
429 if (addr1->addr_type != addr2->addr_type)
432 if (addr1->channel != addr2->channel)
435 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
436 struct ipmi_system_interface_addr *smi_addr1
437 = (struct ipmi_system_interface_addr *) addr1;
438 struct ipmi_system_interface_addr *smi_addr2
439 = (struct ipmi_system_interface_addr *) addr2;
440 return (smi_addr1->lun == smi_addr2->lun);
443 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
444 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
446 struct ipmi_ipmb_addr *ipmb_addr1
447 = (struct ipmi_ipmb_addr *) addr1;
448 struct ipmi_ipmb_addr *ipmb_addr2
449 = (struct ipmi_ipmb_addr *) addr2;
451 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
452 && (ipmb_addr1->lun == ipmb_addr2->lun));
455 if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
456 struct ipmi_lan_addr *lan_addr1
457 = (struct ipmi_lan_addr *) addr1;
458 struct ipmi_lan_addr *lan_addr2
459 = (struct ipmi_lan_addr *) addr2;
461 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
462 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
463 && (lan_addr1->session_handle
464 == lan_addr2->session_handle)
465 && (lan_addr1->lun == lan_addr2->lun));
471 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
473 if (len < sizeof(struct ipmi_system_interface_addr)) {
477 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
478 if (addr->channel != IPMI_BMC_CHANNEL)
483 if ((addr->channel == IPMI_BMC_CHANNEL)
484 || (addr->channel >= IPMI_NUM_CHANNELS)
485 || (addr->channel < 0))
488 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
489 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
491 if (len < sizeof(struct ipmi_ipmb_addr)) {
497 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
498 if (len < sizeof(struct ipmi_lan_addr)) {
507 unsigned int ipmi_addr_length(int addr_type)
509 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
510 return sizeof(struct ipmi_system_interface_addr);
512 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
513 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
515 return sizeof(struct ipmi_ipmb_addr);
518 if (addr_type == IPMI_LAN_ADDR_TYPE)
519 return sizeof(struct ipmi_lan_addr);
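/*
 * Hand a finished receive message to its destination.  Messages with
 * no user first go to the interface's null_user_handler (used while
 * scanning channels at startup) and are then freed here; everything
 * else is passed to the owning user's ipmi_recv_hndl callback.
 */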
524 static void deliver_response(struct ipmi_recv_msg *msg)
527 ipmi_smi_t intf = msg->user_msg_data;
530 /* Special handling for NULL users. */
531 if (intf->null_user_handler) {
532 intf->null_user_handler(intf, msg);
533 spin_lock_irqsave(&intf->counter_lock, flags);
534 intf->handled_local_responses++;
535 spin_unlock_irqrestore(&intf->counter_lock, flags);
537 /* No handler, so give up. */
538 spin_lock_irqsave(&intf->counter_lock, flags);
539 intf->unhandled_local_responses++;
540 spin_unlock_irqrestore(&intf->counter_lock, flags);
542 ipmi_free_recv_msg(msg);
544 ipmi_user_t user = msg->user;
545 user->handler->ipmi_recv_hndl(msg, user->handler_data);
549 /* Find the next sequence number not being used and add the given
550 message with the given timeout to the sequence table. This must be
551 called with the interface's seq_lock held. */
552 static int intf_next_seq(ipmi_smi_t intf,
553 struct ipmi_recv_msg *recv_msg,
554 unsigned long timeout,
563 for (i = intf->curr_seq;
564 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
565 i = (i+1)%IPMI_IPMB_NUM_SEQ)
567 if (! intf->seq_table[i].inuse)
571 if (! intf->seq_table[i].inuse) {
572 intf->seq_table[i].recv_msg = recv_msg;
574 /* Start with the maximum timeout; when the send response
575 comes in we will start the real timer. */
576 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
577 intf->seq_table[i].orig_timeout = timeout;
578 intf->seq_table[i].retries_left = retries;
579 intf->seq_table[i].broadcast = broadcast;
580 intf->seq_table[i].inuse = 1;
581 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
583 *seqid = intf->seq_table[i].seqid;
584 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
592 /* Return the receive message for the given sequence number and
593 release the sequence number so it can be reused. Some other data
594 is passed in to be sure the message matches up correctly (to help
595 guard against messages coming in after their timeout and the
596 sequence number being reused). */
597 static int intf_find_seq(ipmi_smi_t intf,
602 struct ipmi_addr *addr,
603 struct ipmi_recv_msg **recv_msg)
608 if (seq >= IPMI_IPMB_NUM_SEQ)
611 spin_lock_irqsave(&(intf->seq_lock), flags);
612 if (intf->seq_table[seq].inuse) {
613 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
615 if ((msg->addr.channel == channel)
616 && (msg->msg.cmd == cmd)
617 && (msg->msg.netfn == netfn)
618 && (ipmi_addr_equal(addr, &(msg->addr))))
621 intf->seq_table[seq].inuse = 0;
625 spin_unlock_irqrestore(&(intf->seq_lock), flags);
631 /* Start the timer for a specific sequence table entry. */
632 static int intf_start_seq_timer(ipmi_smi_t intf,
641 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
643 spin_lock_irqsave(&(intf->seq_lock), flags);
644 /* We do this verification because the user can be deleted
645 while a message is outstanding. */
646 if ((intf->seq_table[seq].inuse)
647 && (intf->seq_table[seq].seqid == seqid))
649 struct seq_table *ent = &(intf->seq_table[seq]);
650 ent->timeout = ent->orig_timeout;
653 spin_unlock_irqrestore(&(intf->seq_lock), flags);
658 /* Got an error for the send message for a specific sequence number. */
659 static int intf_err_seq(ipmi_smi_t intf,
667 struct ipmi_recv_msg *msg = NULL;
670 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
672 spin_lock_irqsave(&(intf->seq_lock), flags);
673 /* We do this verification because the user can be deleted
674 while a message is outstanding. */
675 if ((intf->seq_table[seq].inuse)
676 && (intf->seq_table[seq].seqid == seqid))
678 struct seq_table *ent = &(intf->seq_table[seq]);
684 spin_unlock_irqrestore(&(intf->seq_lock), flags);
687 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
688 msg->msg_data[0] = err;
689 msg->msg.netfn |= 1; /* Convert to a response. */
690 msg->msg.data_len = 1;
691 msg->msg.data = msg->msg_data;
692 deliver_response(msg);
699 int ipmi_create_user(unsigned int if_num,
700 struct ipmi_user_hndl *handler,
705 ipmi_user_t new_user;
709 /* There is no module usecount here, because it's not
710 required. Since this can only be used by and called from
711 other modules, they will implicitly use this module, and
712 thus this can't be removed unless the other modules are removed, too. */
718 /* Make sure the driver is actually initialized; this handles
719 problems with initialization order. */
721 rv = ipmi_init_msghandler();
725 /* The init code doesn't return an error if it was turned
726 off, but it won't initialize. Check that. */
731 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
735 spin_lock_irqsave(&interfaces_lock, flags);
736 intf = ipmi_interfaces[if_num];
737 if ((if_num >= MAX_IPMI_INTERFACES) || IPMI_INVALID_INTERFACE(intf)) {
738 spin_unlock_irqrestore(&interfaces_lock, flags);
742 /* Note that each existing user holds a refcount to the interface. */
743 kref_get(&intf->refcount);
744 spin_unlock_irqrestore(&interfaces_lock, flags);
746 kref_init(&new_user->refcount);
747 new_user->handler = handler;
748 new_user->handler_data = handler_data;
749 new_user->intf = intf;
750 new_user->gets_events = 0;
752 if (!try_module_get(intf->handlers->owner)) {
757 if (intf->handlers->inc_usecount) {
758 rv = intf->handlers->inc_usecount(intf->send_info);
760 module_put(intf->handlers->owner);
766 spin_lock_irqsave(&intf->seq_lock, flags);
767 list_add_rcu(&new_user->link, &intf->users);
768 spin_unlock_irqrestore(&intf->seq_lock, flags);
774 kref_put(&intf->refcount, intf_free);
778 static void free_user(struct kref *ref)
780 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
784 int ipmi_destroy_user(ipmi_user_t user)
787 ipmi_smi_t intf = user->intf;
790 struct cmd_rcvr *rcvr;
791 struct cmd_rcvr *rcvrs = NULL;
795 /* Remove the user from the interface's sequence table. */
796 spin_lock_irqsave(&intf->seq_lock, flags);
797 list_del_rcu(&user->link);
799 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
800 if (intf->seq_table[i].inuse
801 && (intf->seq_table[i].recv_msg->user == user))
803 intf->seq_table[i].inuse = 0;
806 spin_unlock_irqrestore(&intf->seq_lock, flags);
809 * Remove the user from the command receiver's table. First
810 * we build a list of everything (not using the standard link,
811 * since other things may be using it till we do
812 * synchronize_rcu()) then free everything in that list.
814 down(&intf->cmd_rcvrs_lock);
815 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
816 if (rcvr->user == user) {
817 list_del_rcu(&rcvr->link);
822 up(&intf->cmd_rcvrs_lock);
830 module_put(intf->handlers->owner);
831 if (intf->handlers->dec_usecount)
832 intf->handlers->dec_usecount(intf->send_info);
834 kref_put(&intf->refcount, intf_free);
836 kref_put(&user->refcount, free_user);
841 void ipmi_get_version(ipmi_user_t user,
842 unsigned char *major,
843 unsigned char *minor)
845 *major = user->intf->version_major;
846 *minor = user->intf->version_minor;
849 int ipmi_set_my_address(ipmi_user_t user,
850 unsigned int channel,
851 unsigned char address)
853 if (channel >= IPMI_MAX_CHANNELS)
855 user->intf->channels[channel].address = address;
859 int ipmi_get_my_address(ipmi_user_t user,
860 unsigned int channel,
861 unsigned char *address)
863 if (channel >= IPMI_MAX_CHANNELS)
865 *address = user->intf->channels[channel].address;
869 int ipmi_set_my_LUN(ipmi_user_t user,
870 unsigned int channel,
873 if (channel >= IPMI_MAX_CHANNELS)
875 user->intf->channels[channel].lun = LUN & 0x3;
879 int ipmi_get_my_LUN(ipmi_user_t user,
880 unsigned int channel,
881 unsigned char *address)
883 if (channel >= IPMI_MAX_CHANNELS)
885 *address = user->intf->channels[channel].lun;
889 int ipmi_set_gets_events(ipmi_user_t user, int val)
892 ipmi_smi_t intf = user->intf;
893 struct ipmi_recv_msg *msg, *msg2;
894 struct list_head msgs;
896 INIT_LIST_HEAD(&msgs);
898 spin_lock_irqsave(&intf->events_lock, flags);
899 user->gets_events = val;
902 /* Deliver any queued events. */
903 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) {
904 list_del(&msg->link);
905 list_add_tail(&msg->link, &msgs);
909 /* Hold the events lock while doing this to preserve order. */
910 list_for_each_entry_safe(msg, msg2, &msgs, link) {
912 kref_get(&user->refcount);
913 deliver_response(msg);
916 spin_unlock_irqrestore(&intf->events_lock, flags);
921 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
925 struct cmd_rcvr *rcvr;
927 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
928 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd))
934 int ipmi_register_for_cmd(ipmi_user_t user,
938 ipmi_smi_t intf = user->intf;
939 struct cmd_rcvr *rcvr;
940 struct cmd_rcvr *entry;
944 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
951 down(&intf->cmd_rcvrs_lock);
952 /* Make sure the command/netfn is not already registered. */
953 entry = find_cmd_rcvr(intf, netfn, cmd);
959 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
962 up(&intf->cmd_rcvrs_lock);
969 int ipmi_unregister_for_cmd(ipmi_user_t user,
973 ipmi_smi_t intf = user->intf;
974 struct cmd_rcvr *rcvr;
976 down(&intf->cmd_rcvrs_lock);
977 /* Find the command/netfn and make sure it belongs to this user. */
978 rcvr = find_cmd_rcvr(intf, netfn, cmd);
979 if ((rcvr) && (rcvr->user == user)) {
980 list_del_rcu(&rcvr->link);
981 up(&intf->cmd_rcvrs_lock);
986 up(&intf->cmd_rcvrs_lock);
991 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
993 ipmi_smi_t intf = user->intf;
994 intf->handlers->set_run_to_completion(intf->send_info, val);
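/*
 * IPMB uses a simple 2's-complement checksum: the covered bytes plus
 * the checksum byte must sum to zero modulo 256.  For example, for
 * the two bytes 0x20 0x18 the checksum is (unsigned char)-(0x20 + 0x18),
 * which is 0xc8.
 */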
998 ipmb_checksum(unsigned char *data, int size)
1000 unsigned char csum = 0;
1002 for (; size > 0; size--, data++)
1008 static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
1009 struct kernel_ipmi_msg *msg,
1010 struct ipmi_ipmb_addr *ipmb_addr,
1012 unsigned char ipmb_seq,
1014 unsigned char source_address,
1015 unsigned char source_lun)
1019 /* Format the IPMB header data. */
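/* The wrapped request ends up laid out roughly as:
   [netfn/LUN][Send Msg cmd][channel][0 if broadcast][rsSA][netFn/rsLUN]
   [checksum1][rqSA][rqSeq/rqLUN][cmd][data...][checksum2],
   where i is 1 for broadcasts and 0 otherwise. */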
1020 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1021 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1022 smi_msg->data[2] = ipmb_addr->channel;
1024 smi_msg->data[3] = 0;
1025 smi_msg->data[i+3] = ipmb_addr->slave_addr;
1026 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1027 smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
1028 smi_msg->data[i+6] = source_address;
1029 smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1030 smi_msg->data[i+8] = msg->cmd;
1032 /* Now tack on the data to the message. */
1033 if (msg->data_len > 0)
1034 memcpy(&(smi_msg->data[i+9]), msg->data,
1036 smi_msg->data_size = msg->data_len + 9;
1038 /* Now calculate the checksum and tack it on. */
1039 smi_msg->data[i+smi_msg->data_size]
1040 = ipmb_checksum(&(smi_msg->data[i+6]),
1041 smi_msg->data_size-6);
1043 /* Add on the checksum size and the offset from the broadcast. */
1045 smi_msg->data_size += 1 + i;
1047 smi_msg->msgid = msgid;
1050 static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1051 struct kernel_ipmi_msg *msg,
1052 struct ipmi_lan_addr *lan_addr,
1054 unsigned char ipmb_seq,
1055 unsigned char source_lun)
1057 /* Format the LAN header data. */
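/* The wrapped request ends up laid out roughly as:
   [netfn/LUN][Send Msg cmd][channel][session handle][remote SWID]
   [netFn/LUN][checksum1][local SWID][seq/LUN][cmd][data...][checksum2]. */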
1058 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1059 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1060 smi_msg->data[2] = lan_addr->channel;
1061 smi_msg->data[3] = lan_addr->session_handle;
1062 smi_msg->data[4] = lan_addr->remote_SWID;
1063 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1064 smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
1065 smi_msg->data[7] = lan_addr->local_SWID;
1066 smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1067 smi_msg->data[9] = msg->cmd;
1069 /* Now tack on the data to the message. */
1070 if (msg->data_len > 0)
1071 memcpy(&(smi_msg->data[10]), msg->data,
1073 smi_msg->data_size = msg->data_len + 10;
1075 /* Now calculate the checksum and tack it on. */
1076 smi_msg->data[smi_msg->data_size]
1077 = ipmb_checksum(&(smi_msg->data[7]),
1078 smi_msg->data_size-7);
1080 /* Add on the checksum size. */
1082 smi_msg->data_size += 1;
1084 smi_msg->msgid = msgid;
1087 /* Separate from ipmi_request so that the user does not have to be
1088 supplied in certain circumstances (mainly at panic time). If
1089 messages are supplied, they will be freed, even if an error occurs. */
1091 static int i_ipmi_request(ipmi_user_t user,
1093 struct ipmi_addr *addr,
1095 struct kernel_ipmi_msg *msg,
1096 void *user_msg_data,
1098 struct ipmi_recv_msg *supplied_recv,
1100 unsigned char source_address,
1101 unsigned char source_lun,
1103 unsigned int retry_time_ms)
1106 struct ipmi_smi_msg *smi_msg;
1107 struct ipmi_recv_msg *recv_msg;
1108 unsigned long flags;
1111 if (supplied_recv) {
1112 recv_msg = supplied_recv;
1114 recv_msg = ipmi_alloc_recv_msg();
1115 if (recv_msg == NULL) {
1119 recv_msg->user_msg_data = user_msg_data;
1122 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1124 smi_msg = ipmi_alloc_smi_msg();
1125 if (smi_msg == NULL) {
1126 ipmi_free_recv_msg(recv_msg);
1131 recv_msg->user = user;
1133 kref_get(&user->refcount);
1134 recv_msg->msgid = msgid;
1135 /* Store the message to send in the receive message so timeout
1136 responses can get the proper response data. */
1137 recv_msg->msg = *msg;
1139 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1140 struct ipmi_system_interface_addr *smi_addr;
1142 if (msg->netfn & 1) {
1143 /* Responses are not allowed to the SMI. */
1148 smi_addr = (struct ipmi_system_interface_addr *) addr;
1149 if (smi_addr->lun > 3) {
1150 spin_lock_irqsave(&intf->counter_lock, flags);
1151 intf->sent_invalid_commands++;
1152 spin_unlock_irqrestore(&intf->counter_lock, flags);
1157 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1159 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1160 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1161 || (msg->cmd == IPMI_GET_MSG_CMD)
1162 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
1164 /* We don't let the user do these, since we manage
1165 the sequence numbers. */
1166 spin_lock_irqsave(&intf->counter_lock, flags);
1167 intf->sent_invalid_commands++;
1168 spin_unlock_irqrestore(&intf->counter_lock, flags);
1173 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1174 spin_lock_irqsave(&intf->counter_lock, flags);
1175 intf->sent_invalid_commands++;
1176 spin_unlock_irqrestore(&intf->counter_lock, flags);
1181 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1182 smi_msg->data[1] = msg->cmd;
1183 smi_msg->msgid = msgid;
1184 smi_msg->user_data = recv_msg;
1185 if (msg->data_len > 0)
1186 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1187 smi_msg->data_size = msg->data_len + 2;
1188 spin_lock_irqsave(&intf->counter_lock, flags);
1189 intf->sent_local_commands++;
1190 spin_unlock_irqrestore(&intf->counter_lock, flags);
1191 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1192 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
1194 struct ipmi_ipmb_addr *ipmb_addr;
1195 unsigned char ipmb_seq;
1199 if (addr->channel >= IPMI_MAX_CHANNELS) {
1200 spin_lock_irqsave(&intf->counter_lock, flags);
1201 intf->sent_invalid_commands++;
1202 spin_unlock_irqrestore(&intf->counter_lock, flags);
1207 if (intf->channels[addr->channel].medium
1208 != IPMI_CHANNEL_MEDIUM_IPMB)
1210 spin_lock_irqsave(&intf->counter_lock, flags);
1211 intf->sent_invalid_commands++;
1212 spin_unlock_irqrestore(&intf->counter_lock, flags);
1218 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1219 retries = 0; /* Don't retry broadcasts. */
1223 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1224 /* Broadcasts add a zero at the beginning of the
1225 message, but are otherwise the same as an IPMB address. */
1227 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1232 /* Default to 1 second retries. */
1233 if (retry_time_ms == 0)
1234 retry_time_ms = 1000;
1236 /* 9 for the header and 1 for the checksum, plus
1237 possibly one for the broadcast. */
1238 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1239 spin_lock_irqsave(&intf->counter_lock, flags);
1240 intf->sent_invalid_commands++;
1241 spin_unlock_irqrestore(&intf->counter_lock, flags);
1246 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1247 if (ipmb_addr->lun > 3) {
1248 spin_lock_irqsave(&intf->counter_lock, flags);
1249 intf->sent_invalid_commands++;
1250 spin_unlock_irqrestore(&intf->counter_lock, flags);
1255 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1257 if (recv_msg->msg.netfn & 0x1) {
1258 /* It's a response, so use the user's sequence from the msgid. */
1260 spin_lock_irqsave(&intf->counter_lock, flags);
1261 intf->sent_ipmb_responses++;
1262 spin_unlock_irqrestore(&intf->counter_lock, flags);
1263 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1265 source_address, source_lun);
1267 /* Save the receive message so we can use it
1268 to deliver the response. */
1269 smi_msg->user_data = recv_msg;
1271 /* It's a command, so get a sequence for it. */
1273 spin_lock_irqsave(&(intf->seq_lock), flags);
1275 spin_lock(&intf->counter_lock);
1276 intf->sent_ipmb_commands++;
1277 spin_unlock(&intf->counter_lock);
1279 /* Create a sequence number with a 1 second
1280 timeout and 4 retries. */
1281 rv = intf_next_seq(intf,
1289 /* We have used up all the sequence numbers,
1290 probably, so abort. */
1291 spin_unlock_irqrestore(&(intf->seq_lock),
1296 /* Store the sequence number in the message,
1297 so that when the send message response
1298 comes back we can start the timer. */
1299 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1300 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1301 ipmb_seq, broadcast,
1302 source_address, source_lun);
1304 /* Copy the message into the recv message data, so we
1305 can retransmit it later if necessary. */
1306 memcpy(recv_msg->msg_data, smi_msg->data,
1307 smi_msg->data_size);
1308 recv_msg->msg.data = recv_msg->msg_data;
1309 recv_msg->msg.data_len = smi_msg->data_size;
1311 /* We don't unlock until here, because we need
1312 to copy the completed message into the
1313 recv_msg before we release the lock.
1314 Otherwise, race conditions may bite us. I
1315 know that's pretty paranoid, but I prefer to be safe. */
1317 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1319 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
1320 struct ipmi_lan_addr *lan_addr;
1321 unsigned char ipmb_seq;
1324 if (addr->channel >= IPMI_NUM_CHANNELS) {
1325 spin_lock_irqsave(&intf->counter_lock, flags);
1326 intf->sent_invalid_commands++;
1327 spin_unlock_irqrestore(&intf->counter_lock, flags);
1332 if ((intf->channels[addr->channel].medium
1333 != IPMI_CHANNEL_MEDIUM_8023LAN)
1334 && (intf->channels[addr->channel].medium
1335 != IPMI_CHANNEL_MEDIUM_ASYNC))
1337 spin_lock_irqsave(&intf->counter_lock, flags);
1338 intf->sent_invalid_commands++;
1339 spin_unlock_irqrestore(&intf->counter_lock, flags);
1346 /* Default to 1 second retries. */
1347 if (retry_time_ms == 0)
1348 retry_time_ms = 1000;
1350 /* 11 for the header and 1 for the checksum. */
1351 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1352 spin_lock_irqsave(&intf->counter_lock, flags);
1353 intf->sent_invalid_commands++;
1354 spin_unlock_irqrestore(&intf->counter_lock, flags);
1359 lan_addr = (struct ipmi_lan_addr *) addr;
1360 if (lan_addr->lun > 3) {
1361 spin_lock_irqsave(&intf->counter_lock, flags);
1362 intf->sent_invalid_commands++;
1363 spin_unlock_irqrestore(&intf->counter_lock, flags);
1368 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1370 if (recv_msg->msg.netfn & 0x1) {
1371 /* It's a response, so use the user's sequence from the msgid. */
1373 spin_lock_irqsave(&intf->counter_lock, flags);
1374 intf->sent_lan_responses++;
1375 spin_unlock_irqrestore(&intf->counter_lock, flags);
1376 format_lan_msg(smi_msg, msg, lan_addr, msgid,
1379 /* Save the receive message so we can use it
1380 to deliver the response. */
1381 smi_msg->user_data = recv_msg;
1383 /* It's a command, so get a sequence for it. */
1385 spin_lock_irqsave(&(intf->seq_lock), flags);
1387 spin_lock(&intf->counter_lock);
1388 intf->sent_lan_commands++;
1389 spin_unlock(&intf->counter_lock);
1391 /* Create a sequence number with a 1 second
1392 timeout and 4 retries. */
1393 rv = intf_next_seq(intf,
1401 /* We have used up all the sequence numbers,
1402 probably, so abort. */
1403 spin_unlock_irqrestore(&(intf->seq_lock),
1408 /* Store the sequence number in the message,
1409 so that when the send message response
1410 comes back we can start the timer. */
1411 format_lan_msg(smi_msg, msg, lan_addr,
1412 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1413 ipmb_seq, source_lun);
1415 /* Copy the message into the recv message data, so we
1416 can retransmit it later if necessary. */
1417 memcpy(recv_msg->msg_data, smi_msg->data,
1418 smi_msg->data_size);
1419 recv_msg->msg.data = recv_msg->msg_data;
1420 recv_msg->msg.data_len = smi_msg->data_size;
1422 /* We don't unlock until here, because we need
1423 to copy the completed message into the
1424 recv_msg before we release the lock.
1425 Otherwise, race conditions may bite us. I
1426 know that's pretty paranoid, but I prefer to be safe. */
1428 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1431 /* Unknown address type. */
1432 spin_lock_irqsave(&intf->counter_lock, flags);
1433 intf->sent_invalid_commands++;
1434 spin_unlock_irqrestore(&intf->counter_lock, flags);
1442 for (m = 0; m < smi_msg->data_size; m++)
1443 printk(" %2.2x", smi_msg->data[m]);
1447 intf->handlers->sender(intf->send_info, smi_msg, priority);
1452 ipmi_free_smi_msg(smi_msg);
1453 ipmi_free_recv_msg(recv_msg);
1457 static int check_addr(ipmi_smi_t intf,
1458 struct ipmi_addr *addr,
1459 unsigned char *saddr,
1462 if (addr->channel >= IPMI_MAX_CHANNELS)
1464 *lun = intf->channels[addr->channel].lun;
1465 *saddr = intf->channels[addr->channel].address;
1469 int ipmi_request_settime(ipmi_user_t user,
1470 struct ipmi_addr *addr,
1472 struct kernel_ipmi_msg *msg,
1473 void *user_msg_data,
1476 unsigned int retry_time_ms)
1478 unsigned char saddr, lun;
1483 rv = check_addr(user->intf, addr, &saddr, &lun);
1486 return i_ipmi_request(user,
1500 int ipmi_request_supply_msgs(ipmi_user_t user,
1501 struct ipmi_addr *addr,
1503 struct kernel_ipmi_msg *msg,
1504 void *user_msg_data,
1506 struct ipmi_recv_msg *supplied_recv,
1509 unsigned char saddr, lun;
1514 rv = check_addr(user->intf, addr, &saddr, &lun);
1517 return i_ipmi_request(user,
1531 static int ipmb_file_read_proc(char *page, char **start, off_t off,
1532 int count, int *eof, void *data)
1534 char *out = (char *) page;
1535 ipmi_smi_t intf = data;
1539 for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1540 rv += sprintf(out+rv, "%x ", intf->channels[i].address);
1541 out[rv-1] = '\n'; /* Replace the final space with a newline */
1547 static int version_file_read_proc(char *page, char **start, off_t off,
1548 int count, int *eof, void *data)
1550 char *out = (char *) page;
1551 ipmi_smi_t intf = data;
1553 return sprintf(out, "%d.%d\n",
1554 intf->version_major, intf->version_minor);
1557 static int stat_file_read_proc(char *page, char **start, off_t off,
1558 int count, int *eof, void *data)
1560 char *out = (char *) page;
1561 ipmi_smi_t intf = data;
1563 out += sprintf(out, "sent_invalid_commands: %d\n",
1564 intf->sent_invalid_commands);
1565 out += sprintf(out, "sent_local_commands: %d\n",
1566 intf->sent_local_commands);
1567 out += sprintf(out, "handled_local_responses: %d\n",
1568 intf->handled_local_responses);
1569 out += sprintf(out, "unhandled_local_responses: %d\n",
1570 intf->unhandled_local_responses);
1571 out += sprintf(out, "sent_ipmb_commands: %d\n",
1572 intf->sent_ipmb_commands);
1573 out += sprintf(out, "sent_ipmb_command_errs: %d\n",
1574 intf->sent_ipmb_command_errs);
1575 out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
1576 intf->retransmitted_ipmb_commands);
1577 out += sprintf(out, "timed_out_ipmb_commands: %d\n",
1578 intf->timed_out_ipmb_commands);
1579 out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n",
1580 intf->timed_out_ipmb_broadcasts);
1581 out += sprintf(out, "sent_ipmb_responses: %d\n",
1582 intf->sent_ipmb_responses);
1583 out += sprintf(out, "handled_ipmb_responses: %d\n",
1584 intf->handled_ipmb_responses);
1585 out += sprintf(out, "invalid_ipmb_responses: %d\n",
1586 intf->invalid_ipmb_responses);
1587 out += sprintf(out, "unhandled_ipmb_responses: %d\n",
1588 intf->unhandled_ipmb_responses);
1589 out += sprintf(out, "sent_lan_commands: %d\n",
1590 intf->sent_lan_commands);
1591 out += sprintf(out, "sent_lan_command_errs: %d\n",
1592 intf->sent_lan_command_errs);
1593 out += sprintf(out, "retransmitted_lan_commands: %d\n",
1594 intf->retransmitted_lan_commands);
1595 out += sprintf(out, "timed_out_lan_commands: %d\n",
1596 intf->timed_out_lan_commands);
1597 out += sprintf(out, "sent_lan_responses: %d\n",
1598 intf->sent_lan_responses);
1599 out += sprintf(out, "handled_lan_responses: %d\n",
1600 intf->handled_lan_responses);
1601 out += sprintf(out, "invalid_lan_responses: %d\n",
1602 intf->invalid_lan_responses);
1603 out += sprintf(out, "unhandled_lan_responses: %d\n",
1604 intf->unhandled_lan_responses);
1605 out += sprintf(out, "handled_commands: %d\n",
1606 intf->handled_commands);
1607 out += sprintf(out, "invalid_commands: %d\n",
1608 intf->invalid_commands);
1609 out += sprintf(out, "unhandled_commands: %d\n",
1610 intf->unhandled_commands);
1611 out += sprintf(out, "invalid_events: %d\n",
1612 intf->invalid_events);
1613 out += sprintf(out, "events: %d\n",
1616 return (out - ((char *) page));
1619 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1620 read_proc_t *read_proc, write_proc_t *write_proc,
1621 void *data, struct module *owner)
1624 #ifdef CONFIG_PROC_FS
1625 struct proc_dir_entry *file;
1626 struct ipmi_proc_entry *entry;
1628 /* Create a list element. */
1629 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1632 entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
1637 strcpy(entry->name, name);
1639 file = create_proc_entry(name, 0, smi->proc_dir);
1647 file->read_proc = read_proc;
1648 file->write_proc = write_proc;
1649 file->owner = owner;
1651 spin_lock(&smi->proc_entry_lock);
1652 /* Stick it on the list. */
1653 entry->next = smi->proc_entries;
1654 smi->proc_entries = entry;
1655 spin_unlock(&smi->proc_entry_lock);
1657 #endif /* CONFIG_PROC_FS */
1662 static int add_proc_entries(ipmi_smi_t smi, int num)
1666 #ifdef CONFIG_PROC_FS
1667 sprintf(smi->proc_dir_name, "%d", num);
1668 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1672 smi->proc_dir->owner = THIS_MODULE;
1676 rv = ipmi_smi_add_proc_entry(smi, "stats",
1677 stat_file_read_proc, NULL,
1681 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1682 ipmb_file_read_proc, NULL,
1686 rv = ipmi_smi_add_proc_entry(smi, "version",
1687 version_file_read_proc, NULL,
1689 #endif /* CONFIG_PROC_FS */
1694 static void remove_proc_entries(ipmi_smi_t smi)
1696 #ifdef CONFIG_PROC_FS
1697 struct ipmi_proc_entry *entry;
1699 spin_lock(&smi->proc_entry_lock);
1700 while (smi->proc_entries) {
1701 entry = smi->proc_entries;
1702 smi->proc_entries = entry->next;
1704 remove_proc_entry(entry->name, smi->proc_dir);
1708 spin_unlock(&smi->proc_entry_lock);
1709 remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
1710 #endif /* CONFIG_PROC_FS */
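/*
 * Ask the BMC for information about one channel (Get Channel Info).
 * The reply comes back through the normal message path and, because
 * it is sent with a NULL user, is picked up by channel_handler()
 * below.
 */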
1714 send_channel_info_cmd(ipmi_smi_t intf, int chan)
1716 struct kernel_ipmi_msg msg;
1717 unsigned char data[1];
1718 struct ipmi_system_interface_addr si;
1720 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
1721 si.channel = IPMI_BMC_CHANNEL;
1724 msg.netfn = IPMI_NETFN_APP_REQUEST;
1725 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
1729 return i_ipmi_request(NULL,
1731 (struct ipmi_addr *) &si,
1738 intf->channels[0].address,
1739 intf->channels[0].lun,
1744 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
1749 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
1750 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
1751 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
1753 /* It's the one we want */
1754 if (msg->msg.data[0] != 0) {
1755 /* Got an error from the channel, just go on. */
1757 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
1758 /* If the MC does not support this
1759 command, that is legal. We just
1760 assume it has one IPMB at channel zero. */
1762 intf->channels[0].medium
1763 = IPMI_CHANNEL_MEDIUM_IPMB;
1764 intf->channels[0].protocol
1765 = IPMI_CHANNEL_PROTOCOL_IPMB;
1768 intf->curr_channel = IPMI_MAX_CHANNELS;
1769 wake_up(&intf->waitq);
1774 if (msg->msg.data_len < 4) {
1775 /* Message not big enough, just go on. */
1778 chan = intf->curr_channel;
1779 intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
1780 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
1783 intf->curr_channel++;
1784 if (intf->curr_channel >= IPMI_MAX_CHANNELS)
1785 wake_up(&intf->waitq);
1787 rv = send_channel_info_cmd(intf, intf->curr_channel);
1790 /* Got an error somehow, just give up. */
1791 intf->curr_channel = IPMI_MAX_CHANNELS;
1792 wake_up(&intf->waitq);
1794 printk(KERN_WARNING PFX
1795 "Error sending channel information: %d\n",
1803 int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1805 unsigned char version_major,
1806 unsigned char version_minor,
1807 unsigned char slave_addr,
1808 ipmi_smi_t *new_intf)
1813 unsigned long flags;
1816 /* Make sure the driver is actually initialized; this handles
1817 problems with initialization order. */
1819 rv = ipmi_init_msghandler();
1822 /* The init code doesn't return an error if it was turned
1823 off, but it won't initialize. Check that. */
1828 intf = kmalloc(sizeof(*intf), GFP_KERNEL);
1831 memset(intf, 0, sizeof(*intf));
1832 intf->intf_num = -1;
1833 kref_init(&intf->refcount);
1834 intf->version_major = version_major;
1835 intf->version_minor = version_minor;
1836 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
1837 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
1838 intf->channels[j].lun = 2;
1840 if (slave_addr != 0)
1841 intf->channels[0].address = slave_addr;
1842 INIT_LIST_HEAD(&intf->users);
1843 intf->handlers = handlers;
1844 intf->send_info = send_info;
1845 spin_lock_init(&intf->seq_lock);
1846 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
1847 intf->seq_table[j].inuse = 0;
1848 intf->seq_table[j].seqid = 0;
1851 #ifdef CONFIG_PROC_FS
1852 spin_lock_init(&intf->proc_entry_lock);
1854 spin_lock_init(&intf->waiting_msgs_lock);
1855 INIT_LIST_HEAD(&intf->waiting_msgs);
1856 spin_lock_init(&intf->events_lock);
1857 INIT_LIST_HEAD(&intf->waiting_events);
1858 intf->waiting_events_count = 0;
1859 init_MUTEX(&intf->cmd_rcvrs_lock);
1860 INIT_LIST_HEAD(&intf->cmd_rcvrs);
1861 init_waitqueue_head(&intf->waitq);
1863 spin_lock_init(&intf->counter_lock);
1864 intf->proc_dir = NULL;
1867 spin_lock_irqsave(&interfaces_lock, flags);
1868 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
1869 if (ipmi_interfaces[i] == NULL) {
1871 /* Reserve the entry till we are done. */
1872 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
1877 spin_unlock_irqrestore(&interfaces_lock, flags);
1881 /* FIXME - this is an ugly kludge, this sets the intf for the
1882 caller before sending any messages with it. */
1885 if ((version_major > 1)
1886 || ((version_major == 1) && (version_minor >= 5)))
1888 /* Start scanning the channels to see what is available. */
1890 intf->null_user_handler = channel_handler;
1891 intf->curr_channel = 0;
1892 rv = send_channel_info_cmd(intf, 0);
1896 /* Wait for the channel info to be read. */
1897 wait_event(intf->waitq,
1898 intf->curr_channel >= IPMI_MAX_CHANNELS);
1900 /* Assume a single IPMB channel at zero. */
1901 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
1902 intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
1906 rv = add_proc_entries(intf, i);
1911 remove_proc_entries(intf);
1912 kref_put(&intf->refcount, intf_free);
1913 if (i < MAX_IPMI_INTERFACES) {
1914 spin_lock_irqsave(&interfaces_lock, flags);
1915 ipmi_interfaces[i] = NULL;
1916 spin_unlock_irqrestore(&interfaces_lock, flags);
1919 spin_lock_irqsave(&interfaces_lock, flags);
1920 ipmi_interfaces[i] = intf;
1921 spin_unlock_irqrestore(&interfaces_lock, flags);
1922 call_smi_watchers(i);
1928 int ipmi_unregister_smi(ipmi_smi_t intf)
1931 struct ipmi_smi_watcher *w;
1932 unsigned long flags;
1934 spin_lock_irqsave(&interfaces_lock, flags);
1935 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
1936 if (ipmi_interfaces[i] == intf) {
1937 /* Set the interface number reserved until we are done with it. */
1939 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
1940 intf->intf_num = -1;
1944 spin_unlock_irqrestore(&interfaces_lock,flags);
1946 if (i == MAX_IPMI_INTERFACES)
1949 remove_proc_entries(intf);
1951 /* Call all the watcher interfaces to tell them that
1952 an interface is gone. */
1953 down_read(&smi_watchers_sem);
1954 list_for_each_entry(w, &smi_watchers, link)
1956 up_read(&smi_watchers_sem);
1958 /* Allow the entry to be reused now. */
1959 spin_lock_irqsave(&interfaces_lock, flags);
1960 ipmi_interfaces[i] = NULL;
1961 spin_unlock_irqrestore(&interfaces_lock,flags);
1963 kref_put(&intf->refcount, intf_free);
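/*
 * Handle a Get Message response that carries an IPMB response from a
 * remote management controller: rebuild the IPMB address from the
 * header bytes, match the message against the sequence table, and
 * deliver it to the user that sent the original command.
 */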
1967 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
1968 struct ipmi_smi_msg *msg)
1970 struct ipmi_ipmb_addr ipmb_addr;
1971 struct ipmi_recv_msg *recv_msg;
1972 unsigned long flags;
1975 /* This is 11, not 10, because the response must contain a
1976 * completion code. */
1977 if (msg->rsp_size < 11) {
1978 /* Message not big enough, just ignore it. */
1979 spin_lock_irqsave(&intf->counter_lock, flags);
1980 intf->invalid_ipmb_responses++;
1981 spin_unlock_irqrestore(&intf->counter_lock, flags);
1985 if (msg->rsp[2] != 0) {
1986 /* An error getting the response, just ignore it. */
1990 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
1991 ipmb_addr.slave_addr = msg->rsp[6];
1992 ipmb_addr.channel = msg->rsp[3] & 0x0f;
1993 ipmb_addr.lun = msg->rsp[7] & 3;
1995 /* It's a response from a remote entity. Look up the sequence
1996 number and handle the response. */
1997 if (intf_find_seq(intf,
2001 (msg->rsp[4] >> 2) & (~1),
2002 (struct ipmi_addr *) &(ipmb_addr),
2005 /* We were unable to find the sequence number,
2006 so just nuke the message. */
2007 spin_lock_irqsave(&intf->counter_lock, flags);
2008 intf->unhandled_ipmb_responses++;
2009 spin_unlock_irqrestore(&intf->counter_lock, flags);
2013 memcpy(recv_msg->msg_data,
2016 /* The other fields matched, so no need to set them, except
2017 for netfn, which needs to be the response that was
2018 returned, not the request value. */
2019 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2020 recv_msg->msg.data = recv_msg->msg_data;
2021 recv_msg->msg.data_len = msg->rsp_size - 10;
2022 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2023 spin_lock_irqsave(&intf->counter_lock, flags);
2024 intf->handled_ipmb_responses++;
2025 spin_unlock_irqrestore(&intf->counter_lock, flags);
2026 deliver_response(recv_msg);
2031 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2032 struct ipmi_smi_msg *msg)
2034 struct cmd_rcvr *rcvr;
2036 unsigned char netfn;
2038 ipmi_user_t user = NULL;
2039 struct ipmi_ipmb_addr *ipmb_addr;
2040 struct ipmi_recv_msg *recv_msg;
2041 unsigned long flags;
2043 if (msg->rsp_size < 10) {
2044 /* Message not big enough, just ignore it. */
2045 spin_lock_irqsave(&intf->counter_lock, flags);
2046 intf->invalid_commands++;
2047 spin_unlock_irqrestore(&intf->counter_lock, flags);
2051 if (msg->rsp[2] != 0) {
2052 /* An error getting the response, just ignore it. */
2056 netfn = msg->rsp[4] >> 2;
2060 rcvr = find_cmd_rcvr(intf, netfn, cmd);
2063 kref_get(&user->refcount);
2069 /* We didn't find a user, deliver an error response. */
2070 spin_lock_irqsave(&intf->counter_lock, flags);
2071 intf->unhandled_commands++;
2072 spin_unlock_irqrestore(&intf->counter_lock, flags);
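/* Build a Send Message request that returns an "invalid command"
   completion code to the requester, with the requester and responder
   addresses swapped relative to the incoming request. */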
2074 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
2075 msg->data[1] = IPMI_SEND_MSG_CMD;
2076 msg->data[2] = msg->rsp[3];
2077 msg->data[3] = msg->rsp[6];
2078 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
2079 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
2080 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
2082 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
2083 msg->data[8] = msg->rsp[8]; /* cmd */
2084 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
2085 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
2086 msg->data_size = 11;
2091 printk("Invalid command:");
2092 for (m = 0; m < msg->data_size; m++)
2093 printk(" %2.2x", msg->data[m]);
2097 intf->handlers->sender(intf->send_info, msg, 0);
2099 rv = -1; /* We used the message, so return the value that
2100 causes it to not be freed or queued. */
2102 /* Deliver the message to the user. */
2103 spin_lock_irqsave(&intf->counter_lock, flags);
2104 intf->handled_commands++;
2105 spin_unlock_irqrestore(&intf->counter_lock, flags);
2107 recv_msg = ipmi_alloc_recv_msg();
2109 /* We couldn't allocate memory for the
2110 message, so requeue it for handling later. */
2113 kref_put(&user->refcount, free_user);
2115 /* Extract the source address from the data. */
2116 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
2117 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
2118 ipmb_addr->slave_addr = msg->rsp[6];
2119 ipmb_addr->lun = msg->rsp[7] & 3;
2120 ipmb_addr->channel = msg->rsp[3] & 0xf;
2122 /* Extract the rest of the message information
2123 from the IPMB header. */
2124 recv_msg->user = user;
2125 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2126 recv_msg->msgid = msg->rsp[7] >> 2;
2127 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2128 recv_msg->msg.cmd = msg->rsp[8];
2129 recv_msg->msg.data = recv_msg->msg_data;
2131 /* We chop off 10, not 9 bytes because the checksum
2132 at the end also needs to be removed. */
2133 recv_msg->msg.data_len = msg->rsp_size - 10;
2134 memcpy(recv_msg->msg_data,
2136 msg->rsp_size - 10);
2137 deliver_response(recv_msg);
2144 static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
2145 struct ipmi_smi_msg *msg)
2147 struct ipmi_lan_addr lan_addr;
2148 struct ipmi_recv_msg *recv_msg;
2149 unsigned long flags;
2152 /* This is 13, not 12, because the response must contain a
2153 * completion code. */
2154 if (msg->rsp_size < 13) {
2155 /* Message not big enough, just ignore it. */
2156 spin_lock_irqsave(&intf->counter_lock, flags);
2157 intf->invalid_lan_responses++;
2158 spin_unlock_irqrestore(&intf->counter_lock, flags);
2162 if (msg->rsp[2] != 0) {
2163 /* An error getting the response, just ignore it. */
2167 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
2168 lan_addr.session_handle = msg->rsp[4];
2169 lan_addr.remote_SWID = msg->rsp[8];
2170 lan_addr.local_SWID = msg->rsp[5];
2171 lan_addr.channel = msg->rsp[3] & 0x0f;
2172 lan_addr.privilege = msg->rsp[3] >> 4;
2173 lan_addr.lun = msg->rsp[9] & 3;
2175 /* It's a response from a remote entity. Look up the sequence
2176 number and handle the response. */
2177 if (intf_find_seq(intf,
2181 (msg->rsp[6] >> 2) & (~1),
2182 (struct ipmi_addr *) &(lan_addr),
2185 /* We were unable to find the sequence number,
2186 so just nuke the message. */
2187 spin_lock_irqsave(&intf->counter_lock, flags);
2188 intf->unhandled_lan_responses++;
2189 spin_unlock_irqrestore(&intf->counter_lock, flags);
2193 memcpy(recv_msg->msg_data,
2195 msg->rsp_size - 11);
2196 /* The other fields matched, so no need to set them, except
2197 for netfn, which needs to be the response that was
2198 returned, not the request value. */
2199 recv_msg->msg.netfn = msg->rsp[6] >> 2;
2200 recv_msg->msg.data = recv_msg->msg_data;
2201 recv_msg->msg.data_len = msg->rsp_size - 12;
2202 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2203 spin_lock_irqsave(&intf->counter_lock, flags);
2204 intf->handled_lan_responses++;
2205 spin_unlock_irqrestore(&intf->counter_lock, flags);
2206 deliver_response(recv_msg);
2211 static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
2212 struct ipmi_smi_msg *msg)
2214 struct cmd_rcvr *rcvr;
2216 unsigned char netfn;
2218 ipmi_user_t user = NULL;
2219 struct ipmi_lan_addr *lan_addr;
2220 struct ipmi_recv_msg *recv_msg;
2221 unsigned long flags;
2223 if (msg->rsp_size < 12) {
2224 /* Message not big enough, just ignore it. */
2225 spin_lock_irqsave(&intf->counter_lock, flags);
2226 intf->invalid_commands++;
2227 spin_unlock_irqrestore(&intf->counter_lock, flags);
2231 if (msg->rsp[2] != 0) {
2232 /* An error getting the response, just ignore it. */
2236 netfn = msg->rsp[6] >> 2;
2240 rcvr = find_cmd_rcvr(intf, netfn, cmd);
2243 kref_get(&user->refcount);
2249 /* We didn't find a user, just give up. */
2250 spin_lock_irqsave(&intf->counter_lock, flags);
2251 intf->unhandled_commands++;
2252 spin_unlock_irqrestore(&intf->counter_lock, flags);
2254 rv = 0; /* Don't do anything with these messages, just
2255 allow them to be freed. */
2257 /* Deliver the message to the user. */
2258 spin_lock_irqsave(&intf->counter_lock, flags);
2259 intf->handled_commands++;
2260 spin_unlock_irqrestore(&intf->counter_lock, flags);
2262 recv_msg = ipmi_alloc_recv_msg();
2264 /* We couldn't allocate memory for the
2265 message, so requeue it for handling later. */
2268 kref_put(&user->refcount, free_user);
2270 /* Extract the source address from the data. */
2271 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
2272 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
2273 lan_addr->session_handle = msg->rsp[4];
2274 lan_addr->remote_SWID = msg->rsp[8];
2275 lan_addr->local_SWID = msg->rsp[5];
2276 lan_addr->lun = msg->rsp[9] & 3;
2277 lan_addr->channel = msg->rsp[3] & 0xf;
2278 lan_addr->privilege = msg->rsp[3] >> 4;
2280 /* Extract the rest of the message information
2281 from the LAN message header. */
2282 recv_msg->user = user;
2283 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2284 recv_msg->msgid = msg->rsp[9] >> 2;
2285 recv_msg->msg.netfn = msg->rsp[6] >> 2;
2286 recv_msg->msg.cmd = msg->rsp[10];
2287 recv_msg->msg.data = recv_msg->msg_data;
2289 /* We chop off 12, not 11 bytes because the checksum
2290 at the end also needs to be removed. */
2291 recv_msg->msg.data_len = msg->rsp_size - 12;
2292 memcpy(recv_msg->msg_data,
2294 msg->rsp_size - 12);
2295 deliver_response(recv_msg);
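/*
 * Copy an asynchronous event out of a Read Event Message Buffer
 * response into a receive message.  rsp[0] and rsp[1] are the
 * netfn/LUN and command of the response, rsp[2] is the completion
 * code, and rsp[3] onward is the raw event data that gets copied.
 */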
2302 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
2303 struct ipmi_smi_msg *msg)
2305 struct ipmi_system_interface_addr *smi_addr;
2307 recv_msg->msgid = 0;
2308 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
2309 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2310 smi_addr->channel = IPMI_BMC_CHANNEL;
2311 smi_addr->lun = msg->rsp[0] & 3;
2312 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
2313 recv_msg->msg.netfn = msg->rsp[0] >> 2;
2314 recv_msg->msg.cmd = msg->rsp[1];
2315 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
2316 recv_msg->msg.data = recv_msg->msg_data;
2317 recv_msg->msg.data_len = msg->rsp_size - 3;
2320 static int handle_read_event_rsp(ipmi_smi_t intf,
2321 struct ipmi_smi_msg *msg)
2323 struct ipmi_recv_msg *recv_msg, *recv_msg2;
2324 struct list_head msgs;
2327 int deliver_count = 0;
2328 unsigned long flags;
2330 if (msg->rsp_size < 19) {
2331 /* Message is too small to be an IPMB event. */
2332 spin_lock_irqsave(&intf->counter_lock, flags);
2333 intf->invalid_events++;
2334 spin_unlock_irqrestore(&intf->counter_lock, flags);
2338 if (msg->rsp[2] != 0) {
2339 /* An error getting the event, just ignore it. */
2343 INIT_LIST_HEAD(&msgs);
2345 spin_lock_irqsave(&intf->events_lock, flags);
2347 spin_lock(&intf->counter_lock);
2349 spin_unlock(&intf->counter_lock);
2351 /* Allocate and fill in one message for every user that is getting
2354 list_for_each_entry_rcu(user, &intf->users, link) {
2355 if (! user->gets_events)
2358 recv_msg = ipmi_alloc_recv_msg();
2361 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
2362 list_del(&recv_msg->link);
2363 ipmi_free_recv_msg(recv_msg);
2365 /* We couldn't allocate memory for the
2366 message, so requeue it for handling
2374 copy_event_into_recv_msg(recv_msg, msg);
2375 recv_msg->user = user;
2376 kref_get(&user->refcount);
2377 list_add_tail(&(recv_msg->link), &msgs);
2381 if (deliver_count) {
2382 /* Now deliver all the messages. */
2383 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
2384 list_del(&recv_msg->link);
2385 deliver_response(recv_msg);
2387 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
2388 /* No one to receive the message; put it in the queue if there
2389 aren't already too many things in the queue. */
2390 recv_msg = ipmi_alloc_recv_msg();
2392 /* We couldn't allocate memory for the
2393 message, so requeue it for handling
2399 copy_event_into_recv_msg(recv_msg, msg);
2400 list_add_tail(&(recv_msg->link), &(intf->waiting_events));
2402 /* There are too many things in the queue, discard this
2404 printk(KERN_WARNING PFX "Event queue full, discarding an"
2405 " incoming event\n");
2409 spin_unlock_irqrestore(&(intf->events_lock), flags);
2414 static int handle_bmc_rsp(ipmi_smi_t intf,
2415 struct ipmi_smi_msg *msg)
2417 struct ipmi_recv_msg *recv_msg;
2418 unsigned long flags;
2419 struct ipmi_user *user;
2421 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
2422 if (recv_msg == NULL)
2424 printk(KERN_WARNING "IPMI message received with no owner. This\n"
2425 "could be because of a malformed message, or\n"
2426 "because of a hardware error. Contact your\n"
2427 "hardware vendor for assistance\n");
2431 user = recv_msg->user;
2432 /* Make sure the user still exists. */
2433 if (user && !user->valid) {
2434 /* The user for the message went away, so give up. */
2435 spin_lock_irqsave(&intf->counter_lock, flags);
2436 intf->unhandled_local_responses++;
2437 spin_unlock_irqrestore(&intf->counter_lock, flags);
2438 ipmi_free_recv_msg(recv_msg);
2440 struct ipmi_system_interface_addr *smi_addr;
2442 spin_lock_irqsave(&intf->counter_lock, flags);
2443 intf->handled_local_responses++;
2444 spin_unlock_irqrestore(&intf->counter_lock, flags);
2445 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2446 recv_msg->msgid = msg->msgid;
2447 smi_addr = ((struct ipmi_system_interface_addr *)
2449 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2450 smi_addr->channel = IPMI_BMC_CHANNEL;
2451 smi_addr->lun = msg->rsp[0] & 3;
2452 recv_msg->msg.netfn = msg->rsp[0] >> 2;
2453 recv_msg->msg.cmd = msg->rsp[1];
2454 memcpy(recv_msg->msg_data,
2457 recv_msg->msg.data = recv_msg->msg_data;
2458 recv_msg->msg.data_len = msg->rsp_size - 2;
2459 deliver_response(recv_msg);
2465 /* Handle a new message. Return 1 if the message should be requeued,
2466 0 if the message should be freed, or -1 if the message should not
2467 be freed or requeued. */
2468 static int handle_new_recv_msg(ipmi_smi_t intf,
2469 struct ipmi_smi_msg *msg)
2477 for (m = 0; m < msg->rsp_size; m++)
2478 printk(" %2.2x", msg->rsp[m]);
2481 if (msg->rsp_size < 2) {
2482 /* Message is too small to be correct. */
2483 printk(KERN_WARNING PFX "BMC returned too small a message"
2484 " for netfn %x cmd %x, got %d bytes\n",
2485 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
2487 /* Generate an error response for the message. */
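/* The netfn lives in the top six bits of the first byte, so OR-ing in
   (1 << 2) below sets the low netfn bit and turns the request byte
   into the matching response byte. */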
2488 msg->rsp[0] = msg->data[0] | (1 << 2);
2489 msg->rsp[1] = msg->data[1];
2490 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
2492 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
2493 || (msg->rsp[1] != msg->data[1])) /* Command */
2495 /* The response is not even marginally correct. */
2496 printk(KERN_WARNING PFX "BMC returned incorrect response,"
2497 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
2498 (msg->data[0] >> 2) | 1, msg->data[1],
2499 msg->rsp[0] >> 2, msg->rsp[1]);
2501 /* Generate an error response for the message. */
2502 msg->rsp[0] = msg->data[0] | (1 << 2);
2503 msg->rsp[1] = msg->data[1];
2504 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
2508 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
2509 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
2510 && (msg->user_data != NULL))
2512 /* It's a response to a response we sent. For this we
2513 deliver a send message response to the user. */
2514 struct ipmi_recv_msg *recv_msg = msg->user_data;
2517 if (msg->rsp_size < 2)
2518 /* Message is too small to be correct. */
2521 chan = msg->data[2] & 0x0f;
2522 if (chan >= IPMI_MAX_CHANNELS)
2523 /* Invalid channel number */
2529 /* Make sure the user still exists. */
2530 if (!recv_msg->user || !recv_msg->user->valid)
2533 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
2534 recv_msg->msg.data = recv_msg->msg_data;
2535 recv_msg->msg.data_len = 1;
2536 recv_msg->msg_data[0] = msg->rsp[2];
2537 deliver_response(recv_msg);
2538 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
2539 && (msg->rsp[1] == IPMI_GET_MSG_CMD))
2541 /* It's from the receive queue. */
2542 chan = msg->rsp[3] & 0xf;
2543 if (chan >= IPMI_MAX_CHANNELS) {
2544 /* Invalid channel number */
2549 switch (intf->channels[chan].medium) {
2550 case IPMI_CHANNEL_MEDIUM_IPMB:
2551 if (msg->rsp[4] & 0x04) {
2552 /* It's a response, so find the
2553 requesting message and send it up. */
2554 requeue = handle_ipmb_get_msg_rsp(intf, msg);
2556 /* It's a command to the SMS from some other
2557 entity. Handle that. */
2558 requeue = handle_ipmb_get_msg_cmd(intf, msg);
2562 case IPMI_CHANNEL_MEDIUM_8023LAN:
2563 case IPMI_CHANNEL_MEDIUM_ASYNC:
2564 if (msg->rsp[6] & 0x04) {
2565 /* It's a response, so find the
2566 requesting message and send it up. */
2567 requeue = handle_lan_get_msg_rsp(intf, msg);
2569 /* It's a command to the SMS from some other
2570 entity. Handle that. */
2571 requeue = handle_lan_get_msg_cmd(intf, msg);
2576 /* We don't handle the channel type, so just
2577 * free the message. */
2581 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
2582 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
2584 /* It's an asynchronous event. */
2585 requeue = handle_read_event_rsp(intf, msg);
2587 /* It's a response from the local BMC. */
2588 requeue = handle_bmc_rsp(intf, msg);
2595 /* Handle a new message from the lower layer. */
2596 void ipmi_smi_msg_received(ipmi_smi_t intf,
2597 struct ipmi_smi_msg *msg)
2599 unsigned long flags;
2603 if ((msg->data_size >= 2)
2604 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
2605 && (msg->data[1] == IPMI_SEND_MSG_CMD)
2606 && (msg->user_data == NULL))
2608 /* This is the local response to a command send, start
2609 the timer for these. The user_data will not be
2610 NULL if this is a response send, and we will let
2611 response sends just go through. */
2613 /* Check for errors. If we get certain errors (ones
2614 that basically mean we can try again later), we
2615 ignore them and start the timer. Otherwise we
2616 report the error immediately. */
2617 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
2618 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
2619 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR))
2621 int chan = msg->rsp[3] & 0xf;
2623 /* Got an error sending the message, handle it. */
2624 spin_lock_irqsave(&intf->counter_lock, flags);
2625 if (chan >= IPMI_MAX_CHANNELS)
2626 ; /* This shouldn't happen */
2627 else if ((intf->channels[chan].medium
2628 == IPMI_CHANNEL_MEDIUM_8023LAN)
2629 || (intf->channels[chan].medium
2630 == IPMI_CHANNEL_MEDIUM_ASYNC))
2631 intf->sent_lan_command_errs++;
2633 intf->sent_ipmb_command_errs++;
2634 spin_unlock_irqrestore(&intf->counter_lock, flags);
2635 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
2637 /* The message was sent, start the timer. */
2638 intf_start_seq_timer(intf, msg->msgid);
2641 ipmi_free_smi_msg(msg);
2645 /* To preserve message order, if the list is not empty, we
2646 tack this message onto the end of the list. */
2647 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
2648 if (!list_empty(&intf->waiting_msgs)) {
2649 list_add_tail(&msg->link, &intf->waiting_msgs);
2650 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
2653 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
2655 rv = handle_new_recv_msg(intf, msg);
2657 /* Could not handle the message now, just add it to a
2658 list to handle later. */
2659 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
2660 list_add_tail(&msg->link, &intf->waiting_msgs);
2661 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
2662 } else if (rv == 0) {
2663 ipmi_free_smi_msg(msg);
2670 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
2675 list_for_each_entry_rcu(user, &intf->users, link) {
2676 if (! user->handler->ipmi_watchdog_pretimeout)
2679 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
2685 handle_msg_timeout(struct ipmi_recv_msg *msg)
2687 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2688 msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE;
2689 msg->msg.netfn |= 1; /* Convert to a response. */
2690 msg->msg.data_len = 1;
2691 msg->msg.data = msg->msg_data;
2692 deliver_response(msg);
2695 static struct ipmi_smi_msg *
2696 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
2697 unsigned char seq, long seqid)
2699 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
2701 /* If we can't allocate the message, just return; we
2702 get 4 retries, so this should be OK. */
2705 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
2706 smi_msg->data_size = recv_msg->msg.data_len;
2707 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
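/* The sequence-table slot and its sequence id are packed together into
   the msgid here, so the response (or send-error) path can use the
   msgid to locate this entry in the sequence table again. */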
2713 for (m = 0; m < smi_msg->data_size; m++)
2714 printk(" %2.2x", smi_msg->data[m]);
2721 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
2722 struct list_head *timeouts, long timeout_period,
2723 int slot, unsigned long *flags)
2725 struct ipmi_recv_msg *msg;
2730 ent->timeout -= timeout_period;
2731 if (ent->timeout > 0)
2734 if (ent->retries_left == 0) {
2735 /* The message has used all its retries. */
2737 msg = ent->recv_msg;
2738 list_add_tail(&msg->link, timeouts);
2739 spin_lock(&intf->counter_lock);
2741 intf->timed_out_ipmb_broadcasts++;
2742 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
2743 intf->timed_out_lan_commands++;
2745 intf->timed_out_ipmb_commands++;
2746 spin_unlock(&intf->counter_lock);
2748 struct ipmi_smi_msg *smi_msg;
2749 /* More retries, send again. */
2751 /* Start with the max timer, set to normal
2752 timer after the message is sent. */
2753 ent->timeout = MAX_MSG_TIMEOUT;
2754 ent->retries_left--;
2755 spin_lock(&intf->counter_lock);
2756 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
2757 intf->retransmitted_lan_commands++;
2759 intf->retransmitted_ipmb_commands++;
2760 spin_unlock(&intf->counter_lock);
2762 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
2767 spin_unlock_irqrestore(&intf->seq_lock, *flags);
2768 /* Send the new message. We send with a zero
2769 * priority; it timed out, so time is probably not
2770 * that critical now, and high priority
2771 * messages are really only for messages to the
2772 * local MC, which don't get resent. */
2773 intf->handlers->sender(intf->send_info,
2775 spin_lock_irqsave(&intf->seq_lock, *flags);
2779 static void ipmi_timeout_handler(long timeout_period)
2782 struct list_head timeouts;
2783 struct ipmi_recv_msg *msg, *msg2;
2784 struct ipmi_smi_msg *smi_msg, *smi_msg2;
2785 unsigned long flags;
2788 INIT_LIST_HEAD(&timeouts);
2790 spin_lock(&interfaces_lock);
2791 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2792 intf = ipmi_interfaces[i];
2793 if (IPMI_INVALID_INTERFACE(intf))
2795 kref_get(&intf->refcount);
2796 spin_unlock(&interfaces_lock);
2798 /* See if any waiting messages need to be processed. */
2799 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
2800 list_for_each_entry_safe(smi_msg, smi_msg2, &intf->waiting_msgs, link) {
2801 if (! handle_new_recv_msg(intf, smi_msg)) {
2802 list_del(&smi_msg->link);
2803 ipmi_free_smi_msg(smi_msg);
2805 /* To preserve message order, quit if we
2806 can't handle a message. */
2810 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
2812 /* Go through the seq table and find any messages that
2813 have timed out, putting them in the timeouts
2815 spin_lock_irqsave(&intf->seq_lock, flags);
2816 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++)
2817 check_msg_timeout(intf, &(intf->seq_table[j]),
2818 &timeouts, timeout_period, j,
2820 spin_unlock_irqrestore(&intf->seq_lock, flags);
2822 list_for_each_entry_safe(msg, msg2, &timeouts, link)
2823 handle_msg_timeout(msg);
2825 kref_put(&intf->refcount, intf_free);
2826 spin_lock(&interfaces_lock);
2828 spin_unlock(&interfaces_lock);
2831 static void ipmi_request_event(void)
2836 spin_lock(&interfaces_lock);
2837 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2838 intf = ipmi_interfaces[i];
2839 if (IPMI_INVALID_INTERFACE(intf))
2842 intf->handlers->request_events(intf->send_info);
2844 spin_unlock(&interfaces_lock);
2847 static struct timer_list ipmi_timer;
2849 /* Call every ~100 ms. */
2850 #define IPMI_TIMEOUT_TIME 100
2852 /* How many jiffies it takes to reach the timeout time. */
2853 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
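/* For example, with HZ == 1000 this is 100 jiffies and with HZ == 250
   it is 25 jiffies, so the timer fires roughly every 100 ms either
   way. */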
2855 /* Request events from the queue every second (this is the number of
2856 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
2857 future, IPMI will add a way to know immediately if an event is in
2858 the queue and this silliness can go away. */
2859 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
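/* With a 100 ms tick this works out to 1000 / 100 = 10 ticks between
   event requests, i.e. events are polled for roughly once a second. */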
2861 static atomic_t stop_operation;
2862 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
2864 static void ipmi_timeout(unsigned long data)
2866 if (atomic_read(&stop_operation))
2870 if (ticks_to_req_ev == 0) {
2871 ipmi_request_event();
2872 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
2875 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
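/* Re-arm ourselves for the next tick. cleanup_ipmi() sets
   stop_operation before calling del_timer_sync(), so once shutdown
   starts this handler stops re-arming itself. */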
2877 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
2881 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
2882 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
2884 /* FIXME - convert these to slabs. */
2885 static void free_smi_msg(struct ipmi_smi_msg *msg)
2887 atomic_dec(&smi_msg_inuse_count);
2891 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
2893 struct ipmi_smi_msg *rv;
2894 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
2896 rv->done = free_smi_msg;
2897 rv->user_data = NULL;
2898 atomic_inc(&smi_msg_inuse_count);
2903 static void free_recv_msg(struct ipmi_recv_msg *msg)
2905 atomic_dec(&recv_msg_inuse_count);
2909 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
2911 struct ipmi_recv_msg *rv;
2913 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
2915 rv->done = free_recv_msg;
2916 atomic_inc(&recv_msg_inuse_count);
2921 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
2924 kref_put(&msg->user->refcount, free_user);
2928 #ifdef CONFIG_IPMI_PANIC_EVENT
2930 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
2934 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
2938 #ifdef CONFIG_IPMI_PANIC_STRING
2939 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2941 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2942 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
2943 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
2944 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
2946 /* A get event receiver command, save it. */
2947 intf->event_receiver = msg->msg.data[1];
2948 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
2952 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2954 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2955 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2956 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
2957 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
2959 /* A get device id command; save whether we are an event
2960 receiver or generator. */
2961 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
2962 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
2967 static void send_panic_events(char *str)
2969 struct kernel_ipmi_msg msg;
2971 unsigned char data[16];
2973 struct ipmi_system_interface_addr *si;
2974 struct ipmi_addr addr;
2975 struct ipmi_smi_msg smi_msg;
2976 struct ipmi_recv_msg recv_msg;
2978 si = (struct ipmi_system_interface_addr *) &addr;
2979 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2980 si->channel = IPMI_BMC_CHANNEL;
2983 /* Fill in an event saying that we have failed. */
2984 msg.netfn = 0x04; /* Sensor or Event. */
2985 msg.cmd = 2; /* Platform event command. */
2988 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
2989 data[1] = 0x03; /* This is for IPMI 1.0. */
2990 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
2991 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
2992 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
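/* The bytes above follow the Platform Event (Event Message) request
   layout: generator ID, event message format revision, sensor type,
   sensor number, event direction/type, and event data 1. */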
2994 /* Put a few breadcrumbs in. Hopefully later we can add more things
2995 to make the panic events more useful. */
3002 smi_msg.done = dummy_smi_done_handler;
3003 recv_msg.done = dummy_recv_done_handler;
3005 /* For every registered interface, send the event. */
3006 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3007 intf = ipmi_interfaces[i];
3008 if (IPMI_INVALID_INTERFACE(intf))
3011 /* Send the event announcing the panic. */
3012 intf->handlers->set_run_to_completion(intf->send_info, 1);
3013 i_ipmi_request(NULL,
3022 intf->channels[0].address,
3023 intf->channels[0].lun,
3024 0, 1); /* Don't retry, and don't wait. */
3027 #ifdef CONFIG_IPMI_PANIC_STRING
3028 /* On every interface, dump a bunch of OEM events holding the
3033 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3035 struct ipmi_ipmb_addr *ipmb;
3038 intf = ipmi_interfaces[i];
3039 if (IPMI_INVALID_INTERFACE(intf))
3042 /* First job here is to figure out where to send the
3043 OEM events. There's no way in IPMI to send OEM
3044 events using an event send command, so we have to
3045 find the SEL to put them in and stick them in
3048 /* Get capabilities from the get device id. */
3049 intf->local_sel_device = 0;
3050 intf->local_event_generator = 0;
3051 intf->event_receiver = 0;
3053 /* Request the device info from the local MC. */
3054 msg.netfn = IPMI_NETFN_APP_REQUEST;
3055 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
3058 intf->null_user_handler = device_id_fetcher;
3059 i_ipmi_request(NULL,
3068 intf->channels[0].address,
3069 intf->channels[0].lun,
3070 0, 1); /* Don't retry, and don't wait. */
3072 if (intf->local_event_generator) {
3073 /* Request the event receiver from the local MC. */
3074 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
3075 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
3078 intf->null_user_handler = event_receiver_fetcher;
3079 i_ipmi_request(NULL,
3088 intf->channels[0].address,
3089 intf->channels[0].lun,
3090 0, 1); /* no retry, and no wait. */
3092 intf->null_user_handler = NULL;
3094 /* Validate the event receiver. The low bit must not
3095 be 1 (it must be a valid IPMB address), it cannot
3096 be zero, and it must not be my address. */
3097 if (((intf->event_receiver & 1) == 0)
3098 && (intf->event_receiver != 0)
3099 && (intf->event_receiver != intf->channels[0].address))
3101 /* The event receiver is valid, send an IPMB
3103 ipmb = (struct ipmi_ipmb_addr *) &addr;
3104 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
3105 ipmb->channel = 0; /* FIXME - is this right? */
3106 ipmb->lun = intf->event_receiver_lun;
3107 ipmb->slave_addr = intf->event_receiver;
3108 } else if (intf->local_sel_device) {
3109 /* The event receiver was not valid (or was
3110 me), but I am an SEL device, just dump it
3112 si = (struct ipmi_system_interface_addr *) &addr;
3113 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3114 si->channel = IPMI_BMC_CHANNEL;
3117 continue; /* Nowhere to send the event. */
3120 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
3121 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
3127 int size = strlen(p);
3133 data[2] = 0xf0; /* OEM event without timestamp. */
3134 data[3] = intf->channels[0].address;
3135 data[4] = j++; /* sequence # */
3136 /* Always give 11 bytes, so strncpy will fill
3137 it with zeroes for me. */
3138 strncpy(data+5, p, 11);
3141 i_ipmi_request(NULL,
3150 intf->channels[0].address,
3151 intf->channels[0].lun,
3152 0, 1); /* no retry, and no wait. */
3155 #endif /* CONFIG_IPMI_PANIC_STRING */
3157 #endif /* CONFIG_IPMI_PANIC_EVENT */
3159 static int has_paniced = 0;
3161 static int panic_event(struct notifier_block *this,
3162 unsigned long event,
3172 /* For every registered interface, set it to run to completion. */
3173 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3174 intf = ipmi_interfaces[i];
3175 if (IPMI_INVALID_INTERFACE(intf))
3178 intf->handlers->set_run_to_completion(intf->send_info, 1);
3181 #ifdef CONFIG_IPMI_PANIC_EVENT
3182 send_panic_events(ptr);
3188 static struct notifier_block panic_block = {
3189 .notifier_call = panic_event,
3191 .priority = 200 /* priority: INT_MAX >= x >= 0 */
3194 static int ipmi_init_msghandler(void)
3201 printk(KERN_INFO "ipmi message handler version "
3202 IPMI_DRIVER_VERSION "\n");
3204 for (i = 0; i < MAX_IPMI_INTERFACES; i++)
3205 ipmi_interfaces[i] = NULL;
3207 #ifdef CONFIG_PROC_FS
3208 proc_ipmi_root = proc_mkdir("ipmi", NULL);
3209 if (!proc_ipmi_root) {
3210 printk(KERN_ERR PFX "Unable to create IPMI proc dir\n");
3214 proc_ipmi_root->owner = THIS_MODULE;
3215 #endif /* CONFIG_PROC_FS */
3217 init_timer(&ipmi_timer);
3218 ipmi_timer.data = 0;
3219 ipmi_timer.function = ipmi_timeout;
3220 ipmi_timer.expires = jiffies + IPMI_TIMEOUT_JIFFIES;
3221 add_timer(&ipmi_timer);
3223 notifier_chain_register(&panic_notifier_list, &panic_block);
3230 static __init int ipmi_init_msghandler_mod(void)
3232 ipmi_init_msghandler();
3236 static __exit void cleanup_ipmi(void)
3243 notifier_chain_unregister(&panic_notifier_list, &panic_block);
3245 /* This can't be called if any interfaces exist, so no worry about
3246 shutting down the interfaces. */
3248 /* Tell the timer to stop, then wait for it to stop. This avoids
3249 problems with race conditions removing the timer here. */
3250 atomic_inc(&stop_operation);
3251 del_timer_sync(&ipmi_timer);
3253 #ifdef CONFIG_PROC_FS
3254 remove_proc_entry(proc_ipmi_root->name, &proc_root);
3255 #endif /* CONFIG_PROC_FS */
3259 /* Check for buffer leaks. */
3260 count = atomic_read(&smi_msg_inuse_count);
3262 printk(KERN_WARNING PFX "SMI message count %d at exit\n",
3264 count = atomic_read(&recv_msg_inuse_count);
3266 printk(KERN_WARNING PFX "recv message count %d at exit\n",
3269 module_exit(cleanup_ipmi);
3271 module_init(ipmi_init_msghandler_mod);
3272 MODULE_LICENSE("GPL");
3273 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3274 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
3275 MODULE_VERSION(IPMI_DRIVER_VERSION);
3277 EXPORT_SYMBOL(ipmi_create_user);
3278 EXPORT_SYMBOL(ipmi_destroy_user);
3279 EXPORT_SYMBOL(ipmi_get_version);
3280 EXPORT_SYMBOL(ipmi_request_settime);
3281 EXPORT_SYMBOL(ipmi_request_supply_msgs);
3282 EXPORT_SYMBOL(ipmi_register_smi);
3283 EXPORT_SYMBOL(ipmi_unregister_smi);
3284 EXPORT_SYMBOL(ipmi_register_for_cmd);
3285 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
3286 EXPORT_SYMBOL(ipmi_smi_msg_received);
3287 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
3288 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
3289 EXPORT_SYMBOL(ipmi_addr_length);
3290 EXPORT_SYMBOL(ipmi_validate_addr);
3291 EXPORT_SYMBOL(ipmi_set_gets_events);
3292 EXPORT_SYMBOL(ipmi_smi_watcher_register);
3293 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
3294 EXPORT_SYMBOL(ipmi_set_my_address);
3295 EXPORT_SYMBOL(ipmi_get_my_address);
3296 EXPORT_SYMBOL(ipmi_set_my_LUN);
3297 EXPORT_SYMBOL(ipmi_get_my_LUN);
3298 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
3299 EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
3300 EXPORT_SYMBOL(ipmi_free_recv_msg);
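/*
 * A rough usage sketch (not part of this driver) of how an in-kernel
 * client would typically drive the interface exported above.  The
 * handler name and the choice of command are illustrative assumptions,
 * error handling is omitted, and the priority/retry arguments are just
 * example values:
 *
 *	static void my_recv_handler(struct ipmi_recv_msg *msg, void *data)
 *	{
 *		... inspect msg->msg.netfn, msg->msg.cmd and msg->msg_data ...
 *		ipmi_free_recv_msg(msg);
 *	}
 *
 *	static struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv_handler,
 *	};
 *
 *	ipmi_user_t                       user;
 *	struct ipmi_system_interface_addr si;
 *	struct kernel_ipmi_msg            msg;
 *
 *	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
 *	si.channel   = IPMI_BMC_CHANNEL;
 *	si.lun       = 0;
 *
 *	msg.netfn    = IPMI_NETFN_APP_REQUEST;
 *	msg.cmd      = IPMI_GET_DEVICE_ID_CMD;
 *	msg.data     = NULL;
 *	msg.data_len = 0;
 *
 *	if (!ipmi_create_user(0, &my_hndl, NULL, &user))
 *		ipmi_request_settime(user, (struct ipmi_addr *) &si, 0,
 *				     &msg, NULL, 0, -1, 0);
 *
 * The reply comes back through my_recv_handler() and the client frees
 * it with ipmi_free_recv_msg() when done.
 */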