4 * Incoming and outgoing message routing for an IPMI interface.
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
10 * Copyright 2002 MontaVista Software Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 #include <linux/config.h>
35 #include <linux/module.h>
36 #include <linux/errno.h>
37 #include <asm/system.h>
38 #include <linux/sched.h>
39 #include <linux/poll.h>
40 #include <linux/spinlock.h>
41 #include <linux/rwsem.h>
42 #include <linux/slab.h>
43 #include <linux/ipmi.h>
44 #include <linux/ipmi_smi.h>
45 #include <linux/notifier.h>
46 #include <linux/init.h>
47 #include <linux/proc_fs.h>
49 #define PFX "IPMI message handler: "
50 #define IPMI_MSGHANDLER_VERSION "v33"
52 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
53 static int ipmi_init_msghandler(void);
55 static int initialized = 0;
57 static struct proc_dir_entry *proc_ipmi_root = NULL;
59 #define MAX_EVENTS_IN_QUEUE 25
61 /* Don't let a message sit in a queue forever, always time it with at least
62 the max message timer. This is in milliseconds. */
63 #define MAX_MSG_TIMEOUT 60000
67 struct list_head link;
69 /* The upper layer that handles receive messages. */
70 struct ipmi_user_hndl *handler;
73 /* The interface this user is bound to. */
76 /* Does this interface receive IPMI events? */
82 struct list_head link;
91 unsigned int inuse : 1;
92 unsigned int broadcast : 1;
94 unsigned long timeout;
95 unsigned long orig_timeout;
96 unsigned int retries_left;
98 /* To verify on an incoming send message response that this is
99 the message that the response is for, we keep a sequence id
100 and increment it every time we send a message. */
103 /* This is held so we can properly respond to the message on a
104 timeout, and it is used to hold the temporary data for
105 retransmission, too. */
106 struct ipmi_recv_msg *recv_msg;
109 /* Store the information in a msgid (long) to allow us to find a
110 sequence table entry from the msgid. */
111 #define STORE_SEQ_IN_MSGID(seq, seqid) ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
113 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
115 seq = ((msgid >> 26) & 0x3f); \
116 seqid = (msgid & 0x3ffffff); \
119 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
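/*
 * Illustrative round trip (not in the original source): with seq = 5 and
 * seqid = 0x123, STORE_SEQ_IN_MSGID() yields 0x14000123.  GET_SEQ_FROM_MSGID()
 * then recovers seq = 5 from bits 31..26 and seqid = 0x123 from the low 26
 * bits.  The seq value always fits in 6 bits because IPMI_IPMB_NUM_SEQ is 64.
 */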
123 unsigned char medium;
124 unsigned char protocol;
127 struct ipmi_proc_entry
130 struct ipmi_proc_entry *next;
133 #define IPMI_IPMB_NUM_SEQ 64
134 #define IPMI_MAX_CHANNELS 8
137 /* What interface number are we? */
140 /* The list of upper layers that are using me. We read-lock
141 this when delivering messages to the upper layer to keep
142 the user from going away while we are processing the
143 message. This means that you cannot add or delete a user
144 from the receive callback. */
146 struct list_head users;
148 /* Used for wake ups at startup. */
149 wait_queue_head_t waitq;
151 /* The IPMI version of the BMC on the other end. */
152 unsigned char version_major;
153 unsigned char version_minor;
155 /* This is the lower-layer's sender routine. */
156 struct ipmi_smi_handlers *handlers;
159 /* A list of proc entries for this interface. This does not
160 need a lock, only one thread creates it and only one thread destroys it. */
162 struct ipmi_proc_entry *proc_entries;
164 /* A table of sequence numbers for this interface. We use the
165 sequence numbers for IPMB messages that go out of the
166 interface to match them up with their responses. A routine
167 is called periodically to time the items in this list. */
169 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
172 /* Messages that were delayed for some reason (out of memory,
173 for instance), will go in here to be processed later in a
174 periodic timer interrupt. */
175 spinlock_t waiting_msgs_lock;
176 struct list_head waiting_msgs;
178 /* The list of command receivers that are registered for commands
179 on this interface. */
180 rwlock_t cmd_rcvr_lock;
181 struct list_head cmd_rcvrs;
183 /* Events that were queued because no one was there to receive them. */
185 spinlock_t events_lock; /* For dealing with event stuff. */
186 struct list_head waiting_events;
187 unsigned int waiting_events_count; /* How many events in queue? */
189 /* This will be non-null if someone registers to receive all
190 IPMI commands (this is for interface emulation). Nothing
191 may be in the cmd_rcvrs list above when
192 this is registered. */
193 ipmi_user_t all_cmd_rcvr;
195 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
196 but may be changed by the user. */
197 unsigned char my_address;
199 /* My LUN. This should generally stay the SMS LUN, but just in case... */
201 unsigned char my_lun;
203 /* The event receiver for my BMC, only really used at panic
204 shutdown as a place to store this. */
205 unsigned char event_receiver;
206 unsigned char event_receiver_lun;
207 unsigned char local_sel_device;
208 unsigned char local_event_generator;
210 /* A cheap hack, if this is non-null and a message to an
211 interface comes in with a NULL user, call this routine with
212 it. Note that the message will still be freed by the
213 caller. This only works on the system interface. */
214 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_smi_msg *msg);
216 /* When we are scanning the channels for an SMI, this will
217 tell which channel we are scanning. */
220 /* Channel information */
221 struct ipmi_channel channels[IPMI_MAX_CHANNELS];
224 struct proc_dir_entry *proc_dir;
225 char proc_dir_name[10];
227 spinlock_t counter_lock; /* For making counters atomic. */
230 /* Commands we were asked to send that were invalid. */
230 unsigned int sent_invalid_commands;
232 /* Commands we sent to the MC. */
233 unsigned int sent_local_commands;
234 /* Responses from the MC that were delivered to a user. */
235 unsigned int handled_local_responses;
236 /* Responses from the MC that were not delivered to a user. */
237 unsigned int unhandled_local_responses;
239 /* Commands we sent out to the IPMB bus. */
240 unsigned int sent_ipmb_commands;
241 /* Commands sent on the IPMB that had errors on the SEND CMD */
242 unsigned int sent_ipmb_command_errs;
243 /* Each retransmit increments this count. */
244 unsigned int retransmitted_ipmb_commands;
245 /* When a message times out (runs out of retransmits) this is included in it. */
247 unsigned int timed_out_ipmb_commands;
249 /* This is like above, but for broadcasts. Broadcasts are
250 *not* included in the above count (they are expected to time out). */
252 unsigned int timed_out_ipmb_broadcasts;
254 /* Responses I have sent to the IPMB bus. */
255 unsigned int sent_ipmb_responses;
257 /* The response was delivered to the user. */
258 unsigned int handled_ipmb_responses;
259 /* The response had invalid data in it. */
260 unsigned int invalid_ipmb_responses;
261 /* The response didn't have anyone waiting for it. */
262 unsigned int unhandled_ipmb_responses;
264 /* Commands we sent out on the LAN. */
265 unsigned int sent_lan_commands;
266 /* Commands sent on the LAN that had errors on the SEND CMD */
267 unsigned int sent_lan_command_errs;
268 /* Each retransmit increments this count. */
269 unsigned int retransmitted_lan_commands;
270 /* When a message times out (runs out of retransmits) this is included in it. */
272 unsigned int timed_out_lan_commands;
274 /* Responses I have sent on the LAN. */
275 unsigned int sent_lan_responses;
277 /* The response was delivered to the user. */
278 unsigned int handled_lan_responses;
279 /* The response had invalid data in it. */
280 unsigned int invalid_lan_responses;
281 /* The response didn't have anyone waiting for it. */
282 unsigned int unhandled_lan_responses;
284 /* The command was delivered to the user. */
285 unsigned int handled_commands;
286 /* The command had invalid data in it. */
287 unsigned int invalid_commands;
288 /* The command didn't have anyone waiting for it. */
289 unsigned int unhandled_commands;
291 /* Invalid data in an event. */
292 unsigned int invalid_events;
293 /* Events that were received with the proper format. */
297 #define MAX_IPMI_INTERFACES 4
298 static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
300 /* Used to keep interfaces from going away while operations are
301 in progress on them. Grab read if you are not modifying the
302 interfaces, write if you are. */
303 static DECLARE_RWSEM(interfaces_sem);
305 /* Directly protects the ipmi_interfaces data structure. This is
306 claimed in the timer interrupt. */
307 static DEFINE_SPINLOCK(interfaces_lock);
309 /* List of watchers that want to know when smi's are added and removed. */
311 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
312 static DECLARE_RWSEM(smi_watchers_sem);
314 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
318 down_read(&interfaces_sem);
319 down_write(&smi_watchers_sem);
320 list_add(&(watcher->link), &smi_watchers);
321 for (i=0; i<MAX_IPMI_INTERFACES; i++) {
322 if (ipmi_interfaces[i] != NULL) {
326 up_write(&smi_watchers_sem);
327 up_read(&interfaces_sem);
331 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
333 down_write(&smi_watchers_sem);
334 list_del(&(watcher->link));
335 up_write(&smi_watchers_sem);
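/*
 * Illustrative usage (not part of the original file): a client that wants to
 * track interfaces as they come and go registers a watcher along these lines,
 * where my_new_smi()/my_smi_gone() stand in for its own callbacks:
 *
 *	static struct ipmi_smi_watcher my_watcher = {
 *		.owner    = THIS_MODULE,
 *		.new_smi  = my_new_smi,
 *		.smi_gone = my_smi_gone,
 *	};
 *
 *	rv = ipmi_smi_watcher_register(&my_watcher);
 *
 * Registration also reports every interface that already exists, which is why
 * the register routine above walks ipmi_interfaces[].
 */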
340 call_smi_watchers(int i)
342 struct ipmi_smi_watcher *w;
344 down_read(&smi_watchers_sem);
345 list_for_each_entry(w, &smi_watchers, link) {
346 if (try_module_get(w->owner)) {
348 module_put(w->owner);
351 up_read(&smi_watchers_sem);
355 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
357 if (addr1->addr_type != addr2->addr_type)
360 if (addr1->channel != addr2->channel)
363 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
364 struct ipmi_system_interface_addr *smi_addr1
365 = (struct ipmi_system_interface_addr *) addr1;
366 struct ipmi_system_interface_addr *smi_addr2
367 = (struct ipmi_system_interface_addr *) addr2;
368 return (smi_addr1->lun == smi_addr2->lun);
371 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
372 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
374 struct ipmi_ipmb_addr *ipmb_addr1
375 = (struct ipmi_ipmb_addr *) addr1;
376 struct ipmi_ipmb_addr *ipmb_addr2
377 = (struct ipmi_ipmb_addr *) addr2;
379 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
380 && (ipmb_addr1->lun == ipmb_addr2->lun));
383 if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
384 struct ipmi_lan_addr *lan_addr1
385 = (struct ipmi_lan_addr *) addr1;
386 struct ipmi_lan_addr *lan_addr2
387 = (struct ipmi_lan_addr *) addr2;
389 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
390 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
391 && (lan_addr1->session_handle
392 == lan_addr2->session_handle)
393 && (lan_addr1->lun == lan_addr2->lun));
399 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
401 if (len < sizeof(struct ipmi_system_interface_addr)) {
405 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
406 if (addr->channel != IPMI_BMC_CHANNEL)
411 if ((addr->channel == IPMI_BMC_CHANNEL)
412 || (addr->channel >= IPMI_NUM_CHANNELS)
413 || (addr->channel < 0))
416 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
417 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
419 if (len < sizeof(struct ipmi_ipmb_addr)) {
425 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
426 if (len < sizeof(struct ipmi_lan_addr)) {
435 unsigned int ipmi_addr_length(int addr_type)
437 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
438 return sizeof(struct ipmi_system_interface_addr);
440 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
441 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
443 return sizeof(struct ipmi_ipmb_addr);
446 if (addr_type == IPMI_LAN_ADDR_TYPE)
447 return sizeof(struct ipmi_lan_addr);
452 static void deliver_response(struct ipmi_recv_msg *msg)
454 msg->user->handler->ipmi_recv_hndl(msg, msg->user->handler_data);
457 /* Find the next sequence number not being used and add the given
458 message with the given timeout to the sequence table. This must be
459 called with the interface's seq_lock held. */
460 static int intf_next_seq(ipmi_smi_t intf,
461 struct ipmi_recv_msg *recv_msg,
462 unsigned long timeout,
471 for (i=intf->curr_seq;
472 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
473 i=(i+1)%IPMI_IPMB_NUM_SEQ)
475 if (! intf->seq_table[i].inuse)
479 if (! intf->seq_table[i].inuse) {
480 intf->seq_table[i].recv_msg = recv_msg;
482 /* Start with the maximum timeout, when the send response
483 comes in we will start the real timer. */
484 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
485 intf->seq_table[i].orig_timeout = timeout;
486 intf->seq_table[i].retries_left = retries;
487 intf->seq_table[i].broadcast = broadcast;
488 intf->seq_table[i].inuse = 1;
489 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
491 *seqid = intf->seq_table[i].seqid;
492 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
500 /* Return the receive message for the given sequence number and
501 release the sequence number so it can be reused. Some other data
502 is passed in to be sure the message matches up correctly (to help
503 guard against messages coming in after their timeout and the
504 sequence number being reused). */
505 static int intf_find_seq(ipmi_smi_t intf,
510 struct ipmi_addr *addr,
511 struct ipmi_recv_msg **recv_msg)
516 if (seq >= IPMI_IPMB_NUM_SEQ)
519 spin_lock_irqsave(&(intf->seq_lock), flags);
520 if (intf->seq_table[seq].inuse) {
521 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
523 if ((msg->addr.channel == channel)
524 && (msg->msg.cmd == cmd)
525 && (msg->msg.netfn == netfn)
526 && (ipmi_addr_equal(addr, &(msg->addr))))
529 intf->seq_table[seq].inuse = 0;
533 spin_unlock_irqrestore(&(intf->seq_lock), flags);
539 /* Start the timer for a specific sequence table entry. */
540 static int intf_start_seq_timer(ipmi_smi_t intf,
549 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
551 spin_lock_irqsave(&(intf->seq_lock), flags);
552 /* We do this verification because the user can be deleted
553 while a message is outstanding. */
554 if ((intf->seq_table[seq].inuse)
555 && (intf->seq_table[seq].seqid == seqid))
557 struct seq_table *ent = &(intf->seq_table[seq]);
558 ent->timeout = ent->orig_timeout;
561 spin_unlock_irqrestore(&(intf->seq_lock), flags);
566 /* Got an error for the send message for a specific sequence number. */
567 static int intf_err_seq(ipmi_smi_t intf,
575 struct ipmi_recv_msg *msg = NULL;
578 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
580 spin_lock_irqsave(&(intf->seq_lock), flags);
581 /* We do this verification because the user can be deleted
582 while a message is outstanding. */
583 if ((intf->seq_table[seq].inuse)
584 && (intf->seq_table[seq].seqid == seqid))
586 struct seq_table *ent = &(intf->seq_table[seq]);
592 spin_unlock_irqrestore(&(intf->seq_lock), flags);
595 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
596 msg->msg_data[0] = err;
597 msg->msg.netfn |= 1; /* Convert to a response. */
598 msg->msg.data_len = 1;
599 msg->msg.data = msg->msg_data;
600 deliver_response(msg);
607 int ipmi_create_user(unsigned int if_num,
608 struct ipmi_user_hndl *handler,
613 ipmi_user_t new_user;
617 /* There is no module usecount here, because it's not
618 required. Since this can only be used by and called from
619 other modules, they will implicitly use this module, and
620 thus this can't be removed unless the other modules are removed. */
626 /* Make sure the driver is actually initialized, this handles
627 problems with initialization order. */
629 rv = ipmi_init_msghandler();
633 /* The init code doesn't return an error if it was turned
634 off, but it won't initialize. Check that. */
639 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
643 down_read(&interfaces_sem);
644 if ((if_num >= MAX_IPMI_INTERFACES) || ipmi_interfaces[if_num] == NULL)
650 intf = ipmi_interfaces[if_num];
652 new_user->handler = handler;
653 new_user->handler_data = handler_data;
654 new_user->intf = intf;
655 new_user->gets_events = 0;
657 if (!try_module_get(intf->handlers->owner)) {
662 if (intf->handlers->inc_usecount) {
663 rv = intf->handlers->inc_usecount(intf->send_info);
665 module_put(intf->handlers->owner);
670 write_lock_irqsave(&intf->users_lock, flags);
671 list_add_tail(&new_user->link, &intf->users);
672 write_unlock_irqrestore(&intf->users_lock, flags);
681 up_read(&interfaces_sem);
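/*
 * Illustrative usage (not part of the original file): a kernel client binds to
 * interface 0 and sends a command to the local BMC roughly like this, where
 * my_hndl/my_recv() stand in for its own receive handler:
 *
 *	static struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv,
 *	};
 *	ipmi_user_t user;
 *	struct ipmi_system_interface_addr si = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel   = IPMI_BMC_CHANNEL,
 *		.lun       = 0,
 *	};
 *	struct kernel_ipmi_msg msg = {
 *		.netfn    = IPMI_NETFN_APP_REQUEST,
 *		.cmd      = 0x01,	/* Get Device ID */
 *		.data     = NULL,
 *		.data_len = 0,
 *	};
 *	int rv;
 *
 *	rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 *	if (!rv)
 *		rv = ipmi_request_settime(user, (struct ipmi_addr *) &si,
 *					  0, &msg, NULL, 0, 0, 0);
 *
 * The response arrives through my_recv() as an ipmi_recv_msg, which the
 * handler must release with ipmi_free_recv_msg() when it is done.
 */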
685 static int ipmi_destroy_user_nolock(ipmi_user_t user)
689 struct cmd_rcvr *rcvr, *rcvr2;
693 /* Find the user and delete them from the list. */
694 list_for_each_entry(t_user, &(user->intf->users), link) {
695 if (t_user == user) {
696 list_del(&t_user->link);
706 /* Remove the user from the interfaces sequence table. */
707 spin_lock_irqsave(&(user->intf->seq_lock), flags);
708 for (i=0; i<IPMI_IPMB_NUM_SEQ; i++) {
709 if (user->intf->seq_table[i].inuse
710 && (user->intf->seq_table[i].recv_msg->user == user))
712 user->intf->seq_table[i].inuse = 0;
715 spin_unlock_irqrestore(&(user->intf->seq_lock), flags);
717 /* Remove the user from the command receiver's table. */
718 write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
719 list_for_each_entry_safe(rcvr, rcvr2, &(user->intf->cmd_rcvrs), link) {
720 if (rcvr->user == user) {
721 list_del(&rcvr->link);
725 write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
734 int ipmi_destroy_user(ipmi_user_t user)
737 ipmi_smi_t intf = user->intf;
740 down_read(&interfaces_sem);
741 write_lock_irqsave(&intf->users_lock, flags);
742 rv = ipmi_destroy_user_nolock(user);
744 module_put(intf->handlers->owner);
745 if (intf->handlers->dec_usecount)
746 intf->handlers->dec_usecount(intf->send_info);
749 write_unlock_irqrestore(&intf->users_lock, flags);
750 up_read(&interfaces_sem);
754 void ipmi_get_version(ipmi_user_t user,
755 unsigned char *major,
756 unsigned char *minor)
758 *major = user->intf->version_major;
759 *minor = user->intf->version_minor;
762 void ipmi_set_my_address(ipmi_user_t user,
763 unsigned char address)
765 user->intf->my_address = address;
768 unsigned char ipmi_get_my_address(ipmi_user_t user)
770 return user->intf->my_address;
773 void ipmi_set_my_LUN(ipmi_user_t user,
776 user->intf->my_lun = LUN & 0x3;
779 unsigned char ipmi_get_my_LUN(ipmi_user_t user)
781 return user->intf->my_lun;
784 int ipmi_set_gets_events(ipmi_user_t user, int val)
787 struct ipmi_recv_msg *msg, *msg2;
789 read_lock(&(user->intf->users_lock));
790 spin_lock_irqsave(&(user->intf->events_lock), flags);
791 user->gets_events = val;
794 /* Deliver any queued events. */
795 list_for_each_entry_safe(msg, msg2, &(user->intf->waiting_events), link) {
796 list_del(&msg->link);
798 deliver_response(msg);
802 spin_unlock_irqrestore(&(user->intf->events_lock), flags);
803 read_unlock(&(user->intf->users_lock));
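/*
 * Note (added for clarity): enabling event reception with
 * ipmi_set_gets_events(user, 1) also flushes anything that accumulated on
 * waiting_events while nobody was listening; that queue is capped at
 * MAX_EVENTS_IN_QUEUE entries by the event handling code further down.
 */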
808 int ipmi_register_for_cmd(ipmi_user_t user,
812 struct cmd_rcvr *cmp;
814 struct cmd_rcvr *rcvr;
818 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
822 read_lock(&(user->intf->users_lock));
823 write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
824 if (user->intf->all_cmd_rcvr != NULL) {
829 /* Make sure the command/netfn is not already registered. */
830 list_for_each_entry(cmp, &(user->intf->cmd_rcvrs), link) {
831 if ((cmp->netfn == netfn) && (cmp->cmd == cmd)) {
841 list_add_tail(&(rcvr->link), &(user->intf->cmd_rcvrs));
844 write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
845 read_unlock(&(user->intf->users_lock));
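/*
 * Illustrative usage (not part of the original file): a user that wants to be
 * handed incoming "Get Device ID" requests would register the netfn/cmd pair
 * once and later drop it with the matching unregister call below:
 *
 *	rv = ipmi_register_for_cmd(user, IPMI_NETFN_APP_REQUEST, 0x01);
 *	...
 *	ipmi_unregister_for_cmd(user, IPMI_NETFN_APP_REQUEST, 0x01);
 *
 * Only one user may own a given netfn/cmd pair on an interface; that is what
 * the duplicate check above enforces.
 */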
853 int ipmi_unregister_for_cmd(ipmi_user_t user,
858 struct cmd_rcvr *rcvr;
861 read_lock(&(user->intf->users_lock));
862 write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
863 /* Make sure the command/netfn is not already registered. */
864 list_for_each_entry(rcvr, &(user->intf->cmd_rcvrs), link) {
865 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
867 list_del(&rcvr->link);
872 write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
873 read_unlock(&(user->intf->users_lock));
878 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
880 user->intf->handlers->set_run_to_completion(user->intf->send_info,
885 ipmb_checksum(unsigned char *data, int size)
887 unsigned char csum = 0;
889 for (; size > 0; size--, data++)
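/*
 * The IPMB checksum is the two's complement of the 8-bit sum of the covered
 * bytes, so adding every covered byte plus the checksum yields 0 modulo 256.
 * Illustrative values: covering the two bytes 0x20 and 0x06 gives a running
 * sum of 0x26 and therefore a checksum byte of 0xda.
 */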
895 static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
896 struct kernel_ipmi_msg *msg,
897 struct ipmi_ipmb_addr *ipmb_addr,
899 unsigned char ipmb_seq,
901 unsigned char source_address,
902 unsigned char source_lun)
906 /* Format the IPMB header data. */
907 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
908 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
909 smi_msg->data[2] = ipmb_addr->channel;
911 smi_msg->data[3] = 0;
912 smi_msg->data[i+3] = ipmb_addr->slave_addr;
913 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
914 smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
915 smi_msg->data[i+6] = source_address;
916 smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
917 smi_msg->data[i+8] = msg->cmd;
919 /* Now tack on the data to the message. */
920 if (msg->data_len > 0)
921 memcpy(&(smi_msg->data[i+9]), msg->data,
923 smi_msg->data_size = msg->data_len + 9;
925 /* Now calculate the checksum and tack it on. */
926 smi_msg->data[i+smi_msg->data_size]
927 = ipmb_checksum(&(smi_msg->data[i+6]),
928 smi_msg->data_size-6);
930 /* Add on the checksum size and the offset from the broadcast. */
932 smi_msg->data_size += 1 + i;
934 smi_msg->msgid = msgid;
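/*
 * For reference, the Send Message buffer built above for a non-broadcast
 * (i == 0) IPMB request is laid out as:
 *
 *	data[0]    NetFn/LUN of the App request to the BMC
 *	data[1]    Send Message command
 *	data[2]    channel number
 *	data[3]    responder slave address
 *	data[4]    encapsulated netfn << 2 | responder LUN
 *	data[5]    checksum over data[3..4]
 *	data[6]    source (requester) slave address
 *	data[7]    sequence << 2 | source LUN
 *	data[8]    encapsulated command
 *	data[9..]  message data, followed by a checksum over data[6..]
 *
 * A broadcast inserts a zero byte after the channel (i == 1), shifting the
 * rest of the frame down by one.
 */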
937 static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
938 struct kernel_ipmi_msg *msg,
939 struct ipmi_lan_addr *lan_addr,
941 unsigned char ipmb_seq,
942 unsigned char source_lun)
944 /* Format the IPMB header data. */
945 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
946 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
947 smi_msg->data[2] = lan_addr->channel;
948 smi_msg->data[3] = lan_addr->session_handle;
949 smi_msg->data[4] = lan_addr->remote_SWID;
950 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
951 smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
952 smi_msg->data[7] = lan_addr->local_SWID;
953 smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
954 smi_msg->data[9] = msg->cmd;
956 /* Now tack on the data to the message. */
957 if (msg->data_len > 0)
958 memcpy(&(smi_msg->data[10]), msg->data,
960 smi_msg->data_size = msg->data_len + 10;
962 /* Now calculate the checksum and tack it on. */
963 smi_msg->data[smi_msg->data_size]
964 = ipmb_checksum(&(smi_msg->data[7]),
965 smi_msg->data_size-7);
967 /* Add on the checksum size. */
969 smi_msg->data_size += 1;
971 smi_msg->msgid = msgid;
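/*
 * The LAN variant built above differs only in the encapsulated header:
 *
 *	data[3]     session handle
 *	data[4]     remote SWID
 *	data[5]     encapsulated netfn << 2 | remote LUN
 *	data[6]     checksum over data[4..5]
 *	data[7]     local SWID
 *	data[8]     sequence << 2 | source LUN
 *	data[9]     encapsulated command
 *	data[10..]  message data, followed by a checksum over data[7..]
 */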
974 /* Separate from ipmi_request so that the user does not have to be
975 supplied in certain circumstances (mainly at panic time). If
976 messages are supplied, they will be freed, even if an error occurs. */
978 static inline int i_ipmi_request(ipmi_user_t user,
980 struct ipmi_addr *addr,
982 struct kernel_ipmi_msg *msg,
985 struct ipmi_recv_msg *supplied_recv,
987 unsigned char source_address,
988 unsigned char source_lun,
990 unsigned int retry_time_ms)
993 struct ipmi_smi_msg *smi_msg;
994 struct ipmi_recv_msg *recv_msg;
999 recv_msg = supplied_recv;
1001 recv_msg = ipmi_alloc_recv_msg();
1002 if (recv_msg == NULL) {
1006 recv_msg->user_msg_data = user_msg_data;
1009 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1011 smi_msg = ipmi_alloc_smi_msg();
1012 if (smi_msg == NULL) {
1013 ipmi_free_recv_msg(recv_msg);
1018 recv_msg->user = user;
1019 recv_msg->msgid = msgid;
1020 /* Store the message to send in the receive message so timeout
1021 responses can get the proper response data. */
1022 recv_msg->msg = *msg;
1024 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1025 struct ipmi_system_interface_addr *smi_addr;
1027 if (msg->netfn & 1) {
1028 /* Responses are not allowed to the SMI. */
1033 smi_addr = (struct ipmi_system_interface_addr *) addr;
1034 if (smi_addr->lun > 3) {
1035 spin_lock_irqsave(&intf->counter_lock, flags);
1036 intf->sent_invalid_commands++;
1037 spin_unlock_irqrestore(&intf->counter_lock, flags);
1042 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1044 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1045 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1046 || (msg->cmd == IPMI_GET_MSG_CMD)
1047 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
1049 /* We don't let the user do these, since we manage
1050 the sequence numbers. */
1051 spin_lock_irqsave(&intf->counter_lock, flags);
1052 intf->sent_invalid_commands++;
1053 spin_unlock_irqrestore(&intf->counter_lock, flags);
1058 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1059 spin_lock_irqsave(&intf->counter_lock, flags);
1060 intf->sent_invalid_commands++;
1061 spin_unlock_irqrestore(&intf->counter_lock, flags);
1066 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1067 smi_msg->data[1] = msg->cmd;
1068 smi_msg->msgid = msgid;
1069 smi_msg->user_data = recv_msg;
1070 if (msg->data_len > 0)
1071 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1072 smi_msg->data_size = msg->data_len + 2;
1073 spin_lock_irqsave(&intf->counter_lock, flags);
1074 intf->sent_local_commands++;
1075 spin_unlock_irqrestore(&intf->counter_lock, flags);
1076 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1077 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
1079 struct ipmi_ipmb_addr *ipmb_addr;
1080 unsigned char ipmb_seq;
1084 if (addr->channel >= IPMI_NUM_CHANNELS) {
1085 spin_lock_irqsave(&intf->counter_lock, flags);
1086 intf->sent_invalid_commands++;
1087 spin_unlock_irqrestore(&intf->counter_lock, flags);
1092 if (intf->channels[addr->channel].medium
1093 != IPMI_CHANNEL_MEDIUM_IPMB)
1095 spin_lock_irqsave(&intf->counter_lock, flags);
1096 intf->sent_invalid_commands++;
1097 spin_unlock_irqrestore(&intf->counter_lock, flags);
1103 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1104 retries = 0; /* Don't retry broadcasts. */
1108 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1109 /* Broadcasts add a zero at the beginning of the
1110 message, but are otherwise the same as an IPMB address. */
1112 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1117 /* Default to 1 second retries. */
1118 if (retry_time_ms == 0)
1119 retry_time_ms = 1000;
1121 /* 9 for the header and 1 for the checksum, plus
1122 possibly one for the broadcast. */
1123 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1124 spin_lock_irqsave(&intf->counter_lock, flags);
1125 intf->sent_invalid_commands++;
1126 spin_unlock_irqrestore(&intf->counter_lock, flags);
1131 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1132 if (ipmb_addr->lun > 3) {
1133 spin_lock_irqsave(&intf->counter_lock, flags);
1134 intf->sent_invalid_commands++;
1135 spin_unlock_irqrestore(&intf->counter_lock, flags);
1140 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1142 if (recv_msg->msg.netfn & 0x1) {
1143 /* It's a response, so use the user's sequence
1145 spin_lock_irqsave(&intf->counter_lock, flags);
1146 intf->sent_ipmb_responses++;
1147 spin_unlock_irqrestore(&intf->counter_lock, flags);
1148 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1150 source_address, source_lun);
1152 /* Save the receive message so we can use it
1153 to deliver the response. */
1154 smi_msg->user_data = recv_msg;
1156 /* It's a command, so get a sequence for it. */
1158 spin_lock_irqsave(&(intf->seq_lock), flags);
1160 spin_lock(&intf->counter_lock);
1161 intf->sent_ipmb_commands++;
1162 spin_unlock(&intf->counter_lock);
1164 /* Create a sequence number with a 1 second
1165 timeout and 4 retries. */
1166 rv = intf_next_seq(intf,
1174 /* We have used up all the sequence numbers,
1175 probably, so abort. */
1176 spin_unlock_irqrestore(&(intf->seq_lock),
1181 /* Store the sequence number in the message,
1182 so that when the send message response
1183 comes back we can start the timer. */
1184 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1185 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1186 ipmb_seq, broadcast,
1187 source_address, source_lun);
1189 /* Copy the message into the recv message data, so we
1190 can retransmit it later if necessary. */
1191 memcpy(recv_msg->msg_data, smi_msg->data,
1192 smi_msg->data_size);
1193 recv_msg->msg.data = recv_msg->msg_data;
1194 recv_msg->msg.data_len = smi_msg->data_size;
1196 /* We don't unlock until here, because we need
1197 to copy the completed message into the
1198 recv_msg before we release the lock.
1199 Otherwise, race conditions may bite us. I
1200 know that's pretty paranoid, but I prefer to be correct. */
1202 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1204 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
1205 struct ipmi_lan_addr *lan_addr;
1206 unsigned char ipmb_seq;
1209 if (addr->channel >= IPMI_NUM_CHANNELS) {
1210 spin_lock_irqsave(&intf->counter_lock, flags);
1211 intf->sent_invalid_commands++;
1212 spin_unlock_irqrestore(&intf->counter_lock, flags);
1217 if ((intf->channels[addr->channel].medium
1218 != IPMI_CHANNEL_MEDIUM_8023LAN)
1219 && (intf->channels[addr->channel].medium
1220 != IPMI_CHANNEL_MEDIUM_ASYNC))
1222 spin_lock_irqsave(&intf->counter_lock, flags);
1223 intf->sent_invalid_commands++;
1224 spin_unlock_irqrestore(&intf->counter_lock, flags);
1231 /* Default to 1 second retries. */
1232 if (retry_time_ms == 0)
1233 retry_time_ms = 1000;
1235 /* 11 for the header and 1 for the checksum. */
1236 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1237 spin_lock_irqsave(&intf->counter_lock, flags);
1238 intf->sent_invalid_commands++;
1239 spin_unlock_irqrestore(&intf->counter_lock, flags);
1244 lan_addr = (struct ipmi_lan_addr *) addr;
1245 if (lan_addr->lun > 3) {
1246 spin_lock_irqsave(&intf->counter_lock, flags);
1247 intf->sent_invalid_commands++;
1248 spin_unlock_irqrestore(&intf->counter_lock, flags);
1253 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1255 if (recv_msg->msg.netfn & 0x1) {
1256 /* It's a response, so use the user's sequence
1258 spin_lock_irqsave(&intf->counter_lock, flags);
1259 intf->sent_lan_responses++;
1260 spin_unlock_irqrestore(&intf->counter_lock, flags);
1261 format_lan_msg(smi_msg, msg, lan_addr, msgid,
1264 /* Save the receive message so we can use it
1265 to deliver the response. */
1266 smi_msg->user_data = recv_msg;
1268 /* It's a command, so get a sequence for it. */
1270 spin_lock_irqsave(&(intf->seq_lock), flags);
1272 spin_lock(&intf->counter_lock);
1273 intf->sent_lan_commands++;
1274 spin_unlock(&intf->counter_lock);
1276 /* Create a sequence number with a 1 second
1277 timeout and 4 retries. */
1278 rv = intf_next_seq(intf,
1286 /* We have used up all the sequence numbers,
1287 probably, so abort. */
1288 spin_unlock_irqrestore(&(intf->seq_lock),
1293 /* Store the sequence number in the message,
1294 so that when the send message response
1295 comes back we can start the timer. */
1296 format_lan_msg(smi_msg, msg, lan_addr,
1297 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1298 ipmb_seq, source_lun);
1300 /* Copy the message into the recv message data, so we
1301 can retransmit it later if necessary. */
1302 memcpy(recv_msg->msg_data, smi_msg->data,
1303 smi_msg->data_size);
1304 recv_msg->msg.data = recv_msg->msg_data;
1305 recv_msg->msg.data_len = smi_msg->data_size;
1307 /* We don't unlock until here, because we need
1308 to copy the completed message into the
1309 recv_msg before we release the lock.
1310 Otherwise, race conditions may bite us. I
1311 know that's pretty paranoid, but I prefer to be correct. */
1313 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1316 /* Unknown address type. */
1317 spin_lock_irqsave(&intf->counter_lock, flags);
1318 intf->sent_invalid_commands++;
1319 spin_unlock_irqrestore(&intf->counter_lock, flags);
1327 for (m=0; m<smi_msg->data_size; m++)
1328 printk(" %2.2x", smi_msg->data[m]);
1332 intf->handlers->sender(intf->send_info, smi_msg, priority);
1337 ipmi_free_smi_msg(smi_msg);
1338 ipmi_free_recv_msg(recv_msg);
1342 int ipmi_request_settime(ipmi_user_t user,
1343 struct ipmi_addr *addr,
1345 struct kernel_ipmi_msg *msg,
1346 void *user_msg_data,
1349 unsigned int retry_time_ms)
1351 return i_ipmi_request(user,
1359 user->intf->my_address,
1365 int ipmi_request_supply_msgs(ipmi_user_t user,
1366 struct ipmi_addr *addr,
1368 struct kernel_ipmi_msg *msg,
1369 void *user_msg_data,
1371 struct ipmi_recv_msg *supplied_recv,
1374 return i_ipmi_request(user,
1383 user->intf->my_address,
1388 static int ipmb_file_read_proc(char *page, char **start, off_t off,
1389 int count, int *eof, void *data)
1391 char *out = (char *) page;
1392 ipmi_smi_t intf = data;
1394 return sprintf(out, "%x\n", intf->my_address);
1397 static int version_file_read_proc(char *page, char **start, off_t off,
1398 int count, int *eof, void *data)
1400 char *out = (char *) page;
1401 ipmi_smi_t intf = data;
1403 return sprintf(out, "%d.%d\n",
1404 intf->version_major, intf->version_minor);
1407 static int stat_file_read_proc(char *page, char **start, off_t off,
1408 int count, int *eof, void *data)
1410 char *out = (char *) page;
1411 ipmi_smi_t intf = data;
1413 out += sprintf(out, "sent_invalid_commands: %d\n",
1414 intf->sent_invalid_commands);
1415 out += sprintf(out, "sent_local_commands: %d\n",
1416 intf->sent_local_commands);
1417 out += sprintf(out, "handled_local_responses: %d\n",
1418 intf->handled_local_responses);
1419 out += sprintf(out, "unhandled_local_responses: %d\n",
1420 intf->unhandled_local_responses);
1421 out += sprintf(out, "sent_ipmb_commands: %d\n",
1422 intf->sent_ipmb_commands);
1423 out += sprintf(out, "sent_ipmb_command_errs: %d\n",
1424 intf->sent_ipmb_command_errs);
1425 out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
1426 intf->retransmitted_ipmb_commands);
1427 out += sprintf(out, "timed_out_ipmb_commands: %d\n",
1428 intf->timed_out_ipmb_commands);
1429 out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n",
1430 intf->timed_out_ipmb_broadcasts);
1431 out += sprintf(out, "sent_ipmb_responses: %d\n",
1432 intf->sent_ipmb_responses);
1433 out += sprintf(out, "handled_ipmb_responses: %d\n",
1434 intf->handled_ipmb_responses);
1435 out += sprintf(out, "invalid_ipmb_responses: %d\n",
1436 intf->invalid_ipmb_responses);
1437 out += sprintf(out, "unhandled_ipmb_responses: %d\n",
1438 intf->unhandled_ipmb_responses);
1439 out += sprintf(out, "sent_lan_commands: %d\n",
1440 intf->sent_lan_commands);
1441 out += sprintf(out, "sent_lan_command_errs: %d\n",
1442 intf->sent_lan_command_errs);
1443 out += sprintf(out, "retransmitted_lan_commands: %d\n",
1444 intf->retransmitted_lan_commands);
1445 out += sprintf(out, "timed_out_lan_commands: %d\n",
1446 intf->timed_out_lan_commands);
1447 out += sprintf(out, "sent_lan_responses: %d\n",
1448 intf->sent_lan_responses);
1449 out += sprintf(out, "handled_lan_responses: %d\n",
1450 intf->handled_lan_responses);
1451 out += sprintf(out, "invalid_lan_responses: %d\n",
1452 intf->invalid_lan_responses);
1453 out += sprintf(out, "unhandled_lan_responses: %d\n",
1454 intf->unhandled_lan_responses);
1455 out += sprintf(out, "handled_commands: %d\n",
1456 intf->handled_commands);
1457 out += sprintf(out, "invalid_commands: %d\n",
1458 intf->invalid_commands);
1459 out += sprintf(out, "unhandled_commands: %d\n",
1460 intf->unhandled_commands);
1461 out += sprintf(out, "invalid_events: %d\n",
1462 intf->invalid_events);
1463 out += sprintf(out, "events: %d\n",
1466 return (out - ((char *) page));
1469 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1470 read_proc_t *read_proc, write_proc_t *write_proc,
1471 void *data, struct module *owner)
1473 struct proc_dir_entry *file;
1475 struct ipmi_proc_entry *entry;
1477 /* Create a list element. */
1478 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1481 entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
1486 strcpy(entry->name, name);
1488 file = create_proc_entry(name, 0, smi->proc_dir);
1496 file->read_proc = read_proc;
1497 file->write_proc = write_proc;
1498 file->owner = owner;
1500 /* Stick it on the list. */
1501 entry->next = smi->proc_entries;
1502 smi->proc_entries = entry;
1508 static int add_proc_entries(ipmi_smi_t smi, int num)
1512 sprintf(smi->proc_dir_name, "%d", num);
1513 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1517 smi->proc_dir->owner = THIS_MODULE;
1521 rv = ipmi_smi_add_proc_entry(smi, "stats",
1522 stat_file_read_proc, NULL,
1526 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1527 ipmb_file_read_proc, NULL,
1531 rv = ipmi_smi_add_proc_entry(smi, "version",
1532 version_file_read_proc, NULL,
1538 static void remove_proc_entries(ipmi_smi_t smi)
1540 struct ipmi_proc_entry *entry;
1542 while (smi->proc_entries) {
1543 entry = smi->proc_entries;
1544 smi->proc_entries = entry->next;
1546 remove_proc_entry(entry->name, smi->proc_dir);
1550 remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
1554 send_channel_info_cmd(ipmi_smi_t intf, int chan)
1556 struct kernel_ipmi_msg msg;
1557 unsigned char data[1];
1558 struct ipmi_system_interface_addr si;
1560 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
1561 si.channel = IPMI_BMC_CHANNEL;
1564 msg.netfn = IPMI_NETFN_APP_REQUEST;
1565 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
1569 return i_ipmi_request(NULL,
1571 (struct ipmi_addr *) &si,
1584 channel_handler(ipmi_smi_t intf, struct ipmi_smi_msg *msg)
1589 if ((msg->rsp[0] == (IPMI_NETFN_APP_RESPONSE << 2))
1590 && (msg->rsp[1] == IPMI_GET_CHANNEL_INFO_CMD))
1592 /* It's the one we want */
1593 if (msg->rsp[2] != 0) {
1594 /* Got an error from the channel, just go on. */
1596 if (msg->rsp[2] == IPMI_INVALID_COMMAND_ERR) {
1597 /* If the MC does not support this
1598 command, that is legal. We just
1599 assume it has one IPMB at channel zero. */
1601 intf->channels[0].medium
1602 = IPMI_CHANNEL_MEDIUM_IPMB;
1603 intf->channels[0].protocol
1604 = IPMI_CHANNEL_PROTOCOL_IPMB;
1607 intf->curr_channel = IPMI_MAX_CHANNELS;
1608 wake_up(&intf->waitq);
1613 if (msg->rsp_size < 6) {
1614 /* Message not big enough, just go on. */
1617 chan = intf->curr_channel;
1618 intf->channels[chan].medium = msg->rsp[4] & 0x7f;
1619 intf->channels[chan].protocol = msg->rsp[5] & 0x1f;
1622 intf->curr_channel++;
1623 if (intf->curr_channel >= IPMI_MAX_CHANNELS)
1624 wake_up(&intf->waitq);
1626 rv = send_channel_info_cmd(intf, intf->curr_channel);
1629 /* Got an error somehow, just give up. */
1630 intf->curr_channel = IPMI_MAX_CHANNELS;
1631 wake_up(&intf->waitq);
1633 printk(KERN_WARNING PFX
1634 "Error sending channel information: %d\n",
1642 int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1644 unsigned char version_major,
1645 unsigned char version_minor,
1646 unsigned char slave_addr,
1651 ipmi_smi_t new_intf;
1652 unsigned long flags;
1655 /* Make sure the driver is actually initialized, this handles
1656 problems with initialization order. */
1658 rv = ipmi_init_msghandler();
1661 /* The init code doesn't return an error if it was turned
1662 off, but it won't initialize. Check that. */
1667 new_intf = kmalloc(sizeof(*new_intf), GFP_KERNEL);
1670 memset(new_intf, 0, sizeof(*new_intf));
1672 new_intf->proc_dir = NULL;
1676 down_write(&interfaces_sem);
1677 for (i=0; i<MAX_IPMI_INTERFACES; i++) {
1678 if (ipmi_interfaces[i] == NULL) {
1679 new_intf->intf_num = i;
1680 new_intf->version_major = version_major;
1681 new_intf->version_minor = version_minor;
1682 if (slave_addr == 0)
1683 new_intf->my_address = IPMI_BMC_SLAVE_ADDR;
1685 new_intf->my_address = slave_addr;
1686 new_intf->my_lun = 2; /* the SMS LUN. */
1687 rwlock_init(&(new_intf->users_lock));
1688 INIT_LIST_HEAD(&(new_intf->users));
1689 new_intf->handlers = handlers;
1690 new_intf->send_info = send_info;
1691 spin_lock_init(&(new_intf->seq_lock));
1692 for (j=0; j<IPMI_IPMB_NUM_SEQ; j++) {
1693 new_intf->seq_table[j].inuse = 0;
1694 new_intf->seq_table[j].seqid = 0;
1696 new_intf->curr_seq = 0;
1697 spin_lock_init(&(new_intf->waiting_msgs_lock));
1698 INIT_LIST_HEAD(&(new_intf->waiting_msgs));
1699 spin_lock_init(&(new_intf->events_lock));
1700 INIT_LIST_HEAD(&(new_intf->waiting_events));
1701 new_intf->waiting_events_count = 0;
1702 rwlock_init(&(new_intf->cmd_rcvr_lock));
1703 init_waitqueue_head(&new_intf->waitq);
1704 INIT_LIST_HEAD(&(new_intf->cmd_rcvrs));
1705 new_intf->all_cmd_rcvr = NULL;
1707 spin_lock_init(&(new_intf->counter_lock));
1709 spin_lock_irqsave(&interfaces_lock, flags);
1710 ipmi_interfaces[i] = new_intf;
1711 spin_unlock_irqrestore(&interfaces_lock, flags);
1719 downgrade_write(&interfaces_sem);
1722 rv = add_proc_entries(*intf, i);
1725 if ((version_major > 1)
1726 || ((version_major == 1) && (version_minor >= 5)))
1728 /* Start scanning the channels to see what is available. */
1730 (*intf)->null_user_handler = channel_handler;
1731 (*intf)->curr_channel = 0;
1732 rv = send_channel_info_cmd(*intf, 0);
1736 /* Wait for the channel info to be read. */
1737 up_read(&interfaces_sem);
1738 wait_event((*intf)->waitq,
1739 ((*intf)->curr_channel>=IPMI_MAX_CHANNELS));
1740 down_read(&interfaces_sem);
1742 if (ipmi_interfaces[i] != new_intf)
1743 /* Well, it went away. Just return. */
1746 /* Assume a single IPMB channel at zero. */
1747 (*intf)->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
1748 (*intf)->channels[0].protocol
1749 = IPMI_CHANNEL_PROTOCOL_IPMB;
1752 /* Call all the watcher interfaces to tell
1753 them that a new interface is available. */
1754 call_smi_watchers(i);
1758 up_read(&interfaces_sem);
1761 if (new_intf->proc_dir)
1762 remove_proc_entries(new_intf);
1769 static void free_recv_msg_list(struct list_head *q)
1771 struct ipmi_recv_msg *msg, *msg2;
1773 list_for_each_entry_safe(msg, msg2, q, link) {
1774 list_del(&msg->link);
1775 ipmi_free_recv_msg(msg);
1779 static void free_cmd_rcvr_list(struct list_head *q)
1781 struct cmd_rcvr *rcvr, *rcvr2;
1783 list_for_each_entry_safe(rcvr, rcvr2, q, link) {
1784 list_del(&rcvr->link);
1789 static void clean_up_interface_data(ipmi_smi_t intf)
1793 free_recv_msg_list(&(intf->waiting_msgs));
1794 free_recv_msg_list(&(intf->waiting_events));
1795 free_cmd_rcvr_list(&(intf->cmd_rcvrs));
1797 for (i=0; i<IPMI_IPMB_NUM_SEQ; i++) {
1798 if ((intf->seq_table[i].inuse)
1799 && (intf->seq_table[i].recv_msg))
1801 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
1806 int ipmi_unregister_smi(ipmi_smi_t intf)
1810 struct ipmi_smi_watcher *w;
1811 unsigned long flags;
1813 down_write(&interfaces_sem);
1814 if (list_empty(&(intf->users)))
1816 for (i=0; i<MAX_IPMI_INTERFACES; i++) {
1817 if (ipmi_interfaces[i] == intf) {
1818 remove_proc_entries(intf);
1819 spin_lock_irqsave(&interfaces_lock, flags);
1820 ipmi_interfaces[i] = NULL;
1821 clean_up_interface_data(intf);
1822 spin_unlock_irqrestore(&interfaces_lock,flags);
1825 goto out_call_watcher;
1831 up_write(&interfaces_sem);
1836 downgrade_write(&interfaces_sem);
1838 /* Call all the watcher interfaces to tell them that
1839 an interface is gone. */
1840 down_read(&smi_watchers_sem);
1841 list_for_each_entry(w, &smi_watchers, link) {
1844 up_read(&smi_watchers_sem);
1845 up_read(&interfaces_sem);
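/*
 * For the IPMB handlers below, a Get Message response from the BMC is laid
 * out as:
 *
 *	rsp[0]    NetFn/LUN of the App response
 *	rsp[1]    Get Message command
 *	rsp[2]    completion code
 *	rsp[3]    channel (low nibble)
 *	rsp[4]    encapsulated netfn << 2 | LUN
 *	rsp[5]    checksum
 *	rsp[6]    sender's slave address
 *	rsp[7]    sequence << 2 | LUN
 *	rsp[8]    encapsulated command
 *	rsp[9..]  message data, with a trailing checksum byte
 */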
1849 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
1850 struct ipmi_smi_msg *msg)
1852 struct ipmi_ipmb_addr ipmb_addr;
1853 struct ipmi_recv_msg *recv_msg;
1854 unsigned long flags;
1857 /* This is 11, not 10, because the response must contain a
1858 * completion code. */
1859 if (msg->rsp_size < 11) {
1860 /* Message not big enough, just ignore it. */
1861 spin_lock_irqsave(&intf->counter_lock, flags);
1862 intf->invalid_ipmb_responses++;
1863 spin_unlock_irqrestore(&intf->counter_lock, flags);
1867 if (msg->rsp[2] != 0) {
1868 /* An error getting the response, just ignore it. */
1872 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
1873 ipmb_addr.slave_addr = msg->rsp[6];
1874 ipmb_addr.channel = msg->rsp[3] & 0x0f;
1875 ipmb_addr.lun = msg->rsp[7] & 3;
1877 /* It's a response from a remote entity. Look up the sequence
1878 number and handle the response. */
1879 if (intf_find_seq(intf,
1883 (msg->rsp[4] >> 2) & (~1),
1884 (struct ipmi_addr *) &(ipmb_addr),
1887 /* We were unable to find the sequence number,
1888 so just nuke the message. */
1889 spin_lock_irqsave(&intf->counter_lock, flags);
1890 intf->unhandled_ipmb_responses++;
1891 spin_unlock_irqrestore(&intf->counter_lock, flags);
1895 memcpy(recv_msg->msg_data,
1898 /* The other fields matched, so no need to set them, except
1899 for netfn, which needs to be the response that was
1900 returned, not the request value. */
1901 recv_msg->msg.netfn = msg->rsp[4] >> 2;
1902 recv_msg->msg.data = recv_msg->msg_data;
1903 recv_msg->msg.data_len = msg->rsp_size - 10;
1904 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
1905 spin_lock_irqsave(&intf->counter_lock, flags);
1906 intf->handled_ipmb_responses++;
1907 spin_unlock_irqrestore(&intf->counter_lock, flags);
1908 deliver_response(recv_msg);
1913 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
1914 struct ipmi_smi_msg *msg)
1916 struct cmd_rcvr *rcvr;
1918 unsigned char netfn;
1920 ipmi_user_t user = NULL;
1921 struct ipmi_ipmb_addr *ipmb_addr;
1922 struct ipmi_recv_msg *recv_msg;
1923 unsigned long flags;
1925 if (msg->rsp_size < 10) {
1926 /* Message not big enough, just ignore it. */
1927 spin_lock_irqsave(&intf->counter_lock, flags);
1928 intf->invalid_commands++;
1929 spin_unlock_irqrestore(&intf->counter_lock, flags);
1933 if (msg->rsp[2] != 0) {
1934 /* An error getting the response, just ignore it. */
1938 netfn = msg->rsp[4] >> 2;
1941 read_lock(&(intf->cmd_rcvr_lock));
1943 if (intf->all_cmd_rcvr) {
1944 user = intf->all_cmd_rcvr;
1946 /* Find the command/netfn. */
1947 list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) {
1948 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
1954 read_unlock(&(intf->cmd_rcvr_lock));
1957 /* We didn't find a user, deliver an error response. */
1958 spin_lock_irqsave(&intf->counter_lock, flags);
1959 intf->unhandled_commands++;
1960 spin_unlock_irqrestore(&intf->counter_lock, flags);
1962 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1963 msg->data[1] = IPMI_SEND_MSG_CMD;
1964 msg->data[2] = msg->rsp[3];
1965 msg->data[3] = msg->rsp[6];
1966 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
1967 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
1968 msg->data[6] = intf->my_address;
1970 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
1971 msg->data[8] = msg->rsp[8]; /* cmd */
1972 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
1973 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
1974 msg->data_size = 11;
1979 printk("Invalid command:");
1980 for (m=0; m<msg->data_size; m++)
1981 printk(" %2.2x", msg->data[m]);
1985 intf->handlers->sender(intf->send_info, msg, 0);
1987 rv = -1; /* We used the message, so return the value that
1988 causes it to not be freed or queued. */
1990 /* Deliver the message to the user. */
1991 spin_lock_irqsave(&intf->counter_lock, flags);
1992 intf->handled_commands++;
1993 spin_unlock_irqrestore(&intf->counter_lock, flags);
1995 recv_msg = ipmi_alloc_recv_msg();
1997 /* We couldn't allocate memory for the
1998 message, so requeue it for handling later. */
2002 /* Extract the source address from the data. */
2003 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
2004 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
2005 ipmb_addr->slave_addr = msg->rsp[6];
2006 ipmb_addr->lun = msg->rsp[7] & 3;
2007 ipmb_addr->channel = msg->rsp[3] & 0xf;
2009 /* Extract the rest of the message information
2010 from the IPMB header.*/
2011 recv_msg->user = user;
2012 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2013 recv_msg->msgid = msg->rsp[7] >> 2;
2014 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2015 recv_msg->msg.cmd = msg->rsp[8];
2016 recv_msg->msg.data = recv_msg->msg_data;
2018 /* We chop off 10, not 9 bytes because the checksum
2019 at the end also needs to be removed. */
2020 recv_msg->msg.data_len = msg->rsp_size - 10;
2021 memcpy(recv_msg->msg_data,
2023 msg->rsp_size - 10);
2024 deliver_response(recv_msg);
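/*
 * The LAN handlers below see the same Get Message framing, with the LAN
 * addressing fields in place of the IPMB ones:
 *
 *	rsp[3]     channel (low nibble) and privilege level (high nibble)
 *	rsp[4]     session handle
 *	rsp[5]     local SWID
 *	rsp[6]     encapsulated netfn << 2 | LUN
 *	rsp[7]     checksum
 *	rsp[8]     remote SWID
 *	rsp[9]     sequence << 2 | LUN
 *	rsp[10]    encapsulated command
 *	rsp[11..]  message data, with a trailing checksum byte
 */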
2031 static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
2032 struct ipmi_smi_msg *msg)
2034 struct ipmi_lan_addr lan_addr;
2035 struct ipmi_recv_msg *recv_msg;
2036 unsigned long flags;
2039 /* This is 13, not 12, because the response must contain a
2040 * completion code. */
2041 if (msg->rsp_size < 13) {
2042 /* Message not big enough, just ignore it. */
2043 spin_lock_irqsave(&intf->counter_lock, flags);
2044 intf->invalid_lan_responses++;
2045 spin_unlock_irqrestore(&intf->counter_lock, flags);
2049 if (msg->rsp[2] != 0) {
2050 /* An error getting the response, just ignore it. */
2054 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
2055 lan_addr.session_handle = msg->rsp[4];
2056 lan_addr.remote_SWID = msg->rsp[8];
2057 lan_addr.local_SWID = msg->rsp[5];
2058 lan_addr.channel = msg->rsp[3] & 0x0f;
2059 lan_addr.privilege = msg->rsp[3] >> 4;
2060 lan_addr.lun = msg->rsp[9] & 3;
2062 /* It's a response from a remote entity. Look up the sequence
2063 number and handle the response. */
2064 if (intf_find_seq(intf,
2068 (msg->rsp[6] >> 2) & (~1),
2069 (struct ipmi_addr *) &(lan_addr),
2072 /* We were unable to find the sequence number,
2073 so just nuke the message. */
2074 spin_lock_irqsave(&intf->counter_lock, flags);
2075 intf->unhandled_lan_responses++;
2076 spin_unlock_irqrestore(&intf->counter_lock, flags);
2080 memcpy(recv_msg->msg_data,
2082 msg->rsp_size - 11);
2083 /* The other fields matched, so no need to set them, except
2084 for netfn, which needs to be the response that was
2085 returned, not the request value. */
2086 recv_msg->msg.netfn = msg->rsp[6] >> 2;
2087 recv_msg->msg.data = recv_msg->msg_data;
2088 recv_msg->msg.data_len = msg->rsp_size - 12;
2089 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2090 spin_lock_irqsave(&intf->counter_lock, flags);
2091 intf->handled_lan_responses++;
2092 spin_unlock_irqrestore(&intf->counter_lock, flags);
2093 deliver_response(recv_msg);
2098 static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
2099 struct ipmi_smi_msg *msg)
2101 struct cmd_rcvr *rcvr;
2103 unsigned char netfn;
2105 ipmi_user_t user = NULL;
2106 struct ipmi_lan_addr *lan_addr;
2107 struct ipmi_recv_msg *recv_msg;
2108 unsigned long flags;
2110 if (msg->rsp_size < 12) {
2111 /* Message not big enough, just ignore it. */
2112 spin_lock_irqsave(&intf->counter_lock, flags);
2113 intf->invalid_commands++;
2114 spin_unlock_irqrestore(&intf->counter_lock, flags);
2118 if (msg->rsp[2] != 0) {
2119 /* An error getting the response, just ignore it. */
2123 netfn = msg->rsp[6] >> 2;
2126 read_lock(&(intf->cmd_rcvr_lock));
2128 if (intf->all_cmd_rcvr) {
2129 user = intf->all_cmd_rcvr;
2131 /* Find the command/netfn. */
2132 list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) {
2133 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
2139 read_unlock(&(intf->cmd_rcvr_lock));
2142 /* We didn't find a user, deliver an error response. */
2143 spin_lock_irqsave(&intf->counter_lock, flags);
2144 intf->unhandled_commands++;
2145 spin_unlock_irqrestore(&intf->counter_lock, flags);
2147 rv = 0; /* Don't do anything with these messages, just
2148 allow them to be freed. */
2150 /* Deliver the message to the user. */
2151 spin_lock_irqsave(&intf->counter_lock, flags);
2152 intf->handled_commands++;
2153 spin_unlock_irqrestore(&intf->counter_lock, flags);
2155 recv_msg = ipmi_alloc_recv_msg();
2157 /* We couldn't allocate memory for the
2158 message, so requeue it for handling later. */
2162 /* Extract the source address from the data. */
2163 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
2164 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
2165 lan_addr->session_handle = msg->rsp[4];
2166 lan_addr->remote_SWID = msg->rsp[8];
2167 lan_addr->local_SWID = msg->rsp[5];
2168 lan_addr->lun = msg->rsp[9] & 3;
2169 lan_addr->channel = msg->rsp[3] & 0xf;
2170 lan_addr->privilege = msg->rsp[3] >> 4;
2172 /* Extract the rest of the message information
2173 from the IPMB header.*/
2174 recv_msg->user = user;
2175 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2176 recv_msg->msgid = msg->rsp[9] >> 2;
2177 recv_msg->msg.netfn = msg->rsp[6] >> 2;
2178 recv_msg->msg.cmd = msg->rsp[10];
2179 recv_msg->msg.data = recv_msg->msg_data;
2181 /* We chop off 12, not 11 bytes because the checksum
2182 at the end also needs to be removed. */
2183 recv_msg->msg.data_len = msg->rsp_size - 12;
2184 memcpy(recv_msg->msg_data,
2186 msg->rsp_size - 12);
2187 deliver_response(recv_msg);
2194 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
2195 struct ipmi_smi_msg *msg)
2197 struct ipmi_system_interface_addr *smi_addr;
2199 recv_msg->msgid = 0;
2200 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
2201 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2202 smi_addr->channel = IPMI_BMC_CHANNEL;
2203 smi_addr->lun = msg->rsp[0] & 3;
2204 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
2205 recv_msg->msg.netfn = msg->rsp[0] >> 2;
2206 recv_msg->msg.cmd = msg->rsp[1];
2207 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
2208 recv_msg->msg.data = recv_msg->msg_data;
2209 recv_msg->msg.data_len = msg->rsp_size - 3;
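/*
 * A Read Event Message Buffer response carries the NetFn/LUN and command in
 * rsp[0]/rsp[1], the completion code in rsp[2], and the 16-byte event record
 * in rsp[3..18]; that is why handle_read_event_rsp() below requires at least
 * 19 bytes and copy_event_into_recv_msg() strips the first three.
 */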
2212 /* This will be called with the intf->users_lock read-locked, so no need to do that here. */
2214 static int handle_read_event_rsp(ipmi_smi_t intf,
2215 struct ipmi_smi_msg *msg)
2217 struct ipmi_recv_msg *recv_msg, *recv_msg2;
2218 struct list_head msgs;
2221 int deliver_count = 0;
2222 unsigned long flags;
2224 if (msg->rsp_size < 19) {
2225 /* Message is too small to be an IPMB event. */
2226 spin_lock_irqsave(&intf->counter_lock, flags);
2227 intf->invalid_events++;
2228 spin_unlock_irqrestore(&intf->counter_lock, flags);
2232 if (msg->rsp[2] != 0) {
2233 /* An error getting the event, just ignore it. */
2237 INIT_LIST_HEAD(&msgs);
2239 spin_lock_irqsave(&(intf->events_lock), flags);
2241 spin_lock(&intf->counter_lock);
2243 spin_unlock(&intf->counter_lock);
2245 /* Allocate and fill in one message for every user that is getting events. */
2247 list_for_each_entry(user, &(intf->users), link) {
2248 if (! user->gets_events)
2251 recv_msg = ipmi_alloc_recv_msg();
2253 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
2254 list_del(&recv_msg->link);
2255 ipmi_free_recv_msg(recv_msg);
2257 /* We couldn't allocate memory for the
2258 message, so requeue it for handling later. */
2266 copy_event_into_recv_msg(recv_msg, msg);
2267 recv_msg->user = user;
2268 list_add_tail(&(recv_msg->link), &msgs);
2271 if (deliver_count) {
2272 /* Now deliver all the messages. */
2273 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
2274 list_del(&recv_msg->link);
2275 deliver_response(recv_msg);
2277 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
2278 /* No one to receive the message, put it in the queue if there are
2279 not already too many things in the queue. */
2280 recv_msg = ipmi_alloc_recv_msg();
2282 /* We couldn't allocate memory for the
2283 message, so requeue it for handling later. */
2289 copy_event_into_recv_msg(recv_msg, msg);
2290 list_add_tail(&(recv_msg->link), &(intf->waiting_events));
2292 /* There are too many things in the queue, discard this message. */
2294 printk(KERN_WARNING PFX "Event queue full, discarding an"
2295 " incoming event\n");
2299 spin_unlock_irqrestore(&(intf->events_lock), flags);
2304 static int handle_bmc_rsp(ipmi_smi_t intf,
2305 struct ipmi_smi_msg *msg)
2307 struct ipmi_recv_msg *recv_msg;
2309 struct ipmi_user *user;
2310 unsigned long flags;
2312 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
2314 /* Make sure the user still exists. */
2315 list_for_each_entry(user, &(intf->users), link) {
2316 if (user == recv_msg->user) {
2317 /* Found it, so we can deliver it */
2324 /* Special handling for NULL users. */
2325 if (!recv_msg->user && intf->null_user_handler){
2326 intf->null_user_handler(intf, msg);
2327 spin_lock_irqsave(&intf->counter_lock, flags);
2328 intf->handled_local_responses++;
2329 spin_unlock_irqrestore(&intf->counter_lock, flags);
2331 /* The user for the message went away, so give up. */
2332 spin_lock_irqsave(&intf->counter_lock, flags);
2333 intf->unhandled_local_responses++;
2334 spin_unlock_irqrestore(&intf->counter_lock, flags);
2336 ipmi_free_recv_msg(recv_msg);
2338 struct ipmi_system_interface_addr *smi_addr;
2340 spin_lock_irqsave(&intf->counter_lock, flags);
2341 intf->handled_local_responses++;
2342 spin_unlock_irqrestore(&intf->counter_lock, flags);
2343 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2344 recv_msg->msgid = msg->msgid;
2345 smi_addr = ((struct ipmi_system_interface_addr *)
2347 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2348 smi_addr->channel = IPMI_BMC_CHANNEL;
2349 smi_addr->lun = msg->rsp[0] & 3;
2350 recv_msg->msg.netfn = msg->rsp[0] >> 2;
2351 recv_msg->msg.cmd = msg->rsp[1];
2352 memcpy(recv_msg->msg_data,
2355 recv_msg->msg.data = recv_msg->msg_data;
2356 recv_msg->msg.data_len = msg->rsp_size - 2;
2357 deliver_response(recv_msg);
2363 /* Handle a new message. Return 1 if the message should be requeued,
2364 0 if the message should be freed, or -1 if the message should not
2365 be freed or requeued. */
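/* Routing is driven by the netfn/cmd of the response: a Send Message
   response completes a response-to-response, a Get Message response
   carries a queued IPMB or LAN message that is dispatched by the
   channel's medium (unsupported media are simply freed), a Read Event
   Message Buffer response is an asynchronous event, and anything else
   is treated as a response from the local BMC. */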
2366 static int handle_new_recv_msg(ipmi_smi_t intf,
2367 struct ipmi_smi_msg *msg)
2375 for (m=0; m<msg->rsp_size; m++)
2376 printk(" %2.2x", msg->rsp[m]);
2379 if (msg->rsp_size < 2) {
2380 /* Message is too small to be correct. */
2381 printk(KERN_WARNING PFX "BMC returned too small a message"
2382 " for netfn %x cmd %x, got %d bytes\n",
2383 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
2385 /* Generate an error response for the message. */
2386 msg->rsp[0] = msg->data[0] | (1 << 2);
2387 msg->rsp[1] = msg->data[1];
2388 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
2390 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
2391 || (msg->rsp[1] != msg->data[1])) /* Command */
2393 /* The response is not even marginally correct. */
2394 printk(KERN_WARNING PFX "BMC returned incorrect response,"
2395 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
2396 (msg->data[0] >> 2) | 1, msg->data[1],
2397 msg->rsp[0] >> 2, msg->rsp[1]);
2399 /* Generate an error response for the message. */
2400 msg->rsp[0] = msg->data[0] | (1 << 2);
2401 msg->rsp[1] = msg->data[1];
2402 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
2406 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
2407 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
2408 && (msg->user_data != NULL))
2410 /* It's a response to a response we sent. For this we
2411 deliver a send message response to the user. */
2412 struct ipmi_recv_msg *recv_msg = msg->user_data;
2415 if (msg->rsp_size < 2)
2416 /* Message is too small to be correct. */
2419 chan = msg->data[2] & 0x0f;
2420 if (chan >= IPMI_MAX_CHANNELS)
2421 /* Invalid channel number */
2425 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
2426 recv_msg->msg.data = recv_msg->msg_data;
2427 recv_msg->msg.data_len = 1;
2428 recv_msg->msg_data[0] = msg->rsp[2];
2429 deliver_response(recv_msg);
2431 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
2432 && (msg->rsp[1] == IPMI_GET_MSG_CMD))
2434 /* It's from the receive queue. */
2435 chan = msg->rsp[3] & 0xf;
2436 if (chan >= IPMI_MAX_CHANNELS) {
2437 /* Invalid channel number */
2442 switch (intf->channels[chan].medium) {
2443 case IPMI_CHANNEL_MEDIUM_IPMB:
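/* Bit 0x04 here is the low bit of the embedded message's netfn; it is
   set for responses and clear for commands. */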
2444 if (msg->rsp[4] & 0x04) {
2445 /* It's a response, so find the
2446 requesting message and send it up. */
2447 requeue = handle_ipmb_get_msg_rsp(intf, msg);
2449 /* It's a command to the SMS from some other
2450 entity. Handle that. */
2451 requeue = handle_ipmb_get_msg_cmd(intf, msg);
2455 case IPMI_CHANNEL_MEDIUM_8023LAN:
2456 case IPMI_CHANNEL_MEDIUM_ASYNC:
2457 if (msg->rsp[6] & 0x04) {
2458 /* It's a response, so find the
2459 requesting message and send it up. */
2460 requeue = handle_lan_get_msg_rsp(intf, msg);
2462 /* It's a command to the SMS from some other
2463 entity. Handle that. */
2464 requeue = handle_lan_get_msg_cmd(intf, msg);
2469 /* We don't handle the channel type, so just
2470 * free the message. */
2474 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
2475 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
2477 /* It's an asynchronous event. */
2478 requeue = handle_read_event_rsp(intf, msg);
2480 /* It's a response from the local BMC. */
2481 requeue = handle_bmc_rsp(intf, msg);
2488 /* Handle a new message from the lower layer. */
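/* The local completion of a Send Message command is handled here
   directly (it only starts the sequence timer, or fails the sequence
   on a hard error); everything else is pushed through
   handle_new_recv_msg(), or parked on waiting_msgs to preserve
   ordering when it cannot be handled right away. */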
2489 void ipmi_smi_msg_received(ipmi_smi_t intf,
2490 struct ipmi_smi_msg *msg)
2492 unsigned long flags;
2496 /* Lock the user lock so the user can't go away while we are using it. */
2498 read_lock(&(intf->users_lock));
2500 if ((msg->data_size >= 2)
2501 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
2502 && (msg->data[1] == IPMI_SEND_MSG_CMD)
2503 && (msg->user_data == NULL)) {
2504 /* This is the local response to a command send, start
2505 the timer for these. The user_data will not be
2506 NULL if this is a response send, and we will let
2507 response sends just go through. */
2509 /* Check for errors, if we get certain errors (ones
2510 that mean basically we can try again later), we
2511 ignore them and start the timer. Otherwise we
2512 report the error immediately. */
2513 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
2514 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
2515 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR))
2517 int chan = msg->rsp[3] & 0xf;
2519 /* Got an error sending the message, handle it. */
2520 spin_lock_irqsave(&intf->counter_lock, flags);
2521 if (chan >= IPMI_MAX_CHANNELS)
2522 ; /* This shouldn't happen */
2523 else if ((intf->channels[chan].medium
2524 == IPMI_CHANNEL_MEDIUM_8023LAN)
2525 || (intf->channels[chan].medium
2526 == IPMI_CHANNEL_MEDIUM_ASYNC))
2527 intf->sent_lan_command_errs++;
2529 intf->sent_ipmb_command_errs++;
2530 spin_unlock_irqrestore(&intf->counter_lock, flags);
2531 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
2533 /* The message was sent, start the timer. */
2534 intf_start_seq_timer(intf, msg->msgid);
2537 ipmi_free_smi_msg(msg);
2541 /* To preserve message order, if the list is not empty, we
2542 tack this message onto the end of the list. */
2543 spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
2544 if (!list_empty(&(intf->waiting_msgs))) {
2545 list_add_tail(&(msg->link), &(intf->waiting_msgs));
2546 spin_unlock(&(intf->waiting_msgs_lock));
2549 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
2551 rv = handle_new_recv_msg(intf, msg);
2553 /* Could not handle the message now, just add it to a
2554 list to handle later. */
2555 spin_lock(&(intf->waiting_msgs_lock));
2556 list_add_tail(&(msg->link), &(intf->waiting_msgs));
2557 spin_unlock(&(intf->waiting_msgs_lock));
2558 } else if (rv == 0) {
2559 ipmi_free_smi_msg(msg);
2563 read_unlock(&(intf->users_lock));
2566 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
2570 read_lock(&(intf->users_lock));
2571 list_for_each_entry(user, &(intf->users), link) {
2572 if (! user->handler->ipmi_watchdog_pretimeout)
2575 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
2577 read_unlock(&(intf->users_lock));
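/* Turn a timed-out request into a locally generated error response:
   flip the netfn to its response form and hand the caller a single
   IPMI_TIMEOUT_COMPLETION_CODE byte. */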
2581 handle_msg_timeout(struct ipmi_recv_msg *msg)
2583 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2584 msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE;
2585 msg->msg.netfn |= 1; /* Convert to a response. */
2586 msg->msg.data_len = 1;
2587 msg->msg.data = msg->msg_data;
2588 deliver_response(msg);
2591 static struct ipmi_smi_msg *
2592 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
2593 unsigned char seq, long seqid)
2595 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
2597 /* If we can't allocate the message, then just return, we
2598 get 4 retries, so this should be ok. */
2601 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
2602 smi_msg->data_size = recv_msg->msg.data_len;
2603 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
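/* The retransmission reuses the original sequence number, packed into
   msgid, so the eventual reply still matches the pending
   sequence-table entry. */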
2609 for (m=0; m<smi_msg->data_size; m++)
2610 printk(" %2.2x", smi_msg->data[m]);
2618 ipmi_timeout_handler(long timeout_period)
2621 struct list_head timeouts;
2622 struct ipmi_recv_msg *msg, *msg2;
2623 struct ipmi_smi_msg *smi_msg, *smi_msg2;
2624 unsigned long flags;
2627 INIT_LIST_HEAD(&timeouts);
2629 spin_lock(&interfaces_lock);
2630 for (i=0; i<MAX_IPMI_INTERFACES; i++) {
2631 intf = ipmi_interfaces[i];
2635 read_lock(&(intf->users_lock));
2637 /* See if any waiting messages need to be processed. */
2638 spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
2639 list_for_each_entry_safe(smi_msg, smi_msg2, &(intf->waiting_msgs), link) {
2640 if (! handle_new_recv_msg(intf, smi_msg)) {
2641 list_del(&smi_msg->link);
2642 ipmi_free_smi_msg(smi_msg);
2644 /* To preserve message order, quit if we
2645 can't handle a message. */
2649 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
2651 /* Go through the seq table and find any messages that
2652 have timed out, putting them in the timeouts list. */
2654 spin_lock_irqsave(&(intf->seq_lock), flags);
2655 for (j=0; j<IPMI_IPMB_NUM_SEQ; j++) {
2656 struct seq_table *ent = &(intf->seq_table[j]);
2660 ent->timeout -= timeout_period;
2661 if (ent->timeout > 0)
2664 if (ent->retries_left == 0) {
2665 /* The message has used all its retries. */
2667 msg = ent->recv_msg;
2668 list_add_tail(&(msg->link), &timeouts);
2669 spin_lock(&intf->counter_lock);
2671 intf->timed_out_ipmb_broadcasts++;
2672 else if (ent->recv_msg->addr.addr_type
2673 == IPMI_LAN_ADDR_TYPE)
2674 intf->timed_out_lan_commands++;
2676 intf->timed_out_ipmb_commands++;
2677 spin_unlock(&intf->counter_lock);
2679 struct ipmi_smi_msg *smi_msg;
2680 /* More retries, send again. */
2682 /* Start with the max timer, set to normal
2683 timer after the message is sent. */
2684 ent->timeout = MAX_MSG_TIMEOUT;
2685 ent->retries_left--;
2686 spin_lock(&intf->counter_lock);
2687 if (ent->recv_msg->addr.addr_type
2688 == IPMI_LAN_ADDR_TYPE)
2689 intf->retransmitted_lan_commands++;
2691 intf->retransmitted_ipmb_commands++;
2692 spin_unlock(&intf->counter_lock);
2693 smi_msg = smi_from_recv_msg(intf,
2694 ent->recv_msg, j, ent->seqid);
2698 spin_unlock_irqrestore(&(intf->seq_lock),flags);
2699 /* Send the new message. We send with a zero
2700 * priority. It timed out, I doubt time is
2701 * that critical now, and high priority
2702 * messages are really only for messages to the
2703 * local MC, which don't get resent. */
2704 intf->handlers->sender(intf->send_info,
2706 spin_lock_irqsave(&(intf->seq_lock), flags);
2709 spin_unlock_irqrestore(&(intf->seq_lock), flags);
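/* Timed-out requests collected above are completed here, outside the
   seq_lock, so user callbacks do not run with that lock held. */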
2711 list_for_each_entry_safe(msg, msg2, &timeouts, link) {
2712 handle_msg_timeout(msg);
2715 read_unlock(&(intf->users_lock));
2717 spin_unlock(&interfaces_lock);
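/* Ask each registered interface's lower layer (via its request_events
   handler) to check for any pending events. */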
2720 static void ipmi_request_event(void)
2725 spin_lock(&interfaces_lock);
2726 for (i=0; i<MAX_IPMI_INTERFACES; i++) {
2727 intf = ipmi_interfaces[i];
2731 intf->handlers->request_events(intf->send_info);
2733 spin_unlock(&interfaces_lock);
2736 static struct timer_list ipmi_timer;
2738 /* Call every ~100 ms. */
2739 #define IPMI_TIMEOUT_TIME 100
2741 /* How many jiffies does it take to get to the timeout time. */
2742 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
2744 /* Request events from the queue every second (this is the number of
2745 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
2746 future, IPMI will add a way to know immediately if an event is in
2747 the queue and this silliness can go away. */
2748 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
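/* With IPMI_TIMEOUT_TIME at 100 ms this works out to 10 timer ticks,
   so the event queue is polled roughly once per second. */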
2750 static volatile int stop_operation = 0;
2751 static volatile int timer_stopped = 0;
2752 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
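/* Periodic timer body: run the message timeout handler every tick,
   poll for events every IPMI_REQUEST_EV_TIME ticks, and re-arm the
   timer; once stop_operation is set at module cleanup it stops and
   flags timer_stopped, which cleanup_ipmi() waits for. */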
2754 static void ipmi_timeout(unsigned long data)
2756 if (stop_operation) {
2762 if (ticks_to_req_ev == 0) {
2763 ipmi_request_event();
2764 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
2767 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
2769 ipmi_timer.expires += IPMI_TIMEOUT_JIFFIES;
2770 add_timer(&ipmi_timer);
2774 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
2775 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
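/* These counters track outstanding message buffers; cleanup_ipmi()
   warns at module unload if any are still allocated. */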
2777 /* FIXME - convert these to slabs. */
2778 static void free_smi_msg(struct ipmi_smi_msg *msg)
2780 atomic_dec(&smi_msg_inuse_count);
2784 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
2786 struct ipmi_smi_msg *rv;
2787 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
2789 rv->done = free_smi_msg;
2790 rv->user_data = NULL;
2791 atomic_inc(&smi_msg_inuse_count);
2796 static void free_recv_msg(struct ipmi_recv_msg *msg)
2798 atomic_dec(&recv_msg_inuse_count);
2802 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
2804 struct ipmi_recv_msg *rv;
2806 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
2808 rv->done = free_recv_msg;
2809 atomic_inc(&recv_msg_inuse_count);
2814 #ifdef CONFIG_IPMI_PANIC_EVENT
2816 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
2820 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
2824 #ifdef CONFIG_IPMI_PANIC_STRING
2825 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_smi_msg *msg)
2827 if ((msg->rsp[0] == (IPMI_NETFN_SENSOR_EVENT_RESPONSE << 2))
2828 && (msg->rsp[1] == IPMI_GET_EVENT_RECEIVER_CMD)
2829 && (msg->rsp[2] == IPMI_CC_NO_ERROR))
2831 /* A get event receiver command, save it. */
2832 intf->event_receiver = msg->rsp[3];
2833 intf->event_receiver_lun = msg->rsp[4] & 0x3;
2837 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_smi_msg *msg)
2839 if ((msg->rsp[0] == (IPMI_NETFN_APP_RESPONSE << 2))
2840 && (msg->rsp[1] == IPMI_GET_DEVICE_ID_CMD)
2841 && (msg->rsp[2] == IPMI_CC_NO_ERROR))
2843 /* A get device id command, save if we are an event
2844 receiver or generator. */
2845 intf->local_sel_device = (msg->rsp[8] >> 2) & 1;
2846 intf->local_event_generator = (msg->rsp[8] >> 5) & 1;
2851 static void send_panic_events(char *str)
2853 struct kernel_ipmi_msg msg;
2855 unsigned char data[16];
2857 struct ipmi_system_interface_addr *si;
2858 struct ipmi_addr addr;
2859 struct ipmi_smi_msg smi_msg;
2860 struct ipmi_recv_msg recv_msg;
2862 si = (struct ipmi_system_interface_addr *) &addr;
2863 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2864 si->channel = IPMI_BMC_CHANNEL;
2867 /* Fill in an event reporting that we have failed. */
2868 msg.netfn = 0x04; /* Sensor or Event. */
2869 msg.cmd = 2; /* Platform event command. */
2872 data[0] = 0x21; /* Kernel generator ID, IPMI table 5-4 */
2873 data[1] = 0x03; /* This is for IPMI 1.0. */
2874 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
2875 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
2876 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
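/* Together with the netfn/cmd above, this forms a Platform Event
   request reporting an OS Critical Stop; the remaining data bytes are
   used below to carry a few panic breadcrumbs. */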
2878 /* Put a few breadcrumbs in. Hopefully later we can add more things
2879 to make the panic events more useful. */
2886 smi_msg.done = dummy_smi_done_handler;
2887 recv_msg.done = dummy_recv_done_handler;
2889 /* For every registered interface, send the event. */
2890 for (i=0; i<MAX_IPMI_INTERFACES; i++) {
2891 intf = ipmi_interfaces[i];
2895 /* Send the event announcing the panic. */
2896 intf->handlers->set_run_to_completion(intf->send_info, 1);
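/* Run-to-completion mode makes the lower layer finish each message
   synchronously, polling instead of waiting for interrupts, which is
   what we need while the machine is panicking. */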
2897 i_ipmi_request(NULL,
2908 0, 1); /* Don't retry, and don't wait. */
2911 #ifdef CONFIG_IPMI_PANIC_STRING
2912 /* On every interface, dump a bunch of OEM events holding the
2917 for (i=0; i<MAX_IPMI_INTERFACES; i++) {
2919 struct ipmi_ipmb_addr *ipmb;
2922 intf = ipmi_interfaces[i];
2926 /* First job here is to figure out where to send the
2927 OEM events. There's no way in IPMI to send OEM
2928 events using an event send command, so we have to
2929 find the SEL to put them in and stick them in there. */
2932 /* Get capabilities from the get device id. */
2933 intf->local_sel_device = 0;
2934 intf->local_event_generator = 0;
2935 intf->event_receiver = 0;
2937 /* Request the device info from the local MC. */
2938 msg.netfn = IPMI_NETFN_APP_REQUEST;
2939 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
2942 intf->null_user_handler = device_id_fetcher;
2943 i_ipmi_request(NULL,
2954 0, 1); /* Don't retry, and don't wait. */
2956 if (intf->local_event_generator) {
2957 /* Request the event receiver from the local MC. */
2958 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
2959 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
2962 intf->null_user_handler = event_receiver_fetcher;
2963 i_ipmi_request(NULL,
2974 0, 1); /* no retry, and no wait. */
2976 intf->null_user_handler = NULL;
2978 /* Validate the event receiver. The low bit must not
2979 be 1 (it must be a valid IPMB address), it cannot
2980 be zero, and it must not be my address. */
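/* (IPMB slave addresses are even; an odd value would be a software ID
   rather than a physical slave address, so it is rejected here.) */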
2981 if (((intf->event_receiver & 1) == 0)
2982 && (intf->event_receiver != 0)
2983 && (intf->event_receiver != intf->my_address))
2985 /* The event receiver is valid, send an IPMB
2987 ipmb = (struct ipmi_ipmb_addr *) &addr;
2988 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
2989 ipmb->channel = 0; /* FIXME - is this right? */
2990 ipmb->lun = intf->event_receiver_lun;
2991 ipmb->slave_addr = intf->event_receiver;
2992 } else if (intf->local_sel_device) {
2993 /* The event receiver was not valid (or was
2994 me), but I am an SEL device, just dump it in my SEL. */
2996 si = (struct ipmi_system_interface_addr *) &addr;
2997 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2998 si->channel = IPMI_BMC_CHANNEL;
3001 continue; /* Nowhere to send the event. */
3004 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
3005 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
3011 int size = strlen(p);
3017 data[2] = 0xf0; /* OEM event without timestamp. */
3018 data[3] = intf->my_address;
3019 data[4] = j++; /* sequence # */
3020 /* Always give 11 bytes, so strncpy will fill
3021 it with zeroes for me. */
3022 strncpy(data+5, p, 11);
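/* For example, a 30-character panic string would go out as three OEM
   records of up to 11 string bytes each, with sequence numbers 0, 1
   and 2. */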
3025 i_ipmi_request(NULL,
3036 0, 1); /* no retry, and no wait. */
3039 #endif /* CONFIG_IPMI_PANIC_STRING */
3041 #endif /* CONFIG_IPMI_PANIC_EVENT */
3043 static int has_paniced = 0;
3045 static int panic_event(struct notifier_block *this,
3046 unsigned long event,
3056 /* For every registered interface, set it to run to completion. */
3057 for (i=0; i<MAX_IPMI_INTERFACES; i++) {
3058 intf = ipmi_interfaces[i];
3062 intf->handlers->set_run_to_completion(intf->send_info, 1);
3065 #ifdef CONFIG_IPMI_PANIC_EVENT
3066 send_panic_events(ptr);
3072 static struct notifier_block panic_block = {
3073 .notifier_call = panic_event,
3075 .priority = 200 /* priority: INT_MAX >= x >= 0 */
3078 static int ipmi_init_msghandler(void)
3085 printk(KERN_INFO "IPMI message handler version "
3086 IPMI_MSGHANDLER_VERSION "\n");
3088 for (i=0; i<MAX_IPMI_INTERFACES; i++) {
3089 ipmi_interfaces[i] = NULL;
3092 proc_ipmi_root = proc_mkdir("ipmi", NULL);
3093 if (!proc_ipmi_root) {
3094 printk(KERN_ERR PFX "Unable to create IPMI proc dir\n");
3098 proc_ipmi_root->owner = THIS_MODULE;
3100 init_timer(&ipmi_timer);
3101 ipmi_timer.data = 0;
3102 ipmi_timer.function = ipmi_timeout;
3103 ipmi_timer.expires = jiffies + IPMI_TIMEOUT_JIFFIES;
3104 add_timer(&ipmi_timer);
3106 notifier_chain_register(&panic_notifier_list, &panic_block);
3113 static __init int ipmi_init_msghandler_mod(void)
3115 ipmi_init_msghandler();
3119 static __exit void cleanup_ipmi(void)
3126 notifier_chain_unregister(&panic_notifier_list, &panic_block);
3128 /* This can't be called if any interfaces exist, so no worry about
3129 shutting down the interfaces. */
3131 /* Tell the timer to stop, then wait for it to stop. This avoids
3132 problems with race conditions removing the timer here. */
3134 while (!timer_stopped) {
3135 set_current_state(TASK_UNINTERRUPTIBLE);
3136 schedule_timeout(1);
3139 remove_proc_entry(proc_ipmi_root->name, &proc_root);
3143 /* Check for buffer leaks. */
3144 count = atomic_read(&smi_msg_inuse_count);
3146 printk(KERN_WARNING PFX "SMI message count %d at exit\n",
3148 count = atomic_read(&recv_msg_inuse_count);
3150 printk(KERN_WARNING PFX "recv message count %d at exit\n",
3153 module_exit(cleanup_ipmi);
3155 module_init(ipmi_init_msghandler_mod);
3156 MODULE_LICENSE("GPL");
3158 EXPORT_SYMBOL(ipmi_create_user);
3159 EXPORT_SYMBOL(ipmi_destroy_user);
3160 EXPORT_SYMBOL(ipmi_get_version);
3161 EXPORT_SYMBOL(ipmi_request_settime);
3162 EXPORT_SYMBOL(ipmi_request_supply_msgs);
3163 EXPORT_SYMBOL(ipmi_register_smi);
3164 EXPORT_SYMBOL(ipmi_unregister_smi);
3165 EXPORT_SYMBOL(ipmi_register_for_cmd);
3166 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
3167 EXPORT_SYMBOL(ipmi_smi_msg_received);
3168 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
3169 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
3170 EXPORT_SYMBOL(ipmi_addr_length);
3171 EXPORT_SYMBOL(ipmi_validate_addr);
3172 EXPORT_SYMBOL(ipmi_set_gets_events);
3173 EXPORT_SYMBOL(ipmi_smi_watcher_register);
3174 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
3175 EXPORT_SYMBOL(ipmi_set_my_address);
3176 EXPORT_SYMBOL(ipmi_get_my_address);
3177 EXPORT_SYMBOL(ipmi_set_my_LUN);
3178 EXPORT_SYMBOL(ipmi_get_my_LUN);
3179 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
3180 EXPORT_SYMBOL(ipmi_user_set_run_to_completion);