/*
 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
 * BT).
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * This file holds the "policy" for the interface to the SMI state
 * machine.  It does the configuration, handles timers and interrupts,
 * and drives the real SMI state machine.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <asm/system.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#ifdef CONFIG_HIGH_RES_TIMERS
#include <linux/hrtime.h>
# if defined(schedule_next_int)
/* Old high-res timer code, do translations. */
#  define get_arch_cycles(a) quick_update_jiffies_sub(a)
#  define arch_cycles_per_jiffy cycles_per_jiffies
# endif
static inline void add_usec_to_timer(struct timer_list *t, long v)
{
        t->arch_cycle_expires += nsec_to_arch_cycle(v * 1000);
        while (t->arch_cycle_expires >= arch_cycles_per_jiffy) {
                t->expires++;
                t->arch_cycle_expires -= arch_cycles_per_jiffy;
        }
}
#endif
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/ipmi_smi.h>
#include "ipmi_si_sm.h"
#include <linux/init.h>
#include <linux/dmi.h>
/* Measure times between events in the driver. */
#undef DEBUG_TIMING

/* Call every 10 ms. */
#define SI_TIMEOUT_TIME_USEC    10000
#define SI_USEC_PER_JIFFY       (1000000/HZ)
#define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC   250 /* .25ms when the SM requests a
                                       short timeout */
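/* For reference (illustrative, not from the original source): with HZ=1000
   these macros give SI_USEC_PER_JIFFY = 1000 and SI_TIMEOUT_JIFFIES = 10;
   with HZ=100 they give 10000 and 1.  Either way, the long-poll timer fires
   every 10 ms of wall time. */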
enum si_intf_state {
        SI_NORMAL,
        SI_GETTING_FLAGS,
        SI_GETTING_EVENTS,
        SI_CLEARING_FLAGS,
        SI_CLEARING_FLAGS_THEN_SET_IRQ,
        SI_GETTING_MESSAGES,
        SI_ENABLE_INTERRUPTS1,
        SI_ENABLE_INTERRUPTS2
        /* FIXME - add watchdog stuff. */
};
/* Some BT-specific defines we need here. */
#define IPMI_BT_INTMASK_REG             2
#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT   2
#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT  1

enum si_type {
        SI_KCS, SI_SMIC, SI_BT
};
struct ipmi_device_id {
        unsigned char device_id;
        unsigned char device_revision;
        unsigned char firmware_revision_1;
        unsigned char firmware_revision_2;
        unsigned char ipmi_version;
        unsigned char additional_device_support;
        unsigned char manufacturer_id[3];
        unsigned char product_id[2];
        unsigned char aux_firmware_revision[4];
} __attribute__((packed));

#define ipmi_version_major(v) ((v)->ipmi_version & 0xf)
#define ipmi_version_minor(v) ((v)->ipmi_version >> 4)
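/* For illustration (not in the original source): the IPMI version byte keeps
   the major revision in the low nibble and the minor in the high nibble, so
   ipmi_version 0x51 decodes as major 1 and minor 5, i.e. IPMI 1.5. */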
struct smi_info
{
        ipmi_smi_t             intf;
        int                    intf_num;
        struct si_sm_data      *si_sm;
        struct si_sm_handlers  *handlers;
        enum si_type           si_type;
        spinlock_t             si_lock;
        spinlock_t             msg_lock;
        struct list_head       xmit_msgs;
        struct list_head       hp_xmit_msgs;
        struct ipmi_smi_msg    *curr_msg;
        enum si_intf_state     si_state;

        /* Used to handle the various types of I/O that can occur with
           IPMI */
        struct si_sm_io io;
        int (*io_setup)(struct smi_info *info);
        void (*io_cleanup)(struct smi_info *info);
        int (*irq_setup)(struct smi_info *info);
        void (*irq_cleanup)(struct smi_info *info);
        unsigned int io_size;

        /* Per-OEM handler, called from handle_flags().
           Returns 1 when handle_flags() needs to be re-run
           or 0 indicating it set si_state itself. */
        int (*oem_data_avail_handler)(struct smi_info *smi_info);

        /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
           is set to hold the flags until we are done handling everything
           from them. */
#define RECEIVE_MSG_AVAIL       0x01
#define EVENT_MSG_BUFFER_FULL   0x02
#define WDT_PRE_TIMEOUT_INT     0x08
#define OEM0_DATA_AVAIL         0x20
#define OEM1_DATA_AVAIL         0x40
#define OEM2_DATA_AVAIL         0x80
#define OEM_DATA_AVAIL          (OEM0_DATA_AVAIL | \
                                 OEM1_DATA_AVAIL | \
                                 OEM2_DATA_AVAIL)
        unsigned char msg_flags;

        /* If set to true, this will request events the next time the
           state machine is idle. */
        atomic_t req_events;

        /* If true, run the state machine to completion on every send
           call.  Generally used after a panic to make sure stuff goes
           out. */
        int run_to_completion;

        /* The I/O port of an SI interface. */
        int port;

        /* The space between start addresses of the two ports.  For
           instance, if the first port is 0xca2 and the spacing is 4, then
           the second port is 0xca6. */
        unsigned int spacing;

        /* zero if no irq; */
        int irq;

        /* The timer for this si. */
        struct timer_list si_timer;

        /* The time (in jiffies) the last timeout occurred at. */
        unsigned long last_timeout_jiffies;

        /* Used to gracefully stop the timer without race conditions. */
        atomic_t stop_operation;

        /* The driver will disable interrupts when it gets into a
           situation where it cannot handle messages due to lack of
           memory.  Once that situation clears up, it will re-enable
           the interrupt. */
        int interrupt_disabled;

        struct ipmi_device_id device_id;

        /* Slave address, could be reported from DMI. */
        unsigned char slave_addr;

        /* Counters and things for the proc filesystem. */
        spinlock_t count_lock;
        unsigned long short_timeouts;
        unsigned long long_timeouts;
        unsigned long timeout_restarts;
        unsigned long idles;
        unsigned long interrupts;
        unsigned long attentions;
        unsigned long flag_fetches;
        unsigned long hosed_count;
        unsigned long complete_transactions;
        unsigned long events;
        unsigned long watchdog_pretimeouts;
        unsigned long incoming_messages;

        struct task_struct *thread;
};
static struct notifier_block *xaction_notifier_list;
static int register_xaction_notifier(struct notifier_block *nb)
{
        return notifier_chain_register(&xaction_notifier_list, nb);
}

static void si_restart_short_timer(struct smi_info *smi_info);
static void deliver_recv_msg(struct smi_info *smi_info,
                             struct ipmi_smi_msg *msg)
{
        /* Deliver the message to the upper layer with the lock
           released. */
        spin_unlock(&(smi_info->si_lock));
        ipmi_smi_msg_received(smi_info->intf, msg);
        spin_lock(&(smi_info->si_lock));
}
static void return_hosed_msg(struct smi_info *smi_info)
{
        struct ipmi_smi_msg *msg = smi_info->curr_msg;

        /* Make it a response */
        msg->rsp[0] = msg->data[0] | 4;
        msg->rsp[1] = msg->data[1];
        msg->rsp[2] = 0xFF; /* Unknown error. */
        msg->rsp_size = 3;

        smi_info->curr_msg = NULL;
        deliver_recv_msg(smi_info, msg);
}
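/* For reference (not in the original source): data[0] carries the netfn in
   its upper six bits (netfn << 2), and a response netfn is always the
   request netfn plus one, so OR-ing in 4 turns a request byte into the
   matching response byte; e.g. 0x18 (IPMI_NETFN_APP_REQUEST << 2)
   becomes 0x1c. */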
static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
        enum si_sm_result rv = SI_SM_IDLE;
        struct list_head *entry = NULL;
#ifdef DEBUG_TIMING
        struct timeval t;
#endif
        /* No need to save flags, we already have interrupts off and we
           already hold the SMI lock. */
        spin_lock(&(smi_info->msg_lock));

        /* Pick the high priority queue first. */
        if (!list_empty(&(smi_info->hp_xmit_msgs))) {
                entry = smi_info->hp_xmit_msgs.next;
        } else if (!list_empty(&(smi_info->xmit_msgs))) {
                entry = smi_info->xmit_msgs.next;
        }

        if (!entry) {
                smi_info->curr_msg = NULL;
        } else {
                int err;

                list_del(entry);
                smi_info->curr_msg = list_entry(entry,
                                                struct ipmi_smi_msg,
                                                link);
#ifdef DEBUG_TIMING
                do_gettimeofday(&t);
                printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
                err = notifier_call_chain(&xaction_notifier_list, 0, smi_info);
                if (err & NOTIFY_STOP_MASK) {
                        rv = SI_SM_CALL_WITHOUT_DELAY;
                        goto out;
                }
                err = smi_info->handlers->start_transaction(
                        smi_info->si_sm,
                        smi_info->curr_msg->data,
                        smi_info->curr_msg->data_size);
                if (err)
                        return_hosed_msg(smi_info);

                rv = SI_SM_CALL_WITHOUT_DELAY;
        }
 out:
        spin_unlock(&(smi_info->msg_lock));

        return rv;
}
static void start_enable_irq(struct smi_info *smi_info)
{
        unsigned char msg[2];

        /* If we are enabling interrupts, we have to tell the
           BMC to use them. */
        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;

        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
        smi_info->si_state = SI_ENABLE_INTERRUPTS1;
}
static void start_clear_flags(struct smi_info *smi_info)
{
        unsigned char msg[3];

        /* Make sure the watchdog pre-timeout flag is not set at startup. */
        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
        msg[2] = WDT_PRE_TIMEOUT_INT;

        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
        smi_info->si_state = SI_CLEARING_FLAGS;
}
/* When we have a situation where we run out of memory and cannot
   allocate messages, we just leave them in the BMC and run the system
   polled until we can allocate some memory.  Once we have some
   memory, we will re-enable the interrupt. */
static inline void disable_si_irq(struct smi_info *smi_info)
{
        if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
                disable_irq_nosync(smi_info->irq);
                smi_info->interrupt_disabled = 1;
        }
}

static inline void enable_si_irq(struct smi_info *smi_info)
{
        if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
                enable_irq(smi_info->irq);
                smi_info->interrupt_disabled = 0;
        }
}
static void handle_flags(struct smi_info *smi_info)
{
 retry:
        if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
                /* Watchdog pre-timeout */
                spin_lock(&smi_info->count_lock);
                smi_info->watchdog_pretimeouts++;
                spin_unlock(&smi_info->count_lock);

                start_clear_flags(smi_info);
                smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
                spin_unlock(&(smi_info->si_lock));
                ipmi_smi_watchdog_pretimeout(smi_info->intf);
                spin_lock(&(smi_info->si_lock));
        } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
                /* Messages available. */
                smi_info->curr_msg = ipmi_alloc_smi_msg();
                if (!smi_info->curr_msg) {
                        disable_si_irq(smi_info);
                        smi_info->si_state = SI_NORMAL;
                        return;
                }
                enable_si_irq(smi_info);

                smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
                smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
                smi_info->curr_msg->data_size = 2;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm,
                        smi_info->curr_msg->data,
                        smi_info->curr_msg->data_size);
                smi_info->si_state = SI_GETTING_MESSAGES;
        } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
                /* Events available. */
                smi_info->curr_msg = ipmi_alloc_smi_msg();
                if (!smi_info->curr_msg) {
                        disable_si_irq(smi_info);
                        smi_info->si_state = SI_NORMAL;
                        return;
                }
                enable_si_irq(smi_info);

                smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
                smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
                smi_info->curr_msg->data_size = 2;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm,
                        smi_info->curr_msg->data,
                        smi_info->curr_msg->data_size);
                smi_info->si_state = SI_GETTING_EVENTS;
        } else if (smi_info->msg_flags & OEM_DATA_AVAIL) {
                if (smi_info->oem_data_avail_handler)
                        if (smi_info->oem_data_avail_handler(smi_info))
                                goto retry;
        } else {
                smi_info->si_state = SI_NORMAL;
        }
}
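/* Summary (added for clarity, not in the original source): handle_flags()
   services conditions in fixed priority order: watchdog pre-timeout first,
   then received messages, then the event buffer, then OEM-defined data;
   an OEM handler can rewrite msg_flags and ask for a re-run via the retry
   label above. */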
static void handle_transaction_done(struct smi_info *smi_info)
{
        struct ipmi_smi_msg *msg;
#ifdef DEBUG_TIMING
        struct timeval t;

        do_gettimeofday(&t);
        printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
        switch (smi_info->si_state) {
        case SI_NORMAL:
                if (!smi_info->curr_msg)
                        break;

                smi_info->curr_msg->rsp_size
                        = smi_info->handlers->get_result(
                                smi_info->si_sm,
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);

                /* Do this here because deliver_recv_msg() releases the
                   lock, and a new message can be put in during the
                   time the lock is released. */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                deliver_recv_msg(smi_info, msg);
                break;

        case SI_GETTING_FLAGS:
        {
                unsigned char msg[4];
                unsigned int  len;

                /* We got the flags from the SMI, now handle them. */
                len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
                        /* Error fetching flags, just give up for
                           now. */
                        smi_info->si_state = SI_NORMAL;
                } else if (len < 4) {
                        /* Hmm, no flags.  That's technically illegal, but
                           don't use uninitialized data. */
                        smi_info->si_state = SI_NORMAL;
                } else {
                        smi_info->msg_flags = msg[3];
                        handle_flags(smi_info);
                }
                break;
        }

        case SI_CLEARING_FLAGS:
        case SI_CLEARING_FLAGS_THEN_SET_IRQ:
        {
                unsigned char msg[3];

                /* We cleared the flags. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
                if (msg[2] != 0) {
                        /* Error clearing flags */
                        printk(KERN_WARNING
                               "ipmi_si: Error clearing flags: %2.2x\n",
                               msg[2]);
                }
                if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
                        start_enable_irq(smi_info);
                else
                        smi_info->si_state = SI_NORMAL;
                break;
        }

        case SI_GETTING_EVENTS:
        {
                smi_info->curr_msg->rsp_size
                        = smi_info->handlers->get_result(
                                smi_info->si_sm,
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);

                /* Do this here because deliver_recv_msg() releases the
                   lock, and a new message can be put in during the
                   time the lock is released. */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                if (msg->rsp[2] != 0) {
                        /* Error getting event, probably done. */
                        msg->done(msg);

                        /* Take off the event flag. */
                        smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
                        handle_flags(smi_info);
                } else {
                        spin_lock(&smi_info->count_lock);
                        smi_info->events++;
                        spin_unlock(&smi_info->count_lock);

                        /* Do this before we deliver the message
                           because delivering the message releases the
                           lock and something else can mess with the
                           state. */
                        handle_flags(smi_info);

                        deliver_recv_msg(smi_info, msg);
                }
                break;
        }

        case SI_GETTING_MESSAGES:
        {
                smi_info->curr_msg->rsp_size
                        = smi_info->handlers->get_result(
                                smi_info->si_sm,
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);

                /* Do this here because deliver_recv_msg() releases the
                   lock, and a new message can be put in during the
                   time the lock is released. */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                if (msg->rsp[2] != 0) {
                        /* Error getting message, probably done. */
                        msg->done(msg);

                        /* Take off the msg flag. */
                        smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
                        handle_flags(smi_info);
                } else {
                        spin_lock(&smi_info->count_lock);
                        smi_info->incoming_messages++;
                        spin_unlock(&smi_info->count_lock);

                        /* Do this before we deliver the message
                           because delivering the message releases the
                           lock and something else can mess with the
                           state. */
                        handle_flags(smi_info);

                        deliver_recv_msg(smi_info, msg);
                }
                break;
        }

        case SI_ENABLE_INTERRUPTS1:
        {
                unsigned char msg[4];

                /* We got the flags from the SMI, now handle them. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
                        printk(KERN_WARNING
                               "ipmi_si: Could not enable interrupts"
                               ", failed get, using polled mode.\n");
                        smi_info->si_state = SI_NORMAL;
                } else {
                        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                        msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
                        msg[2] = msg[3] | 1; /* enable msg queue int */
                        smi_info->handlers->start_transaction(
                                smi_info->si_sm, msg, 3);
                        smi_info->si_state = SI_ENABLE_INTERRUPTS2;
                }
                break;
        }

        case SI_ENABLE_INTERRUPTS2:
        {
                unsigned char msg[4];

                /* We got the flags from the SMI, now handle them. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0)
                        printk(KERN_WARNING
                               "ipmi_si: Could not enable interrupts"
                               ", failed set, using polled mode.\n");

                smi_info->si_state = SI_NORMAL;
                break;
        }
        }
}
/* Called on timeouts and events.  Timeouts should pass the elapsed
   time, interrupts should pass in zero. */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                                           int time)
{
        enum si_sm_result si_sm_result;

 restart:
        /* There used to be a loop here that waited a little while
           (around 25us) before giving up.  That turned out to be
           pointless, the minimum delays I was seeing were in the 300us
           range, which is far too long to wait in an interrupt.  So
           we just run until the state machine tells us something
           happened or it needs a delay. */
        si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
        time = 0;
        while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);

        if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
        {
                spin_lock(&smi_info->count_lock);
                smi_info->complete_transactions++;
                spin_unlock(&smi_info->count_lock);

                handle_transaction_done(smi_info);
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
        }
        else if (si_sm_result == SI_SM_HOSED)
        {
                spin_lock(&smi_info->count_lock);
                smi_info->hosed_count++;
                spin_unlock(&smi_info->count_lock);

                /* Do this before return_hosed_msg(), because that
                   releases the lock. */
                smi_info->si_state = SI_NORMAL;
                if (smi_info->curr_msg != NULL) {
                        /* If we were handling a user message, format
                           a response to send to the upper layer to
                           tell it about the error. */
                        return_hosed_msg(smi_info);
                }
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
        }

        /* We prefer handling attn over new messages. */
        if (si_sm_result == SI_SM_ATTN)
        {
                unsigned char msg[2];

                spin_lock(&smi_info->count_lock);
                smi_info->attentions++;
                spin_unlock(&smi_info->count_lock);

                /* Got an attn, send down a get message flags to see
                   what's causing it.  It would be better to handle
                   this in the upper layer, but due to the way
                   interrupts work with the SMI, that's not really
                   possible. */
                msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                msg[1] = IPMI_GET_MSG_FLAGS_CMD;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm, msg, 2);
                smi_info->si_state = SI_GETTING_FLAGS;
                goto restart;
        }

        /* If we are currently idle, try to start the next message. */
        if (si_sm_result == SI_SM_IDLE) {
                spin_lock(&smi_info->count_lock);
                smi_info->idles++;
                spin_unlock(&smi_info->count_lock);

                si_sm_result = start_next_msg(smi_info);
                if (si_sm_result != SI_SM_IDLE)
                        goto restart;
        }

        if ((si_sm_result == SI_SM_IDLE)
            && (atomic_read(&smi_info->req_events)))
        {
                /* We are idle and the upper layer requested that I fetch
                   events, so do so. */
                unsigned char msg[2];

                spin_lock(&smi_info->count_lock);
                smi_info->flag_fetches++;
                spin_unlock(&smi_info->count_lock);

                atomic_set(&smi_info->req_events, 0);
                msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                msg[1] = IPMI_GET_MSG_FLAGS_CMD;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm, msg, 2);
                smi_info->si_state = SI_GETTING_FLAGS;
                goto restart;
        }

        return si_sm_result;
}
static void sender(void *send_info,
                   struct ipmi_smi_msg *msg,
                   int priority)
{
        struct smi_info   *smi_info = send_info;
        enum si_sm_result result;
        unsigned long     flags;
#ifdef DEBUG_TIMING
        struct timeval    t;
#endif

        spin_lock_irqsave(&(smi_info->msg_lock), flags);
#ifdef DEBUG_TIMING
        do_gettimeofday(&t);
        printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif

        if (smi_info->run_to_completion) {
                /* If we are running to completion, then throw it in
                   the list and run transactions until everything is
                   clear.  Priority doesn't matter here. */
                list_add_tail(&(msg->link), &(smi_info->xmit_msgs));

                /* We have to release the msg lock and claim the smi
                   lock in this case, because of race conditions. */
                spin_unlock_irqrestore(&(smi_info->msg_lock), flags);

                spin_lock_irqsave(&(smi_info->si_lock), flags);
                result = smi_event_handler(smi_info, 0);
                while (result != SI_SM_IDLE) {
                        udelay(SI_SHORT_TIMEOUT_USEC);
                        result = smi_event_handler(smi_info,
                                                   SI_SHORT_TIMEOUT_USEC);
                }
                spin_unlock_irqrestore(&(smi_info->si_lock), flags);
                return;
        }

        if (priority > 0)
                list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
        else
                list_add_tail(&(msg->link), &(smi_info->xmit_msgs));

        spin_unlock_irqrestore(&(smi_info->msg_lock), flags);

        spin_lock_irqsave(&(smi_info->si_lock), flags);
        if ((smi_info->si_state == SI_NORMAL)
            && (smi_info->curr_msg == NULL))
        {
                start_next_msg(smi_info);
                si_restart_short_timer(smi_info);
        }
        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}
static void set_run_to_completion(void *send_info, int i_run_to_completion)
{
        struct smi_info   *smi_info = send_info;
        enum si_sm_result result;
        unsigned long     flags;

        spin_lock_irqsave(&(smi_info->si_lock), flags);

        smi_info->run_to_completion = i_run_to_completion;
        if (i_run_to_completion) {
                result = smi_event_handler(smi_info, 0);
                while (result != SI_SM_IDLE) {
                        udelay(SI_SHORT_TIMEOUT_USEC);
                        result = smi_event_handler(smi_info,
                                                   SI_SHORT_TIMEOUT_USEC);
                }
        }

        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}
static int ipmi_thread(void *data)
{
        struct smi_info   *smi_info = data;
        unsigned long     flags;
        enum si_sm_result smi_result;

        set_user_nice(current, 19);
        while (!kthread_should_stop()) {
                spin_lock_irqsave(&(smi_info->si_lock), flags);
                smi_result = smi_event_handler(smi_info, 0);
                spin_unlock_irqrestore(&(smi_info->si_lock), flags);
                if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
                        /* do nothing */
                }
                else if (smi_result == SI_SM_CALL_WITH_DELAY)
                        schedule();
                else
                        schedule_timeout_interruptible(1);
        }
        return 0;
}

static void poll(void *send_info)
{
        struct smi_info *smi_info = send_info;

        smi_event_handler(smi_info, 0);
}

static void request_events(void *send_info)
{
        struct smi_info *smi_info = send_info;

        atomic_set(&smi_info->req_events, 1);
}
static int initialized = 0;

/* Must be called with interrupts off and with the si_lock held. */
static void si_restart_short_timer(struct smi_info *smi_info)
{
#if defined(CONFIG_HIGH_RES_TIMERS)
        unsigned long flags;
        unsigned long jiffies_now;
        unsigned long seq;

        if (del_timer(&(smi_info->si_timer))) {
                /* If we don't delete the timer, then it will go off
                   immediately, anyway.  So we only process if we
                   actually delete the timer. */

                do {
                        seq = read_seqbegin_irqsave(&xtime_lock, flags);
                        jiffies_now = jiffies;
                        smi_info->si_timer.expires = jiffies_now;
                        smi_info->si_timer.arch_cycle_expires
                                = get_arch_cycles(jiffies_now);
                } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

                add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);

                add_timer(&(smi_info->si_timer));
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->timeout_restarts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
        }
#endif
}
static void smi_timeout(unsigned long data)
{
        struct smi_info   *smi_info = (struct smi_info *) data;
        enum si_sm_result smi_result;
        unsigned long     flags;
        unsigned long     jiffies_now;
        long              time_diff;
#ifdef DEBUG_TIMING
        struct timeval    t;
#endif

        if (atomic_read(&smi_info->stop_operation))
                return;

        spin_lock_irqsave(&(smi_info->si_lock), flags);
#ifdef DEBUG_TIMING
        do_gettimeofday(&t);
        printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
        jiffies_now = jiffies;
        time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
                     * SI_USEC_PER_JIFFY);
        smi_result = smi_event_handler(smi_info, time_diff);

        spin_unlock_irqrestore(&(smi_info->si_lock), flags);

        smi_info->last_timeout_jiffies = jiffies_now;

        if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
                /* Running with interrupts, only do long timeouts. */
                smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->long_timeouts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
                goto do_add_timer;
        }

        /* If the state machine asks for a short delay, then shorten
           the timer timeout. */
        if (smi_result == SI_SM_CALL_WITH_DELAY) {
#if defined(CONFIG_HIGH_RES_TIMERS)
                unsigned long seq;
#endif
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->short_timeouts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
#if defined(CONFIG_HIGH_RES_TIMERS)
                do {
                        seq = read_seqbegin_irqsave(&xtime_lock, flags);
                        smi_info->si_timer.expires = jiffies;
                        smi_info->si_timer.arch_cycle_expires
                                = get_arch_cycles(smi_info->si_timer.expires);
                } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
                add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
#else
                smi_info->si_timer.expires = jiffies + 1;
#endif
        } else {
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->long_timeouts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
                smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
#if defined(CONFIG_HIGH_RES_TIMERS)
                smi_info->si_timer.arch_cycle_expires = 0;
#endif
        }

 do_add_timer:
        add_timer(&(smi_info->si_timer));
}
static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs)
{
        struct smi_info *smi_info = data;
        unsigned long   flags;
#ifdef DEBUG_TIMING
        struct timeval  t;
#endif

        spin_lock_irqsave(&(smi_info->si_lock), flags);

        spin_lock(&smi_info->count_lock);
        smi_info->interrupts++;
        spin_unlock(&smi_info->count_lock);

        if (atomic_read(&smi_info->stop_operation))
                goto out;

#ifdef DEBUG_TIMING
        do_gettimeofday(&t);
        printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
        smi_event_handler(smi_info, 0);
 out:
        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
        return IRQ_HANDLED;
}

static irqreturn_t si_bt_irq_handler(int irq, void *data, struct pt_regs *regs)
{
        struct smi_info *smi_info = data;

        /* We need to clear the IRQ flag for the BT interface. */
        smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
                             IPMI_BT_INTMASK_CLEAR_IRQ_BIT
                             | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
        return si_irq_handler(irq, data, regs);
}
static struct ipmi_smi_handlers handlers =
{
        .owner                 = THIS_MODULE,
        .sender                = sender,
        .request_events        = request_events,
        .set_run_to_completion = set_run_to_completion,
        .poll                  = poll,
};
/* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
   a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS. */

#define SI_MAX_PARMS 4
#define SI_MAX_DRIVERS ((SI_MAX_PARMS * 2) + 2)
static struct smi_info *smi_infos[SI_MAX_DRIVERS] =
{ NULL, NULL, NULL, NULL };
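/* Arithmetic note (illustrative, not in the original source): with
   SI_MAX_PARMS = 4 this allows (4 * 2) + 2 = 10 drivers; the entries beyond
   the four explicit NULLs are zeroed by C's static initialization rules. */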
#define DEVICE_NAME "ipmi_si"

#define DEFAULT_KCS_IO_PORT     0xca2
#define DEFAULT_SMIC_IO_PORT    0xca9
#define DEFAULT_BT_IO_PORT      0xe4
#define DEFAULT_REGSPACING      1
static int           si_trydefaults = 1;
static char          *si_type[SI_MAX_PARMS];
#define MAX_SI_TYPE_STR 30
static char          si_type_str[MAX_SI_TYPE_STR];
static unsigned long addrs[SI_MAX_PARMS];
static int           num_addrs;
static unsigned int  ports[SI_MAX_PARMS];
static int           num_ports;
static int           irqs[SI_MAX_PARMS];
static int           num_irqs;
static int           regspacings[SI_MAX_PARMS];
static int           num_regspacings = 0;
static int           regsizes[SI_MAX_PARMS];
static int           num_regsizes = 0;
static int           regshifts[SI_MAX_PARMS];
static int           num_regshifts = 0;
static int           slave_addrs[SI_MAX_PARMS];
static int           num_slave_addrs = 0;
module_param_named(trydefaults, si_trydefaults, bool, 0);
MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
                 " default scan of the KCS and SMIC interface at the standard"
                 " address");
module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
MODULE_PARM_DESC(type, "Defines the type of each interface, each"
                 " interface separated by commas.  The types are 'kcs',"
                 " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
                 " the first interface to kcs and the second to bt");
module_param_array(addrs, long, &num_addrs, 0);
MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
                 " addresses separated by commas.  Only use if an interface"
                 " is in memory.  Otherwise, set it to zero or leave"
                 " it blank.");
module_param_array(ports, int, &num_ports, 0);
MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
                 " addresses separated by commas.  Only use if an interface"
                 " is a port.  Otherwise, set it to zero or leave"
                 " it blank.");
module_param_array(irqs, int, &num_irqs, 0);
MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
                 " addresses separated by commas.  Only use if an interface"
                 " has an interrupt.  Otherwise, set it to zero or leave"
                 " it blank.");
module_param_array(regspacings, int, &num_regspacings, 0);
MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
                 " and each successive register used by the interface.  For"
                 " instance, if the start address is 0xca2 and the spacing"
                 " is 2, then the second address is at 0xca4.  Defaults"
                 " to 1.");
module_param_array(regsizes, int, &num_regsizes, 0);
MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
                 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
                 " 16-bit, 32-bit, or 64-bit register.  Use this if the"
                 " 8-bit IPMI register has to be read from a larger"
                 " register.");
module_param_array(regshifts, int, &num_regshifts, 0);
MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
                 " IPMI register, in bits.  For instance, if the data"
                 " is read from a 32-bit word and the IPMI data is in"
                 " bits 8-15, then the shift would be 8");
module_param_array(slave_addrs, int, &num_slave_addrs, 0);
MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
                 " the controller.  Normally this is 0x20, but can be"
                 " overridden by this parm.  This is an array indexed"
                 " by interface number.");
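/* Example usage (illustrative, not from the original source):
 *   modprobe ipmi_si type=kcs,bt ports=0xca2,0xe4 irqs=0,10
 * sets up a polled KCS interface at I/O port 0xca2 and a BT interface at
 * port 0xe4 driven by interrupt 10. */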
#define IPMI_MEM_ADDR_SPACE 1
#define IPMI_IO_ADDR_SPACE  2

#if defined(CONFIG_ACPI) || defined(CONFIG_X86) || defined(CONFIG_PCI)
static int is_new_interface(int intf, u8 addr_space, unsigned long base_addr)
{
        int i;

        for (i = 0; i < SI_MAX_PARMS; ++i) {
                /* Don't check our address. */
                if (i == intf)
                        continue;
                if (si_type[i] != NULL) {
                        if ((addr_space == IPMI_MEM_ADDR_SPACE &&
                             base_addr == addrs[i]) ||
                            (addr_space == IPMI_IO_ADDR_SPACE &&
                             base_addr == ports[i]))
                                return 0;
                }
        }

        return 1;
}
#endif
static int std_irq_setup(struct smi_info *info)
{
        int rv;

        if (!info->irq)
                return 0;

        if (info->si_type == SI_BT) {
                rv = request_irq(info->irq, si_bt_irq_handler,
                                 SA_INTERRUPT, DEVICE_NAME, info);
                if (!rv)
                        /* Enable the interrupt in the BT interface. */
                        info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
                                         IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
        } else
                rv = request_irq(info->irq, si_irq_handler,
                                 SA_INTERRUPT, DEVICE_NAME, info);
        if (rv) {
                printk(KERN_WARNING
                       "ipmi_si: %s unable to claim interrupt %d,"
                       " running polled\n",
                       DEVICE_NAME, info->irq);
                info->irq = 0;
        } else
                printk("  Using irq %d\n", info->irq);

        return rv;
}

static void std_irq_cleanup(struct smi_info *info)
{
        if (!info->irq)
                return;

        if (info->si_type == SI_BT)
                /* Disable the interrupt in the BT interface. */
                info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
        free_irq(info->irq, info);
}
static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
{
        unsigned int *addr = io->info;
        return inb((*addr) + (offset * io->regspacing));
}

static void port_outb(struct si_sm_io *io, unsigned int offset,
                      unsigned char b)
{
        unsigned int *addr = io->info;
        outb(b, (*addr) + (offset * io->regspacing));
}

static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
{
        unsigned int *addr = io->info;
        return (inw((*addr) + (offset * io->regspacing)) >> io->regshift) & 0xff;
}

static void port_outw(struct si_sm_io *io, unsigned int offset,
                      unsigned char b)
{
        unsigned int *addr = io->info;
        outw(b << io->regshift, (*addr) + (offset * io->regspacing));
}

static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
{
        unsigned int *addr = io->info;
        return (inl((*addr) + (offset * io->regspacing)) >> io->regshift) & 0xff;
}

static void port_outl(struct si_sm_io *io, unsigned int offset,
                      unsigned char b)
{
        unsigned int *addr = io->info;
        outl(b << io->regshift, (*addr) + (offset * io->regspacing));
}
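/* Worked example (illustrative, not from the original source): for a BMC
 * whose 8-bit registers sit in bits 8-15 of 32-bit words spaced 4 bytes
 * apart, use regsize=4, regspacing=4 and regshift=8; port_inl() then
 * computes (inl(base + offset*4) >> 8) & 0xff to recover the register
 * byte. */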
static void port_cleanup(struct smi_info *info)
{
        unsigned int *addr = info->io.info;
        int          mapsize;

        if (addr && (*addr)) {
                mapsize = ((info->io_size * info->io.regspacing)
                           - (info->io.regspacing - info->io.regsize));
                release_region(*addr, mapsize);
        }
}

static int port_setup(struct smi_info *info)
{
        unsigned int *addr = info->io.info;
        int          mapsize;

        if (!addr || (!*addr))
                return -ENODEV;

        info->io_cleanup = port_cleanup;

        /* Figure out the actual inb/inw/inl/etc routine to use based
           upon the register size. */
        switch (info->io.regsize) {
        case 1:
                info->io.inputb = port_inb;
                info->io.outputb = port_outb;
                break;
        case 2:
                info->io.inputb = port_inw;
                info->io.outputb = port_outw;
                break;
        case 4:
                info->io.inputb = port_inl;
                info->io.outputb = port_outl;
                break;
        default:
                printk("ipmi_si: Invalid register size: %d\n",
                       info->io.regsize);
                return -EINVAL;
        }

        /* Calculate the total amount of memory to claim.  This is an
         * unusual looking calculation, but it avoids claiming any
         * more memory than it has to.  It will claim everything
         * between the first address to the end of the last full
         * register. */
        mapsize = ((info->io_size * info->io.regspacing)
                   - (info->io.regspacing - info->io.regsize));

        if (request_region(*addr, mapsize, DEVICE_NAME) == NULL)
                return -EIO;

        return 0;
}
static int try_init_port(int intf_num, struct smi_info **new_info)
{
        struct smi_info *info;

        if (!ports[intf_num])
                return -ENODEV;

        if (!is_new_interface(intf_num, IPMI_IO_ADDR_SPACE,
                              ports[intf_num]))
                return -ENODEV;

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
                printk(KERN_ERR "ipmi_si: Could not allocate SI data (1)\n");
                return -ENOMEM;
        }
        memset(info, 0, sizeof(*info));

        info->io_setup = port_setup;
        info->io.info = &(ports[intf_num]);
        info->io.addr = NULL;
        info->io.regspacing = regspacings[intf_num];
        if (!info->io.regspacing)
                info->io.regspacing = DEFAULT_REGSPACING;
        info->io.regsize = regsizes[intf_num];
        if (!info->io.regsize)
                info->io.regsize = DEFAULT_REGSPACING;
        info->io.regshift = regshifts[intf_num];

        info->irq_setup = NULL;
        *new_info = info;

        if (si_type[intf_num] == NULL)
                si_type[intf_num] = "kcs";

        printk("ipmi_si: Trying \"%s\" at I/O port 0x%x\n",
               si_type[intf_num], ports[intf_num]);
        return 0;
}
static unsigned char mem_inb(struct si_sm_io *io, unsigned int offset)
{
        return readb((io->addr) + (offset * io->regspacing));
}

static void mem_outb(struct si_sm_io *io, unsigned int offset,
                     unsigned char b)
{
        writeb(b, (io->addr) + (offset * io->regspacing));
}

static unsigned char mem_inw(struct si_sm_io *io, unsigned int offset)
{
        return (readw((io->addr) + (offset * io->regspacing)) >> io->regshift)
                & 0xff;
}

static void mem_outw(struct si_sm_io *io, unsigned int offset,
                     unsigned char b)
{
        writew(b << io->regshift, (io->addr) + (offset * io->regspacing));
}

static unsigned char mem_inl(struct si_sm_io *io, unsigned int offset)
{
        return (readl((io->addr) + (offset * io->regspacing)) >> io->regshift)
                & 0xff;
}

static void mem_outl(struct si_sm_io *io, unsigned int offset,
                     unsigned char b)
{
        writel(b << io->regshift, (io->addr) + (offset * io->regspacing));
}

#ifdef readq
static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
{
        return (readq((io->addr) + (offset * io->regspacing)) >> io->regshift)
                & 0xff;
}

static void mem_outq(struct si_sm_io *io, unsigned int offset,
                     unsigned char b)
{
        writeq(b << io->regshift, (io->addr) + (offset * io->regspacing));
}
#endif
static void mem_cleanup(struct smi_info *info)
{
        unsigned long *addr = info->io.info;
        int           mapsize;

        if (info->io.addr) {
                iounmap(info->io.addr);

                mapsize = ((info->io_size * info->io.regspacing)
                           - (info->io.regspacing - info->io.regsize));

                release_mem_region(*addr, mapsize);
        }
}

static int mem_setup(struct smi_info *info)
{
        unsigned long *addr = info->io.info;
        int           mapsize;

        if (!addr || (!*addr))
                return -ENODEV;

        info->io_cleanup = mem_cleanup;

        /* Figure out the actual readb/readw/readl/etc routine to use based
           upon the register size. */
        switch (info->io.regsize) {
        case 1:
                info->io.inputb = mem_inb;
                info->io.outputb = mem_outb;
                break;
        case 2:
                info->io.inputb = mem_inw;
                info->io.outputb = mem_outw;
                break;
        case 4:
                info->io.inputb = mem_inl;
                info->io.outputb = mem_outl;
                break;
#ifdef readq
        case 8:
                info->io.inputb = mem_inq;
                info->io.outputb = mem_outq;
                break;
#endif
        default:
                printk("ipmi_si: Invalid register size: %d\n",
                       info->io.regsize);
                return -EINVAL;
        }

        /* Calculate the total amount of memory to claim.  This is an
         * unusual looking calculation, but it avoids claiming any
         * more memory than it has to.  It will claim everything
         * between the first address to the end of the last full
         * register. */
        mapsize = ((info->io_size * info->io.regspacing)
                   - (info->io.regspacing - info->io.regsize));

        if (request_mem_region(*addr, mapsize, DEVICE_NAME) == NULL)
                return -EIO;

        info->io.addr = ioremap(*addr, mapsize);
        if (info->io.addr == NULL) {
                release_mem_region(*addr, mapsize);
                return -EIO;
        }
        return 0;
}
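/* Worked example (illustrative, not from the original source): with
 * io_size = 3 registers, regspacing = 4 and regsize = 1,
 * mapsize = (3 * 4) - (4 - 1) = 9 bytes, which covers the first register
 * through the end of the last one without claiming the padding that
 * follows it. */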
static int try_init_mem(int intf_num, struct smi_info **new_info)
{
        struct smi_info *info;

        if (!addrs[intf_num])
                return -ENODEV;

        if (!is_new_interface(intf_num, IPMI_MEM_ADDR_SPACE,
                              addrs[intf_num]))
                return -ENODEV;

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
                printk(KERN_ERR "ipmi_si: Could not allocate SI data (2)\n");
                return -ENOMEM;
        }
        memset(info, 0, sizeof(*info));

        info->io_setup = mem_setup;
        info->io.info = &addrs[intf_num];
        info->io.addr = NULL;
        info->io.regspacing = regspacings[intf_num];
        if (!info->io.regspacing)
                info->io.regspacing = DEFAULT_REGSPACING;
        info->io.regsize = regsizes[intf_num];
        if (!info->io.regsize)
                info->io.regsize = DEFAULT_REGSPACING;
        info->io.regshift = regshifts[intf_num];

        info->irq_setup = NULL;
        *new_info = info;

        if (si_type[intf_num] == NULL)
                si_type[intf_num] = "kcs";

        printk("ipmi_si: Trying \"%s\" at memory address 0x%lx\n",
               si_type[intf_num], addrs[intf_num]);
        return 0;
}
#ifdef CONFIG_ACPI

#include <linux/acpi.h>

/* Once we get an ACPI failure, we don't try any more, because we go
   through the tables sequentially.  Once we don't find a table, there
   are no more. */
static int acpi_failure = 0;

/* For GPE-type interrupts. */
static u32 ipmi_acpi_gpe(void *context)
{
        struct smi_info *smi_info = context;
        unsigned long   flags;
#ifdef DEBUG_TIMING
        struct timeval  t;
#endif

        spin_lock_irqsave(&(smi_info->si_lock), flags);

        spin_lock(&smi_info->count_lock);
        smi_info->interrupts++;
        spin_unlock(&smi_info->count_lock);

        if (atomic_read(&smi_info->stop_operation))
                goto out;

#ifdef DEBUG_TIMING
        do_gettimeofday(&t);
        printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
        smi_event_handler(smi_info, 0);
 out:
        spin_unlock_irqrestore(&(smi_info->si_lock), flags);

        return ACPI_INTERRUPT_HANDLED;
}

static int acpi_gpe_irq_setup(struct smi_info *info)
{
        acpi_status status;

        if (!info->irq)
                return 0;

        /* FIXME - is level triggered right? */
        status = acpi_install_gpe_handler(NULL,
                                          info->irq,
                                          ACPI_GPE_LEVEL_TRIGGERED,
                                          &ipmi_acpi_gpe,
                                          info);
        if (status != AE_OK) {
                printk(KERN_WARNING
                       "ipmi_si: %s unable to claim ACPI GPE %d,"
                       " running polled\n",
                       DEVICE_NAME, info->irq);
                info->irq = 0;
                return -EINVAL;
        } else {
                printk("  Using ACPI GPE %d\n", info->irq);
                return 0;
        }
}

static void acpi_gpe_irq_cleanup(struct smi_info *info)
{
        if (!info->irq)
                return;

        acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
}
/*
 * Defined at
 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
 */
struct SPMITable {
        s8      Signature[4];
        u32     Length;
        u8      Revision;
        u8      Checksum;
        s8      OEMID[6];
        s8      OEMTableID[8];
        s8      OEMRevision[4];
        s8      CreatorID[4];
        s8      CreatorRevision[4];
        u8      InterfaceType;
        u8      IPMIlegacy;
        s16     SpecificationRevision;

        /*
         * Bit 0 - SCI interrupt supported
         * Bit 1 - I/O APIC/SAPIC
         */
        u8      InterruptType;

        /* If bit 0 of InterruptType is set, then this is the SCI
           interrupt in the GPEx_STS register. */
        u8      GPE;

        s16     Reserved;

        /* If bit 1 of InterruptType is set, then this is the I/O
           APIC/SAPIC interrupt. */
        u32     GlobalSystemInterrupt;

        /* The actual register address. */
        struct acpi_generic_address addr;

        u8      UID[4];

        s8      spmi_id[1]; /* A '\0' terminated array starts here. */
};
static int try_init_acpi(int intf_num, struct smi_info **new_info)
{
        struct smi_info  *info;
        acpi_status      status;
        struct SPMITable *spmi;
        char             *io_type;
        u8               addr_space;

        if (acpi_failure)
                return -ENODEV;

        status = acpi_get_firmware_table("SPMI", intf_num+1,
                                         ACPI_LOGICAL_ADDRESSING,
                                         (struct acpi_table_header **) &spmi);
        if (status != AE_OK) {
                acpi_failure = 1;
                return -ENODEV;
        }

        if (spmi->IPMIlegacy != 1) {
                printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
                return -ENODEV;
        }

        if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                addr_space = IPMI_MEM_ADDR_SPACE;
        else
                addr_space = IPMI_IO_ADDR_SPACE;
        if (!is_new_interface(-1, addr_space, spmi->addr.address))
                return -ENODEV;

        if (!spmi->addr.register_bit_width) {
                acpi_failure = 1;
                return -ENODEV;
        }

        /* Figure out the interface type. */
        switch (spmi->InterfaceType)
        {
        case 1: /* KCS */
                si_type[intf_num] = "kcs";
                break;

        case 2: /* SMIC */
                si_type[intf_num] = "smic";
                break;

        case 3: /* BT */
                si_type[intf_num] = "bt";
                break;

        default:
                printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
                       spmi->InterfaceType);
                return -EIO;
        }

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
                printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
                return -ENOMEM;
        }
        memset(info, 0, sizeof(*info));

        if (spmi->InterruptType & 1) {
                /* We've got a GPE interrupt. */
                info->irq = spmi->GPE;
                info->irq_setup = acpi_gpe_irq_setup;
                info->irq_cleanup = acpi_gpe_irq_cleanup;
        } else if (spmi->InterruptType & 2) {
                /* We've got an APIC/SAPIC interrupt. */
                info->irq = spmi->GlobalSystemInterrupt;
                info->irq_setup = std_irq_setup;
                info->irq_cleanup = std_irq_cleanup;
        } else {
                /* Use the default interrupt setting. */
                info->irq = 0;
                info->irq_setup = NULL;
        }

        if (spmi->addr.register_bit_width) {
                /* A (hopefully) properly formed register bit width. */
                regspacings[intf_num] = spmi->addr.register_bit_width / 8;
                info->io.regspacing = spmi->addr.register_bit_width / 8;
        } else {
                /* Some broken systems get this wrong and set the value
                 * to zero.  Assume it is the default spacing.  If that
                 * is wrong, too bad, the vendor should fix the tables. */
                regspacings[intf_num] = DEFAULT_REGSPACING;
                info->io.regspacing = DEFAULT_REGSPACING;
        }
        regsizes[intf_num] = regspacings[intf_num];
        info->io.regsize = regsizes[intf_num];
        regshifts[intf_num] = spmi->addr.register_bit_offset;
        info->io.regshift = regshifts[intf_num];

        if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
                io_type = "memory";
                info->io_setup = mem_setup;
                addrs[intf_num] = spmi->addr.address;
                info->io.info = &(addrs[intf_num]);
        } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
                io_type = "I/O";
                info->io_setup = port_setup;
                ports[intf_num] = spmi->addr.address;
                info->io.info = &(ports[intf_num]);
        } else {
                kfree(info);
                printk("ipmi_si: Unknown ACPI I/O Address type\n");
                return -EIO;
        }

        *new_info = info;

        printk("ipmi_si: ACPI/SPMI specifies \"%s\" %s SI @ 0x%lx\n",
               si_type[intf_num], io_type, (unsigned long) spmi->addr.address);
        return 0;
}
#endif /* CONFIG_ACPI */
#ifdef CONFIG_X86
typedef struct dmi_ipmi_data
{
        u8              type;
        u8              addr_space;
        unsigned long   base_addr;
        u8              irq;
        u8              offset;
        u8              slave_addr;
} dmi_ipmi_data_t;

static dmi_ipmi_data_t dmi_data[SI_MAX_DRIVERS];
static int dmi_data_entries;

static int __init decode_dmi(struct dmi_header *dm, int intf_num)
{
        u8              *data = (u8 *)dm;
        unsigned long   base_addr;
        u8              reg_spacing;
        u8              len = dm->length;
        dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;

        ipmi_data->type = data[4];

        memcpy(&base_addr, data+8, sizeof(unsigned long));
        if (len >= 0x11) {
                if (base_addr & 1) {
                        /* I/O */
                        base_addr &= 0xFFFE;
                        ipmi_data->addr_space = IPMI_IO_ADDR_SPACE;
                } else {
                        /* Memory */
                        ipmi_data->addr_space = IPMI_MEM_ADDR_SPACE;
                }
                /* If bit 4 of byte 0x10 is set, then the lsb for the address
                   is odd. */
                ipmi_data->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);

                ipmi_data->irq = data[0x11];

                /* The top two bits of byte 0x10 hold the register spacing. */
                reg_spacing = (data[0x10] & 0xC0) >> 6;
                switch (reg_spacing) {
                case 0x00: /* Byte boundaries */
                        ipmi_data->offset = 1;
                        break;
                case 0x01: /* 32-bit boundaries */
                        ipmi_data->offset = 4;
                        break;
                case 0x02: /* 16-byte boundaries */
                        ipmi_data->offset = 16;
                        break;
                default:
                        /* Some other interface, just ignore it. */
                        return -EIO;
                }
        } else {
                /* Old DMI spec. */
                /* Note that technically, the lower bit of the base
                 * address should be 1 if the address is I/O and 0 if
                 * the address is in memory.  So many systems get that
                 * wrong (and all that I have seen are I/O) so we just
                 * ignore that bit and assume I/O.  Systems that use
                 * memory should use the newer spec, anyway. */
                ipmi_data->base_addr = base_addr & 0xfffe;
                ipmi_data->addr_space = IPMI_IO_ADDR_SPACE;
                ipmi_data->offset = 1;
        }

        ipmi_data->slave_addr = data[6];

        if (is_new_interface(-1, ipmi_data->addr_space, ipmi_data->base_addr)) {
                dmi_data_entries++;
                return 0;
        }

        memset(ipmi_data, 0, sizeof(dmi_ipmi_data_t));

        return -1;
}
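/* Worked example (illustrative, not from the original source): an SMBIOS
 * type-38 record with data[0x10] = 0x50 decodes as follows: bit 4 set means
 * the true lsb of the base address is 1, and the top two bits (0x01) select
 * 32-bit register boundaries, so ipmi_data->offset = 4. */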
static void __init dmi_find_bmc(void)
{
        struct dmi_device *dev = NULL;
        int               intf_num = 0;

        while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
                if (intf_num >= SI_MAX_DRIVERS)
                        break;

                decode_dmi((struct dmi_header *) dev->device_data, intf_num++);
        }
}
static int try_init_smbios(int intf_num, struct smi_info **new_info)
{
        struct smi_info *info;
        dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;
        char            *io_type;

        if (intf_num >= dmi_data_entries)
                return -ENODEV;

        switch (ipmi_data->type) {
        case 0x01: /* KCS */
                si_type[intf_num] = "kcs";
                break;
        case 0x02: /* SMIC */
                si_type[intf_num] = "smic";
                break;
        case 0x03: /* BT */
                si_type[intf_num] = "bt";
                break;
        default:
                return -EIO;
        }

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
                printk(KERN_ERR "ipmi_si: Could not allocate SI data (4)\n");
                return -ENOMEM;
        }
        memset(info, 0, sizeof(*info));

        if (ipmi_data->addr_space == 1) {
                io_type = "memory";
                info->io_setup = mem_setup;
                addrs[intf_num] = ipmi_data->base_addr;
                info->io.info = &(addrs[intf_num]);
        } else if (ipmi_data->addr_space == 2) {
                io_type = "I/O";
                info->io_setup = port_setup;
                ports[intf_num] = ipmi_data->base_addr;
                info->io.info = &(ports[intf_num]);
        } else {
                kfree(info);
                printk("ipmi_si: Unknown SMBIOS I/O Address type.\n");
                return -EIO;
        }

        regspacings[intf_num] = ipmi_data->offset;
        info->io.regspacing = regspacings[intf_num];
        if (!info->io.regspacing)
                info->io.regspacing = DEFAULT_REGSPACING;
        info->io.regsize = DEFAULT_REGSPACING;
        info->io.regshift = regshifts[intf_num];

        info->slave_addr = ipmi_data->slave_addr;

        irqs[intf_num] = ipmi_data->irq;

        *new_info = info;

        printk("ipmi_si: Found SMBIOS-specified state machine at %s"
               " address 0x%lx, slave address 0x%x\n",
               io_type, (unsigned long)ipmi_data->base_addr,
               ipmi_data->slave_addr);
        return 0;
}
#endif /* CONFIG_X86 */
#ifdef CONFIG_PCI

#define PCI_ERMC_CLASSCODE  0x0C0700
#define PCI_HP_VENDOR_ID    0x103C
#define PCI_MMC_DEVICE_ID   0x121A
#define PCI_MMC_ADDR_CW     0x10

/* Avoid more than one attempt to probe pci smic. */
static int pci_smic_checked = 0;

static int find_pci_smic(int intf_num, struct smi_info **new_info)
{
        struct smi_info *info;
        int             error;
        struct pci_dev  *pci_dev = NULL;
        u16             base_addr;
        int             fe_rmc = 0;

        if (pci_smic_checked)
                return -ENODEV;

        pci_smic_checked = 1;

        pci_dev = pci_get_device(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID, NULL);
        if (!pci_dev) {
                pci_dev = pci_get_class(PCI_ERMC_CLASSCODE, NULL);
                if (pci_dev && (pci_dev->subsystem_vendor == PCI_HP_VENDOR_ID))
                        fe_rmc = 1;
                else
                        return -ENODEV;
        }

        error = pci_read_config_word(pci_dev, PCI_MMC_ADDR_CW, &base_addr);
        if (error)
        {
                pci_dev_put(pci_dev);
                printk(KERN_ERR
                       "ipmi_si: pci_read_config_word() failed (%d).\n",
                       error);
                return -ENODEV;
        }

        /* Bit 0: 1 specifies programmed I/O, 0 specifies memory mapped I/O */
        if (!(base_addr & 0x0001))
        {
                pci_dev_put(pci_dev);
                printk(KERN_ERR
                       "ipmi_si: memory mapped I/O not supported for PCI"
                       " smic.\n");
                return -ENODEV;
        }

        base_addr &= 0xFFFE;
        if (!fe_rmc)
                /* Data register starts at base address + 1 in eRMC */
                ++base_addr;

        if (!is_new_interface(-1, IPMI_IO_ADDR_SPACE, base_addr)) {
                pci_dev_put(pci_dev);
                return -ENODEV;
        }

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
                pci_dev_put(pci_dev);
                printk(KERN_ERR "ipmi_si: Could not allocate SI data (5)\n");
                return -ENOMEM;
        }
        memset(info, 0, sizeof(*info));

        info->io_setup = port_setup;
        ports[intf_num] = base_addr;
        info->io.info = &(ports[intf_num]);
        info->io.regspacing = regspacings[intf_num];
        if (!info->io.regspacing)
                info->io.regspacing = DEFAULT_REGSPACING;
        info->io.regsize = DEFAULT_REGSPACING;
        info->io.regshift = regshifts[intf_num];

        *new_info = info;

        irqs[intf_num] = pci_dev->irq;
        si_type[intf_num] = "smic";

        printk("ipmi_si: Found PCI SMIC at I/O address 0x%lx\n",
               (long unsigned int) base_addr);

        pci_dev_put(pci_dev);
        return 0;
}
#endif /* CONFIG_PCI */
static int try_init_plug_and_play(int intf_num, struct smi_info **new_info)
{
#ifdef CONFIG_PCI
        if (find_pci_smic(intf_num, new_info) == 0)
                return 0;
#endif
        /* Include other methods here. */

        return -ENODEV;
}
static int try_get_dev_id(struct smi_info *smi_info)
{
        unsigned char     msg[2];
        unsigned char     *resp;
        unsigned long     resp_len;
        enum si_sm_result smi_result;
        int               rv = 0;

        resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
        if (!resp)
                return -ENOMEM;

        /* Do a Get Device ID command, since it comes back with some
           useful info. */
        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
        msg[1] = IPMI_GET_DEVICE_ID_CMD;
        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

        smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
        for (;;)
        {
                if (smi_result == SI_SM_CALL_WITH_DELAY ||
                    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
                        schedule_timeout_uninterruptible(1);
                        smi_result = smi_info->handlers->event(
                                smi_info->si_sm, 100);
                }
                else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
                {
                        smi_result = smi_info->handlers->event(
                                smi_info->si_sm, 0);
                }
                else
                        break;
        }
        if (smi_result == SI_SM_HOSED) {
                /* We couldn't get the state machine to run, so whatever's at
                   the port is probably not an IPMI SMI interface. */
                rv = -ENODEV;
                goto out;
        }

        /* Otherwise, we got some data. */
        resp_len = smi_info->handlers->get_result(smi_info->si_sm,
                                                  resp, IPMI_MAX_MSG_LENGTH);
        if (resp_len < 6) {
                /* That's odd, it should be longer. */
                rv = -EINVAL;
                goto out;
        }

        if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
                /* That's odd, it shouldn't be able to fail. */
                rv = -EINVAL;
                goto out;
        }

        /* Record info from the get device id, in case we need it. */
        memcpy(&smi_info->device_id, &resp[3],
               min_t(unsigned long, resp_len-3, sizeof(smi_info->device_id)));

 out:
        kfree(resp);
        return rv;
}
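/* Response layout (IPMI convention, noted here for clarity): resp[0] is the
 * response netfn/LUN byte, resp[1] echoes the command, resp[2] is the
 * completion code, and the Get Device ID payload starts at resp[3], which
 * is why the memcpy above copies from &resp[3]. */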
static int type_file_read_proc(char *page, char **start, off_t off,
                               int count, int *eof, void *data)
{
        char            *out = (char *) page;
        struct smi_info *smi = data;

        switch (smi->si_type) {
        case SI_KCS:
                return sprintf(out, "kcs\n");
        case SI_SMIC:
                return sprintf(out, "smic\n");
        case SI_BT:
                return sprintf(out, "bt\n");
        default:
                return 0;
        }
}

static int stat_file_read_proc(char *page, char **start, off_t off,
                               int count, int *eof, void *data)
{
        char            *out = (char *) page;
        struct smi_info *smi = data;

        out += sprintf(out, "interrupts_enabled:    %d\n",
                       smi->irq && !smi->interrupt_disabled);
        out += sprintf(out, "short_timeouts:        %ld\n",
                       smi->short_timeouts);
        out += sprintf(out, "long_timeouts:         %ld\n",
                       smi->long_timeouts);
        out += sprintf(out, "timeout_restarts:      %ld\n",
                       smi->timeout_restarts);
        out += sprintf(out, "idles:                 %ld\n",
                       smi->idles);
        out += sprintf(out, "interrupts:            %ld\n",
                       smi->interrupts);
        out += sprintf(out, "attentions:            %ld\n",
                       smi->attentions);
        out += sprintf(out, "flag_fetches:          %ld\n",
                       smi->flag_fetches);
        out += sprintf(out, "hosed_count:           %ld\n",
                       smi->hosed_count);
        out += sprintf(out, "complete_transactions: %ld\n",
                       smi->complete_transactions);
        out += sprintf(out, "events:                %ld\n",
                       smi->events);
        out += sprintf(out, "watchdog_pretimeouts:  %ld\n",
                       smi->watchdog_pretimeouts);
        out += sprintf(out, "incoming_messages:     %ld\n",
                       smi->incoming_messages);

        return (out - ((char *) page));
}
/*
 * oem_data_avail_to_receive_msg_avail
 * @info - smi_info structure with msg_flags set
 *
 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL.
 * Returns 1 indicating need to re-run handle_flags().
 */
static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
{
        smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
                               RECEIVE_MSG_AVAIL);
        return 1;
}
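/* For illustration (not in the original source): msg_flags = 0x20
 * (OEM0_DATA_AVAIL) becomes 0x01 (RECEIVE_MSG_AVAIL), so the re-run of
 * handle_flags() takes the "messages available" path and issues
 * IPMI_GET_MSG_CMD to drain the BMC. */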
/*
 * setup_dell_poweredge_oem_data_handler
 * @info - smi_info.device_id must be populated
 *
 * Systems that match, but have firmware version < 1.40 may assert
 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
 * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
 * as RECEIVE_MSG_AVAIL instead.
 *
 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
 * asserts the OEM[012] bits, and since if it did the driver would have
 * to change to handle that properly anyway, we don't actually check
 * for the firmware version.
 *   Device ID = 0x20            BMC on PowerEdge 8G servers
 *   Device Revision = 0x80
 *   Firmware Revision1 = 0x01   BMC version 1.40
 *   Firmware Revision2 = 0x40   BCD encoded
 *   IPMI Version = 0x51         IPMI 1.5
 *   Manufacturer ID = A2 02 00  Dell IANA
 *
 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
 * OEM0_DATA_AVAIL and need to be treated as RECEIVE_MSG_AVAIL.
 */
#define DELL_POWEREDGE_8G_BMC_DEVICE_ID    0x20
#define DELL_POWEREDGE_8G_BMC_DEVICE_REV   0x80
#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00}
static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
{
        struct ipmi_device_id *id = &smi_info->device_id;
        const char mfr[3] = DELL_IANA_MFR_ID;
        if (!memcmp(mfr, id->manufacturer_id, sizeof(mfr))) {
                if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
                    id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
                    id->ipmi_version    == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
                        smi_info->oem_data_avail_handler =
                                oem_data_avail_to_receive_msg_avail;
                }
                else if (ipmi_version_major(id) < 1 ||
                         (ipmi_version_major(id) == 1 &&
                          ipmi_version_minor(id) < 5)) {
                        smi_info->oem_data_avail_handler =
                                oem_data_avail_to_receive_msg_avail;
                }
        }
}
#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
static void return_hosed_msg_badsize(struct smi_info *smi_info)
{
        struct ipmi_smi_msg *msg = smi_info->curr_msg;

        /* Make it a response */
        msg->rsp[0] = msg->data[0] | 4;
        msg->rsp[1] = msg->data[1];
        msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
        msg->rsp_size = 3;

        smi_info->curr_msg = NULL;
        deliver_recv_msg(smi_info, msg);
}
/*
 * dell_poweredge_bt_xaction_handler
 * @info - smi_info.device_id must be populated
 *
 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
 * not respond to a Get SDR command if the length of the data
 * requested is exactly 0x3A, which leads to command timeouts and no
 * data returned.  This intercepts such commands, and causes userspace
 * callers to try again with a different-sized buffer, which succeeds.
 */
#define STORAGE_NETFN 0x0A
#define STORAGE_CMD_GET_SDR 0x23
static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
                                             unsigned long unused,
                                             void *in)
{
        struct smi_info *smi_info = in;
        unsigned char   *data = smi_info->curr_msg->data;
        unsigned int    size  = smi_info->curr_msg->data_size;
        if (size >= 8 &&
            (data[0]>>2) == STORAGE_NETFN &&
            data[1] == STORAGE_CMD_GET_SDR &&
            data[7] == 0x3A) {
                return_hosed_msg_badsize(smi_info);
                return NOTIFY_STOP;
        }
        return NOTIFY_DONE;
}

static struct notifier_block dell_poweredge_bt_xaction_notifier = {
        .notifier_call = dell_poweredge_bt_xaction_handler,
};

/*
 * setup_dell_poweredge_bt_xaction_handler
 * @info - smi_info.device_id must be filled in already
 *
 * Fills in smi_info.device_id.start_transaction_pre_hook
 * when we know what function to use there.
 */
static void
setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
{
        struct ipmi_device_id *id = &smi_info->device_id;
        const char mfr[3] = DELL_IANA_MFR_ID;
        if (!memcmp(mfr, id->manufacturer_id, sizeof(mfr)) &&
            smi_info->si_type == SI_BT)
                register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
}
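/* For illustration (hypothetical request, not from the original source): a
 * Get SDR request with data[] = { 0x28, 0x23, res_lo, res_hi, rec_lo,
 * rec_hi, offset, 0x3A } matches the test above, since 0x28 >> 2 == 0x0A
 * (STORAGE_NETFN) and data[7] == 0x3A is the problematic byte count. */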
/*
 * setup_oem_data_handler
 * @info - smi_info.device_id must be filled in already
 *
 * Fills in smi_info.device_id.oem_data_available_handler
 * when we know what function to use there.
 */
static void setup_oem_data_handler(struct smi_info *smi_info)
{
        setup_dell_poweredge_oem_data_handler(smi_info);
}

static void setup_xaction_handlers(struct smi_info *smi_info)
{
        setup_dell_poweredge_bt_xaction_handler(smi_info);
}

static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
{
        if (smi_info->thread != NULL && smi_info->thread != ERR_PTR(-ENOMEM))
                kthread_stop(smi_info->thread);
        del_timer_sync(&smi_info->si_timer);
}
/* Returns 0 if initialized, or negative on an error. */
static int init_one_smi(int intf_num, struct smi_info **smi)
{
        int             rv;
        struct smi_info *new_smi;

        rv = try_init_mem(intf_num, &new_smi);
        if (rv)
                rv = try_init_port(intf_num, &new_smi);
#ifdef CONFIG_ACPI
        if (rv && si_trydefaults)
                rv = try_init_acpi(intf_num, &new_smi);
#endif
#ifdef CONFIG_X86
        if (rv && si_trydefaults)
                rv = try_init_smbios(intf_num, &new_smi);
#endif
        if (rv && si_trydefaults)
                rv = try_init_plug_and_play(intf_num, &new_smi);

        if (rv)
                return rv;

        /* So we know not to free it unless we have allocated one. */
        new_smi->intf = NULL;
        new_smi->si_sm = NULL;
        new_smi->handlers = NULL;

        if (!new_smi->irq_setup) {
                new_smi->irq = irqs[intf_num];
                new_smi->irq_setup = std_irq_setup;
                new_smi->irq_cleanup = std_irq_cleanup;
        }

        /* Default to KCS if no type is specified. */
        if (si_type[intf_num] == NULL) {
                if (si_trydefaults)
                        si_type[intf_num] = "kcs";
                else {
                        rv = -EINVAL;
                        goto out_err;
                }
        }

        /* Set up the state machine to use. */
        if (strcmp(si_type[intf_num], "kcs") == 0) {
                new_smi->handlers = &kcs_smi_handlers;
                new_smi->si_type = SI_KCS;
        } else if (strcmp(si_type[intf_num], "smic") == 0) {
                new_smi->handlers = &smic_smi_handlers;
                new_smi->si_type = SI_SMIC;
        } else if (strcmp(si_type[intf_num], "bt") == 0) {
                new_smi->handlers = &bt_smi_handlers;
                new_smi->si_type = SI_BT;
        } else {
                /* No support for anything else yet. */
                rv = -EIO;
                goto out_err;
        }

        /* Allocate the state machine's data and initialize it. */
        new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
        if (!new_smi->si_sm) {
                printk(" Could not allocate state machine memory\n");
                rv = -ENOMEM;
                goto out_err;
        }
        new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
                                                        &new_smi->io);

        /* Now that we know the I/O size, we can set up the I/O. */
        rv = new_smi->io_setup(new_smi);
        if (rv) {
                printk(" Could not set up I/O space\n");
                goto out_err;
        }

        spin_lock_init(&(new_smi->si_lock));
        spin_lock_init(&(new_smi->msg_lock));
        spin_lock_init(&(new_smi->count_lock));

        /* Do low-level detection first. */
        if (new_smi->handlers->detect(new_smi->si_sm)) {
                rv = -ENODEV;
                goto out_err;
        }

        /* Attempt a get device id command.  If it fails, we probably
           don't have a SMI here. */
        rv = try_get_dev_id(new_smi);
        if (rv)
                goto out_err;

        setup_oem_data_handler(new_smi);
        setup_xaction_handlers(new_smi);

        /* Try to claim any interrupts. */
        new_smi->irq_setup(new_smi);

        INIT_LIST_HEAD(&(new_smi->xmit_msgs));
        INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
        new_smi->curr_msg = NULL;
        atomic_set(&new_smi->req_events, 0);
        new_smi->run_to_completion = 0;

        new_smi->interrupt_disabled = 0;
        atomic_set(&new_smi->stop_operation, 0);
        new_smi->intf_num = intf_num;

        /* Start clearing the flags before we enable interrupts or the
           timer to avoid racing with the timer. */
        start_clear_flags(new_smi);
        /* IRQ is defined to be set when non-zero. */
        if (new_smi->irq)
                new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;

        /* The ipmi_register_smi() code does some operations to
           determine the channel information, so we must be ready to
           handle operations before it is called.  This means we have
           to stop the timer if we get an error after this point. */
        init_timer(&(new_smi->si_timer));
        new_smi->si_timer.data = (long) new_smi;
        new_smi->si_timer.function = smi_timeout;
        new_smi->last_timeout_jiffies = jiffies;
        new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;

        add_timer(&(new_smi->si_timer));
        if (new_smi->si_type != SI_BT)
                new_smi->thread = kthread_run(ipmi_thread, new_smi,
                                              "kipmi%d", new_smi->intf_num);

        rv = ipmi_register_smi(&handlers,
                               new_smi,
                               ipmi_version_major(&new_smi->device_id),
                               ipmi_version_minor(&new_smi->device_id),
                               new_smi->slave_addr,
                               &(new_smi->intf));
        if (rv) {
                printk(KERN_ERR
                       "ipmi_si: Unable to register device: error %d\n",
                       rv);
                goto out_err_stop_timer;
        }

        rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
                                     type_file_read_proc, NULL,
                                     new_smi, THIS_MODULE);
        if (rv) {
                printk(KERN_ERR
                       "ipmi_si: Unable to create proc entry: %d\n",
                       rv);
                goto out_err_stop_timer;
        }

        rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
                                     stat_file_read_proc, NULL,
                                     new_smi, THIS_MODULE);
        if (rv) {
                printk(KERN_ERR
                       "ipmi_si: Unable to create proc entry: %d\n",
                       rv);
                goto out_err_stop_timer;
        }

        *smi = new_smi;

        printk(" IPMI %s interface initialized\n", si_type[intf_num]);

        return 0;

 out_err_stop_timer:
        atomic_inc(&new_smi->stop_operation);
        wait_for_timer_and_thread(new_smi);

 out_err:
        if (new_smi->intf)
                ipmi_unregister_smi(new_smi->intf);

        new_smi->irq_cleanup(new_smi);

        /* Wait until we know that we are out of any interrupt
           handlers that might have been running before we freed the
           interrupt. */
        synchronize_sched();

        if (new_smi->si_sm) {
                if (new_smi->handlers)
                        new_smi->handlers->cleanup(new_smi->si_sm);
                kfree(new_smi->si_sm);
        }
        new_smi->io_cleanup(new_smi);

        return rv;
}
static __init int init_ipmi_si(void)
{
        int  rv = 0;
        int  pos = 0;
        int  i;
        char *str;

        if (initialized)
                return 0;
        initialized = 1;

        /* Parse out the si_type string into its components. */
        str = si_type_str;
        if (*str != '\0') {
                for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
                        si_type[i] = str;
                        str = strchr(str, ',');
                        if (str) {
                                *str = '\0';
                                str++;
                        } else
                                break;
                }
        }

        printk(KERN_INFO "IPMI System Interface driver.\n");

#ifdef CONFIG_X86
        dmi_find_bmc();
#endif

        rv = init_one_smi(0, &(smi_infos[pos]));
        if (rv && !ports[0] && si_trydefaults) {
                /* If we are trying defaults and the initial port is
                   not set, then set it. */
                si_type[0] = "kcs";
                ports[0] = DEFAULT_KCS_IO_PORT;
                rv = init_one_smi(0, &(smi_infos[pos]));
                if (rv) {
                        /* No KCS - try SMIC */
                        si_type[0] = "smic";
                        ports[0] = DEFAULT_SMIC_IO_PORT;
                        rv = init_one_smi(0, &(smi_infos[pos]));
                }
                if (rv) {
                        /* No SMIC - try BT */
                        si_type[0] = "bt";
                        ports[0] = DEFAULT_BT_IO_PORT;
                        rv = init_one_smi(0, &(smi_infos[pos]));
                }
        }
        if (rv == 0)
                pos++;

        for (i = 1; i < SI_MAX_PARMS; i++) {
                rv = init_one_smi(i, &(smi_infos[pos]));
                if (rv == 0)
                        pos++;
        }

        if (smi_infos[0] == NULL) {
                printk("ipmi_si: Unable to find any System Interface(s)\n");
                return -ENODEV;
        }

        return 0;
}
module_init(init_ipmi_si);
static void __exit cleanup_one_si(struct smi_info *to_clean)
{
        int           rv;
        unsigned long flags;

        if (!to_clean)
                return;

        /* Tell the timer and interrupt handlers that we are shutting
           down. */
        spin_lock_irqsave(&(to_clean->si_lock), flags);
        spin_lock(&(to_clean->msg_lock));

        atomic_inc(&to_clean->stop_operation);
        to_clean->irq_cleanup(to_clean);

        spin_unlock(&(to_clean->msg_lock));
        spin_unlock_irqrestore(&(to_clean->si_lock), flags);

        /* Wait until we know that we are out of any interrupt
           handlers that might have been running before we freed the
           interrupt. */
        synchronize_sched();

        wait_for_timer_and_thread(to_clean);

        /* Interrupts and timeouts are stopped, now make sure the
           interface is in a clean state. */
        while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
                poll(to_clean);
                schedule_timeout_uninterruptible(1);
        }

        rv = ipmi_unregister_smi(to_clean->intf);
        if (rv) {
                printk(KERN_ERR
                       "ipmi_si: Unable to unregister device: errno=%d\n",
                       rv);
        }

        to_clean->handlers->cleanup(to_clean->si_sm);

        kfree(to_clean->si_sm);

        to_clean->io_cleanup(to_clean);
}
static __exit void cleanup_ipmi_si(void)
{
        int i;

        if (!initialized)
                return;

        for (i = 0; i < SI_MAX_DRIVERS; i++) {
                cleanup_one_si(smi_infos[i]);
        }
}
module_exit(cleanup_ipmi_si);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");