4 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
7 * Author: MontaVista Software, Inc.
8 * Corey Minyard <minyard@mvista.com>
11 * Copyright 2002 MontaVista Software Inc.
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
19 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
20 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
25 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
27 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
28 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * You should have received a copy of the GNU General Public License along
31 * with this program; if not, write to the Free Software Foundation, Inc.,
32 * 675 Mass Ave, Cambridge, MA 02139, USA.
36 * This file holds the "policy" for the interface to the SMI state
37 * machine. It does the configuration, handles timers and interrupts,
38 * and drives the real SMI state machine.
41 #include <linux/module.h>
42 #include <linux/moduleparam.h>
43 #include <asm/system.h>
44 #include <linux/sched.h>
45 #include <linux/timer.h>
46 #include <linux/errno.h>
47 #include <linux/spinlock.h>
48 #include <linux/slab.h>
49 #include <linux/delay.h>
50 #include <linux/list.h>
51 #include <linux/pci.h>
52 #include <linux/ioport.h>
53 #include <linux/notifier.h>
54 #include <linux/mutex.h>
55 #include <linux/kthread.h>
57 #include <linux/interrupt.h>
58 #include <linux/rcupdate.h>
59 #include <linux/ipmi_smi.h>
61 #include "ipmi_si_sm.h"
62 #include <linux/init.h>
63 #include <linux/dmi.h>
64 #include <linux/string.h>
65 #include <linux/ctype.h>
67 #define PFX "ipmi_si: "
69 /* Measure times between events in the driver. */
/* Timer cadence for the polled state machine: a 10ms "long" tick. */
72 /* Call every 10 ms. */
73 #define SI_TIMEOUT_TIME_USEC 10000
74 #define SI_USEC_PER_JIFFY (1000000/HZ)
75 #define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
76 #define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a
/* States of the per-interface command/response state machine
   (enum si_intf_state).  NOTE(review): extraction dropped some
   enumerators here — confirm against the full source. */
84 SI_CLEARING_FLAGS_THEN_SET_IRQ,
86 SI_ENABLE_INTERRUPTS1,
88 /* FIXME - add watchdog stuff. */
91 /* Some BT-specific defines we need here. */
/* BT interrupt-mask register offset and its bit layout. */
92 #define IPMI_BT_INTMASK_REG 2
93 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2
94 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1
/* System-interface flavors (enum si_type); si_to_str is indexed by it. */
97 SI_KCS, SI_SMIC, SI_BT
99 static char *si_to_str[] = { "kcs", "smic", "bt" };
101 #define DEVICE_NAME "ipmi_si"
/* Driver-model glue: this driver registers on the platform bus. */
103 static struct device_driver ipmi_driver =
106 .bus = &platform_bus_type
/* Per-interface state (struct smi_info).  Holds the low-level state
   machine, transmit queues, timer/IRQ bookkeeping and statistics for
   one KCS/SMIC/BT system interface. */
113 struct si_sm_data *si_sm;
114 struct si_sm_handlers *handlers;
115 enum si_type si_type;
/* Normal and high-priority transmit queues; curr_msg is the message
   currently owned by the state machine (NULL when idle). */
118 struct list_head xmit_msgs;
119 struct list_head hp_xmit_msgs;
120 struct ipmi_smi_msg *curr_msg;
121 enum si_intf_state si_state;
123 /* Used to handle the various types of I/O that can occur with
126 int (*io_setup)(struct smi_info *info);
127 void (*io_cleanup)(struct smi_info *info);
128 int (*irq_setup)(struct smi_info *info);
129 void (*irq_cleanup)(struct smi_info *info);
130 unsigned int io_size;
131 char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
132 void (*addr_source_cleanup)(struct smi_info *info);
133 void *addr_source_data;
135 /* Per-OEM handler, called from handle_flags().
136 Returns 1 when handle_flags() needs to be re-run
137 or 0 indicating it set si_state itself.
139 int (*oem_data_avail_handler)(struct smi_info *smi_info);
141 /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
142 is set to hold the flags until we are done handling everything
/* Bit meanings of msg_flags, per the IPMI Get Message Flags command. */
144 #define RECEIVE_MSG_AVAIL 0x01
145 #define EVENT_MSG_BUFFER_FULL 0x02
146 #define WDT_PRE_TIMEOUT_INT 0x08
147 #define OEM0_DATA_AVAIL 0x20
148 #define OEM1_DATA_AVAIL 0x40
149 #define OEM2_DATA_AVAIL 0x80
150 #define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
153 unsigned char msg_flags;
155 /* If set to true, this will request events the next time the
156 state machine is idle. */
159 /* If true, run the state machine to completion on every send
160 call. Generally used after a panic to make sure stuff goes
162 int run_to_completion;
164 /* The I/O port of an SI interface. */
167 /* The space between start addresses of the two ports. For
168 instance, if the first port is 0xca2 and the spacing is 4, then
169 the second port is 0xca6. */
170 unsigned int spacing;
172 /* zero if no irq; */
175 /* The timer for this si. */
176 struct timer_list si_timer;
178 /* The time (in jiffies) the last timeout occurred at. */
179 unsigned long last_timeout_jiffies;
181 /* Used to gracefully stop the timer without race conditions. */
182 atomic_t stop_operation;
184 /* The driver will disable interrupts when it gets into a
185 situation where it cannot handle messages due to lack of
186 memory. Once that situation clears up, it will re-enable
188 int interrupt_disabled;
190 /* From the get device id response... */
191 struct ipmi_device_id device_id;
193 /* Driver model stuff. */
195 struct platform_device *pdev;
197 /* True if we allocated the device, false if it came from
198 * someplace else (like PCI). */
201 /* Slave address, could be reported from DMI. */
202 unsigned char slave_addr;
204 /* Counters and things for the proc filesystem. */
/* All counters below are protected by count_lock. */
205 spinlock_t count_lock;
206 unsigned long short_timeouts;
207 unsigned long long_timeouts;
208 unsigned long timeout_restarts;
210 unsigned long interrupts;
211 unsigned long attentions;
212 unsigned long flag_fetches;
213 unsigned long hosed_count;
214 unsigned long complete_transactions;
215 unsigned long events;
216 unsigned long watchdog_pretimeouts;
217 unsigned long incoming_messages;
/* Optional kipmid kernel thread that polls the interface. */
219 struct task_struct *thread;
221 struct list_head link;
/* Maximum number of interfaces configurable via module parameters. */
224 #define SI_MAX_PARMS 4
/* force_kipmid[i]: user override for whether interface i gets a
   kipmid polling thread (see smi_start_processing()). */
226 static int force_kipmid[SI_MAX_PARMS];
227 static int num_force_kipmid;
/* If nonzero, unload the module when no interfaces were found. */
229 static int unload_when_empty = 1;
/* Forward declarations. */
231 static int try_smi_init(struct smi_info *smi);
232 static void cleanup_one_si(struct smi_info *to_clean);
/* Notifier chain invoked before each transaction starts. */
234 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
235 static int register_xaction_notifier(struct notifier_block * nb)
237 return atomic_notifier_chain_register(&xaction_notifier_list, nb);
240 static void deliver_recv_msg(struct smi_info *smi_info,
241 struct ipmi_smi_msg *msg)
243 /* Deliver the message to the upper layer with the lock
245 spin_unlock(&(smi_info->si_lock));
246 ipmi_smi_msg_received(smi_info->intf, msg);
247 spin_lock(&(smi_info->si_lock));
/* Fail the in-flight message back to the upper layer with completion
   code cCode (clamped to IPMI_ERR_UNSPECIFIED if out of range).
   NOTE(review): cCode should end up in msg->rsp[2] with rsp_size 3;
   the lines doing that are not visible here — confirm against the
   full source. */
250 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
252 struct ipmi_smi_msg *msg = smi_info->curr_msg;
254 if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
255 cCode = IPMI_ERR_UNSPECIFIED;
256 /* else use it as is */
/* Response netfn = request netfn | 1, i.e. data[0] | (1 << 2). */
258 /* Make it a response */
259 msg->rsp[0] = msg->data[0] | 4;
260 msg->rsp[1] = msg->data[1];
264 smi_info->curr_msg = NULL;
265 deliver_recv_msg(smi_info, msg);
/* Dequeue the next outgoing message (high-priority queue first) and
   feed it to the low-level state machine.  Returns SI_SM_NOMSG/IDLE
   when nothing is pending, SI_SM_CALL_WITHOUT_DELAY when a transaction
   was started.  Caller holds the SI lock with interrupts off. */
268 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
271 struct list_head *entry = NULL;
276 /* No need to save flags, we already have interrupts off and we
277 already hold the SMI lock. */
278 spin_lock(&(smi_info->msg_lock));
280 /* Pick the high priority queue first. */
281 if (!list_empty(&(smi_info->hp_xmit_msgs))) {
282 entry = smi_info->hp_xmit_msgs.next;
283 } else if (!list_empty(&(smi_info->xmit_msgs))) {
284 entry = smi_info->xmit_msgs.next;
288 smi_info->curr_msg = NULL;
294 smi_info->curr_msg = list_entry(entry,
/* Debug-timing trace (normally compiled out). */
299 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
/* Give registered notifiers (e.g. the watchdog) a chance to veto. */
301 err = atomic_notifier_call_chain(&xaction_notifier_list,
303 if (err & NOTIFY_STOP_MASK) {
304 rv = SI_SM_CALL_WITHOUT_DELAY;
307 err = smi_info->handlers->start_transaction(
309 smi_info->curr_msg->data,
310 smi_info->curr_msg->data_size);
/* start_transaction failure is reported straight back to the caller
   as a hosed message with err as the completion code. */
312 return_hosed_msg(smi_info, err);
315 rv = SI_SM_CALL_WITHOUT_DELAY;
318 spin_unlock(&(smi_info->msg_lock));
323 static void start_enable_irq(struct smi_info *smi_info)
325 unsigned char msg[2];
327 /* If we are enabling interrupts, we have to tell the
329 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
330 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
332 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
333 smi_info->si_state = SI_ENABLE_INTERRUPTS1;
336 static void start_clear_flags(struct smi_info *smi_info)
338 unsigned char msg[3];
340 /* Make sure the watchdog pre-timeout flag is not set at startup. */
341 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
342 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
343 msg[2] = WDT_PRE_TIMEOUT_INT;
345 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
346 smi_info->si_state = SI_CLEARING_FLAGS;
349 /* When we have a situtaion where we run out of memory and cannot
350 allocate messages, we just leave them in the BMC and run the system
351 polled until we can allocate some memory. Once we have some
352 memory, we will re-enable the interrupt. */
353 static inline void disable_si_irq(struct smi_info *smi_info)
355 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
356 disable_irq_nosync(smi_info->irq);
357 smi_info->interrupt_disabled = 1;
361 static inline void enable_si_irq(struct smi_info *smi_info)
363 if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
364 enable_irq(smi_info->irq);
365 smi_info->interrupt_disabled = 0;
/* Act on the flag bits cached in smi_info->msg_flags, highest priority
   first: watchdog pre-timeout, then received messages, then events,
   then OEM data.  Each branch either starts a follow-up transaction
   (setting si_state) or falls through to SI_NORMAL.  Called with the
   SI lock held. */
369 static void handle_flags(struct smi_info *smi_info)
372 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
373 /* Watchdog pre-timeout */
374 spin_lock(&smi_info->count_lock);
375 smi_info->watchdog_pretimeouts++;
376 spin_unlock(&smi_info->count_lock);
378 start_clear_flags(smi_info);
379 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
/* The SI lock is dropped across the watchdog callback, which may
   re-enter the driver. */
380 spin_unlock(&(smi_info->si_lock));
381 ipmi_smi_watchdog_pretimeout(smi_info->intf);
382 spin_lock(&(smi_info->si_lock));
383 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
384 /* Messages available. */
385 smi_info->curr_msg = ipmi_alloc_smi_msg();
/* Out of memory: fall back to polled mode (see disable_si_irq()). */
386 if (!smi_info->curr_msg) {
387 disable_si_irq(smi_info);
388 smi_info->si_state = SI_NORMAL;
391 enable_si_irq(smi_info);
393 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
394 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
395 smi_info->curr_msg->data_size = 2;
397 smi_info->handlers->start_transaction(
399 smi_info->curr_msg->data,
400 smi_info->curr_msg->data_size);
401 smi_info->si_state = SI_GETTING_MESSAGES;
402 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
403 /* Events available. */
404 smi_info->curr_msg = ipmi_alloc_smi_msg();
405 if (!smi_info->curr_msg) {
406 disable_si_irq(smi_info);
407 smi_info->si_state = SI_NORMAL;
410 enable_si_irq(smi_info);
412 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
413 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
414 smi_info->curr_msg->data_size = 2;
416 smi_info->handlers->start_transaction(
418 smi_info->curr_msg->data,
419 smi_info->curr_msg->data_size);
420 smi_info->si_state = SI_GETTING_EVENTS;
421 } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
422 smi_info->oem_data_avail_handler) {
/* Handler returns 1 to request a re-run of handle_flags(), 0 if it
   set si_state itself (see the field's comment above). */
423 if (smi_info->oem_data_avail_handler(smi_info))
426 smi_info->si_state = SI_NORMAL;
/* Post-process a completed low-level transaction according to the
   state that started it (si_state), collecting the result from the
   state machine and either delivering it upward or chaining the next
   command.  Called with the SI lock held. */
430 static void handle_transaction_done(struct smi_info *smi_info)
432 struct ipmi_smi_msg *msg;
/* Debug-timing trace (normally compiled out). */
437 printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec)
439 switch (smi_info->si_state) {
/* SI_NORMAL: a user-submitted command finished. */
441 if (!smi_info->curr_msg)
444 smi_info->curr_msg->rsp_size
445 = smi_info->handlers->get_result(
447 smi_info->curr_msg->rsp,
448 IPMI_MAX_MSG_LENGTH);
450 /* Do this here because deliver_recv_msg() releases the
451 lock, and a new message can be put in during the
452 time the lock is released. */
453 msg = smi_info->curr_msg;
454 smi_info->curr_msg = NULL;
455 deliver_recv_msg(smi_info, msg);
458 case SI_GETTING_FLAGS:
460 unsigned char msg[4];
463 /* We got the flags from the SMI, now handle them. */
464 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
466 /* Error fetching flags, just give up for
468 smi_info->si_state = SI_NORMAL;
469 } else if (len < 4) {
470 /* Hmm, no flags. That's technically illegal, but
471 don't use uninitialized data. */
472 smi_info->si_state = SI_NORMAL;
/* msg[3] is the flags byte of the Get Message Flags response. */
474 smi_info->msg_flags = msg[3];
475 handle_flags(smi_info);
480 case SI_CLEARING_FLAGS:
481 case SI_CLEARING_FLAGS_THEN_SET_IRQ:
483 unsigned char msg[3];
485 /* We cleared the flags. */
486 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
488 /* Error clearing flags */
490 "ipmi_si: Error clearing flags: %2.2x\n",
/* Chain into the interrupt-enable handshake if requested. */
493 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
494 start_enable_irq(smi_info);
496 smi_info->si_state = SI_NORMAL;
500 case SI_GETTING_EVENTS:
502 smi_info->curr_msg->rsp_size
503 = smi_info->handlers->get_result(
505 smi_info->curr_msg->rsp,
506 IPMI_MAX_MSG_LENGTH);
508 /* Do this here because deliver_recv_msg() releases the
509 lock, and a new message can be put in during the
510 time the lock is released. */
511 msg = smi_info->curr_msg;
512 smi_info->curr_msg = NULL;
/* rsp[2] is the IPMI completion code; nonzero = no more events. */
513 if (msg->rsp[2] != 0) {
514 /* Error getting event, probably done. */
517 /* Take off the event flag. */
518 smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
519 handle_flags(smi_info);
521 spin_lock(&smi_info->count_lock);
523 spin_unlock(&smi_info->count_lock);
525 /* Do this before we deliver the message
526 because delivering the message releases the
527 lock and something else can mess with the
529 handle_flags(smi_info);
531 deliver_recv_msg(smi_info, msg);
536 case SI_GETTING_MESSAGES:
538 smi_info->curr_msg->rsp_size
539 = smi_info->handlers->get_result(
541 smi_info->curr_msg->rsp,
542 IPMI_MAX_MSG_LENGTH);
544 /* Do this here because deliver_recv_msg() releases the
545 lock, and a new message can be put in during the
546 time the lock is released. */
547 msg = smi_info->curr_msg;
548 smi_info->curr_msg = NULL;
549 if (msg->rsp[2] != 0) {
550 /* Error getting event, probably done. */
553 /* Take off the msg flag. */
554 smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
555 handle_flags(smi_info);
557 spin_lock(&smi_info->count_lock);
558 smi_info->incoming_messages++;
559 spin_unlock(&smi_info->count_lock);
561 /* Do this before we deliver the message
562 because delivering the message releases the
563 lock and something else can mess with the
565 handle_flags(smi_info);
567 deliver_recv_msg(smi_info, msg);
/* Step 1 of IRQ enable: got the current global enables. */
572 case SI_ENABLE_INTERRUPTS1:
574 unsigned char msg[4];
576 /* We got the flags from the SMI, now handle them. */
577 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
580 "ipmi_si: Could not enable interrupts"
581 ", failed get, using polled mode.\n");
582 smi_info->si_state = SI_NORMAL;
584 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
585 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
/* msg[3] holds the enables byte read back from the BMC; OR in the
   receive-message-queue interrupt bit and write it back. */
586 msg[2] = msg[3] | 1; /* enable msg queue int */
587 smi_info->handlers->start_transaction(
588 smi_info->si_sm, msg, 3);
589 smi_info->si_state = SI_ENABLE_INTERRUPTS2;
/* Step 2 of IRQ enable: check the set-enables response. */
594 case SI_ENABLE_INTERRUPTS2:
596 unsigned char msg[4];
598 /* We got the flags from the SMI, now handle them. */
599 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
602 "ipmi_si: Could not enable interrupts"
603 ", failed set, using polled mode.\n");
605 smi_info->si_state = SI_NORMAL;
611 /* Called on timeouts and events. Timeouts should pass the elapsed
612 time, interrupts should pass in zero. */
/* Core dispatch loop: crank the low-level state machine, finish or
   fail completed transactions, fetch flags on ATTN, start queued
   messages when idle, and kick off an event fetch if requested.
   Returns the state machine's latest result.  Caller holds the SI
   lock. */
613 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
616 enum si_sm_result si_sm_result;
619 /* There used to be a loop here that waited a little while
620 (around 25us) before giving up. That turned out to be
621 pointless, the minimum delays I was seeing were in the 300us
622 range, which is far too long to wait in an interrupt. So
623 we just run until the state machine tells us something
624 happened or it needs a delay. */
625 si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
627 while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
629 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
632 if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
634 spin_lock(&smi_info->count_lock);
635 smi_info->complete_transactions++;
636 spin_unlock(&smi_info->count_lock);
638 handle_transaction_done(smi_info);
639 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
641 else if (si_sm_result == SI_SM_HOSED)
643 spin_lock(&smi_info->count_lock);
644 smi_info->hosed_count++;
645 spin_unlock(&smi_info->count_lock);
647 /* Do this before return_hosed_msg, because that
648 releases the lock. */
649 smi_info->si_state = SI_NORMAL;
650 if (smi_info->curr_msg != NULL) {
651 /* If we were handling a user message, format
652 a response to send to the upper layer to
653 tell it about the error. */
654 return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
656 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
659 /* We prefer handling attn over new messages. */
660 if (si_sm_result == SI_SM_ATTN)
662 unsigned char msg[2];
664 spin_lock(&smi_info->count_lock);
665 smi_info->attentions++;
666 spin_unlock(&smi_info->count_lock);
668 /* Got an attn, send down a get message flags to see
669 what's causing it. It would be better to handle
670 this in the upper layer, but due to the way
671 interrupts work with the SMI, that's not really
673 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
674 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
676 smi_info->handlers->start_transaction(
677 smi_info->si_sm, msg, 2);
678 smi_info->si_state = SI_GETTING_FLAGS;
682 /* If we are currently idle, try to start the next message. */
683 if (si_sm_result == SI_SM_IDLE) {
684 spin_lock(&smi_info->count_lock);
686 spin_unlock(&smi_info->count_lock);
688 si_sm_result = start_next_msg(smi_info);
689 if (si_sm_result != SI_SM_IDLE)
693 if ((si_sm_result == SI_SM_IDLE)
694 && (atomic_read(&smi_info->req_events)))
696 /* We are idle and the upper layer requested that I fetch
698 atomic_set(&smi_info->req_events, 0);
/* Allocation failure here is silently dropped; the request will be
   retried on the next req_events cycle. */
700 smi_info->curr_msg = ipmi_alloc_smi_msg();
701 if (!smi_info->curr_msg)
704 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
705 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
706 smi_info->curr_msg->data_size = 2;
708 smi_info->handlers->start_transaction(
710 smi_info->curr_msg->data,
711 smi_info->curr_msg->data_size);
712 smi_info->si_state = SI_GETTING_EVENTS;
/* ipmi_smi_handlers.sender: queue an outgoing message.  While shutting
   down the message is failed back immediately; in run-to-completion
   mode (post-panic) the state machine is spun synchronously until
   idle; otherwise the message is queued and the state machine kicked
   if it is idle. */
719 static void sender(void *send_info,
720 struct ipmi_smi_msg *msg,
723 struct smi_info *smi_info = send_info;
724 enum si_sm_result result;
/* Shutting down: synthesize an error response (netfn | response bit,
   completion code IPMI_ERR_UNSPECIFIED) and return it upward. */
730 if (atomic_read(&smi_info->stop_operation)) {
731 msg->rsp[0] = msg->data[0] | 4;
732 msg->rsp[1] = msg->data[1];
733 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
735 deliver_recv_msg(smi_info, msg);
739 spin_lock_irqsave(&(smi_info->msg_lock), flags);
/* Debug-timing trace (normally compiled out). */
742 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
745 if (smi_info->run_to_completion) {
746 /* If we are running to completion, then throw it in
747 the list and run transactions until everything is
748 clear. Priority doesn't matter here. */
749 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
751 /* We have to release the msg lock and claim the smi
752 lock in this case, because of race conditions. */
753 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
755 spin_lock_irqsave(&(smi_info->si_lock), flags);
756 result = smi_event_handler(smi_info, 0);
/* Busy-wait the state machine to idle, crediting the short
   delay as elapsed time on each pass. */
757 while (result != SI_SM_IDLE) {
758 udelay(SI_SHORT_TIMEOUT_USEC);
759 result = smi_event_handler(smi_info,
760 SI_SHORT_TIMEOUT_USEC);
762 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
766 list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
768 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
771 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
/* If the interface is idle, start transmission immediately. */
773 spin_lock_irqsave(&(smi_info->si_lock), flags);
774 if ((smi_info->si_state == SI_NORMAL)
775 && (smi_info->curr_msg == NULL))
777 start_next_msg(smi_info);
779 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
/* ipmi_smi_handlers.set_run_to_completion: toggle synchronous mode
   (used around a panic).  When enabling, drain the state machine to
   idle before returning. */
782 static void set_run_to_completion(void *send_info, int i_run_to_completion)
784 struct smi_info *smi_info = send_info;
785 enum si_sm_result result;
788 spin_lock_irqsave(&(smi_info->si_lock), flags);
790 smi_info->run_to_completion = i_run_to_completion;
791 if (i_run_to_completion) {
792 result = smi_event_handler(smi_info, 0);
/* Spin until idle, crediting each short delay as elapsed time. */
793 while (result != SI_SM_IDLE) {
794 udelay(SI_SHORT_TIMEOUT_USEC);
795 result = smi_event_handler(smi_info,
796 SI_SHORT_TIMEOUT_USEC);
800 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
/* kipmid polling thread: repeatedly cranks the state machine, yielding
   or sleeping one tick depending on whether the state machine wants to
   be called again with or without a delay. */
803 static int ipmi_thread(void *data)
805 struct smi_info *smi_info = data;
807 enum si_sm_result smi_result;
/* Lowest priority; this thread may burn CPU while polling. */
809 set_user_nice(current, 19);
810 while (!kthread_should_stop()) {
811 spin_lock_irqsave(&(smi_info->si_lock), flags);
812 smi_result = smi_event_handler(smi_info, 0);
813 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
814 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
817 else if (smi_result == SI_SM_CALL_WITH_DELAY)
820 schedule_timeout_interruptible(1);
/* ipmi_smi_handlers.poll: single polled step of the state machine,
   crediting 10us of elapsed time so timeouts make progress. */
826 static void poll(void *send_info)
828 struct smi_info *smi_info = send_info;
831 * Make sure there is some delay in the poll loop so we can
832 * drive time forward and timeout things.
835 smi_event_handler(smi_info, 10);
838 static void request_events(void *send_info)
840 struct smi_info *smi_info = send_info;
842 if (atomic_read(&smi_info->stop_operation))
845 atomic_set(&smi_info->req_events, 1);
/* Set once module initialization has completed. */
848 static int initialized;
/* Periodic timer: credit elapsed microseconds to the state machine,
   then rearm — with the long (10ms) period when interrupts are
   working, or a 1-jiffy short period when the state machine asked
   for a quick re-poll. */
850 static void smi_timeout(unsigned long data)
852 struct smi_info *smi_info = (struct smi_info *) data;
853 enum si_sm_result smi_result;
855 unsigned long jiffies_now;
861 if (atomic_read(&smi_info->stop_operation))
864 spin_lock_irqsave(&(smi_info->si_lock), flags);
/* Debug-timing trace (normally compiled out). */
867 printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
869 jiffies_now = jiffies;
870 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
871 * SI_USEC_PER_JIFFY);
872 smi_result = smi_event_handler(smi_info, time_diff);
874 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
876 smi_info->last_timeout_jiffies = jiffies_now;
878 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
879 /* Running with interrupts, only do long timeouts. */
880 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
881 spin_lock_irqsave(&smi_info->count_lock, flags);
882 smi_info->long_timeouts++;
883 spin_unlock_irqrestore(&smi_info->count_lock, flags);
887 /* If the state machine asks for a short delay, then shorten
888 the timer timeout. */
889 if (smi_result == SI_SM_CALL_WITH_DELAY) {
890 spin_lock_irqsave(&smi_info->count_lock, flags);
891 smi_info->short_timeouts++;
892 spin_unlock_irqrestore(&smi_info->count_lock, flags);
893 smi_info->si_timer.expires = jiffies + 1;
895 spin_lock_irqsave(&smi_info->count_lock, flags);
896 smi_info->long_timeouts++;
897 spin_unlock_irqrestore(&smi_info->count_lock, flags);
898 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
902 add_timer(&(smi_info->si_timer));
/* Shared interrupt handler: count the interrupt and crank the state
   machine with zero elapsed time (per smi_event_handler()'s contract
   for interrupts). */
905 static irqreturn_t si_irq_handler(int irq, void *data)
907 struct smi_info *smi_info = data;
913 spin_lock_irqsave(&(smi_info->si_lock), flags);
915 spin_lock(&smi_info->count_lock);
916 smi_info->interrupts++;
917 spin_unlock(&smi_info->count_lock);
/* Skip processing while the interface is being torn down. */
919 if (atomic_read(&smi_info->stop_operation))
/* Debug-timing trace (normally compiled out). */
924 printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
926 smi_event_handler(smi_info, 0);
928 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
932 static irqreturn_t si_bt_irq_handler(int irq, void *data)
934 struct smi_info *smi_info = data;
935 /* We need to clear the IRQ flag for the BT interface. */
936 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
937 IPMI_BT_INTMASK_CLEAR_IRQ_BIT
938 | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
939 return si_irq_handler(irq, data);
/* ipmi_smi_handlers.start_processing: bind the upper-layer intf,
   start the driving timer, and optionally spawn the kipmid polling
   thread (forced on/off by the force_kipmid parameter; otherwise
   skipped for BT or when an IRQ is available). */
942 static int smi_start_processing(void *send_info,
945 struct smi_info *new_smi = send_info;
948 new_smi->intf = intf;
950 /* Set up the timer that drives the interface. */
951 setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
952 new_smi->last_timeout_jiffies = jiffies;
953 mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
956 * Check if the user forcefully enabled the daemon.
958 if (new_smi->intf_num < num_force_kipmid)
959 enable = force_kipmid[new_smi->intf_num];
961 * The BT interface is efficient enough to not need a thread,
962 * and there is no need for a thread if we have interrupts.
964 else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
968 new_smi->thread = kthread_run(ipmi_thread, new_smi,
969 "kipmi%d", new_smi->intf_num);
/* Thread creation failure is non-fatal: fall back to timers. */
970 if (IS_ERR(new_smi->thread)) {
971 printk(KERN_NOTICE "ipmi_si_intf: Could not start"
972 " kernel thread due to error %ld, only using"
973 " timers to drive the interface\n",
974 PTR_ERR(new_smi->thread));
975 new_smi->thread = NULL;
/* ipmi_smi_handlers.set_maintenance_mode.  NOTE(review): the
   req_events clear appears to apply only when maintenance mode is
   being *disabled* (an `if (!enable)` guard) — the guard line is not
   visible here, confirm against the full source. */
982 static void set_maintenance_mode(void *send_info, int enable)
984 struct smi_info *smi_info = send_info;
987 atomic_set(&smi_info->req_events, 0);
/* The ops table registered with the IPMI core for every interface
   this driver manages. */
990 static struct ipmi_smi_handlers handlers =
992 .owner = THIS_MODULE,
993 .start_processing = smi_start_processing,
995 .request_events = request_events,
996 .set_maintenance_mode = set_maintenance_mode,
997 .set_run_to_completion = set_run_to_completion,
1001 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
1002 a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */
/* Global list of discovered interfaces and its lock. */
1004 static LIST_HEAD(smi_infos);
1005 static DEFINE_MUTEX(smi_infos_lock);
1006 static int smi_num; /* Used to sequence the SMIs */
1008 #define DEFAULT_REGSPACING 1
/* Per-interface configuration arrays, each sized SI_MAX_PARMS and
   paired with a num_* count filled in by module_param_array. */
1010 static int si_trydefaults = 1;
1011 static char *si_type[SI_MAX_PARMS];
1012 #define MAX_SI_TYPE_STR 30
1013 static char si_type_str[MAX_SI_TYPE_STR];
1014 static unsigned long addrs[SI_MAX_PARMS];
1015 static int num_addrs;
1016 static unsigned int ports[SI_MAX_PARMS];
1017 static int num_ports;
1018 static int irqs[SI_MAX_PARMS];
1019 static int num_irqs;
1020 static int regspacings[SI_MAX_PARMS];
1021 static int num_regspacings;
1022 static int regsizes[SI_MAX_PARMS];
1023 static int num_regsizes;
1024 static int regshifts[SI_MAX_PARMS];
1025 static int num_regshifts;
1026 static int slave_addrs[SI_MAX_PARMS];
1027 static int num_slave_addrs;
/* Address-space selector for a configured interface. */
1029 #define IPMI_IO_ADDR_SPACE 0
1030 #define IPMI_MEM_ADDR_SPACE 1
1031 static char *addr_space_to_str[] = { "i/o", "mem" };
1033 static int hotmod_handler(const char *val, struct kernel_param *kp);
/* Module parameters; see Documentation/IPMI.txt for the full syntax. */
1035 module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
1036 MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See"
1037 " Documentation/IPMI.txt in the kernel sources for the"
1040 module_param_named(trydefaults, si_trydefaults, bool, 0);
1041 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1042 " default scan of the KCS and SMIC interface at the standard"
1044 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1045 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1046 " interface separated by commas. The types are 'kcs',"
1047 " 'smic', and 'bt'. For example si_type=kcs,bt will set"
1048 " the first interface to kcs and the second to bt");
1049 module_param_array(addrs, long, &num_addrs, 0);
1050 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1051 " addresses separated by commas. Only use if an interface"
1052 " is in memory. Otherwise, set it to zero or leave"
1054 module_param_array(ports, int, &num_ports, 0);
1055 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1056 " addresses separated by commas. Only use if an interface"
1057 " is a port. Otherwise, set it to zero or leave"
1059 module_param_array(irqs, int, &num_irqs, 0);
1060 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1061 " addresses separated by commas. Only use if an interface"
1062 " has an interrupt. Otherwise, set it to zero or leave"
1064 module_param_array(regspacings, int, &num_regspacings, 0);
1065 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1066 " and each successive register used by the interface. For"
1067 " instance, if the start address is 0xca2 and the spacing"
1068 " is 2, then the second address is at 0xca4. Defaults"
1070 module_param_array(regsizes, int, &num_regsizes, 0);
1071 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1072 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1073 " 16-bit, 32-bit, or 64-bit register. Use this if you"
1074 " the 8-bit IPMI register has to be read from a larger"
1076 module_param_array(regshifts, int, &num_regshifts, 0);
1077 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the."
1078 " IPMI register, in bits. For instance, if the data"
1079 " is read from a 32-bit word and the IPMI data is in"
1080 " bit 8-15, then the shift would be 8");
1081 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1082 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1083 " the controller. Normally this is 0x20, but can be"
1084 " overridden by this parm. This is an array indexed"
1085 " by interface number.");
1086 module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1087 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1088 " disabled(0). Normally the IPMI driver auto-detects"
1089 " this, but the value may be overridden by this parm.");
1090 module_param(unload_when_empty, int, 0);
1091 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1092 " specified or found, default is 1. Setting to 0"
1093 " is useful for hot add of devices using hotmod.");
1096 static void std_irq_cleanup(struct smi_info *info)
1098 if (info->si_type == SI_BT)
1099 /* Disable the interrupt in the BT interface. */
1100 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1101 free_irq(info->irq, info);
/* Claim the configured IRQ, using the BT-specific handler (which acks
   the BT interrupt flag) for BT interfaces.  On failure the interface
   falls back to polled operation; on success irq_cleanup is set so
   teardown releases the line. */
1104 static int std_irq_setup(struct smi_info *info)
1111 if (info->si_type == SI_BT) {
1112 rv = request_irq(info->irq,
1118 /* Enable the interrupt in the BT interface. */
1119 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1120 IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1122 rv = request_irq(info->irq,
/* request_irq failed: log and continue polled. */
1129 "ipmi_si: %s unable to claim interrupt %d,"
1130 " running polled\n",
1131 DEVICE_NAME, info->irq);
1134 info->irq_cleanup = std_irq_cleanup;
1135 printk(" Using irq %d\n", info->irq);
1141 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1143 unsigned int addr = io->addr_data;
1145 return inb(addr + (offset * io->regspacing));
1148 static void port_outb(struct si_sm_io *io, unsigned int offset,
1151 unsigned int addr = io->addr_data;
1153 outb(b, addr + (offset * io->regspacing));
1156 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1158 unsigned int addr = io->addr_data;
1160 return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1163 static void port_outw(struct si_sm_io *io, unsigned int offset,
1166 unsigned int addr = io->addr_data;
1168 outw(b << io->regshift, addr + (offset * io->regspacing));
1171 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1173 unsigned int addr = io->addr_data;
1175 return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1178 static void port_outl(struct si_sm_io *io, unsigned int offset,
1181 unsigned int addr = io->addr_data;
1183 outl(b << io->regshift, addr+(offset * io->regspacing));
/* Release every per-register I/O region claimed by port_setup()
   (regions are claimed individually — see the comment there). */
1186 static void port_cleanup(struct smi_info *info)
1188 unsigned int addr = info->io.addr_data;
1192 for (idx = 0; idx < info->io_size; idx++) {
1193 release_region(addr + idx * info->io.regspacing,
/* Configure port I/O for an interface: select inb/inw/inl accessors
   by register size, then claim one I/O region per register.  Returns
   0 on success, nonzero on an invalid size or region conflict. */
1199 static int port_setup(struct smi_info *info)
1201 unsigned int addr = info->io.addr_data;
1207 info->io_cleanup = port_cleanup;
1209 /* Figure out the actual inb/inw/inl/etc routine to use based
1210 upon the register size. */
1211 switch (info->io.regsize) {
1213 info->io.inputb = port_inb;
1214 info->io.outputb = port_outb;
1217 info->io.inputb = port_inw;
1218 info->io.outputb = port_outw;
1221 info->io.inputb = port_inl;
1222 info->io.outputb = port_outl;
1225 printk("ipmi_si: Invalid register size: %d\n",
1230 /* Some BIOSes reserve disjoint I/O regions in their ACPI
1231 * tables. This causes problems when trying to register the
1232 * entire I/O region. Therefore we must register each I/O
1235 for (idx = 0; idx < info->io_size; idx++) {
1236 if (request_region(addr + idx * info->io.regspacing,
1237 info->io.regsize, DEVICE_NAME) == NULL) {
1238 /* Undo allocations */
/* Roll back the regions claimed so far before failing. */
1240 release_region(addr + idx * info->io.regspacing,
1249 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1251 return readb((io->addr)+(offset * io->regspacing));
1254 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1257 writeb(b, (io->addr)+(offset * io->regspacing));
1260 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1262 return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1266 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1269 writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
1272 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1274 return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1278 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1281 writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1285 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1287 return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
1291 static void mem_outq(struct si_sm_io *io, unsigned int offset,
1294 writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
1298 static void mem_cleanup(struct smi_info *info)
1300 unsigned long addr = info->io.addr_data;
1303 if (info->io.addr) {
1304 iounmap(info->io.addr);
1306 mapsize = ((info->io_size * info->io.regspacing)
1307 - (info->io.regspacing - info->io.regsize));
1309 release_mem_region(addr, mapsize);
1313 static int mem_setup(struct smi_info *info)
1315 unsigned long addr = info->io.addr_data;
1321 info->io_cleanup = mem_cleanup;
1323 /* Figure out the actual readb/readw/readl/etc routine to use based
1324 upon the register size. */
1325 switch (info->io.regsize) {
1327 info->io.inputb = intf_mem_inb;
1328 info->io.outputb = intf_mem_outb;
1331 info->io.inputb = intf_mem_inw;
1332 info->io.outputb = intf_mem_outw;
1335 info->io.inputb = intf_mem_inl;
1336 info->io.outputb = intf_mem_outl;
1340 info->io.inputb = mem_inq;
1341 info->io.outputb = mem_outq;
1345 printk("ipmi_si: Invalid register size: %d\n",
1350 /* Calculate the total amount of memory to claim. This is an
1351 * unusual looking calculation, but it avoids claiming any
1352 * more memory than it has to. It will claim everything
1353 * between the first address to the end of the last full
1355 mapsize = ((info->io_size * info->io.regspacing)
1356 - (info->io.regspacing - info->io.regsize));
1358 if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1361 info->io.addr = ioremap(addr, mapsize);
1362 if (info->io.addr == NULL) {
1363 release_mem_region(addr, mapsize);
1370 * Parms come in as <op1>[:op2[:op3...]]. ops are:
1371 * add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
1379 enum hotmod_op { HM_ADD, HM_REMOVE };
1380 struct hotmod_vals {
1384 static struct hotmod_vals hotmod_ops[] = {
1386 { "remove", HM_REMOVE },
1389 static struct hotmod_vals hotmod_si[] = {
1391 { "smic", SI_SMIC },
1395 static struct hotmod_vals hotmod_as[] = {
1396 { "mem", IPMI_MEM_ADDR_SPACE },
1397 { "i/o", IPMI_IO_ADDR_SPACE },
1401 static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1406 s = strchr(*curr, ',');
1408 printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
1413 for (i = 0; hotmod_ops[i].name; i++) {
1414 if (strcmp(*curr, v[i].name) == 0) {
1421 printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
1425 static int check_hotmod_int_op(const char *curr, const char *option,
1426 const char *name, int *val)
1430 if (strcmp(curr, name) == 0) {
1432 printk(KERN_WARNING PFX
1433 "No option given for '%s'\n",
1437 *val = simple_strtoul(option, &n, 0);
1438 if ((*n != '\0') || (*option == '\0')) {
1439 printk(KERN_WARNING PFX
1440 "Bad option given for '%s'\n",
1449 static int hotmod_handler(const char *val, struct kernel_param *kp)
1451 char *str = kstrdup(val, GFP_KERNEL);
1453 char *next, *curr, *s, *n, *o;
1455 enum si_type si_type;
1465 struct smi_info *info;
1470 /* Kill any trailing spaces, as we can get a "\n" from echo. */
1473 while ((ival >= 0) && isspace(str[ival])) {
1478 for (curr = str; curr; curr = next) {
1485 next = strchr(curr, ':');
1491 rv = parse_str(hotmod_ops, &ival, "operation", &curr);
1496 rv = parse_str(hotmod_si, &ival, "interface type", &curr);
1501 rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
1505 s = strchr(curr, ',');
1510 addr = simple_strtoul(curr, &n, 0);
1511 if ((*n != '\0') || (*curr == '\0')) {
1512 printk(KERN_WARNING PFX "Invalid hotmod address"
1519 s = strchr(curr, ',');
1524 o = strchr(curr, '=');
1529 rv = check_hotmod_int_op(curr, o, "rsp", ®spacing);
1534 rv = check_hotmod_int_op(curr, o, "rsi", ®size);
1539 rv = check_hotmod_int_op(curr, o, "rsh", ®shift);
1544 rv = check_hotmod_int_op(curr, o, "irq", &irq);
1549 rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
1556 printk(KERN_WARNING PFX
1557 "Invalid hotmod option '%s'\n",
1563 info = kzalloc(sizeof(*info), GFP_KERNEL);
1569 info->addr_source = "hotmod";
1570 info->si_type = si_type;
1571 info->io.addr_data = addr;
1572 info->io.addr_type = addr_space;
1573 if (addr_space == IPMI_MEM_ADDR_SPACE)
1574 info->io_setup = mem_setup;
1576 info->io_setup = port_setup;
1578 info->io.addr = NULL;
1579 info->io.regspacing = regspacing;
1580 if (!info->io.regspacing)
1581 info->io.regspacing = DEFAULT_REGSPACING;
1582 info->io.regsize = regsize;
1583 if (!info->io.regsize)
1584 info->io.regsize = DEFAULT_REGSPACING;
1585 info->io.regshift = regshift;
1588 info->irq_setup = std_irq_setup;
1589 info->slave_addr = ipmb;
1594 struct smi_info *e, *tmp_e;
1596 mutex_lock(&smi_infos_lock);
1597 list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
1598 if (e->io.addr_type != addr_space)
1600 if (e->si_type != si_type)
1602 if (e->io.addr_data == addr)
1605 mutex_unlock(&smi_infos_lock);
/*
 * hardcode_find_bmc - register interfaces described by the module
 * parameters (ports[], addrs[], si_type[], regspacings[], etc.), one
 * struct smi_info per configured slot.
 *
 * NOTE(review): this chunk is a sampled listing; several original
 * lines (braces, "continue"/"kfree" paths, the try_smi_init call) are
 * missing between the numbered lines below.
 */
1614 static __devinit void hardcode_find_bmc(void)
1617 struct smi_info *info;
1619 for (i = 0; i < SI_MAX_PARMS; i++) {
/* Slot unused unless a port or a memory address was given. */
1620 if (!ports[i] && !addrs[i])
1623 info = kzalloc(sizeof(*info), GFP_KERNEL);
1627 info->addr_source = "hardcoded";
/* Interface type defaults to KCS when not specified. */
1629 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1630 info->si_type = SI_KCS;
1631 } else if (strcmp(si_type[i], "smic") == 0) {
1632 info->si_type = SI_SMIC;
1633 } else if (strcmp(si_type[i], "bt") == 0) {
1634 info->si_type = SI_BT;
1637 "ipmi_si: Interface type specified "
1638 "for interface %d, was invalid: %s\n",
/* Port-I/O takes precedence; fall back to memory-mapped. */
1646 info->io_setup = port_setup;
1647 info->io.addr_data = ports[i];
1648 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1649 } else if (addrs[i]) {
1651 info->io_setup = mem_setup;
1652 info->io.addr_data = addrs[i];
1653 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1656 "ipmi_si: Interface type specified "
1657 "for interface %d, "
1658 "but port and address were not set or "
1659 "set to zero.\n", i);
1664 info->io.addr = NULL;
/* Zero regspacing/regsize mean "use the default" (1 byte). */
1665 info->io.regspacing = regspacings[i];
1666 if (!info->io.regspacing)
1667 info->io.regspacing = DEFAULT_REGSPACING;
1668 info->io.regsize = regsizes[i];
1669 if (!info->io.regsize)
1670 info->io.regsize = DEFAULT_REGSPACING;
1671 info->io.regshift = regshifts[i];
1672 info->irq = irqs[i];
1674 info->irq_setup = std_irq_setup;
1682 #include <linux/acpi.h>
1684 /* Once we get an ACPI failure, we don't try any more, because we go
1685 through the tables sequentially. Once we don't find a table, there
1687 static int acpi_failure;
1689 /* For GPE-type interrupts. */
1690 static u32 ipmi_acpi_gpe(void *context)
1692 struct smi_info *smi_info = context;
1693 unsigned long flags;
1698 spin_lock_irqsave(&(smi_info->si_lock), flags);
1700 spin_lock(&smi_info->count_lock);
1701 smi_info->interrupts++;
1702 spin_unlock(&smi_info->count_lock);
1704 if (atomic_read(&smi_info->stop_operation))
1708 do_gettimeofday(&t);
1709 printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1711 smi_event_handler(smi_info, 0);
1713 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1715 return ACPI_INTERRUPT_HANDLED;
1718 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1723 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1726 static int acpi_gpe_irq_setup(struct smi_info *info)
1733 /* FIXME - is level triggered right? */
1734 status = acpi_install_gpe_handler(NULL,
1736 ACPI_GPE_LEVEL_TRIGGERED,
1739 if (status != AE_OK) {
1741 "ipmi_si: %s unable to claim ACPI GPE %d,"
1742 " running polled\n",
1743 DEVICE_NAME, info->irq);
1747 info->irq_cleanup = acpi_gpe_irq_cleanup;
1748 printk(" Using ACPI GPE %d\n", info->irq);
1755 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1766 s8 CreatorRevision[4];
1769 s16 SpecificationRevision;
1772 * Bit 0 - SCI interrupt supported
1773 * Bit 1 - I/O APIC/SAPIC
1777 /* If bit 0 of InterruptType is set, then this is the SCI
1778 interrupt in the GPEx_STS register. */
1783 /* If bit 1 of InterruptType is set, then this is the I/O
1784 APIC/SAPIC interrupt. */
1785 u32 GlobalSystemInterrupt;
1787 /* The actual register address. */
1788 struct acpi_generic_address addr;
1792 s8 spmi_id[1]; /* A '\0' terminated array starts here. */
1795 static __devinit int try_init_acpi(struct SPMITable *spmi)
1797 struct smi_info *info;
1800 if (spmi->IPMIlegacy != 1) {
1801 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1805 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1806 addr_space = IPMI_MEM_ADDR_SPACE;
1808 addr_space = IPMI_IO_ADDR_SPACE;
1810 info = kzalloc(sizeof(*info), GFP_KERNEL);
1812 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1816 info->addr_source = "ACPI";
1818 /* Figure out the interface type. */
1819 switch (spmi->InterfaceType)
1822 info->si_type = SI_KCS;
1825 info->si_type = SI_SMIC;
1828 info->si_type = SI_BT;
1831 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1832 spmi->InterfaceType);
1837 if (spmi->InterruptType & 1) {
1838 /* We've got a GPE interrupt. */
1839 info->irq = spmi->GPE;
1840 info->irq_setup = acpi_gpe_irq_setup;
1841 } else if (spmi->InterruptType & 2) {
1842 /* We've got an APIC/SAPIC interrupt. */
1843 info->irq = spmi->GlobalSystemInterrupt;
1844 info->irq_setup = std_irq_setup;
1846 /* Use the default interrupt setting. */
1848 info->irq_setup = NULL;
1851 if (spmi->addr.register_bit_width) {
1852 /* A (hopefully) properly formed register bit width. */
1853 info->io.regspacing = spmi->addr.register_bit_width / 8;
1855 info->io.regspacing = DEFAULT_REGSPACING;
1857 info->io.regsize = info->io.regspacing;
1858 info->io.regshift = spmi->addr.register_bit_offset;
1860 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1861 info->io_setup = mem_setup;
1862 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1863 } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1864 info->io_setup = port_setup;
1865 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1868 printk("ipmi_si: Unknown ACPI I/O Address type\n");
1871 info->io.addr_data = spmi->addr.address;
1878 static __devinit void acpi_find_bmc(void)
1881 struct SPMITable *spmi;
1890 for (i = 0; ; i++) {
1891 status = acpi_get_firmware_table("SPMI", i+1,
1892 ACPI_LOGICAL_ADDRESSING,
1893 (struct acpi_table_header **)
1895 if (status != AE_OK)
1898 try_init_acpi(spmi);
1904 struct dmi_ipmi_data
1908 unsigned long base_addr;
/*
 * decode_dmi - parse an SMBIOS/DMI type-38 (IPMI Device Information)
 * record into a struct dmi_ipmi_data (type, base address, address
 * space, register spacing, slave address, IRQ).
 *
 * NOTE(review): sampled listing -- lines for the record-length check,
 * the reg_spacing assignments inside the switch, and the short-record
 * fallback branch are missing between the numbered lines below.
 */
1914 static int __devinit decode_dmi(struct dmi_header *dm,
1915 struct dmi_ipmi_data *dmi)
1917 u8 *data = (u8 *)dm;
1918 unsigned long base_addr;
1920 u8 len = dm->length;
1922 dmi->type = data[4];
/* Bytes 8.. hold the base address; bit 0 flags I/O vs. memory space. */
1924 memcpy(&base_addr, data+8, sizeof(unsigned long));
1926 if (base_addr & 1) {
1928 base_addr &= 0xFFFE;
1929 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1933 dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1935 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1937 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1939 dmi->irq = data[0x11];
1941 /* The top two bits of byte 0x10 hold the register spacing. */
1942 reg_spacing = (data[0x10] & 0xC0) >> 6;
1943 switch(reg_spacing){
1944 case 0x00: /* Byte boundaries */
1947 case 0x01: /* 32-bit boundaries */
1950 case 0x02: /* 16-byte boundaries */
1954 /* Some other interface, just ignore it. */
1959 /* Note that technically, the lower bit of the base
1960 * address should be 1 if the address is I/O and 0 if
1961 * the address is in memory.  So many systems get that
1962 * wrong (and all that I have seen are I/O) so we just
1963 * ignore that bit and assume I/O.  Systems that use
1964 * memory should use the newer spec, anyway. */
1965 dmi->base_addr = base_addr & 0xfffe;
1966 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1970 dmi->slave_addr = data[6];
/*
 * try_init_dmi - build a struct smi_info from a decoded SMBIOS record
 * and pass it to try_smi_init().
 *
 * NOTE(review): sampled listing; break statements, the unknown-type
 * kfree paths and the final try_smi_init call are among the missing
 * lines.
 */
1975 static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
1977 struct smi_info *info;
1979 info = kzalloc(sizeof(*info), GFP_KERNEL);
1982 "ipmi_si: Could not allocate SI data\n");
1986 info->addr_source = "SMBIOS";
/* SMBIOS interface-type codes map directly onto si_type. */
1988 switch (ipmi_data->type) {
1989 case 0x01: /* KCS */
1990 info->si_type = SI_KCS;
1992 case 0x02: /* SMIC */
1993 info->si_type = SI_SMIC;
1996 info->si_type = SI_BT;
2002 switch (ipmi_data->addr_space) {
2003 case IPMI_MEM_ADDR_SPACE:
2004 info->io_setup = mem_setup;
2005 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2008 case IPMI_IO_ADDR_SPACE:
2009 info->io_setup = port_setup;
2010 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2016 "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
2017 ipmi_data->addr_space);
2020 info->io.addr_data = ipmi_data->base_addr;
/* DMI "offset" is the register spacing; 0 means use the default. */
2022 info->io.regspacing = ipmi_data->offset;
2023 if (!info->io.regspacing)
2024 info->io.regspacing = DEFAULT_REGSPACING;
2025 info->io.regsize = DEFAULT_REGSPACING;
2026 info->io.regshift = 0;
2028 info->slave_addr = ipmi_data->slave_addr;
2030 info->irq = ipmi_data->irq;
2032 info->irq_setup = std_irq_setup;
2037 static void __devinit dmi_find_bmc(void)
2039 struct dmi_device *dev = NULL;
2040 struct dmi_ipmi_data data;
2043 while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
2044 memset(&data, 0, sizeof(data));
2045 rv = decode_dmi((struct dmi_header *) dev->device_data, &data);
2047 try_init_dmi(&data);
2050 #endif /* CONFIG_DMI */
2054 #define PCI_ERMC_CLASSCODE 0x0C0700
2055 #define PCI_ERMC_CLASSCODE_MASK 0xffffff00
2056 #define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff
2057 #define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00
2058 #define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01
2059 #define PCI_ERMC_CLASSCODE_TYPE_BT 0x02
2061 #define PCI_HP_VENDOR_ID 0x103C
2062 #define PCI_MMC_DEVICE_ID 0x121A
2063 #define PCI_MMC_ADDR_CW 0x10
2065 static void ipmi_pci_cleanup(struct smi_info *info)
2067 struct pci_dev *pdev = info->addr_source_data;
2069 pci_disable_device(pdev);
/*
 * ipmi_pci_probe - bind to a PCI IPMI controller: derive the interface
 * type from the class code, enable the device, configure BAR 0 as the
 * register window, and hand off to try_smi_init().
 *
 * NOTE(review): sampled listing; kfree error paths and break
 * statements are among the missing lines.  Also note first_reg_offset
 * is computed but never used in the visible code -- verify against
 * the upstream file.
 */
2072 static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2073 const struct pci_device_id *ent)
2076 int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
2077 struct smi_info *info;
2078 int first_reg_offset = 0;
2080 info = kzalloc(sizeof(*info), GFP_KERNEL);
2084 info->addr_source = "PCI";
2086 switch (class_type) {
2087 case PCI_ERMC_CLASSCODE_TYPE_SMIC:
2088 info->si_type = SI_SMIC;
2091 case PCI_ERMC_CLASSCODE_TYPE_KCS:
2092 info->si_type = SI_KCS;
2095 case PCI_ERMC_CLASSCODE_TYPE_BT:
2096 info->si_type = SI_BT;
2101 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
2102 pci_name(pdev), class_type);
2106 rv = pci_enable_device(pdev);
2108 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
/* Remember the pdev so ipmi_pci_cleanup can disable it later. */
2114 info->addr_source_cleanup = ipmi_pci_cleanup;
2115 info->addr_source_data = pdev;
2117 if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
2118 first_reg_offset = 1;
/* BAR 0 tells us whether the registers are port- or memory-mapped. */
2120 if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
2121 info->io_setup = port_setup;
2122 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2124 info->io_setup = mem_setup;
2125 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2127 info->io.addr_data = pci_resource_start(pdev, 0);
2129 info->io.regspacing = DEFAULT_REGSPACING;
2130 info->io.regsize = DEFAULT_REGSPACING;
2131 info->io.regshift = 0;
2133 info->irq = pdev->irq;
2135 info->irq_setup = std_irq_setup;
2137 info->dev = &pdev->dev;
2139 return try_smi_init(info);
2142 static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
2147 static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2152 static int ipmi_pci_resume(struct pci_dev *pdev)
2158 static struct pci_device_id ipmi_pci_devices[] = {
2159 { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
2160 { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) }
2162 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2164 static struct pci_driver ipmi_pci_driver = {
2165 .name = DEVICE_NAME,
2166 .id_table = ipmi_pci_devices,
2167 .probe = ipmi_pci_probe,
2168 .remove = __devexit_p(ipmi_pci_remove),
2170 .suspend = ipmi_pci_suspend,
2171 .resume = ipmi_pci_resume,
2174 #endif /* CONFIG_PCI */
/*
 * try_get_dev_id - probe for a live BMC by issuing a Get Device ID
 * command through the low-level state machine (no interrupts or
 * timers yet), polling the state machine until it completes.
 *
 * Returns 0 and fills smi_info->device_id on success, negative errno
 * if there is no responding BMC at this location.
 *
 * NOTE(review): sampled listing; the polling loop bounds, goto labels
 * and kfree(resp) cleanup lines are among the missing lines.
 */
2177 static int try_get_dev_id(struct smi_info *smi_info)
2179 unsigned char msg[2];
2180 unsigned char *resp;
2181 unsigned long resp_len;
2182 enum si_sm_result smi_result;
2185 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
2189 /* Do a Get Device ID command, since it comes back with some
2191 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2192 msg[1] = IPMI_GET_DEVICE_ID_CMD;
2193 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2195 smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
/* Drive the state machine by hand: sleep a jiffy when it asks for a
   delay, otherwise spin it without one. */
2198 if (smi_result == SI_SM_CALL_WITH_DELAY ||
2199 smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
2200 schedule_timeout_uninterruptible(1);
2201 smi_result = smi_info->handlers->event(
2202 smi_info->si_sm, 100);
2204 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
2206 smi_result = smi_info->handlers->event(
2207 smi_info->si_sm, 0);
2212 if (smi_result == SI_SM_HOSED) {
2213 /* We couldn't get the state machine to run, so whatever's at
2214 the port is probably not an IPMI SMI interface. */
2219 /* Otherwise, we got some data. */
2220 resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2221 resp, IPMI_MAX_MSG_LENGTH)ima;
2222 if (resp_len < 14) {
2223 /* That's odd, it should be longer. */
2228 if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
2229 /* That's odd, it shouldn't be able to fail. */
2234 /* Record info from the get device id, in case we need it. */
2235 ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id);
2242 static int type_file_read_proc(char *page, char **start, off_t off,
2243 int count, int *eof, void *data)
2245 struct smi_info *smi = data;
2247 return sprintf(page, "%s\n", si_to_str[smi->si_type]);
/*
 * stat_file_read_proc - /proc "si_stats" entry: dump the interface's
 * event counters, one "name: value" per line.
 *
 * NOTE(review): sampled listing; the counter arguments for idles/
 * interrupts/attentions/flag_fetches/hosed_count/events and the final
 * return are among the missing lines.
 */
2250 static int stat_file_read_proc(char *page, char **start, off_t off,
2251 int count, int *eof, void *data)
2253 char *out = (char *) page;
2254 struct smi_info *smi = data;
/* "enabled" means an IRQ is present and not currently disabled. */
2256 out += sprintf(out, "interrupts_enabled: %d\n",
2257 smi->irq && !smi->interrupt_disabled);
2258 out += sprintf(out, "short_timeouts: %ld\n",
2259 smi->short_timeouts);
2260 out += sprintf(out, "long_timeouts: %ld\n",
2261 smi->long_timeouts);
2262 out += sprintf(out, "timeout_restarts: %ld\n",
2263 smi->timeout_restarts);
2264 out += sprintf(out, "idles: %ld\n",
2266 out += sprintf(out, "interrupts: %ld\n",
2268 out += sprintf(out, "attentions: %ld\n",
2270 out += sprintf(out, "flag_fetches: %ld\n",
2272 out += sprintf(out, "hosed_count: %ld\n",
2274 out += sprintf(out, "complete_transactions: %ld\n",
2275 smi->complete_transactions);
2276 out += sprintf(out, "events: %ld\n",
2278 out += sprintf(out, "watchdog_pretimeouts: %ld\n",
2279 smi->watchdog_pretimeouts);
2280 out += sprintf(out, "incoming_messages: %ld\n",
2281 smi->incoming_messages);
/*
 * param_read_proc - /proc "params" entry: emit this interface's
 * configuration in the same format accepted by the hotmod parameter
 * (type,space,address,rsp=,rsi=,rsh=,irq=,ipmb=).
 *
 * NOTE(review): sampled listing; the trailing sprintf arguments
 * (addr_data, regspacing, regsize, regshift, irq, slave_addr) are
 * missing below.
 */
2286 static int param_read_proc(char *page, char **start, off_t off,
2287 int count, int *eof, void *data)
2289 struct smi_info *smi = data;
2291 return sprintf(page,
2292 "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2293 si_to_str[smi->si_type],
2294 addr_space_to_str[smi->io.addr_type],
2304 * oem_data_avail_to_receive_msg_avail
2305 * @info - smi_info structure with msg_flags set
2307 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2308 * Returns 1 indicating need to re-run handle_flags().
2310 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2312 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2318 * setup_dell_poweredge_oem_data_handler
2319 * @info - smi_info.device_id must be populated
2321 * Systems that match, but have firmware version < 1.40 may assert
2322 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2323 * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
2324 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2325 * as RECEIVE_MSG_AVAIL instead.
2327 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2328 * assert the OEM[012] bits, and if it did, the driver would have to
2329 * change to handle that properly, we don't actually check for the
2331 * Device ID = 0x20 BMC on PowerEdge 8G servers
2332 * Device Revision = 0x80
2333 * Firmware Revision1 = 0x01 BMC version 1.40
2334 * Firmware Revision2 = 0x40 BCD encoded
2335 * IPMI Version = 0x51 IPMI 1.5
2336 * Manufacturer ID = A2 02 00 Dell IANA
2338 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2339 * OEM0_DATA_AVAIL and needs to be treated as RECEIVE_MSG_AVAIL.
2342 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
2343 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2344 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2345 #define DELL_IANA_MFR_ID 0x0002a2
2346 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2348 struct ipmi_device_id *id = &smi_info->device_id;
2349 if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2350 if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
2351 id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2352 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2353 smi_info->oem_data_avail_handler =
2354 oem_data_avail_to_receive_msg_avail;
2356 else if (ipmi_version_major(id) < 1 ||
2357 (ipmi_version_major(id) == 1 &&
2358 ipmi_version_minor(id) < 5)) {
2359 smi_info->oem_data_avail_handler =
2360 oem_data_avail_to_receive_msg_avail;
2365 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2366 static void return_hosed_msg_badsize(struct smi_info *smi_info)
2368 struct ipmi_smi_msg *msg = smi_info->curr_msg;
2370 /* Make it a reponse */
2371 msg->rsp[0] = msg->data[0] | 4;
2372 msg->rsp[1] = msg->data[1];
2373 msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2375 smi_info->curr_msg = NULL;
2376 deliver_recv_msg(smi_info, msg);
2380 * dell_poweredge_bt_xaction_handler
2381 * @info - smi_info.device_id must be populated
2383 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2384 * not respond to a Get SDR command if the length of the data
2385 * requested is exactly 0x3A, which leads to command timeouts and no
2386 * data returned. This intercepts such commands, and causes userspace
2387 * callers to try again with a different-sized buffer, which succeeds.
2390 #define STORAGE_NETFN 0x0A
2391 #define STORAGE_CMD_GET_SDR 0x23
2392 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2393 unsigned long unused,
2396 struct smi_info *smi_info = in;
2397 unsigned char *data = smi_info->curr_msg->data;
2398 unsigned int size = smi_info->curr_msg->data_size;
2400 (data[0]>>2) == STORAGE_NETFN &&
2401 data[1] == STORAGE_CMD_GET_SDR &&
2403 return_hosed_msg_badsize(smi_info);
2409 static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2410 .notifier_call = dell_poweredge_bt_xaction_handler,
2414 * setup_dell_poweredge_bt_xaction_handler
2415 * @info - smi_info.device_id must be filled in already
2417 * Fills in smi_info.device_id.start_transaction_pre_hook
2418 * when we know what function to use there.
2421 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2423 struct ipmi_device_id *id = &smi_info->device_id;
2424 if (id->manufacturer_id == DELL_IANA_MFR_ID &&
2425 smi_info->si_type == SI_BT)
2426 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2430 * setup_oem_data_handler
2431 * @info - smi_info.device_id must be filled in already
2433 * Fills in smi_info.device_id.oem_data_available_handler
2434 * when we know what function to use there.
/* Hook up any OEM-specific data-available quirk handlers; currently
   only the Dell PowerEdge one exists. */
static void setup_oem_data_handler(struct smi_info *smi_info)
{
	setup_dell_poweredge_oem_data_handler(smi_info);
}
/* Hook up any OEM-specific transaction-start handlers; currently only
   the Dell PowerEdge BT workaround exists. */
static void setup_xaction_handlers(struct smi_info *smi_info)
{
	setup_dell_poweredge_bt_xaction_handler(smi_info);
}
2447 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2449 if (smi_info->intf) {
2450 /* The timer and thread are only running if the
2451 interface has been started up and registered. */
2452 if (smi_info->thread != NULL)
2453 kthread_stop(smi_info->thread);
2454 del_timer_sync(&smi_info->si_timer);
2458 static __devinitdata struct ipmi_default_vals
2464 { .type = SI_KCS, .port = 0xca2 },
2465 { .type = SI_SMIC, .port = 0xca9 },
2466 { .type = SI_BT, .port = 0xe4 },
/*
 * default_find_bmc - last-resort probing: try the legacy well-known
 * I/O ports for each interface type (see ipmi_defaults above) and
 * keep the first one that initializes.
 *
 * NOTE(review): sampled listing; the allocation-failure return, the
 * kfree on probe failure and loop-exit break are among the missing
 * lines.
 */
2470 static __devinit void default_find_bmc(void)
2472 struct smi_info *info;
2475 for (i = 0; ; i++) {
/* The defaults table is terminated by a zero port. */
2476 if (!ipmi_defaults[i].port)
2479 info = kzalloc(sizeof(*info), GFP_KERNEL);
2483 info->addr_source = NULL;
2485 info->si_type = ipmi_defaults[i].type;
2486 info->io_setup = port_setup;
2487 info->io.addr_data = ipmi_defaults[i].port;
2488 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2490 info->io.addr = NULL;
2491 info->io.regspacing = DEFAULT_REGSPACING;
2492 info->io.regsize = DEFAULT_REGSPACING;
2493 info->io.regshift = 0;
/* Stop at the first default that actually probes successfully. */
2495 if (try_smi_init(info) == 0) {
2497 printk(KERN_INFO "ipmi_si: Found default %s state"
2498 " machine at %s address 0x%lx\n",
2499 si_to_str[info->si_type],
2500 addr_space_to_str[info->io.addr_type],
2501 info->io.addr_data);
2507 static int is_new_interface(struct smi_info *info)
2511 list_for_each_entry(e, &smi_infos, link) {
2512 if (e->io.addr_type != info->io.addr_type)
2514 if (e->io.addr_data == info->io.addr_data)
/*
 * try_smi_init - bring up one interface end-to-end: reject duplicates,
 * pick the state-machine handlers for the SI type, allocate and set up
 * I/O, detect the hardware, probe the BMC with Get Device ID, claim
 * the IRQ, register a platform device (unless PCI already gave us
 * one), register with the IPMI core and create the /proc entries.
 * On any failure it unwinds everything it did (out_err* labels).
 *
 * NOTE(review): sampled listing; case labels, error-path gotos,
 * kthread/timer start-up and several cleanup lines are missing
 * between the numbered lines below.
 */
2521 static int try_smi_init(struct smi_info *new_smi)
2525 if (new_smi->addr_source) {
2526 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2527 " machine at %s address 0x%lx, slave address 0x%x,"
2529 new_smi->addr_source,
2530 si_to_str[new_smi->si_type],
2531 addr_space_to_str[new_smi->io.addr_type],
2532 new_smi->io.addr_data,
2533 new_smi->slave_addr, new_smi->irq);
2536 mutex_lock(&smi_infos_lock);
2537 if (!is_new_interface(new_smi)) {
2538 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2543 /* So we know not to free it unless we have allocated one. */
2544 new_smi->intf = NULL;
2545 new_smi->si_sm = NULL;
2546 new_smi->handlers = NULL;
/* Select the low-level state machine for this interface type. */
2548 switch (new_smi->si_type) {
2550 new_smi->handlers = &kcs_smi_handlers;
2554 new_smi->handlers = &smic_smi_handlers;
2558 new_smi->handlers = &bt_smi_handlers;
2562 /* No support for anything else yet. */
2567 /* Allocate the state machine's data and initialize it. */
2568 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2569 if (!new_smi->si_sm) {
2570 printk(" Could not allocate state machine memory\n");
2574 new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2577 /* Now that we know the I/O size, we can set up the I/O. */
2578 rv = new_smi->io_setup(new_smi);
2580 printk(" Could not set up I/O space\n");
2584 spin_lock_init(&(new_smi->si_lock));
2585 spin_lock_init(&(new_smi->msg_lock));
2586 spin_lock_init(&(new_smi->count_lock));
2588 /* Do low-level detection first. */
2589 if (new_smi->handlers->detect(new_smi->si_sm)) {
2590 if (new_smi->addr_source)
2591 printk(KERN_INFO "ipmi_si: Interface detection"
2597 /* Attempt a get device id command.  If it fails, we probably
2598 don't have a BMC here. */
2599 rv = try_get_dev_id(new_smi);
2601 if (new_smi->addr_source)
2602 printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2603 " at this location\n");
2607 setup_oem_data_handler(new_smi);
2608 setup_xaction_handlers(new_smi);
2610 /* Try to claim any interrupts. */
2611 if (new_smi->irq_setup)
2612 new_smi->irq_setup(new_smi);
/* Initialize runtime state before the timer/IRQ can fire. */
2614 INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2615 INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2616 new_smi->curr_msg = NULL;
2617 atomic_set(&new_smi->req_events, 0);
2618 new_smi->run_to_completion = 0;
2620 new_smi->interrupt_disabled = 0;
2621 atomic_set(&new_smi->stop_operation, 0);
2622 new_smi->intf_num = smi_num;
2625 /* Start clearing the flags before we enable interrupts or the
2626 timer to avoid racing with the timer. */
2627 start_clear_flags(new_smi);
2628 /* IRQ is defined to be set when non-zero. */
2630 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2632 if (!new_smi->dev) {
2633 /* If we don't already have a device from something
2634 * else (like PCI), then register a new one. */
2635 new_smi->pdev = platform_device_alloc("ipmi_si",
2640 " Unable to allocate platform device\n");
2643 new_smi->dev = &new_smi->pdev->dev;
2644 new_smi->dev->driver = &ipmi_driver;
2646 rv = platform_device_add(new_smi->pdev);
2650 " Unable to register system interface device:"
2655 new_smi->dev_registered = 1;
2658 rv = ipmi_register_smi(&handlers,
2660 &new_smi->device_id,
2663 new_smi->slave_addr);
2666 "ipmi_si: Unable to register device: error %d\n",
2668 goto out_err_stop_timer;
2671 rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2672 type_file_read_proc, NULL,
2673 new_smi, THIS_MODULE);
2676 "ipmi_si: Unable to create proc entry: %d\n",
2678 goto out_err_stop_timer;
2681 rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2682 stat_file_read_proc, NULL,
2683 new_smi, THIS_MODULE);
2686 "ipmi_si: Unable to create proc entry: %d\n",
2688 goto out_err_stop_timer;
2691 rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
2692 param_read_proc, NULL,
2693 new_smi, THIS_MODULE);
2696 "ipmi_si: Unable to create proc entry: %d\n",
2698 goto out_err_stop_timer;
2701 list_add_tail(&new_smi->link, &smi_infos);
2703 mutex_unlock(&smi_infos_lock);
2705 printk(" IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
/* Error unwinding starts here: stop the timer/thread first, ... */
2710 atomic_inc(&new_smi->stop_operation);
2711 wait_for_timer_and_thread(new_smi);
2715 ipmi_unregister_smi(new_smi->intf);
2717 if (new_smi->irq_cleanup)
2718 new_smi->irq_cleanup(new_smi);
2720 /* Wait until we know that we are out of any interrupt
2721 handlers might have been running before we freed the
2723 synchronize_sched();
2725 if (new_smi->si_sm) {
2726 if (new_smi->handlers)
2727 new_smi->handlers->cleanup(new_smi->si_sm);
2728 kfree(new_smi->si_sm);
2730 if (new_smi->addr_source_cleanup)
2731 new_smi->addr_source_cleanup(new_smi);
2732 if (new_smi->io_cleanup)
2733 new_smi->io_cleanup(new_smi);
2735 if (new_smi->dev_registered)
2736 platform_device_unregister(new_smi->pdev);
2740 mutex_unlock(&smi_infos_lock);
/*
 * init_ipmi_si - module entry point: register the platform driver,
 * parse the si_type parameter list, run every discovery mechanism
 * (hardcoded params, ACPI/SPMI, DMI, PCI, then legacy defaults), and
 * bail out with -ENODEV if nothing was found and unload_when_empty
 * is set.
 *
 * NOTE(review): sampled listing; the "initialized" guard, the ACPI
 * and DMI find calls, and several closing braces are among the
 * missing lines.
 */
2745 static __devinit int init_ipmi_si(void)
2755 /* Register the device drivers. */
2756 rv = driver_register(&ipmi_driver);
2759 "init_ipmi_si: Unable to register driver: %d\n",
2765 /* Parse out the si_type string into its components. */
2768 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
2770 str = strchr(str, ',');
2780 printk(KERN_INFO "IPMI System Interface driver.\n");
2782 hardcode_find_bmc();
2793 rv = pci_register_driver(&ipmi_pci_driver);
2796 "init_ipmi_si: Unable to register PCI driver: %d\n",
/* Only fall back to the legacy default ports if nothing else found
   an interface. */
2801 if (si_trydefaults) {
2802 mutex_lock(&smi_infos_lock);
2803 if (list_empty(&smi_infos)) {
2804 /* No BMC was found, try defaults. */
2805 mutex_unlock(&smi_infos_lock);
2808 mutex_unlock(&smi_infos_lock);
2812 mutex_lock(&smi_infos_lock);
2813 if (unload_when_empty && list_empty(&smi_infos)) {
2814 mutex_unlock(&smi_infos_lock);
2816 pci_unregister_driver(&ipmi_pci_driver);
2818 driver_unregister(&ipmi_driver);
2819 printk("ipmi_si: Unable to find any System Interface(s)\n");
2822 mutex_unlock(&smi_infos_lock);
2826 module_init(init_ipmi_si);
/*
 * cleanup_one_si - tear down one interface: unlink it, stop IRQs,
 * timer and thread, drain any in-flight transaction, unregister from
 * the IPMI core, and free all resources.  Caller holds
 * smi_infos_lock.
 *
 * NOTE(review): sampled listing; the NULL check, the polling call in
 * the drain loop and the final kfree(to_clean) are among the missing
 * lines.
 */
2828 static void cleanup_one_si(struct smi_info *to_clean)
2831 unsigned long flags;
2836 list_del(&to_clean->link);
2838 /* Tell the timer and interrupt handlers that we are shutting
2840 spin_lock_irqsave(&(to_clean->si_lock), flags);
2841 spin_lock(&(to_clean->msg_lock));
2843 atomic_inc(&to_clean->stop_operation);
/* IRQ teardown happens under the locks so no handler can race with
   the shutdown flag. */
2845 if (to_clean->irq_cleanup)
2846 to_clean->irq_cleanup(to_clean);
2848 spin_unlock(&(to_clean->msg_lock));
2849 spin_unlock_irqrestore(&(to_clean->si_lock), flags);
2851 /* Wait until we know that we are out of any interrupt
2852 handlers might have been running before we freed the
2854 synchronize_sched();
2856 wait_for_timer_and_thread(to_clean);
2858 /* Interrupts and timeouts are stopped, now make sure the
2859 interface is in a clean state. */
2860 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
2862 schedule_timeout_uninterruptible(1);
2865 rv = ipmi_unregister_smi(to_clean->intf);
2868 "ipmi_si: Unable to unregister device: errno=%d\n",
2872 to_clean->handlers->cleanup(to_clean->si_sm);
2874 kfree(to_clean->si_sm);
2876 if (to_clean->addr_source_cleanup)
2877 to_clean->addr_source_cleanup(to_clean);
2878 if (to_clean->io_cleanup)
2879 to_clean->io_cleanup(to_clean);
2881 if (to_clean->dev_registered)
2882 platform_device_unregister(to_clean->pdev);
2887 static __exit void cleanup_ipmi_si(void)
2889 struct smi_info *e, *tmp_e;
2895 pci_unregister_driver(&ipmi_pci_driver);
2898 mutex_lock(&smi_infos_lock);
2899 list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
2901 mutex_unlock(&smi_infos_lock);
2903 driver_unregister(&ipmi_driver);
2905 module_exit(cleanup_ipmi_si);
2907 MODULE_LICENSE("GPL");
2908 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
2909 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");