header cleaning: don't include smp_lock.h when not used
linux-2.6: drivers/char/ipmi/ipmi_si_intf.c
1 /*
2  * ipmi_si.c
3  *
4  * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5  * BT).
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
13  *
14  *  This program is free software; you can redistribute it and/or modify it
15  *  under the terms of the GNU General Public License as published by the
16  *  Free Software Foundation; either version 2 of the License, or (at your
17  *  option) any later version.
18  *
19  *
20  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
21  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
22  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26  *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
28  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
29  *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  *
31  *  You should have received a copy of the GNU General Public License along
32  *  with this program; if not, write to the Free Software Foundation, Inc.,
33  *  675 Mass Ave, Cambridge, MA 02139, USA.
34  */
35
36 /*
37  * This file holds the "policy" for the interface to the SMI state
38  * machine.  It does the configuration, handles timers and interrupts,
39  * and drives the real SMI state machine.
40  */
41
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <asm/system.h>
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/errno.h>
48 #include <linux/spinlock.h>
49 #include <linux/slab.h>
50 #include <linux/delay.h>
51 #include <linux/list.h>
52 #include <linux/pci.h>
53 #include <linux/ioport.h>
54 #include <linux/notifier.h>
55 #include <linux/mutex.h>
56 #include <linux/kthread.h>
57 #include <asm/irq.h>
58 #include <linux/interrupt.h>
59 #include <linux/rcupdate.h>
60 #include <linux/ipmi_smi.h>
61 #include <asm/io.h>
62 #include "ipmi_si_sm.h"
63 #include <linux/init.h>
64 #include <linux/dmi.h>
65 #include <linux/string.h>
66 #include <linux/ctype.h>
67
68 #ifdef CONFIG_PPC_OF
69 #include <asm/of_device.h>
70 #include <asm/of_platform.h>
71 #endif
72
73 #define PFX "ipmi_si: "
74
75 /* Measure times between events in the driver. */
76 #undef DEBUG_TIMING
77
78 /* Call every 10 ms. */
79 #define SI_TIMEOUT_TIME_USEC    10000
80 #define SI_USEC_PER_JIFFY       (1000000/HZ)
81 #define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
82 #define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
83                                        short timeout */
84
85 /* Bit for BMC global enables. */
86 #define IPMI_BMC_RCV_MSG_INTR     0x01
87 #define IPMI_BMC_EVT_MSG_INTR     0x02
88 #define IPMI_BMC_EVT_MSG_BUFF     0x04
89 #define IPMI_BMC_SYS_LOG          0x08
90
91 enum si_intf_state {
92         SI_NORMAL,
93         SI_GETTING_FLAGS,
94         SI_GETTING_EVENTS,
95         SI_CLEARING_FLAGS,
96         SI_CLEARING_FLAGS_THEN_SET_IRQ,
97         SI_GETTING_MESSAGES,
98         SI_ENABLE_INTERRUPTS1,
99         SI_ENABLE_INTERRUPTS2,
100         SI_DISABLE_INTERRUPTS1,
101         SI_DISABLE_INTERRUPTS2
102         /* FIXME - add watchdog stuff. */
103 };
104
105 /* Some BT-specific defines we need here. */
106 #define IPMI_BT_INTMASK_REG             2
107 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT   2
108 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT  1
109
110 enum si_type {
111     SI_KCS, SI_SMIC, SI_BT
112 };
113 static char *si_to_str[] = { "kcs", "smic", "bt" };
114
115 #define DEVICE_NAME "ipmi_si"
116
117 static struct device_driver ipmi_driver =
118 {
119         .name = DEVICE_NAME,
120         .bus = &platform_bus_type
121 };
122
123 struct smi_info
124 {
125         int                    intf_num;
126         ipmi_smi_t             intf;
127         struct si_sm_data      *si_sm;
128         struct si_sm_handlers  *handlers;
129         enum si_type           si_type;
130         spinlock_t             si_lock;
131         spinlock_t             msg_lock;
132         struct list_head       xmit_msgs;
133         struct list_head       hp_xmit_msgs;
134         struct ipmi_smi_msg    *curr_msg;
135         enum si_intf_state     si_state;
136
137         /* Used to handle the various types of I/O that can occur with
138            IPMI */
139         struct si_sm_io io;
140         int (*io_setup)(struct smi_info *info);
141         void (*io_cleanup)(struct smi_info *info);
142         int (*irq_setup)(struct smi_info *info);
143         void (*irq_cleanup)(struct smi_info *info);
144         unsigned int io_size;
145         char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
146         void (*addr_source_cleanup)(struct smi_info *info);
147         void *addr_source_data;
148
149         /* Per-OEM handler, called from handle_flags().
150            Returns 1 when handle_flags() needs to be re-run
151            or 0 indicating it set si_state itself.
152         */
153         int (*oem_data_avail_handler)(struct smi_info *smi_info);
154
155         /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
156            is set to hold the flags until we are done handling everything
157            from the flags. */
158 #define RECEIVE_MSG_AVAIL       0x01
159 #define EVENT_MSG_BUFFER_FULL   0x02
160 #define WDT_PRE_TIMEOUT_INT     0x08
161 #define OEM0_DATA_AVAIL     0x20
162 #define OEM1_DATA_AVAIL     0x40
163 #define OEM2_DATA_AVAIL     0x80
164 #define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
165                              OEM1_DATA_AVAIL | \
166                              OEM2_DATA_AVAIL)
167         unsigned char       msg_flags;
168
169         /* If set to true, this will request events the next time the
170            state machine is idle. */
171         atomic_t            req_events;
172
173         /* If true, run the state machine to completion on every send
174            call.  Generally used after a panic to make sure stuff goes
175            out. */
176         int                 run_to_completion;
177
178         /* The I/O port of an SI interface. */
179         int                 port;
180
181         /* The space between start addresses of the two ports.  For
182            instance, if the first port is 0xca2 and the spacing is 4, then
183            the second port is 0xca6. */
184         unsigned int        spacing;
185
186         /* zero if no irq; */
187         int                 irq;
188
189         /* The timer for this si. */
190         struct timer_list   si_timer;
191
192         /* The time (in jiffies) the last timeout occurred at. */
193         unsigned long       last_timeout_jiffies;
194
195         /* Used to gracefully stop the timer without race conditions. */
196         atomic_t            stop_operation;
197
198         /* The driver will disable interrupts when it gets into a
199            situation where it cannot handle messages due to lack of
200            memory.  Once that situation clears up, it will re-enable
201            interrupts. */
202         int interrupt_disabled;
203
204         /* From the get device id response... */
205         struct ipmi_device_id device_id;
206
207         /* Driver model stuff. */
208         struct device *dev;
209         struct platform_device *pdev;
210
211          /* True if we allocated the device, false if it came from
212           * someplace else (like PCI). */
213         int dev_registered;
214
215         /* Slave address, could be reported from DMI. */
216         unsigned char slave_addr;
217
218         /* Counters and things for the proc filesystem. */
219         spinlock_t count_lock;
220         unsigned long short_timeouts;
221         unsigned long long_timeouts;
222         unsigned long timeout_restarts;
223         unsigned long idles;
224         unsigned long interrupts;
225         unsigned long attentions;
226         unsigned long flag_fetches;
227         unsigned long hosed_count;
228         unsigned long complete_transactions;
229         unsigned long events;
230         unsigned long watchdog_pretimeouts;
231         unsigned long incoming_messages;
232
233         struct task_struct *thread;
234
235         struct list_head link;
236 };
237
238 #define SI_MAX_PARMS 4
239
240 static int force_kipmid[SI_MAX_PARMS];
241 static int num_force_kipmid;
242
243 static int unload_when_empty = 1;
244
245 static int try_smi_init(struct smi_info *smi);
246 static void cleanup_one_si(struct smi_info *to_clean);
247
248 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
249 static int register_xaction_notifier(struct notifier_block * nb)
250 {
251         return atomic_notifier_chain_register(&xaction_notifier_list, nb);
252 }
253
254 static void deliver_recv_msg(struct smi_info *smi_info,
255                              struct ipmi_smi_msg *msg)
256 {
257         /* Deliver the message to the upper layer with the lock
258            released. */
259         spin_unlock(&(smi_info->si_lock));
260         ipmi_smi_msg_received(smi_info->intf, msg);
261         spin_lock(&(smi_info->si_lock));
262 }
263
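/* Finish the current message with an error: clamp cCode to a valid IPMI
   completion code, build a minimal response, and deliver it upward. */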
264 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
265 {
266         struct ipmi_smi_msg *msg = smi_info->curr_msg;
267
268         if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
269                 cCode = IPMI_ERR_UNSPECIFIED;
270         /* else use it as is */
271
272         /* Make it a response */
273         msg->rsp[0] = msg->data[0] | 4;
274         msg->rsp[1] = msg->data[1];
275         msg->rsp[2] = cCode;
276         msg->rsp_size = 3;
277
278         smi_info->curr_msg = NULL;
279         deliver_recv_msg(smi_info, msg);
280 }
281
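/* Pull the next message off the transmit queues (high-priority queue
   first) and start it on the state machine.  Returns SI_SM_IDLE if
   there was nothing to send. */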
282 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
283 {
284         int              rv;
285         struct list_head *entry = NULL;
286 #ifdef DEBUG_TIMING
287         struct timeval t;
288 #endif
289
290         /* No need to save flags, we already have interrupts off and we
291            already hold the SMI lock. */
292         spin_lock(&(smi_info->msg_lock));
293
294         /* Pick the high priority queue first. */
295         if (!list_empty(&(smi_info->hp_xmit_msgs))) {
296                 entry = smi_info->hp_xmit_msgs.next;
297         } else if (!list_empty(&(smi_info->xmit_msgs))) {
298                 entry = smi_info->xmit_msgs.next;
299         }
300
301         if (!entry) {
302                 smi_info->curr_msg = NULL;
303                 rv = SI_SM_IDLE;
304         } else {
305                 int err;
306
307                 list_del(entry);
308                 smi_info->curr_msg = list_entry(entry,
309                                                 struct ipmi_smi_msg,
310                                                 link);
311 #ifdef DEBUG_TIMING
312                 do_gettimeofday(&t);
313                 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
314 #endif
315                 err = atomic_notifier_call_chain(&xaction_notifier_list,
316                                 0, smi_info);
317                 if (err & NOTIFY_STOP_MASK) {
318                         rv = SI_SM_CALL_WITHOUT_DELAY;
319                         goto out;
320                 }
321                 err = smi_info->handlers->start_transaction(
322                         smi_info->si_sm,
323                         smi_info->curr_msg->data,
324                         smi_info->curr_msg->data_size);
325                 if (err) {
326                         return_hosed_msg(smi_info, err);
327                 }
328
329                 rv = SI_SM_CALL_WITHOUT_DELAY;
330         }
331         out:
332         spin_unlock(&(smi_info->msg_lock));
333
334         return rv;
335 }
336
337 static void start_enable_irq(struct smi_info *smi_info)
338 {
339         unsigned char msg[2];
340
341         /* If we are enabling interrupts, we have to tell the
342            BMC to use them. */
343         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
344         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
345
346         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
347         smi_info->si_state = SI_ENABLE_INTERRUPTS1;
348 }
349
350 static void start_disable_irq(struct smi_info *smi_info)
351 {
352         unsigned char msg[2];
353
354         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
355         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
356
357         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
358         smi_info->si_state = SI_DISABLE_INTERRUPTS1;
359 }
360
361 static void start_clear_flags(struct smi_info *smi_info)
362 {
363         unsigned char msg[3];
364
365         /* Make sure the watchdog pre-timeout flag is not set at startup. */
366         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
367         msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
368         msg[2] = WDT_PRE_TIMEOUT_INT;
369
370         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
371         smi_info->si_state = SI_CLEARING_FLAGS;
372 }
373
374 /* When we have a situation where we run out of memory and cannot
375    allocate messages, we just leave them in the BMC and run the system
376    polled until we can allocate some memory.  Once we have some
377    memory, we will re-enable the interrupt. */
378 static inline void disable_si_irq(struct smi_info *smi_info)
379 {
380         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
381                 start_disable_irq(smi_info);
382                 smi_info->interrupt_disabled = 1;
383         }
384 }
385
386 static inline void enable_si_irq(struct smi_info *smi_info)
387 {
388         if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
389                 start_enable_irq(smi_info);
390                 smi_info->interrupt_disabled = 0;
391         }
392 }
393
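/* Act on the flags fetched from the BMC: report watchdog pre-timeouts,
   fetch queued messages and events, and hand OEM data to the per-OEM
   handler if one is registered. */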
394 static void handle_flags(struct smi_info *smi_info)
395 {
396  retry:
397         if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
398                 /* Watchdog pre-timeout */
399                 spin_lock(&smi_info->count_lock);
400                 smi_info->watchdog_pretimeouts++;
401                 spin_unlock(&smi_info->count_lock);
402
403                 start_clear_flags(smi_info);
404                 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
405                 spin_unlock(&(smi_info->si_lock));
406                 ipmi_smi_watchdog_pretimeout(smi_info->intf);
407                 spin_lock(&(smi_info->si_lock));
408         } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
409                 /* Messages available. */
410                 smi_info->curr_msg = ipmi_alloc_smi_msg();
411                 if (!smi_info->curr_msg) {
412                         disable_si_irq(smi_info);
413                         smi_info->si_state = SI_NORMAL;
414                         return;
415                 }
416                 enable_si_irq(smi_info);
417
418                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
419                 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
420                 smi_info->curr_msg->data_size = 2;
421
422                 smi_info->handlers->start_transaction(
423                         smi_info->si_sm,
424                         smi_info->curr_msg->data,
425                         smi_info->curr_msg->data_size);
426                 smi_info->si_state = SI_GETTING_MESSAGES;
427         } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
428                 /* Events available. */
429                 smi_info->curr_msg = ipmi_alloc_smi_msg();
430                 if (!smi_info->curr_msg) {
431                         disable_si_irq(smi_info);
432                         smi_info->si_state = SI_NORMAL;
433                         return;
434                 }
435                 enable_si_irq(smi_info);
436
437                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
438                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
439                 smi_info->curr_msg->data_size = 2;
440
441                 smi_info->handlers->start_transaction(
442                         smi_info->si_sm,
443                         smi_info->curr_msg->data,
444                         smi_info->curr_msg->data_size);
445                 smi_info->si_state = SI_GETTING_EVENTS;
446         } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
447                    smi_info->oem_data_avail_handler) {
448                 if (smi_info->oem_data_avail_handler(smi_info))
449                         goto retry;
450         } else {
451                 smi_info->si_state = SI_NORMAL;
452         }
453 }
454
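/* The state machine reported a completed transaction; dispatch on the
   state we were in to consume the result and pick the next state. */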
455 static void handle_transaction_done(struct smi_info *smi_info)
456 {
457         struct ipmi_smi_msg *msg;
458 #ifdef DEBUG_TIMING
459         struct timeval t;
460
461         do_gettimeofday(&t);
462         printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
463 #endif
464         switch (smi_info->si_state) {
465         case SI_NORMAL:
466                 if (!smi_info->curr_msg)
467                         break;
468
469                 smi_info->curr_msg->rsp_size
470                         = smi_info->handlers->get_result(
471                                 smi_info->si_sm,
472                                 smi_info->curr_msg->rsp,
473                                 IPMI_MAX_MSG_LENGTH);
474
475                 /* Do this here because deliver_recv_msg() releases the
476                    lock, and a new message can be put in during the
477                    time the lock is released. */
478                 msg = smi_info->curr_msg;
479                 smi_info->curr_msg = NULL;
480                 deliver_recv_msg(smi_info, msg);
481                 break;
482
483         case SI_GETTING_FLAGS:
484         {
485                 unsigned char msg[4];
486                 unsigned int  len;
487
488                 /* We got the flags from the SMI, now handle them. */
489                 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
490                 if (msg[2] != 0) {
491                         /* Error fetching flags, just give up for
492                            now. */
493                         smi_info->si_state = SI_NORMAL;
494                 } else if (len < 4) {
495                         /* Hmm, no flags.  That's technically illegal, but
496                            don't use uninitialized data. */
497                         smi_info->si_state = SI_NORMAL;
498                 } else {
499                         smi_info->msg_flags = msg[3];
500                         handle_flags(smi_info);
501                 }
502                 break;
503         }
504
505         case SI_CLEARING_FLAGS:
506         case SI_CLEARING_FLAGS_THEN_SET_IRQ:
507         {
508                 unsigned char msg[3];
509
510                 /* We cleared the flags. */
511                 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
512                 if (msg[2] != 0) {
513                         /* Error clearing flags */
514                         printk(KERN_WARNING
515                                "ipmi_si: Error clearing flags: %2.2x\n",
516                                msg[2]);
517                 }
518                 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
519                         start_enable_irq(smi_info);
520                 else
521                         smi_info->si_state = SI_NORMAL;
522                 break;
523         }
524
525         case SI_GETTING_EVENTS:
526         {
527                 smi_info->curr_msg->rsp_size
528                         = smi_info->handlers->get_result(
529                                 smi_info->si_sm,
530                                 smi_info->curr_msg->rsp,
531                                 IPMI_MAX_MSG_LENGTH);
532
533                 /* Do this here because deliver_recv_msg() releases the
534                    lock, and a new message can be put in during the
535                    time the lock is released. */
536                 msg = smi_info->curr_msg;
537                 smi_info->curr_msg = NULL;
538                 if (msg->rsp[2] != 0) {
539                         /* Error getting event, probably done. */
540                         msg->done(msg);
541
542                         /* Take off the event flag. */
543                         smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
544                         handle_flags(smi_info);
545                 } else {
546                         spin_lock(&smi_info->count_lock);
547                         smi_info->events++;
548                         spin_unlock(&smi_info->count_lock);
549
550                         /* Do this before we deliver the message
551                            because delivering the message releases the
552                            lock and something else can mess with the
553                            state. */
554                         handle_flags(smi_info);
555
556                         deliver_recv_msg(smi_info, msg);
557                 }
558                 break;
559         }
560
561         case SI_GETTING_MESSAGES:
562         {
563                 smi_info->curr_msg->rsp_size
564                         = smi_info->handlers->get_result(
565                                 smi_info->si_sm,
566                                 smi_info->curr_msg->rsp,
567                                 IPMI_MAX_MSG_LENGTH);
568
569                 /* Do this here because deliver_recv_msg() releases the
570                    lock, and a new message can be put in during the
571                    time the lock is released. */
572                 msg = smi_info->curr_msg;
573                 smi_info->curr_msg = NULL;
574                 if (msg->rsp[2] != 0) {
575                         /* Error getting message, probably done. */
576                         msg->done(msg);
577
578                         /* Take off the msg flag. */
579                         smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
580                         handle_flags(smi_info);
581                 } else {
582                         spin_lock(&smi_info->count_lock);
583                         smi_info->incoming_messages++;
584                         spin_unlock(&smi_info->count_lock);
585
586                         /* Do this before we deliver the message
587                            because delivering the message releases the
588                            lock and something else can mess with the
589                            state. */
590                         handle_flags(smi_info);
591
592                         deliver_recv_msg(smi_info, msg);
593                 }
594                 break;
595         }
596
597         case SI_ENABLE_INTERRUPTS1:
598         {
599                 unsigned char msg[4];
600
601                 /* We got the flags from the SMI, now handle them. */
602                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
603                 if (msg[2] != 0) {
604                         printk(KERN_WARNING
605                                "ipmi_si: Could not enable interrupts"
606                                ", failed get, using polled mode.\n");
607                         smi_info->si_state = SI_NORMAL;
608                 } else {
609                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
610                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
611                         msg[2] = (msg[3] |
612                                   IPMI_BMC_RCV_MSG_INTR |
613                                   IPMI_BMC_EVT_MSG_INTR);
614                         smi_info->handlers->start_transaction(
615                                 smi_info->si_sm, msg, 3);
616                         smi_info->si_state = SI_ENABLE_INTERRUPTS2;
617                 }
618                 break;
619         }
620
621         case SI_ENABLE_INTERRUPTS2:
622         {
623                 unsigned char msg[4];
624
625                 /* We got the flags from the SMI, now handle them. */
626                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
627                 if (msg[2] != 0) {
628                         printk(KERN_WARNING
629                                "ipmi_si: Could not enable interrupts"
630                                ", failed set, using polled mode.\n");
631                 }
632                 smi_info->si_state = SI_NORMAL;
633                 break;
634         }
635
636         case SI_DISABLE_INTERRUPTS1:
637         {
638                 unsigned char msg[4];
639
640                 /* We got the flags from the SMI, now handle them. */
641                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
642                 if (msg[2] != 0) {
643                         printk(KERN_WARNING
644                                "ipmi_si: Could not disable interrupts"
645                                ", failed get.\n");
646                         smi_info->si_state = SI_NORMAL;
647                 } else {
648                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
649                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
650                         msg[2] = (msg[3] &
651                                   ~(IPMI_BMC_RCV_MSG_INTR |
652                                     IPMI_BMC_EVT_MSG_INTR));
653                         smi_info->handlers->start_transaction(
654                                 smi_info->si_sm, msg, 3);
655                         smi_info->si_state = SI_DISABLE_INTERRUPTS2;
656                 }
657                 break;
658         }
659
660         case SI_DISABLE_INTERRUPTS2:
661         {
662                 unsigned char msg[4];
663
664                 /* We got the flags from the SMI, now handle them. */
665                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
666                 if (msg[2] != 0) {
667                         printk(KERN_WARNING
668                                "ipmi_si: Could not disable interrupts"
669                                ", failed set.\n");
670                 }
671                 smi_info->si_state = SI_NORMAL;
672                 break;
673         }
674         }
675 }
676
677 /* Called on timeouts and events.  Timeouts should pass the elapsed
678    time, interrupts should pass in zero. */
679 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
680                                            int time)
681 {
682         enum si_sm_result si_sm_result;
683
684  restart:
685         /* There used to be a loop here that waited a little while
686            (around 25us) before giving up.  That turned out to be
687            pointless, the minimum delays I was seeing were in the 300us
688            range, which is far too long to wait in an interrupt.  So
689            we just run until the state machine tells us something
690            happened or it needs a delay. */
691         si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
692         time = 0;
693         while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
694         {
695                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
696         }
697
698         if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
699         {
700                 spin_lock(&smi_info->count_lock);
701                 smi_info->complete_transactions++;
702                 spin_unlock(&smi_info->count_lock);
703
704                 handle_transaction_done(smi_info);
705                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
706         }
707         else if (si_sm_result == SI_SM_HOSED)
708         {
709                 spin_lock(&smi_info->count_lock);
710                 smi_info->hosed_count++;
711                 spin_unlock(&smi_info->count_lock);
712
713                 /* Do this before return_hosed_msg(), because that
714                    releases the lock. */
715                 smi_info->si_state = SI_NORMAL;
716                 if (smi_info->curr_msg != NULL) {
717                         /* If we were handling a user message, format
718                            a response to send to the upper layer to
719                            tell it about the error. */
720                         return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
721                 }
722                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
723         }
724
725         /* We prefer handling attn over new messages. */
726         if (si_sm_result == SI_SM_ATTN)
727         {
728                 unsigned char msg[2];
729
730                 spin_lock(&smi_info->count_lock);
731                 smi_info->attentions++;
732                 spin_unlock(&smi_info->count_lock);
733
734                 /* Got an attn, send down a get message flags to see
735                    what's causing it.  It would be better to handle
736                    this in the upper layer, but due to the way
737                    interrupts work with the SMI, that's not really
738                    possible. */
739                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
740                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
741
742                 smi_info->handlers->start_transaction(
743                         smi_info->si_sm, msg, 2);
744                 smi_info->si_state = SI_GETTING_FLAGS;
745                 goto restart;
746         }
747
748         /* If we are currently idle, try to start the next message. */
749         if (si_sm_result == SI_SM_IDLE) {
750                 spin_lock(&smi_info->count_lock);
751                 smi_info->idles++;
752                 spin_unlock(&smi_info->count_lock);
753
754                 si_sm_result = start_next_msg(smi_info);
755                 if (si_sm_result != SI_SM_IDLE)
756                         goto restart;
757         }
758
759         if ((si_sm_result == SI_SM_IDLE)
760             && (atomic_read(&smi_info->req_events)))
761         {
762                 /* We are idle and the upper layer requested that I fetch
763                    events, so do so. */
764                 atomic_set(&smi_info->req_events, 0);
765
766                 smi_info->curr_msg = ipmi_alloc_smi_msg();
767                 if (!smi_info->curr_msg)
768                         goto out;
769
770                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
771                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
772                 smi_info->curr_msg->data_size = 2;
773
774                 smi_info->handlers->start_transaction(
775                         smi_info->si_sm,
776                         smi_info->curr_msg->data,
777                         smi_info->curr_msg->data_size);
778                 smi_info->si_state = SI_GETTING_EVENTS;
779                 goto restart;
780         }
781  out:
782         return si_sm_result;
783 }
784
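/* Queue a message from the upper layer.  In run-to-completion mode the
   transaction is driven synchronously until the state machine is idle;
   otherwise the message is queued and started if the interface is idle. */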
785 static void sender(void                *send_info,
786                    struct ipmi_smi_msg *msg,
787                    int                 priority)
788 {
789         struct smi_info   *smi_info = send_info;
790         enum si_sm_result result;
791         unsigned long     flags;
792 #ifdef DEBUG_TIMING
793         struct timeval    t;
794 #endif
795
796         if (atomic_read(&smi_info->stop_operation)) {
797                 msg->rsp[0] = msg->data[0] | 4;
798                 msg->rsp[1] = msg->data[1];
799                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
800                 msg->rsp_size = 3;
801                 deliver_recv_msg(smi_info, msg);
802                 return;
803         }
804
805         spin_lock_irqsave(&(smi_info->msg_lock), flags);
806 #ifdef DEBUG_TIMING
807         do_gettimeofday(&t);
808         printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
809 #endif
810
811         if (smi_info->run_to_completion) {
812                 /* If we are running to completion, then throw it in
813                    the list and run transactions until everything is
814                    clear.  Priority doesn't matter here. */
815                 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
816
817                 /* We have to release the msg lock and claim the smi
818                    lock in this case, because of race conditions. */
819                 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
820
821                 spin_lock_irqsave(&(smi_info->si_lock), flags);
822                 result = smi_event_handler(smi_info, 0);
823                 while (result != SI_SM_IDLE) {
824                         udelay(SI_SHORT_TIMEOUT_USEC);
825                         result = smi_event_handler(smi_info,
826                                                    SI_SHORT_TIMEOUT_USEC);
827                 }
828                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
829                 return;
830         } else {
831                 if (priority > 0) {
832                         list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
833                 } else {
834                         list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
835                 }
836         }
837         spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
838
839         spin_lock_irqsave(&(smi_info->si_lock), flags);
840         if ((smi_info->si_state == SI_NORMAL)
841             && (smi_info->curr_msg == NULL))
842         {
843                 start_next_msg(smi_info);
844         }
845         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
846 }
847
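/* Switch the interface into (or out of) run-to-completion mode; when
   enabling, drive any in-flight transaction to completion synchronously. */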
848 static void set_run_to_completion(void *send_info, int i_run_to_completion)
849 {
850         struct smi_info   *smi_info = send_info;
851         enum si_sm_result result;
852         unsigned long     flags;
853
854         spin_lock_irqsave(&(smi_info->si_lock), flags);
855
856         smi_info->run_to_completion = i_run_to_completion;
857         if (i_run_to_completion) {
858                 result = smi_event_handler(smi_info, 0);
859                 while (result != SI_SM_IDLE) {
860                         udelay(SI_SHORT_TIMEOUT_USEC);
861                         result = smi_event_handler(smi_info,
862                                                    SI_SHORT_TIMEOUT_USEC);
863                 }
864         }
865
866         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
867 }
868
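/* Kernel thread (kipmi%d) that continuously drives the state machine for
   interfaces running without interrupts (or when forced via force_kipmid),
   yielding the CPU between calls. */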
869 static int ipmi_thread(void *data)
870 {
871         struct smi_info *smi_info = data;
872         unsigned long flags;
873         enum si_sm_result smi_result;
874
875         set_user_nice(current, 19);
876         while (!kthread_should_stop()) {
877                 spin_lock_irqsave(&(smi_info->si_lock), flags);
878                 smi_result = smi_event_handler(smi_info, 0);
879                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
880                 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
881                         /* do nothing */
882                 }
883                 else if (smi_result == SI_SM_CALL_WITH_DELAY)
884                         schedule();
885                 else
886                         schedule_timeout_interruptible(1);
887         }
888         return 0;
889 }
890
891
892 static void poll(void *send_info)
893 {
894         struct smi_info *smi_info = send_info;
895
896         /*
897          * Make sure there is some delay in the poll loop so we can
898          * drive time forward and timeout things.
899          */
900         udelay(10);
901         smi_event_handler(smi_info, 10);
902 }
903
904 static void request_events(void *send_info)
905 {
906         struct smi_info *smi_info = send_info;
907
908         if (atomic_read(&smi_info->stop_operation))
909                 return;
910
911         atomic_set(&smi_info->req_events, 1);
912 }
913
914 static int initialized;
915
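/* Timer callback that periodically drives the state machine.  It
   reschedules itself with a 1-jiffy timeout when the state machine asked
   for a short delay, and with the normal SI_TIMEOUT_JIFFIES interval
   otherwise (always the long interval when interrupts are in use). */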
916 static void smi_timeout(unsigned long data)
917 {
918         struct smi_info   *smi_info = (struct smi_info *) data;
919         enum si_sm_result smi_result;
920         unsigned long     flags;
921         unsigned long     jiffies_now;
922         long              time_diff;
923 #ifdef DEBUG_TIMING
924         struct timeval    t;
925 #endif
926
927         spin_lock_irqsave(&(smi_info->si_lock), flags);
928 #ifdef DEBUG_TIMING
929         do_gettimeofday(&t);
930         printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
931 #endif
932         jiffies_now = jiffies;
933         time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
934                      * SI_USEC_PER_JIFFY);
935         smi_result = smi_event_handler(smi_info, time_diff);
936
937         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
938
939         smi_info->last_timeout_jiffies = jiffies_now;
940
941         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
942                 /* Running with interrupts, only do long timeouts. */
943                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
944                 spin_lock_irqsave(&smi_info->count_lock, flags);
945                 smi_info->long_timeouts++;
946                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
947                 goto do_add_timer;
948         }
949
950         /* If the state machine asks for a short delay, then shorten
951            the timer timeout. */
952         if (smi_result == SI_SM_CALL_WITH_DELAY) {
953                 spin_lock_irqsave(&smi_info->count_lock, flags);
954                 smi_info->short_timeouts++;
955                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
956                 smi_info->si_timer.expires = jiffies + 1;
957         } else {
958                 spin_lock_irqsave(&smi_info->count_lock, flags);
959                 smi_info->long_timeouts++;
960                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
961                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
962         }
963
964  do_add_timer:
965         add_timer(&(smi_info->si_timer));
966 }
967
968 static irqreturn_t si_irq_handler(int irq, void *data)
969 {
970         struct smi_info *smi_info = data;
971         unsigned long   flags;
972 #ifdef DEBUG_TIMING
973         struct timeval  t;
974 #endif
975
976         spin_lock_irqsave(&(smi_info->si_lock), flags);
977
978         spin_lock(&smi_info->count_lock);
979         smi_info->interrupts++;
980         spin_unlock(&smi_info->count_lock);
981
982 #ifdef DEBUG_TIMING
983         do_gettimeofday(&t);
984         printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
985 #endif
986         smi_event_handler(smi_info, 0);
987         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
988         return IRQ_HANDLED;
989 }
990
991 static irqreturn_t si_bt_irq_handler(int irq, void *data)
992 {
993         struct smi_info *smi_info = data;
994         /* We need to clear the IRQ flag for the BT interface. */
995         smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
996                              IPMI_BT_INTMASK_CLEAR_IRQ_BIT
997                              | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
998         return si_irq_handler(irq, data);
999 }
1000
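/* Called by the message handler once the interface is registered; starts
   the driving timer and, if needed (or forced via force_kipmid), the
   kipmid polling thread. */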
1001 static int smi_start_processing(void       *send_info,
1002                                 ipmi_smi_t intf)
1003 {
1004         struct smi_info *new_smi = send_info;
1005         int             enable = 0;
1006
1007         new_smi->intf = intf;
1008
1009         /* Set up the timer that drives the interface. */
1010         setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
1011         new_smi->last_timeout_jiffies = jiffies;
1012         mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
1013
1014         /*
1015          * Check if the user forcefully enabled the daemon.
1016          */
1017         if (new_smi->intf_num < num_force_kipmid)
1018                 enable = force_kipmid[new_smi->intf_num];
1019         /*
1020          * The BT interface is efficient enough to not need a thread,
1021          * and there is no need for a thread if we have interrupts.
1022          */
1023         else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
1024                 enable = 1;
1025
1026         if (enable) {
1027                 new_smi->thread = kthread_run(ipmi_thread, new_smi,
1028                                               "kipmi%d", new_smi->intf_num);
1029                 if (IS_ERR(new_smi->thread)) {
1030                         printk(KERN_NOTICE "ipmi_si_intf: Could not start"
1031                                " kernel thread due to error %ld, only using"
1032                                " timers to drive the interface\n",
1033                                PTR_ERR(new_smi->thread));
1034                         new_smi->thread = NULL;
1035                 }
1036         }
1037
1038         return 0;
1039 }
1040
1041 static void set_maintenance_mode(void *send_info, int enable)
1042 {
1043         struct smi_info   *smi_info = send_info;
1044
1045         if (!enable)
1046                 atomic_set(&smi_info->req_events, 0);
1047 }
1048
1049 static struct ipmi_smi_handlers handlers =
1050 {
1051         .owner                  = THIS_MODULE,
1052         .start_processing       = smi_start_processing,
1053         .sender                 = sender,
1054         .request_events         = request_events,
1055         .set_maintenance_mode   = set_maintenance_mode,
1056         .set_run_to_completion  = set_run_to_completion,
1057         .poll                   = poll,
1058 };
1059
1060 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
1061    a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS */
1062
1063 static LIST_HEAD(smi_infos);
1064 static DEFINE_MUTEX(smi_infos_lock);
1065 static int smi_num; /* Used to sequence the SMIs */
1066
1067 #define DEFAULT_REGSPACING      1
1068 #define DEFAULT_REGSIZE         1
1069
1070 static int           si_trydefaults = 1;
1071 static char          *si_type[SI_MAX_PARMS];
1072 #define MAX_SI_TYPE_STR 30
1073 static char          si_type_str[MAX_SI_TYPE_STR];
1074 static unsigned long addrs[SI_MAX_PARMS];
1075 static int num_addrs;
1076 static unsigned int  ports[SI_MAX_PARMS];
1077 static int num_ports;
1078 static int           irqs[SI_MAX_PARMS];
1079 static int num_irqs;
1080 static int           regspacings[SI_MAX_PARMS];
1081 static int num_regspacings;
1082 static int           regsizes[SI_MAX_PARMS];
1083 static int num_regsizes;
1084 static int           regshifts[SI_MAX_PARMS];
1085 static int num_regshifts;
1086 static int slave_addrs[SI_MAX_PARMS];
1087 static int num_slave_addrs;
1088
1089 #define IPMI_IO_ADDR_SPACE  0
1090 #define IPMI_MEM_ADDR_SPACE 1
1091 static char *addr_space_to_str[] = { "i/o", "mem" };
1092
1093 static int hotmod_handler(const char *val, struct kernel_param *kp);
1094
1095 module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
1096 MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
1097                  " Documentation/IPMI.txt in the kernel sources for the"
1098                  " gory details.");
1099
1100 module_param_named(trydefaults, si_trydefaults, bool, 0);
1101 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1102                  " default scan of the KCS and SMIC interface at the standard"
1103                  " address");
1104 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1105 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1106                  " interface separated by commas.  The types are 'kcs',"
1107                  " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
1108                  " the first interface to kcs and the second to bt");
1109 module_param_array(addrs, long, &num_addrs, 0);
1110 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1111                  " addresses separated by commas.  Only use if an interface"
1112                  " is in memory.  Otherwise, set it to zero or leave"
1113                  " it blank.");
1114 module_param_array(ports, int, &num_ports, 0);
1115 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1116                  " addresses separated by commas.  Only use if an interface"
1117                  " is a port.  Otherwise, set it to zero or leave"
1118                  " it blank.");
1119 module_param_array(irqs, int, &num_irqs, 0);
1120 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1121                  " addresses separated by commas.  Only use if an interface"
1122                  " has an interrupt.  Otherwise, set it to zero or leave"
1123                  " it blank.");
1124 module_param_array(regspacings, int, &num_regspacings, 0);
1125 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1126                  " and each successive register used by the interface.  For"
1127                  " instance, if the start address is 0xca2 and the spacing"
1128                  " is 2, then the second address is at 0xca4.  Defaults"
1129                  " to 1.");
1130 module_param_array(regsizes, int, &num_regsizes, 0);
1131 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1132                  " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1133                  " 16-bit, 32-bit, or 64-bit register.  Use this if"
1134                  " the 8-bit IPMI register has to be read from a larger"
1135                  " register.");
1136 module_param_array(regshifts, int, &num_regshifts, 0);
1137 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
1138                  " IPMI register, in bits.  For instance, if the data"
1139                  " is read from a 32-bit word and the IPMI data is in"
1140                  " bits 8-15, then the shift would be 8");
1141 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1142 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1143                  " the controller.  Normally this is 0x20, but can be"
1144                  " overridden by this parm.  This is an array indexed"
1145                  " by interface number.");
1146 module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1147 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1148                  " disabled (0).  Normally the IPMI driver auto-detects"
1149                  " this, but the value may be overridden by this parm.");
1150 module_param(unload_when_empty, int, 0);
1151 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1152                  " specified or found, default is 1.  Setting to 0"
1153                  " is useful for hot add of devices using hotmod.");
1154
1155
1156 static void std_irq_cleanup(struct smi_info *info)
1157 {
1158         if (info->si_type == SI_BT)
1159                 /* Disable the interrupt in the BT interface. */
1160                 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1161         free_irq(info->irq, info);
1162 }
1163
1164 static int std_irq_setup(struct smi_info *info)
1165 {
1166         int rv;
1167
1168         if (!info->irq)
1169                 return 0;
1170
1171         if (info->si_type == SI_BT) {
1172                 rv = request_irq(info->irq,
1173                                  si_bt_irq_handler,
1174                                  IRQF_SHARED | IRQF_DISABLED,
1175                                  DEVICE_NAME,
1176                                  info);
1177                 if (!rv)
1178                         /* Enable the interrupt in the BT interface. */
1179                         info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1180                                          IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1181         } else
1182                 rv = request_irq(info->irq,
1183                                  si_irq_handler,
1184                                  IRQF_SHARED | IRQF_DISABLED,
1185                                  DEVICE_NAME,
1186                                  info);
1187         if (rv) {
1188                 printk(KERN_WARNING
1189                        "ipmi_si: %s unable to claim interrupt %d,"
1190                        " running polled\n",
1191                        DEVICE_NAME, info->irq);
1192                 info->irq = 0;
1193         } else {
1194                 info->irq_cleanup = std_irq_cleanup;
1195                 printk("  Using irq %d\n", info->irq);
1196         }
1197
1198         return rv;
1199 }
1200
1201 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1202 {
1203         unsigned int addr = io->addr_data;
1204
1205         return inb(addr + (offset * io->regspacing));
1206 }
1207
1208 static void port_outb(struct si_sm_io *io, unsigned int offset,
1209                       unsigned char b)
1210 {
1211         unsigned int addr = io->addr_data;
1212
1213         outb(b, addr + (offset * io->regspacing));
1214 }
1215
1216 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1217 {
1218         unsigned int addr = io->addr_data;
1219
1220         return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1221 }
1222
1223 static void port_outw(struct si_sm_io *io, unsigned int offset,
1224                       unsigned char b)
1225 {
1226         unsigned int addr = io->addr_data;
1227
1228         outw(b << io->regshift, addr + (offset * io->regspacing));
1229 }
1230
1231 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1232 {
1233         unsigned int addr = io->addr_data;
1234
1235         return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1236 }
1237
1238 static void port_outl(struct si_sm_io *io, unsigned int offset,
1239                       unsigned char b)
1240 {
1241         unsigned int addr = io->addr_data;
1242
1243         outl(b << io->regshift, addr+(offset * io->regspacing));
1244 }
1245
1246 static void port_cleanup(struct smi_info *info)
1247 {
1248         unsigned int addr = info->io.addr_data;
1249         int          idx;
1250
1251         if (addr) {
1252                 for (idx = 0; idx < info->io_size; idx++) {
1253                         release_region(addr + idx * info->io.regspacing,
1254                                        info->io.regsize);
1255                 }
1256         }
1257 }
1258
1259 static int port_setup(struct smi_info *info)
1260 {
1261         unsigned int addr = info->io.addr_data;
1262         int          idx;
1263
1264         if (!addr)
1265                 return -ENODEV;
1266
1267         info->io_cleanup = port_cleanup;
1268
1269         /* Figure out the actual inb/inw/inl/etc routine to use based
1270            upon the register size. */
1271         switch (info->io.regsize) {
1272         case 1:
1273                 info->io.inputb = port_inb;
1274                 info->io.outputb = port_outb;
1275                 break;
1276         case 2:
1277                 info->io.inputb = port_inw;
1278                 info->io.outputb = port_outw;
1279                 break;
1280         case 4:
1281                 info->io.inputb = port_inl;
1282                 info->io.outputb = port_outl;
1283                 break;
1284         default:
1285                 printk("ipmi_si: Invalid register size: %d\n",
1286                        info->io.regsize);
1287                 return -EINVAL;
1288         }
1289
1290         /* Some BIOSes reserve disjoint I/O regions in their ACPI
1291          * tables.  This causes problems when trying to register the
1292          * entire I/O region.  Therefore we must register each I/O
1293          * port separately.
1294          */
1295         for (idx = 0; idx < info->io_size; idx++) {
1296                 if (request_region(addr + idx * info->io.regspacing,
1297                                    info->io.regsize, DEVICE_NAME) == NULL) {
1298                         /* Undo allocations */
1299                         while (idx--) {
1300                                 release_region(addr + idx * info->io.regspacing,
1301                                                info->io.regsize);
1302                         }
1303                         return -EIO;
1304                 }
1305         }
1306         return 0;
1307 }
1308
1309 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1310 {
1311         return readb((io->addr)+(offset * io->regspacing));
1312 }
1313
1314 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1315                      unsigned char b)
1316 {
1317         writeb(b, (io->addr)+(offset * io->regspacing));
1318 }
1319
1320 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1321 {
1322         return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1323                 & 0xff;
1324 }
1325
1326 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1327                      unsigned char b)
1328 {
1329         writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
1330 }
1331
1332 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1333 {
1334         return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1335                 & 0xff;
1336 }
1337
1338 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1339                      unsigned char b)
1340 {
1341         writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1342 }
1343
1344 #ifdef readq
1345 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1346 {
1347         return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
1348                 & 0xff;
1349 }
1350
1351 static void mem_outq(struct si_sm_io *io, unsigned int offset,
1352                      unsigned char b)
1353 {
1354         writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
1355 }
1356 #endif
1357
1358 static void mem_cleanup(struct smi_info *info)
1359 {
1360         unsigned long addr = info->io.addr_data;
1361         int           mapsize;
1362
1363         if (info->io.addr) {
1364                 iounmap(info->io.addr);
1365
1366                 mapsize = ((info->io_size * info->io.regspacing)
1367                            - (info->io.regspacing - info->io.regsize));
1368
1369                 release_mem_region(addr, mapsize);
1370         }
1371 }
1372
1373 static int mem_setup(struct smi_info *info)
1374 {
1375         unsigned long addr = info->io.addr_data;
1376         int           mapsize;
1377
1378         if (!addr)
1379                 return -ENODEV;
1380
1381         info->io_cleanup = mem_cleanup;
1382
1383         /* Figure out the actual readb/readw/readl/etc routine to use based
1384            upon the register size. */
1385         switch (info->io.regsize) {
1386         case 1:
1387                 info->io.inputb = intf_mem_inb;
1388                 info->io.outputb = intf_mem_outb;
1389                 break;
1390         case 2:
1391                 info->io.inputb = intf_mem_inw;
1392                 info->io.outputb = intf_mem_outw;
1393                 break;
1394         case 4:
1395                 info->io.inputb = intf_mem_inl;
1396                 info->io.outputb = intf_mem_outl;
1397                 break;
1398 #ifdef readq
1399         case 8:
1400                 info->io.inputb = mem_inq;
1401                 info->io.outputb = mem_outq;
1402                 break;
1403 #endif
1404         default:
1405                 printk("ipmi_si: Invalid register size: %d\n",
1406                        info->io.regsize);
1407                 return -EINVAL;
1408         }
1409
1410         /* Calculate the total amount of memory to claim.  This is an
1411          * unusual looking calculation, but it avoids claiming any
1412          * more memory than it has to.  It will claim everything
1413          * between the first address to the end of the last full
1414          * register. */
1415         mapsize = ((info->io_size * info->io.regspacing)
1416                    - (info->io.regspacing - info->io.regsize));
1417
1418         if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1419                 return -EIO;
1420
1421         info->io.addr = ioremap(addr, mapsize);
1422         if (info->io.addr == NULL) {
1423                 release_mem_region(addr, mapsize);
1424                 return -EIO;
1425         }
1426         return 0;
1427 }
1428
1429 /*
1430  * Parms come in as <op1>[:op2[:op3...]].  ops are:
1431  *   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
1432  * Options are:
1433  *   rsp=<regspacing>
1434  *   rsi=<regsize>
1435  *   rsh=<regshift>
1436  *   irq=<irq>
1437  *   ipmb=<ipmb addr>
1438  */
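     /* For example (assuming the usual sysfs location for module
      * parameters):
      *   echo "add,kcs,i/o,0xca2,rsp=1" > /sys/module/ipmi_si/parameters/hotmod
      * would register a KCS interface at I/O port 0xca2 with a register
      * spacing of 1. */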
1439 enum hotmod_op { HM_ADD, HM_REMOVE };
1440 struct hotmod_vals {
1441         char *name;
1442         int  val;
1443 };
1444 static struct hotmod_vals hotmod_ops[] = {
1445         { "add",        HM_ADD },
1446         { "remove",     HM_REMOVE },
1447         { NULL }
1448 };
1449 static struct hotmod_vals hotmod_si[] = {
1450         { "kcs",        SI_KCS },
1451         { "smic",       SI_SMIC },
1452         { "bt",         SI_BT },
1453         { NULL }
1454 };
1455 static struct hotmod_vals hotmod_as[] = {
1456         { "mem",        IPMI_MEM_ADDR_SPACE },
1457         { "i/o",        IPMI_IO_ADDR_SPACE },
1458         { NULL }
1459 };
1460
1461 static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1462 {
1463         char *s;
1464         int  i;
1465
1466         s = strchr(*curr, ',');
1467         if (!s) {
1468                 printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
1469                 return -EINVAL;
1470         }
1471         *s = '\0';
1472         s++;
1473         for (i = 0; v[i].name; i++) {
1474                 if (strcmp(*curr, v[i].name) == 0) {
1475                         *val = v[i].val;
1476                         *curr = s;
1477                         return 0;
1478                 }
1479         }
1480
1481         printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
1482         return -EINVAL;
1483 }
1484
1485 static int check_hotmod_int_op(const char *curr, const char *option,
1486                                const char *name, int *val)
1487 {
1488         char *n;
1489
1490         if (strcmp(curr, name) == 0) {
1491                 if (!option) {
1492                         printk(KERN_WARNING PFX
1493                                "No option given for '%s'\n",
1494                                curr);
1495                         return -EINVAL;
1496                 }
1497                 *val = simple_strtoul(option, &n, 0);
1498                 if ((*n != '\0') || (*option == '\0')) {
1499                         printk(KERN_WARNING PFX
1500                                "Bad option given for '%s'\n",
1501                                curr);
1502                         return -EINVAL;
1503                 }
1504                 return 1;
1505         }
1506         return 0;
1507 }
1508
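     /* Parse the hotmod string: operations are separated by ':', the
      * fields of an operation by ',', and option values follow '='.
      * An "add" builds an smi_info and hands it to try_smi_init(); a
      * "remove" looks up any registered interface with a matching type
      * and address and tears it down with cleanup_one_si(). */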
1509 static int hotmod_handler(const char *val, struct kernel_param *kp)
1510 {
1511         char *str = kstrdup(val, GFP_KERNEL);
1512         int  rv;
1513         char *next, *curr, *s, *n, *o;
1514         enum hotmod_op op;
1515         enum si_type si_type;
1516         int  addr_space;
1517         unsigned long addr;
1518         int regspacing;
1519         int regsize;
1520         int regshift;
1521         int irq;
1522         int ipmb;
1523         int ival;
1524         int len;
1525         struct smi_info *info;
1526
1527         if (!str)
1528                 return -ENOMEM;
1529
1530         /* Kill any trailing spaces, as we can get a "\n" from echo. */
1531         len = strlen(str);
1532         ival = len - 1;
1533         while ((ival >= 0) && isspace(str[ival])) {
1534                 str[ival] = '\0';
1535                 ival--;
1536         }
1537
1538         for (curr = str; curr; curr = next) {
1539                 regspacing = 1;
1540                 regsize = 1;
1541                 regshift = 0;
1542                 irq = 0;
1543                 ipmb = 0x20;
1544
1545                 next = strchr(curr, ':');
1546                 if (next) {
1547                         *next = '\0';
1548                         next++;
1549                 }
1550
1551                 rv = parse_str(hotmod_ops, &ival, "operation", &curr);
1552                 if (rv)
1553                         break;
1554                 op = ival;
1555
1556                 rv = parse_str(hotmod_si, &ival, "interface type", &curr);
1557                 if (rv)
1558                         break;
1559                 si_type = ival;
1560
1561                 rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
1562                 if (rv)
1563                         break;
1564
1565                 s = strchr(curr, ',');
1566                 if (s) {
1567                         *s = '\0';
1568                         s++;
1569                 }
1570                 addr = simple_strtoul(curr, &n, 0);
1571                 if ((*n != '\0') || (*curr == '\0')) {
1572                         printk(KERN_WARNING PFX "Invalid hotmod address"
1573                                " '%s'\n", curr);
1574                         break;
1575                 }
1576
1577                 while (s) {
1578                         curr = s;
1579                         s = strchr(curr, ',');
1580                         if (s) {
1581                                 *s = '\0';
1582                                 s++;
1583                         }
1584                         o = strchr(curr, '=');
1585                         if (o) {
1586                                 *o = '\0';
1587                                 o++;
1588                         }
1589                         rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
1590                         if (rv < 0)
1591                                 goto out;
1592                         else if (rv)
1593                                 continue;
1594                         rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
1595                         if (rv < 0)
1596                                 goto out;
1597                         else if (rv)
1598                                 continue;
1599                         rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
1600                         if (rv < 0)
1601                                 goto out;
1602                         else if (rv)
1603                                 continue;
1604                         rv = check_hotmod_int_op(curr, o, "irq", &irq);
1605                         if (rv < 0)
1606                                 goto out;
1607                         else if (rv)
1608                                 continue;
1609                         rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
1610                         if (rv < 0)
1611                                 goto out;
1612                         else if (rv)
1613                                 continue;
1614
1615                         rv = -EINVAL;
1616                         printk(KERN_WARNING PFX
1617                                "Invalid hotmod option '%s'\n",
1618                                curr);
1619                         goto out;
1620                 }
1621
1622                 if (op == HM_ADD) {
1623                         info = kzalloc(sizeof(*info), GFP_KERNEL);
1624                         if (!info) {
1625                                 rv = -ENOMEM;
1626                                 goto out;
1627                         }
1628
1629                         info->addr_source = "hotmod";
1630                         info->si_type = si_type;
1631                         info->io.addr_data = addr;
1632                         info->io.addr_type = addr_space;
1633                         if (addr_space == IPMI_MEM_ADDR_SPACE)
1634                                 info->io_setup = mem_setup;
1635                         else
1636                                 info->io_setup = port_setup;
1637
1638                         info->io.addr = NULL;
1639                         info->io.regspacing = regspacing;
1640                         if (!info->io.regspacing)
1641                                 info->io.regspacing = DEFAULT_REGSPACING;
1642                         info->io.regsize = regsize;
1643                         if (!info->io.regsize)
1644                                 info->io.regsize = DEFAULT_REGSIZE;
1645                         info->io.regshift = regshift;
1646                         info->irq = irq;
1647                         if (info->irq)
1648                                 info->irq_setup = std_irq_setup;
1649                         info->slave_addr = ipmb;
1650
1651                         try_smi_init(info);
1652                 } else {
1653                         /* remove */
1654                         struct smi_info *e, *tmp_e;
1655
1656                         mutex_lock(&smi_infos_lock);
1657                         list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
1658                                 if (e->io.addr_type != addr_space)
1659                                         continue;
1660                                 if (e->si_type != si_type)
1661                                         continue;
1662                                 if (e->io.addr_data == addr)
1663                                         cleanup_one_si(e);
1664                         }
1665                         mutex_unlock(&smi_infos_lock);
1666                 }
1667         }
1668         rv = len;
1669  out:
1670         kfree(str);
1671         return rv;
1672 }
1673
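     /* Build interfaces from the hardcoded module parameters (ports,
      * addrs, si_type, regspacings, regsizes, regshifts, irqs), one for
      * each index that has a port or memory address set. */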
1674 static __devinit void hardcode_find_bmc(void)
1675 {
1676         int             i;
1677         struct smi_info *info;
1678
1679         for (i = 0; i < SI_MAX_PARMS; i++) {
1680                 if (!ports[i] && !addrs[i])
1681                         continue;
1682
1683                 info = kzalloc(sizeof(*info), GFP_KERNEL);
1684                 if (!info)
1685                         return;
1686
1687                 info->addr_source = "hardcoded";
1688
1689                 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1690                         info->si_type = SI_KCS;
1691                 } else if (strcmp(si_type[i], "smic") == 0) {
1692                         info->si_type = SI_SMIC;
1693                 } else if (strcmp(si_type[i], "bt") == 0) {
1694                         info->si_type = SI_BT;
1695                 } else {
1696                         printk(KERN_WARNING
1697                                "ipmi_si: Interface type specified "
1698                                "for interface %d, was invalid: %s\n",
1699                                i, si_type[i]);
1700                         kfree(info);
1701                         continue;
1702                 }
1703
1704                 if (ports[i]) {
1705                         /* An I/O port */
1706                         info->io_setup = port_setup;
1707                         info->io.addr_data = ports[i];
1708                         info->io.addr_type = IPMI_IO_ADDR_SPACE;
1709                 } else if (addrs[i]) {
1710                         /* A memory port */
1711                         info->io_setup = mem_setup;
1712                         info->io.addr_data = addrs[i];
1713                         info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1714                 } else {
1715                         printk(KERN_WARNING
1716                                "ipmi_si: Interface type specified "
1717                                "for interface %d, "
1718                                "but port and address were not set or "
1719                                "set to zero.\n", i);
1720                         kfree(info);
1721                         continue;
1722                 }
1723
1724                 info->io.addr = NULL;
1725                 info->io.regspacing = regspacings[i];
1726                 if (!info->io.regspacing)
1727                         info->io.regspacing = DEFAULT_REGSPACING;
1728                 info->io.regsize = regsizes[i];
1729                 if (!info->io.regsize)
1730                         info->io.regsize = DEFAULT_REGSIZE;
1731                 info->io.regshift = regshifts[i];
1732                 info->irq = irqs[i];
1733                 if (info->irq)
1734                         info->irq_setup = std_irq_setup;
1735
1736                 try_smi_init(info);
1737         }
1738 }
1739
1740 #ifdef CONFIG_ACPI
1741
1742 #include <linux/acpi.h>
1743
1744 /* Once we get an ACPI failure, we don't try any more, because we go
1745    through the tables sequentially.  Once we don't find a table, there
1746    are no more. */
1747 static int acpi_failure;
1748
1749 /* For GPE-type interrupts. */
1750 static u32 ipmi_acpi_gpe(void *context)
1751 {
1752         struct smi_info *smi_info = context;
1753         unsigned long   flags;
1754 #ifdef DEBUG_TIMING
1755         struct timeval t;
1756 #endif
1757
1758         spin_lock_irqsave(&(smi_info->si_lock), flags);
1759
1760         spin_lock(&smi_info->count_lock);
1761         smi_info->interrupts++;
1762         spin_unlock(&smi_info->count_lock);
1763
1764 #ifdef DEBUG_TIMING
1765         do_gettimeofday(&t);
1766         printk("**ACPI_GPE: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
1767 #endif
1768         smi_event_handler(smi_info, 0);
1769         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1770
1771         return ACPI_INTERRUPT_HANDLED;
1772 }
1773
1774 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1775 {
1776         if (!info->irq)
1777                 return;
1778
1779         acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1780 }
1781
1782 static int acpi_gpe_irq_setup(struct smi_info *info)
1783 {
1784         acpi_status status;
1785
1786         if (!info->irq)
1787                 return 0;
1788
1789         /* FIXME - is level triggered right? */
1790         status = acpi_install_gpe_handler(NULL,
1791                                           info->irq,
1792                                           ACPI_GPE_LEVEL_TRIGGERED,
1793                                           &ipmi_acpi_gpe,
1794                                           info);
1795         if (status != AE_OK) {
1796                 printk(KERN_WARNING
1797                        "ipmi_si: %s unable to claim ACPI GPE %d,"
1798                        " running polled\n",
1799                        DEVICE_NAME, info->irq);
1800                 info->irq = 0;
1801                 return -EINVAL;
1802         } else {
1803                 info->irq_cleanup = acpi_gpe_irq_cleanup;
1804                 printk("  Using ACPI GPE %d\n", info->irq);
1805                 return 0;
1806         }
1807 }
1808
1809 /*
1810  * Defined at
1811  * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1812  */
1813 struct SPMITable {
1814         s8      Signature[4];
1815         u32     Length;
1816         u8      Revision;
1817         u8      Checksum;
1818         s8      OEMID[6];
1819         s8      OEMTableID[8];
1820         s8      OEMRevision[4];
1821         s8      CreatorID[4];
1822         s8      CreatorRevision[4];
1823         u8      InterfaceType;
1824         u8      IPMIlegacy;
1825         s16     SpecificationRevision;
1826
1827         /*
1828          * Bit 0 - SCI interrupt supported
1829          * Bit 1 - I/O APIC/SAPIC
1830          */
1831         u8      InterruptType;
1832
1833         /* If bit 0 of InterruptType is set, then this is the SCI
1834            interrupt in the GPEx_STS register. */
1835         u8      GPE;
1836
1837         s16     Reserved;
1838
1839         /* If bit 1 of InterruptType is set, then this is the I/O
1840            APIC/SAPIC interrupt. */
1841         u32     GlobalSystemInterrupt;
1842
1843         /* The actual register address. */
1844         struct acpi_generic_address addr;
1845
1846         u8      UID[4];
1847
1848         s8      spmi_id[1]; /* A '\0' terminated array starts here. */
1849 };
1850
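     /* Turn one SPMI table into an smi_info: the interface type, the
      * GPE or APIC/SAPIC interrupt, the register width/offset and the
      * register address space are all taken straight from the table. */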
1851 static __devinit int try_init_acpi(struct SPMITable *spmi)
1852 {
1853         struct smi_info  *info;
1854         u8               addr_space;
1855
1856         if (spmi->IPMIlegacy != 1) {
1857                 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1858                 return -ENODEV;
1859         }
1860
1861         if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1862                 addr_space = IPMI_MEM_ADDR_SPACE;
1863         else
1864                 addr_space = IPMI_IO_ADDR_SPACE;
1865
1866         info = kzalloc(sizeof(*info), GFP_KERNEL);
1867         if (!info) {
1868                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1869                 return -ENOMEM;
1870         }
1871
1872         info->addr_source = "ACPI";
1873
1874         /* Figure out the interface type. */
1875         switch (spmi->InterfaceType)
1876         {
1877         case 1: /* KCS */
1878                 info->si_type = SI_KCS;
1879                 break;
1880         case 2: /* SMIC */
1881                 info->si_type = SI_SMIC;
1882                 break;
1883         case 3: /* BT */
1884                 info->si_type = SI_BT;
1885                 break;
1886         default:
1887                 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1888                         spmi->InterfaceType);
1889                 kfree(info);
1890                 return -EIO;
1891         }
1892
1893         if (spmi->InterruptType & 1) {
1894                 /* We've got a GPE interrupt. */
1895                 info->irq = spmi->GPE;
1896                 info->irq_setup = acpi_gpe_irq_setup;
1897         } else if (spmi->InterruptType & 2) {
1898                 /* We've got an APIC/SAPIC interrupt. */
1899                 info->irq = spmi->GlobalSystemInterrupt;
1900                 info->irq_setup = std_irq_setup;
1901         } else {
1902                 /* Use the default interrupt setting. */
1903                 info->irq = 0;
1904                 info->irq_setup = NULL;
1905         }
1906
1907         if (spmi->addr.bit_width) {
1908                 /* A (hopefully) properly formed register bit width. */
1909                 info->io.regspacing = spmi->addr.bit_width / 8;
1910         } else {
1911                 info->io.regspacing = DEFAULT_REGSPACING;
1912         }
1913         info->io.regsize = info->io.regspacing;
1914         info->io.regshift = spmi->addr.bit_offset;
1915
1916         if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1917                 info->io_setup = mem_setup;
1918                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1919         } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1920                 info->io_setup = port_setup;
1921                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1922         } else {
1923                 kfree(info);
1924                 printk(KERN_WARNING "ipmi_si: Unknown ACPI I/O Address type\n");
1925                 return -EIO;
1926         }
1927         info->io.addr_data = spmi->addr.address;
1928
1929         try_smi_init(info);
1930
1931         return 0;
1932 }
1933
1934 static __devinit void acpi_find_bmc(void)
1935 {
1936         acpi_status      status;
1937         struct SPMITable *spmi;
1938         int              i;
1939
1940         if (acpi_disabled)
1941                 return;
1942
1943         if (acpi_failure)
1944                 return;
1945
1946         for (i = 0; ; i++) {
1947                 status = acpi_get_table(ACPI_SIG_SPMI, i+1,
1948                                         (struct acpi_table_header **)&spmi);
1949                 if (status != AE_OK)
1950                         return;
1951
1952                 try_init_acpi(spmi);
1953         }
1954 }
1955 #endif
1956
1957 #ifdef CONFIG_DMI
1958 struct dmi_ipmi_data
1959 {
1960         u8              type;
1961         u8              addr_space;
1962         unsigned long   base_addr;
1963         u8              irq;
1964         u8              offset;
1965         u8              slave_addr;
1966 };
1967
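     /* Decode an SMBIOS IPMI Device Information record (DMI type 38):
      * byte 4 holds the interface type, byte 6 the slave address,
      * bytes 8 and up the base address; for newer records byte 0x10
      * encodes the register spacing and the address LSB, and byte 0x11
      * the IRQ. */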
1968 static int __devinit decode_dmi(struct dmi_header *dm,
1969                                 struct dmi_ipmi_data *dmi)
1970 {
1971         u8              *data = (u8 *)dm;
1972         unsigned long   base_addr;
1973         u8              reg_spacing;
1974         u8              len = dm->length;
1975
1976         dmi->type = data[4];
1977
1978         memcpy(&base_addr, data+8, sizeof(unsigned long));
1979         if (len >= 0x11) {
1980                 if (base_addr & 1) {
1981                         /* I/O */
1982                         base_addr &= 0xFFFE;
1983                         dmi->addr_space = IPMI_IO_ADDR_SPACE;
1984                 }
1985                 else {
1986                         /* Memory */
1987                         dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1988                 }
1989                 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1990                    is odd. */
1991                 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1992
1993                 dmi->irq = data[0x11];
1994
1995                 /* The top two bits of byte 0x10 hold the register spacing. */
1996                 reg_spacing = (data[0x10] & 0xC0) >> 6;
1997                 switch(reg_spacing){
1998                 case 0x00: /* Byte boundaries */
1999                     dmi->offset = 1;
2000                     break;
2001                 case 0x01: /* 32-bit boundaries */
2002                     dmi->offset = 4;
2003                     break;
2004                 case 0x02: /* 16-byte boundaries */
2005                     dmi->offset = 16;
2006                     break;
2007                 default:
2008                     /* Some other interface, just ignore it. */
2009                     return -EIO;
2010                 }
2011         } else {
2012                 /* Old DMI spec. */
2013                 /* Note that technically, the lower bit of the base
2014                  * address should be 1 if the address is I/O and 0 if
2015                  * the address is in memory.  So many systems get that
2016                  * wrong (and all that I have seen are I/O) so we just
2017                  * ignore that bit and assume I/O.  Systems that use
2018                  * memory should use the newer spec, anyway. */
2019                 dmi->base_addr = base_addr & 0xfffe;
2020                 dmi->addr_space = IPMI_IO_ADDR_SPACE;
2021                 dmi->offset = 1;
2022         }
2023
2024         dmi->slave_addr = data[6];
2025
2026         return 0;
2027 }
2028
2029 static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2030 {
2031         struct smi_info *info;
2032
2033         info = kzalloc(sizeof(*info), GFP_KERNEL);
2034         if (!info) {
2035                 printk(KERN_ERR
2036                        "ipmi_si: Could not allocate SI data\n");
2037                 return;
2038         }
2039
2040         info->addr_source = "SMBIOS";
2041
2042         switch (ipmi_data->type) {
2043         case 0x01: /* KCS */
2044                 info->si_type = SI_KCS;
2045                 break;
2046         case 0x02: /* SMIC */
2047                 info->si_type = SI_SMIC;
2048                 break;
2049         case 0x03: /* BT */
2050                 info->si_type = SI_BT;
2051                 break;
2052         default:
2053                 kfree(info);
                     return;
2054         }
2055
2056         switch (ipmi_data->addr_space) {
2057         case IPMI_MEM_ADDR_SPACE:
2058                 info->io_setup = mem_setup;
2059                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2060                 break;
2061
2062         case IPMI_IO_ADDR_SPACE:
2063                 info->io_setup = port_setup;
2064                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2065                 break;
2066
2067         default:
2068                 kfree(info);
2069                 printk(KERN_WARNING
2070                        "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
2071                        ipmi_data->addr_space);
2072                 return;
2073         }
2074         info->io.addr_data = ipmi_data->base_addr;
2075
2076         info->io.regspacing = ipmi_data->offset;
2077         if (!info->io.regspacing)
2078                 info->io.regspacing = DEFAULT_REGSPACING;
2079         info->io.regsize = DEFAULT_REGSIZE;
2080         info->io.regshift = 0;
2081
2082         info->slave_addr = ipmi_data->slave_addr;
2083
2084         info->irq = ipmi_data->irq;
2085         if (info->irq)
2086                 info->irq_setup = std_irq_setup;
2087
2088         try_smi_init(info);
2089 }
2090
2091 static void __devinit dmi_find_bmc(void)
2092 {
2093         struct dmi_device    *dev = NULL;
2094         struct dmi_ipmi_data data;
2095         int                  rv;
2096
2097         while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
2098                 memset(&data, 0, sizeof(data));
2099                 rv = decode_dmi((struct dmi_header *) dev->device_data, &data);
2100                 if (!rv)
2101                         try_init_dmi(&data);
2102         }
2103 }
2104 #endif /* CONFIG_DMI */
2105
2106 #ifdef CONFIG_PCI
2107
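     /* PCI class code 0x0c07 identifies an IPMI system interface; the
      * low byte (the programming interface) selects SMIC, KCS or BT,
      * which is what the TYPE defines below decode. */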
2108 #define PCI_ERMC_CLASSCODE              0x0C0700
2109 #define PCI_ERMC_CLASSCODE_MASK         0xffffff00
2110 #define PCI_ERMC_CLASSCODE_TYPE_MASK    0xff
2111 #define PCI_ERMC_CLASSCODE_TYPE_SMIC    0x00
2112 #define PCI_ERMC_CLASSCODE_TYPE_KCS     0x01
2113 #define PCI_ERMC_CLASSCODE_TYPE_BT      0x02
2114
2115 #define PCI_HP_VENDOR_ID    0x103C
2116 #define PCI_MMC_DEVICE_ID   0x121A
2117 #define PCI_MMC_ADDR_CW     0x10
2118
2119 static void ipmi_pci_cleanup(struct smi_info *info)
2120 {
2121         struct pci_dev *pdev = info->addr_source_data;
2122
2123         pci_disable_device(pdev);
2124 }
2125
2126 static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2127                                     const struct pci_device_id *ent)
2128 {
2129         int rv;
2130         int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
2131         struct smi_info *info;
2132         int first_reg_offset = 0;
2133
2134         info = kzalloc(sizeof(*info), GFP_KERNEL);
2135         if (!info)
2136                 return -ENOMEM;
2137
2138         info->addr_source = "PCI";
2139
2140         switch (class_type) {
2141         case PCI_ERMC_CLASSCODE_TYPE_SMIC:
2142                 info->si_type = SI_SMIC;
2143                 break;
2144
2145         case PCI_ERMC_CLASSCODE_TYPE_KCS:
2146                 info->si_type = SI_KCS;
2147                 break;
2148
2149         case PCI_ERMC_CLASSCODE_TYPE_BT:
2150                 info->si_type = SI_BT;
2151                 break;
2152
2153         default:
2154                 kfree(info);
2155                 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
2156                        pci_name(pdev), class_type);
2157                 return -ENOMEM;
2158         }
2159
2160         rv = pci_enable_device(pdev);
2161         if (rv) {
2162                 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
2163                        pci_name(pdev));
2164                 kfree(info);
2165                 return rv;
2166         }
2167
2168         info->addr_source_cleanup = ipmi_pci_cleanup;
2169         info->addr_source_data = pdev;
2170
2171         if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
2172                 first_reg_offset = 1;
2173
2174         if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
2175                 info->io_setup = port_setup;
2176                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2177         } else {
2178                 info->io_setup = mem_setup;
2179                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2180         }
2181         info->io.addr_data = pci_resource_start(pdev, 0);
2182
2183         info->io.regspacing = DEFAULT_REGSPACING;
2184         info->io.regsize = DEFAULT_REGSIZE;
2185         info->io.regshift = 0;
2186
2187         info->irq = pdev->irq;
2188         if (info->irq)
2189                 info->irq_setup = std_irq_setup;
2190
2191         info->dev = &pdev->dev;
2192         pci_set_drvdata(pdev, info);
2193
2194         return try_smi_init(info);
2195 }
2196
2197 static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
2198 {
2199         struct smi_info *info = pci_get_drvdata(pdev);
2200         cleanup_one_si(info);
2201 }
2202
2203 #ifdef CONFIG_PM
2204 static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2205 {
2206         return 0;
2207 }
2208
2209 static int ipmi_pci_resume(struct pci_dev *pdev)
2210 {
2211         return 0;
2212 }
2213 #endif
2214
2215 static struct pci_device_id ipmi_pci_devices[] = {
2216         { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
2217         { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
             { 0, }  /* terminating entry */
2218 };
2219 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2220
2221 static struct pci_driver ipmi_pci_driver = {
2222         .name =         DEVICE_NAME,
2223         .id_table =     ipmi_pci_devices,
2224         .probe =        ipmi_pci_probe,
2225         .remove =       __devexit_p(ipmi_pci_remove),
2226 #ifdef CONFIG_PM
2227         .suspend =      ipmi_pci_suspend,
2228         .resume =       ipmi_pci_resume,
2229 #endif
2230 };
2231 #endif /* CONFIG_PCI */
2232
2233
2234 #ifdef CONFIG_PPC_OF
2235 static int __devinit ipmi_of_probe(struct of_device *dev,
2236                          const struct of_device_id *match)
2237 {
2238         struct smi_info *info;
2239         struct resource resource;
2240         const int *regsize, *regspacing, *regshift;
2241         struct device_node *np = dev->node;
2242         int ret;
2243         int proplen;
2244
2245         dev_info(&dev->dev, PFX "probing via device tree\n");
2246
2247         ret = of_address_to_resource(np, 0, &resource);
2248         if (ret) {
2249                 dev_warn(&dev->dev, PFX "invalid address from OF\n");
2250                 return ret;
2251         }
2252
2253         regsize = get_property(np, "reg-size", &proplen);
2254         if (regsize && proplen != 4) {
2255                 dev_warn(&dev->dev, PFX "invalid regsize from OF\n");
2256                 return -EINVAL;
2257         }
2258
2259         regspacing = get_property(np, "reg-spacing", &proplen);
2260         if (regspacing && proplen != 4) {
2261                 dev_warn(&dev->dev, PFX "invalid regspacing from OF\n");
2262                 return -EINVAL;
2263         }
2264
2265         regshift = get_property(np, "reg-shift", &proplen);
2266         if (regshift && proplen != 4) {
2267                 dev_warn(&dev->dev, PFX "invalid regshift from OF\n");
2268                 return -EINVAL;
2269         }
2270
2271         info = kzalloc(sizeof(*info), GFP_KERNEL);
2272
2273         if (!info) {
2274                 dev_err(&dev->dev,
2275                         PFX "could not allocate memory for OF probe\n");
2276                 return -ENOMEM;
2277         }
2278
2279         info->si_type           = (enum si_type) match->data;
2280         info->addr_source       = "device-tree";
2281         info->io_setup          = mem_setup;
2282         info->irq_setup         = std_irq_setup;
2283
2284         info->io.addr_type      = IPMI_MEM_ADDR_SPACE;
2285         info->io.addr_data      = resource.start;
2286
2287         info->io.regsize        = regsize ? *regsize : DEFAULT_REGSIZE;
2288         info->io.regspacing     = regspacing ? *regspacing : DEFAULT_REGSPACING;
2289         info->io.regshift       = regshift ? *regshift : 0;
2290
2291         info->irq               = irq_of_parse_and_map(dev->node, 0);
2292         info->dev               = &dev->dev;
2293
2294         dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n",
2295                 info->io.addr_data, info->io.regsize, info->io.regspacing,
2296                 info->irq);
2297
2298         dev->dev.driver_data = (void*) info;
2299
2300         return try_smi_init(info);
2301 }
2302
2303 static int __devexit ipmi_of_remove(struct of_device *dev)
2304 {
2305         cleanup_one_si(dev->dev.driver_data);
2306         return 0;
2307 }
2308
2309 static struct of_device_id ipmi_match[] =
2310 {
2311         { .type = "ipmi", .compatible = "ipmi-kcs",  .data = (void *)(unsigned long) SI_KCS },
2312         { .type = "ipmi", .compatible = "ipmi-smic", .data = (void *)(unsigned long) SI_SMIC },
2313         { .type = "ipmi", .compatible = "ipmi-bt",   .data = (void *)(unsigned long) SI_BT },
2314         {},
2315 };
2316
2317 static struct of_platform_driver ipmi_of_platform_driver =
2318 {
2319         .name           = "ipmi",
2320         .match_table    = ipmi_match,
2321         .probe          = ipmi_of_probe,
2322         .remove         = __devexit_p(ipmi_of_remove),
2323 };
2324 #endif /* CONFIG_PPC_OF */
2325
2326
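     /* Probe for a BMC by issuing a Get Device ID command and driving
      * the state machine by polling; interrupts have not been claimed
      * yet at this point.  Failure normally means there is nothing
      * IPMI-like at this address. */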
2327 static int try_get_dev_id(struct smi_info *smi_info)
2328 {
2329         unsigned char         msg[2];
2330         unsigned char         *resp;
2331         unsigned long         resp_len;
2332         enum si_sm_result     smi_result;
2333         int                   rv = 0;
2334
2335         resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
2336         if (!resp)
2337                 return -ENOMEM;
2338
2339         /* Do a Get Device ID command, since it comes back with some
2340            useful info. */
2341         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2342         msg[1] = IPMI_GET_DEVICE_ID_CMD;
2343         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2344
2345         smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
2346         for (;;)
2347         {
2348                 if (smi_result == SI_SM_CALL_WITH_DELAY ||
2349                     smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
2350                         schedule_timeout_uninterruptible(1);
2351                         smi_result = smi_info->handlers->event(
2352                                 smi_info->si_sm, 100);
2353                 }
2354                 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
2355                 {
2356                         smi_result = smi_info->handlers->event(
2357                                 smi_info->si_sm, 0);
2358                 }
2359                 else
2360                         break;
2361         }
2362         if (smi_result == SI_SM_HOSED) {
2363                 /* We couldn't get the state machine to run, so whatever's at
2364                    the port is probably not an IPMI SMI interface. */
2365                 rv = -ENODEV;
2366                 goto out;
2367         }
2368
2369         /* Otherwise, we got some data. */
2370         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2371                                                   resp, IPMI_MAX_MSG_LENGTH);
2372         if (resp_len < 14) {
2373                 /* That's odd, it should be longer. */
2374                 rv = -EINVAL;
2375                 goto out;
2376         }
2377
2378         if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
2379                 /* That's odd, it shouldn't be able to fail. */
2380                 rv = -EINVAL;
2381                 goto out;
2382         }
2383
2384         /* Record info from the get device id, in case we need it. */
2385         ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id);
2386
2387  out:
2388         kfree(resp);
2389         return rv;
2390 }
2391
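     /* /proc read handlers registered by try_smi_init() for the "type",
      * "si_stats" and "params" entries of each interface. */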
2392 static int type_file_read_proc(char *page, char **start, off_t off,
2393                                int count, int *eof, void *data)
2394 {
2395         struct smi_info *smi = data;
2396
2397         return sprintf(page, "%s\n", si_to_str[smi->si_type]);
2398 }
2399
2400 static int stat_file_read_proc(char *page, char **start, off_t off,
2401                                int count, int *eof, void *data)
2402 {
2403         char            *out = (char *) page;
2404         struct smi_info *smi = data;
2405
2406         out += sprintf(out, "interrupts_enabled:    %d\n",
2407                        smi->irq && !smi->interrupt_disabled);
2408         out += sprintf(out, "short_timeouts:        %ld\n",
2409                        smi->short_timeouts);
2410         out += sprintf(out, "long_timeouts:         %ld\n",
2411                        smi->long_timeouts);
2412         out += sprintf(out, "timeout_restarts:      %ld\n",
2413                        smi->timeout_restarts);
2414         out += sprintf(out, "idles:                 %ld\n",
2415                        smi->idles);
2416         out += sprintf(out, "interrupts:            %ld\n",
2417                        smi->interrupts);
2418         out += sprintf(out, "attentions:            %ld\n",
2419                        smi->attentions);
2420         out += sprintf(out, "flag_fetches:          %ld\n",
2421                        smi->flag_fetches);
2422         out += sprintf(out, "hosed_count:           %ld\n",
2423                        smi->hosed_count);
2424         out += sprintf(out, "complete_transactions: %ld\n",
2425                        smi->complete_transactions);
2426         out += sprintf(out, "events:                %ld\n",
2427                        smi->events);
2428         out += sprintf(out, "watchdog_pretimeouts:  %ld\n",
2429                        smi->watchdog_pretimeouts);
2430         out += sprintf(out, "incoming_messages:     %ld\n",
2431                        smi->incoming_messages);
2432
2433         return out - page;
2434 }
2435
2436 static int param_read_proc(char *page, char **start, off_t off,
2437                            int count, int *eof, void *data)
2438 {
2439         struct smi_info *smi = data;
2440
2441         return sprintf(page,
2442                        "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2443                        si_to_str[smi->si_type],
2444                        addr_space_to_str[smi->io.addr_type],
2445                        smi->io.addr_data,
2446                        smi->io.regspacing,
2447                        smi->io.regsize,
2448                        smi->io.regshift,
2449                        smi->irq,
2450                        smi->slave_addr);
2451 }
2452
2453 /*
2454  * oem_data_avail_to_receive_msg_avail
2455  * @info - smi_info structure with msg_flags set
2456  *
2457  * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2458  * Returns 1 indicating need to re-run handle_flags().
2459  */
2460 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2461 {
2462         smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2463                                 RECEIVE_MSG_AVAIL);
2464         return 1;
2465 }
2466
2467 /*
2468  * setup_dell_poweredge_oem_data_handler
2469  * @info - smi_info.device_id must be populated
2470  *
2471  * Systems that match, but have firmware version < 1.40 may assert
2472  * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2473  * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
2474  * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2475  * as RECEIVE_MSG_AVAIL instead.
2476  *
2477  * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2478  * assert the OEM[012] bits, and if it did, the driver would have to
2479  * change to handle that properly, we don't actually check for the
2480  * firmware version.
2481  * Device ID = 0x20                BMC on PowerEdge 8G servers
2482  * Device Revision = 0x80
2483  * Firmware Revision1 = 0x01       BMC version 1.40
2484  * Firmware Revision2 = 0x40       BCD encoded
2485  * IPMI Version = 0x51             IPMI 1.5
2486  * Manufacturer ID = A2 02 00      Dell IANA
2487  *
2488  * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2489  * OEM0_DATA_AVAIL and need to be treated as RECEIVE_MSG_AVAIL.
2490  *
2491  */
2492 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
2493 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2494 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2495 #define DELL_IANA_MFR_ID 0x0002a2
2496 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2497 {
2498         struct ipmi_device_id *id = &smi_info->device_id;
2499         if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2500                 if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
2501                     id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2502                     id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2503                         smi_info->oem_data_avail_handler =
2504                                 oem_data_avail_to_receive_msg_avail;
2505                 }
2506                 else if (ipmi_version_major(id) < 1 ||
2507                          (ipmi_version_major(id) == 1 &&
2508                           ipmi_version_minor(id) < 5)) {
2509                         smi_info->oem_data_avail_handler =
2510                                 oem_data_avail_to_receive_msg_avail;
2511                 }
2512         }
2513 }
2514
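     /* Build an error response in place for the current message.  The
      * response NetFN is the request NetFN plus one (data[0] holds
      * NetFN << 2 | LUN, so OR-ing in 4 bumps the NetFN), and the
      * completion code is 0xCA ("cannot return requested number of
      * bytes"). */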
2515 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2516 static void return_hosed_msg_badsize(struct smi_info *smi_info)
2517 {
2518         struct ipmi_smi_msg *msg = smi_info->curr_msg;
2519
2520         /* Make it a response. */
2521         msg->rsp[0] = msg->data[0] | 4;
2522         msg->rsp[1] = msg->data[1];
2523         msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2524         msg->rsp_size = 3;
2525         smi_info->curr_msg = NULL;
2526         deliver_recv_msg(smi_info, msg);
2527 }
2528
2529 /*
2530  * dell_poweredge_bt_xaction_handler
2531  * @info - smi_info.device_id must be populated
2532  *
2533  * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2534  * not respond to a Get SDR command if the length of the data
2535  * requested is exactly 0x3A, which leads to command timeouts and no
2536  * data returned.  This intercepts such commands, and causes userspace
2537  * callers to try again with a different-sized buffer, which succeeds.
2538  */
2539
2540 #define STORAGE_NETFN 0x0A
2541 #define STORAGE_CMD_GET_SDR 0x23
2542 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2543                                              unsigned long unused,
2544                                              void *in)
2545 {
2546         struct smi_info *smi_info = in;
2547         unsigned char *data = smi_info->curr_msg->data;
2548         unsigned int size   = smi_info->curr_msg->data_size;
2549         if (size >= 8 &&
2550             (data[0]>>2) == STORAGE_NETFN &&
2551             data[1] == STORAGE_CMD_GET_SDR &&
2552             data[7] == 0x3A) {
2553                 return_hosed_msg_badsize(smi_info);
2554                 return NOTIFY_STOP;
2555         }
2556         return NOTIFY_DONE;
2557 }
2558
2559 static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2560         .notifier_call  = dell_poweredge_bt_xaction_handler,
2561 };
2562
2563 /*
2564  * setup_dell_poweredge_bt_xaction_handler
2565  * @info - smi_info.device_id must be filled in already
2566  *
2567  * Registers the BT transaction notifier for Dell PowerEdge systems
2568  * when we know it is needed.
2569  */
2570 static void
2571 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2572 {
2573         struct ipmi_device_id *id = &smi_info->device_id;
2574         if (id->manufacturer_id == DELL_IANA_MFR_ID &&
2575             smi_info->si_type == SI_BT)
2576                 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2577 }
2578
2579 /*
2580  * setup_oem_data_handler
2581  * @info - smi_info.device_id must be filled in already
2582  *
2583  * Fills in smi_info.oem_data_avail_handler
2584  * when we know what function to use there.
2585  */
2586
2587 static void setup_oem_data_handler(struct smi_info *smi_info)
2588 {
2589         setup_dell_poweredge_oem_data_handler(smi_info);
2590 }
2591
2592 static void setup_xaction_handlers(struct smi_info *smi_info)
2593 {
2594         setup_dell_poweredge_bt_xaction_handler(smi_info);
2595 }
2596
2597 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2598 {
2599         if (smi_info->intf) {
2600                 /* The timer and thread are only running if the
2601                    interface has been started up and registered. */
2602                 if (smi_info->thread != NULL)
2603                         kthread_stop(smi_info->thread);
2604                 del_timer_sync(&smi_info->si_timer);
2605         }
2606 }
2607
2608 static __devinitdata struct ipmi_default_vals
2609 {
2610         int type;
2611         int port;
2612 } ipmi_defaults[] =
2613 {
2614         { .type = SI_KCS, .port = 0xca2 },
2615         { .type = SI_SMIC, .port = 0xca9 },
2616         { .type = SI_BT, .port = 0xe4 },
2617         { .port = 0 }
2618 };
2619
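     /* Try the well-known default port for each interface type in the
      * table above.  This is only called when no interface was found by
      * any other method and si_trydefaults is set. */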
2620 static __devinit void default_find_bmc(void)
2621 {
2622         struct smi_info *info;
2623         int             i;
2624
2625         for (i = 0; ; i++) {
2626                 if (!ipmi_defaults[i].port)
2627                         break;
2628
2629                 info = kzalloc(sizeof(*info), GFP_KERNEL);
2630                 if (!info)
2631                         return;
2632
2633 #ifdef CONFIG_PPC_MERGE
2634                 if (check_legacy_ioport(ipmi_defaults[i].port))
2635                         continue;
2636 #endif
2637
2638                 info->addr_source = NULL;
2639
2640                 info->si_type = ipmi_defaults[i].type;
2641                 info->io_setup = port_setup;
2642                 info->io.addr_data = ipmi_defaults[i].port;
2643                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2644
2645                 info->io.addr = NULL;
2646                 info->io.regspacing = DEFAULT_REGSPACING;
2647                 info->io.regsize = DEFAULT_REGSIZE;
2648                 info->io.regshift = 0;
2649
2650                 if (try_smi_init(info) == 0) {
2651                         /* Found one... */
2652                         printk(KERN_INFO "ipmi_si: Found default %s state"
2653                                " machine at %s address 0x%lx\n",
2654                                si_to_str[info->si_type],
2655                                addr_space_to_str[info->io.addr_type],
2656                                info->io.addr_data);
2657                         return;
2658                 }
2659         }
2660 }
2661
2662 static int is_new_interface(struct smi_info *info)
2663 {
2664         struct smi_info *e;
2665
2666         list_for_each_entry(e, &smi_infos, link) {
2667                 if (e->io.addr_type != info->io.addr_type)
2668                         continue;
2669                 if (e->io.addr_data == info->io.addr_data)
2670                         return 0;
2671         }
2672
2673         return 1;
2674 }
2675
2676 static int try_smi_init(struct smi_info *new_smi)
2677 {
2678         int rv;
2679
2680         if (new_smi->addr_source) {
2681                 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2682                        " machine at %s address 0x%lx, slave address 0x%x,"
2683                        " irq %d\n",
2684                        new_smi->addr_source,
2685                        si_to_str[new_smi->si_type],
2686                        addr_space_to_str[new_smi->io.addr_type],
2687                        new_smi->io.addr_data,
2688                        new_smi->slave_addr, new_smi->irq);
2689         }
2690
2691         mutex_lock(&smi_infos_lock);
2692         if (!is_new_interface(new_smi)) {
2693                 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2694                 rv = -EBUSY;
2695                 goto out_err;
2696         }
2697
2698         /* So we know not to free it unless we have allocated one. */
2699         new_smi->intf = NULL;
2700         new_smi->si_sm = NULL;
2701         new_smi->handlers = NULL;
2702
2703         switch (new_smi->si_type) {
2704         case SI_KCS:
2705                 new_smi->handlers = &kcs_smi_handlers;
2706                 break;
2707
2708         case SI_SMIC:
2709                 new_smi->handlers = &smic_smi_handlers;
2710                 break;
2711
2712         case SI_BT:
2713                 new_smi->handlers = &bt_smi_handlers;
2714                 break;
2715
2716         default:
2717                 /* No support for anything else yet. */
2718                 rv = -EIO;
2719                 goto out_err;
2720         }
2721
2722         /* Allocate the state machine's data and initialize it. */
2723         new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2724         if (!new_smi->si_sm) {
2725                 printk(" Could not allocate state machine memory\n");
2726                 rv = -ENOMEM;
2727                 goto out_err;
2728         }
2729         new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2730                                                         &new_smi->io);
2731
2732         /* Now that we know the I/O size, we can set up the I/O. */
2733         rv = new_smi->io_setup(new_smi);
2734         if (rv) {
2735                 printk(" Could not set up I/O space\n");
2736                 goto out_err;
2737         }
2738
2739         spin_lock_init(&(new_smi->si_lock));
2740         spin_lock_init(&(new_smi->msg_lock));
2741         spin_lock_init(&(new_smi->count_lock));
2742
2743         /* Do low-level detection first. */
2744         if (new_smi->handlers->detect(new_smi->si_sm)) {
2745                 if (new_smi->addr_source)
2746                         printk(KERN_INFO "ipmi_si: Interface detection"
2747                                " failed\n");
2748                 rv = -ENODEV;
2749                 goto out_err;
2750         }
2751
2752         /* Attempt a get device id command.  If it fails, we probably
2753            don't have a BMC here. */
2754         rv = try_get_dev_id(new_smi);
2755         if (rv) {
2756                 if (new_smi->addr_source)
2757                         printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2758                                " at this location\n");
2759                 goto out_err;
2760         }
2761
2762         setup_oem_data_handler(new_smi);
2763         setup_xaction_handlers(new_smi);
2764
2765         /* Try to claim any interrupts. */
2766         if (new_smi->irq_setup)
2767                 new_smi->irq_setup(new_smi);
2768
2769         INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2770         INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2771         new_smi->curr_msg = NULL;
2772         atomic_set(&new_smi->req_events, 0);
2773         new_smi->run_to_completion = 0;
2774
2775         new_smi->interrupt_disabled = 0;
2776         atomic_set(&new_smi->stop_operation, 0);
2777         new_smi->intf_num = smi_num;
2778         smi_num++;
2779
2780         /* Start clearing the flags before we enable interrupts or the
2781            timer to avoid racing with the timer. */
2782         start_clear_flags(new_smi);
2783         /* IRQ is defined to be set when non-zero. */
2784         if (new_smi->irq)
2785                 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2786
2787         if (!new_smi->dev) {
2788                 /* If we don't already have a device from something
2789                  * else (like PCI), then register a new one. */
2790                 new_smi->pdev = platform_device_alloc("ipmi_si",
2791                                                       new_smi->intf_num);
2792                 if (!new_smi->pdev) {
                             rv = -ENOMEM;
2793                         printk(KERN_ERR
2794                                "ipmi_si_intf:"
2795                                " Unable to allocate platform device\n");
2796                         goto out_err;
2797                 }
2798                 new_smi->dev = &new_smi->pdev->dev;
2799                 new_smi->dev->driver = &ipmi_driver;
2800
2801                 rv = platform_device_add(new_smi->pdev);
2802                 if (rv) {
2803                         printk(KERN_ERR
2804                                "ipmi_si_intf:"
2805                                " Unable to register system interface device:"
2806                                " %d\n",
2807                                rv);
2808                         goto out_err;
2809                 }
2810                 new_smi->dev_registered = 1;
2811         }
2812
2813         rv = ipmi_register_smi(&handlers,
2814                                new_smi,
2815                                &new_smi->device_id,
2816                                new_smi->dev,
2817                                "bmc",
2818                                new_smi->slave_addr);
2819         if (rv) {
2820                 printk(KERN_ERR
2821                        "ipmi_si: Unable to register device: error %d\n",
2822                        rv);
2823                 goto out_err_stop_timer;
2824         }
2825
2826         rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2827                                      type_file_read_proc, NULL,
2828                                      new_smi, THIS_MODULE);
2829         if (rv) {
2830                 printk(KERN_ERR
2831                        "ipmi_si: Unable to create proc entry: %d\n",
2832                        rv);
2833                 goto out_err_stop_timer;
2834         }
2835
2836         rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2837                                      stat_file_read_proc, NULL,
2838                                      new_smi, THIS_MODULE);
2839         if (rv) {
2840                 printk(KERN_ERR
2841                        "ipmi_si: Unable to create proc entry: %d\n",
2842                        rv);
2843                 goto out_err_stop_timer;
2844         }
2845
2846         rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
2847                                      param_read_proc, NULL,
2848                                      new_smi, THIS_MODULE);
2849         if (rv) {
2850                 printk(KERN_ERR
2851                        "ipmi_si: Unable to create proc entry: %d\n",
2852                        rv);
2853                 goto out_err_stop_timer;
2854         }
2855
2856         list_add_tail(&new_smi->link, &smi_infos);
2857
2858         mutex_unlock(&smi_infos_lock);
2859
2860         printk(" IPMI %s interface initialized\n", si_to_str[new_smi->si_type]);
2861
2862         return 0;
2863
2864  out_err_stop_timer:
2865         atomic_inc(&new_smi->stop_operation);
2866         wait_for_timer_and_thread(new_smi);
2867
2868  out_err:
2869         if (new_smi->intf)
2870                 ipmi_unregister_smi(new_smi->intf);
2871
2872         if (new_smi->irq_cleanup)
2873                 new_smi->irq_cleanup(new_smi);
2874
2875         /* Wait until we know that we are out of any interrupt
2876            handlers that might have been running before we freed the
2877            interrupt. */
2878         synchronize_sched();
2879
2880         if (new_smi->si_sm) {
2881                 if (new_smi->handlers)
2882                         new_smi->handlers->cleanup(new_smi->si_sm);
2883                 kfree(new_smi->si_sm);
2884         }
2885         if (new_smi->addr_source_cleanup)
2886                 new_smi->addr_source_cleanup(new_smi);
2887         if (new_smi->io_cleanup)
2888                 new_smi->io_cleanup(new_smi);
2889
2890         if (new_smi->dev_registered)
2891                 platform_device_unregister(new_smi->pdev);
2892
2893         kfree(new_smi);
2894
2895         mutex_unlock(&smi_infos_lock);
2896
2897         return rv;
2898 }
2899
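     /* Module init: register the driver, then try each discovery method
      * in turn (hardcoded parameters, SMBIOS/DMI, ACPI SPMI, PCI, Open
      * Firmware), fall back to the default ports if nothing was found
      * and si_trydefaults is set, and bail out if there is still no
      * interface and unload_when_empty is set. */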
2900 static __devinit int init_ipmi_si(void)
2901 {
2902         int  i;
2903         char *str;
2904         int  rv;
2905
2906         if (initialized)
2907                 return 0;
2908         initialized = 1;
2909
2910         /* Register the device drivers. */
2911         rv = driver_register(&ipmi_driver);
2912         if (rv) {
2913                 printk(KERN_ERR
2914                        "init_ipmi_si: Unable to register driver: %d\n",
2915                        rv);
2916                 return rv;
2917         }
2918
2919
2920         /* Parse out the si_type string into its components. */
2921         str = si_type_str;
2922         if (*str != '\0') {
2923                 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
2924                         si_type[i] = str;
2925                         str = strchr(str, ',');
2926                         if (str) {
2927                                 *str = '\0';
2928                                 str++;
2929                         } else {
2930                                 break;
2931                         }
2932                 }
2933         }
2934
2935         printk(KERN_INFO "IPMI System Interface driver.\n");
2936
2937         hardcode_find_bmc();
2938
2939 #ifdef CONFIG_DMI
2940         dmi_find_bmc();
2941 #endif
2942
2943 #ifdef CONFIG_ACPI
2944         acpi_find_bmc();
2945 #endif
2946
2947 #ifdef CONFIG_PCI
2948         rv = pci_register_driver(&ipmi_pci_driver);
2949         if (rv) {
2950                 printk(KERN_ERR
2951                        "init_ipmi_si: Unable to register PCI driver: %d\n",
2952                        rv);
2953         }
2954 #endif
2955
2956 #ifdef CONFIG_PPC_OF
2957         of_register_platform_driver(&ipmi_of_platform_driver);
2958 #endif
2959
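        /*
         * If si_trydefaults is set and nothing has been found so far,
         * probe the standard default BMC addresses.
         */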
2960         if (si_trydefaults) {
2961                 mutex_lock(&smi_infos_lock);
2962                 if (list_empty(&smi_infos)) {
2963                         /* No BMC was found, try defaults. */
2964                         mutex_unlock(&smi_infos_lock);
2965                         default_find_bmc();
2966                 } else {
2967                         mutex_unlock(&smi_infos_lock);
2968                 }
2969         }
2970
2971         mutex_lock(&smi_infos_lock);
2972         if (unload_when_empty && list_empty(&smi_infos)) {
2973                 mutex_unlock(&smi_infos_lock);
2974 #ifdef CONFIG_PCI
2975                 pci_unregister_driver(&ipmi_pci_driver);
2976 #endif
#ifdef CONFIG_PPC_OF
                of_unregister_platform_driver(&ipmi_of_platform_driver);
#endif
2977                 driver_unregister(&ipmi_driver);
2978                 printk(KERN_WARNING PFX "Unable to find any System Interface(s)\n");
2979                 return -ENODEV;
2980         } else {
2981                 mutex_unlock(&smi_infos_lock);
2982                 return 0;
2983         }
2984 }
2985 module_init(init_ipmi_si);
2986
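/* Shut down and free a single interface.  Must be called with
   smi_infos_lock held, since it removes the interface from smi_infos. */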
2987 static void cleanup_one_si(struct smi_info *to_clean)
2988 {
2989         int           rv;
2990         unsigned long flags;
2991
2992         if (!to_clean)
2993                 return;
2994
2995         list_del(&to_clean->link);
2996
2997         /* Tell the driver that we are shutting down. */
2998         atomic_inc(&to_clean->stop_operation);
2999
3000         /* Make sure the timer and thread are stopped and will not run
3001            again. */
3002         wait_for_timer_and_thread(to_clean);
3003
3004         /* Timeouts are stopped, now make sure the interrupts are off
3005            for the device.  A little tricky with locks to make sure
3006            there are no races. */
3007         spin_lock_irqsave(&to_clean->si_lock, flags);
3008         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3009                 spin_unlock_irqrestore(&to_clean->si_lock, flags);
3010                 poll(to_clean);
3011                 schedule_timeout_uninterruptible(1);
3012                 spin_lock_irqsave(&to_clean->si_lock, flags);
3013         }
3014         disable_si_irq(to_clean);
3015         spin_unlock_irqrestore(&to_clean->si_lock, flags);
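        /* With interrupts from the BMC now disabled, poll until any
           transaction that is still in progress has completed. */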
3016         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3017                 poll(to_clean);
3018                 schedule_timeout_uninterruptible(1);
3019         }
3020
3021         /* Clean up interrupts and make sure that everything is done. */
3022         if (to_clean->irq_cleanup)
3023                 to_clean->irq_cleanup(to_clean);
3024         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3025                 poll(to_clean);
3026                 schedule_timeout_uninterruptible(1);
3027         }
3028
3029         rv = ipmi_unregister_smi(to_clean->intf);
3030         if (rv) {
3031                 printk(KERN_ERR
3032                        "ipmi_si: Unable to unregister device: errno=%d\n",
3033                        rv);
3034         }
3035
3036         to_clean->handlers->cleanup(to_clean->si_sm);
3037
3038         kfree(to_clean->si_sm);
3039
3040         if (to_clean->addr_source_cleanup)
3041                 to_clean->addr_source_cleanup(to_clean);
3042         if (to_clean->io_cleanup)
3043                 to_clean->io_cleanup(to_clean);
3044
3045         if (to_clean->dev_registered)
3046                 platform_device_unregister(to_clean->pdev);
3047
3048         kfree(to_clean);
3049 }
3050
3051 static __exit void cleanup_ipmi_si(void)
3052 {
3053         struct smi_info *e, *tmp_e;
3054
3055         if (!initialized)
3056                 return;
3057
3058 #ifdef CONFIG_PCI
3059         pci_unregister_driver(&ipmi_pci_driver);
3060 #endif
3061
3062 #ifdef CONFIG_PPC_OF
3063         of_unregister_platform_driver(&ipmi_of_platform_driver);
3064 #endif
3065
3066         mutex_lock(&smi_infos_lock);
3067         list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
3068                 cleanup_one_si(e);
3069         mutex_unlock(&smi_infos_lock);
3070
3071         driver_unregister(&ipmi_driver);
3072 }
3073 module_exit(cleanup_ipmi_si);
3074
3075 MODULE_LICENSE("GPL");
3076 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3077 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");