drivers/char/ipmi/ipmi_si_intf.c
1 /*
2  * ipmi_si.c
3  *
4  * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5  * BT).
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  *
13  *  This program is free software; you can redistribute it and/or modify it
14  *  under the terms of the GNU General Public License as published by the
15  *  Free Software Foundation; either version 2 of the License, or (at your
16  *  option) any later version.
17  *
18  *
19  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
20  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
25  *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
27  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
28  *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  *  You should have received a copy of the GNU General Public License along
31  *  with this program; if not, write to the Free Software Foundation, Inc.,
32  *  675 Mass Ave, Cambridge, MA 02139, USA.
33  */
34
35 /*
36  * This file holds the "policy" for the interface to the SMI state
37  * machine.  It does the configuration, handles timers and interrupts,
38  * and drives the real SMI state machine.
39  */
40
41 #include <linux/module.h>
42 #include <linux/moduleparam.h>
43 #include <asm/system.h>
44 #include <linux/sched.h>
45 #include <linux/timer.h>
46 #include <linux/errno.h>
47 #include <linux/spinlock.h>
48 #include <linux/slab.h>
49 #include <linux/delay.h>
50 #include <linux/list.h>
51 #include <linux/pci.h>
52 #include <linux/ioport.h>
53 #include <linux/notifier.h>
54 #include <linux/mutex.h>
55 #include <linux/kthread.h>
56 #include <asm/irq.h>
57 #include <linux/interrupt.h>
58 #include <linux/rcupdate.h>
59 #include <linux/ipmi_smi.h>
60 #include <asm/io.h>
61 #include "ipmi_si_sm.h"
62 #include <linux/init.h>
63 #include <linux/dmi.h>
64 #include <linux/string.h>
65 #include <linux/ctype.h>
66
67 #define PFX "ipmi_si: "
68
69 /* Measure times between events in the driver. */
70 #undef DEBUG_TIMING
71
72 /* Call every 10 ms. */
73 #define SI_TIMEOUT_TIME_USEC    10000
74 #define SI_USEC_PER_JIFFY       (1000000/HZ)
75 #define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
76 #define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
77                                        short timeout */
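/* For example, with HZ=1000 these work out to SI_USEC_PER_JIFFY = 1000
   and SI_TIMEOUT_JIFFIES = 10, so the normal timeout fires every 10 jiffies. */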
78
79 enum si_intf_state {
80         SI_NORMAL,
81         SI_GETTING_FLAGS,
82         SI_GETTING_EVENTS,
83         SI_CLEARING_FLAGS,
84         SI_CLEARING_FLAGS_THEN_SET_IRQ,
85         SI_GETTING_MESSAGES,
86         SI_ENABLE_INTERRUPTS1,
87         SI_ENABLE_INTERRUPTS2
88         /* FIXME - add watchdog stuff. */
89 };
90
91 /* Some BT-specific defines we need here. */
92 #define IPMI_BT_INTMASK_REG             2
93 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT   2
94 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT  1
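/* IPMI_BT_INTMASK_REG is the offset of the BT interface's interrupt-mask
   register; the bits below are used by si_bt_irq_handler() and
   std_irq_setup() to clear and enable the BT IRQ. */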
95
96 enum si_type {
97     SI_KCS, SI_SMIC, SI_BT
98 };
99 static char *si_to_str[] = { "kcs", "smic", "bt" };
100
101 #define DEVICE_NAME "ipmi_si"
102
103 static struct device_driver ipmi_driver =
104 {
105         .name = DEVICE_NAME,
106         .bus = &platform_bus_type
107 };
108
109 struct smi_info
110 {
111         int                    intf_num;
112         ipmi_smi_t             intf;
113         struct si_sm_data      *si_sm;
114         struct si_sm_handlers  *handlers;
115         enum si_type           si_type;
116         spinlock_t             si_lock;
117         spinlock_t             msg_lock;
118         struct list_head       xmit_msgs;
119         struct list_head       hp_xmit_msgs;
120         struct ipmi_smi_msg    *curr_msg;
121         enum si_intf_state     si_state;
122
123         /* Used to handle the various types of I/O that can occur with
124            IPMI */
125         struct si_sm_io io;
126         int (*io_setup)(struct smi_info *info);
127         void (*io_cleanup)(struct smi_info *info);
128         int (*irq_setup)(struct smi_info *info);
129         void (*irq_cleanup)(struct smi_info *info);
130         unsigned int io_size;
131         char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
132         void (*addr_source_cleanup)(struct smi_info *info);
133         void *addr_source_data;
134
135         /* Per-OEM handler, called from handle_flags().
136            Returns 1 when handle_flags() needs to be re-run
137            or 0 indicating it set si_state itself.
138         */
139         int (*oem_data_avail_handler)(struct smi_info *smi_info);
140
141         /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
142            is set to hold the flags until we are done handling everything
143            from the flags. */
144 #define RECEIVE_MSG_AVAIL       0x01
145 #define EVENT_MSG_BUFFER_FULL   0x02
146 #define WDT_PRE_TIMEOUT_INT     0x08
147 #define OEM0_DATA_AVAIL     0x20
148 #define OEM1_DATA_AVAIL     0x40
149 #define OEM2_DATA_AVAIL     0x80
150 #define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
151                              OEM1_DATA_AVAIL | \
152                              OEM2_DATA_AVAIL)
153         unsigned char       msg_flags;
154
155         /* If set to true, this will request events the next time the
156            state machine is idle. */
157         atomic_t            req_events;
158
159         /* If true, run the state machine to completion on every send
160            call.  Generally used after a panic to make sure stuff goes
161            out. */
162         int                 run_to_completion;
163
164         /* The I/O port of an SI interface. */
165         int                 port;
166
167         /* The space between start addresses of the two ports.  For
168            instance, if the first port is 0xca2 and the spacing is 4, then
169            the second port is 0xca6. */
170         unsigned int        spacing;
171
172         /* zero if no irq; */
173         int                 irq;
174
175         /* The timer for this si. */
176         struct timer_list   si_timer;
177
178         /* The time (in jiffies) the last timeout occurred at. */
179         unsigned long       last_timeout_jiffies;
180
181         /* Used to gracefully stop the timer without race conditions. */
182         atomic_t            stop_operation;
183
184         /* The driver will disable interrupts when it gets into a
185            situation where it cannot handle messages due to lack of
186            memory.  Once that situation clears up, it will re-enable
187            interrupts. */
188         int interrupt_disabled;
189
190         /* From the get device id response... */
191         struct ipmi_device_id device_id;
192
193         /* Driver model stuff. */
194         struct device *dev;
195         struct platform_device *pdev;
196
197          /* True if we allocated the device, false if it came from
198           * someplace else (like PCI). */
199         int dev_registered;
200
201         /* Slave address, could be reported from DMI. */
202         unsigned char slave_addr;
203
204         /* Counters and things for the proc filesystem. */
205         spinlock_t count_lock;
206         unsigned long short_timeouts;
207         unsigned long long_timeouts;
208         unsigned long timeout_restarts;
209         unsigned long idles;
210         unsigned long interrupts;
211         unsigned long attentions;
212         unsigned long flag_fetches;
213         unsigned long hosed_count;
214         unsigned long complete_transactions;
215         unsigned long events;
216         unsigned long watchdog_pretimeouts;
217         unsigned long incoming_messages;
218
219         struct task_struct *thread;
220
221         struct list_head link;
222 };
223
224 #define SI_MAX_PARMS 4
225
226 static int force_kipmid[SI_MAX_PARMS];
227 static int num_force_kipmid;
228
229 static int unload_when_empty = 1;
230
231 static int try_smi_init(struct smi_info *smi);
232 static void cleanup_one_si(struct smi_info *to_clean);
233
234 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
235 static int register_xaction_notifier(struct notifier_block * nb)
236 {
237         return atomic_notifier_chain_register(&xaction_notifier_list, nb);
238 }
239
240 static void deliver_recv_msg(struct smi_info *smi_info,
241                              struct ipmi_smi_msg *msg)
242 {
243         /* Deliver the message to the upper layer with the lock
244            released. */
245         spin_unlock(&(smi_info->si_lock));
246         ipmi_smi_msg_received(smi_info->intf, msg);
247         spin_lock(&(smi_info->si_lock));
248 }
249
250 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
251 {
252         struct ipmi_smi_msg *msg = smi_info->curr_msg;
253
254         if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
255                 cCode = IPMI_ERR_UNSPECIFIED;
256         /* else use it as is */
257
258         /* Make it a response */
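        /* OR-ing byte 0 with 4 sets bit 2 of the netfn/LUN byte, which turns
           the request netfn into the corresponding (odd) response netfn. */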
259         msg->rsp[0] = msg->data[0] | 4;
260         msg->rsp[1] = msg->data[1];
261         msg->rsp[2] = cCode;
262         msg->rsp_size = 3;
263
264         smi_info->curr_msg = NULL;
265         deliver_recv_msg(smi_info, msg);
266 }
267
268 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
269 {
270         int              rv;
271         struct list_head *entry = NULL;
272 #ifdef DEBUG_TIMING
273         struct timeval t;
274 #endif
275
276         /* No need to save flags, we already have interrupts off and we
277            already hold the SMI lock. */
278         spin_lock(&(smi_info->msg_lock));
279
280         /* Pick the high priority queue first. */
281         if (!list_empty(&(smi_info->hp_xmit_msgs))) {
282                 entry = smi_info->hp_xmit_msgs.next;
283         } else if (!list_empty(&(smi_info->xmit_msgs))) {
284                 entry = smi_info->xmit_msgs.next;
285         }
286
287         if (!entry) {
288                 smi_info->curr_msg = NULL;
289                 rv = SI_SM_IDLE;
290         } else {
291                 int err;
292
293                 list_del(entry);
294                 smi_info->curr_msg = list_entry(entry,
295                                                 struct ipmi_smi_msg,
296                                                 link);
297 #ifdef DEBUG_TIMING
298                 do_gettimeofday(&t);
299                 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
300 #endif
301                 err = atomic_notifier_call_chain(&xaction_notifier_list,
302                                 0, smi_info);
303                 if (err & NOTIFY_STOP_MASK) {
304                         rv = SI_SM_CALL_WITHOUT_DELAY;
305                         goto out;
306                 }
307                 err = smi_info->handlers->start_transaction(
308                         smi_info->si_sm,
309                         smi_info->curr_msg->data,
310                         smi_info->curr_msg->data_size);
311                 if (err) {
312                         return_hosed_msg(smi_info, err);
313                 }
314
315                 rv = SI_SM_CALL_WITHOUT_DELAY;
316         }
317         out:
318         spin_unlock(&(smi_info->msg_lock));
319
320         return rv;
321 }
322
323 static void start_enable_irq(struct smi_info *smi_info)
324 {
325         unsigned char msg[2];
326
327         /* If we are enabling interrupts, we have to tell the
328            BMC to use them. */
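        /* A basic SI request is two bytes: (netfn << 2) | LUN in byte 0
           (LUN 0 here) and the command code in byte 1; the response is
           handled in handle_transaction_done() under SI_ENABLE_INTERRUPTS1. */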
329         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
330         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
331
332         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
333         smi_info->si_state = SI_ENABLE_INTERRUPTS1;
334 }
335
336 static void start_clear_flags(struct smi_info *smi_info)
337 {
338         unsigned char msg[3];
339
340         /* Make sure the watchdog pre-timeout flag is not set at startup. */
341         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
342         msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
343         msg[2] = WDT_PRE_TIMEOUT_INT;
344
345         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
346         smi_info->si_state = SI_CLEARING_FLAGS;
347 }
348
349 /* When we have a situation where we run out of memory and cannot
350    allocate messages, we just leave them in the BMC and run the system
351    polled until we can allocate some memory.  Once we have some
352    memory, we will re-enable the interrupt. */
353 static inline void disable_si_irq(struct smi_info *smi_info)
354 {
355         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
356                 disable_irq_nosync(smi_info->irq);
357                 smi_info->interrupt_disabled = 1;
358         }
359 }
360
361 static inline void enable_si_irq(struct smi_info *smi_info)
362 {
363         if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
364                 enable_irq(smi_info->irq);
365                 smi_info->interrupt_disabled = 0;
366         }
367 }
368
369 static void handle_flags(struct smi_info *smi_info)
370 {
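        /* Flags are serviced in priority order: watchdog pre-timeout first,
           then queued receive messages, then the event buffer, then any
           OEM-specific data. */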
371  retry:
372         if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
373                 /* Watchdog pre-timeout */
374                 spin_lock(&smi_info->count_lock);
375                 smi_info->watchdog_pretimeouts++;
376                 spin_unlock(&smi_info->count_lock);
377
378                 start_clear_flags(smi_info);
379                 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
380                 spin_unlock(&(smi_info->si_lock));
381                 ipmi_smi_watchdog_pretimeout(smi_info->intf);
382                 spin_lock(&(smi_info->si_lock));
383         } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
384                 /* Messages available. */
385                 smi_info->curr_msg = ipmi_alloc_smi_msg();
386                 if (!smi_info->curr_msg) {
387                         disable_si_irq(smi_info);
388                         smi_info->si_state = SI_NORMAL;
389                         return;
390                 }
391                 enable_si_irq(smi_info);
392
393                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
394                 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
395                 smi_info->curr_msg->data_size = 2;
396
397                 smi_info->handlers->start_transaction(
398                         smi_info->si_sm,
399                         smi_info->curr_msg->data,
400                         smi_info->curr_msg->data_size);
401                 smi_info->si_state = SI_GETTING_MESSAGES;
402         } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
403                 /* Events available. */
404                 smi_info->curr_msg = ipmi_alloc_smi_msg();
405                 if (!smi_info->curr_msg) {
406                         disable_si_irq(smi_info);
407                         smi_info->si_state = SI_NORMAL;
408                         return;
409                 }
410                 enable_si_irq(smi_info);
411
412                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
413                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
414                 smi_info->curr_msg->data_size = 2;
415
416                 smi_info->handlers->start_transaction(
417                         smi_info->si_sm,
418                         smi_info->curr_msg->data,
419                         smi_info->curr_msg->data_size);
420                 smi_info->si_state = SI_GETTING_EVENTS;
421         } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
422                    smi_info->oem_data_avail_handler) {
423                 if (smi_info->oem_data_avail_handler(smi_info))
424                         goto retry;
425         } else {
426                 smi_info->si_state = SI_NORMAL;
427         }
428 }
429
430 static void handle_transaction_done(struct smi_info *smi_info)
431 {
432         struct ipmi_smi_msg *msg;
433 #ifdef DEBUG_TIMING
434         struct timeval t;
435
436         do_gettimeofday(&t);
437         printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
438 #endif
439         switch (smi_info->si_state) {
440         case SI_NORMAL:
441                 if (!smi_info->curr_msg)
442                         break;
443
444                 smi_info->curr_msg->rsp_size
445                         = smi_info->handlers->get_result(
446                                 smi_info->si_sm,
447                                 smi_info->curr_msg->rsp,
448                                 IPMI_MAX_MSG_LENGTH);
449
450                 /* Do this here because deliver_recv_msg() releases the
451                    lock, and a new message can be put in during the
452                    time the lock is released. */
453                 msg = smi_info->curr_msg;
454                 smi_info->curr_msg = NULL;
455                 deliver_recv_msg(smi_info, msg);
456                 break;
457
458         case SI_GETTING_FLAGS:
459         {
460                 unsigned char msg[4];
461                 unsigned int  len;
462
463                 /* We got the flags from the SMI, now handle them. */
464                 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
465                 if (msg[2] != 0) {
466                         /* Error fetching flags, just give up for
467                            now. */
468                         smi_info->si_state = SI_NORMAL;
469                 } else if (len < 4) {
470                         /* Hmm, no flags.  That's technically illegal, but
471                            don't use uninitialized data. */
472                         smi_info->si_state = SI_NORMAL;
473                 } else {
474                         smi_info->msg_flags = msg[3];
475                         handle_flags(smi_info);
476                 }
477                 break;
478         }
479
480         case SI_CLEARING_FLAGS:
481         case SI_CLEARING_FLAGS_THEN_SET_IRQ:
482         {
483                 unsigned char msg[3];
484
485                 /* We cleared the flags. */
486                 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
487                 if (msg[2] != 0) {
488                         /* Error clearing flags */
489                         printk(KERN_WARNING
490                                "ipmi_si: Error clearing flags: %2.2x\n",
491                                msg[2]);
492                 }
493                 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
494                         start_enable_irq(smi_info);
495                 else
496                         smi_info->si_state = SI_NORMAL;
497                 break;
498         }
499
500         case SI_GETTING_EVENTS:
501         {
502                 smi_info->curr_msg->rsp_size
503                         = smi_info->handlers->get_result(
504                                 smi_info->si_sm,
505                                 smi_info->curr_msg->rsp,
506                                 IPMI_MAX_MSG_LENGTH);
507
508                 /* Do this here because deliver_recv_msg() releases the
509                    lock, and a new message can be put in during the
510                    time the lock is released. */
511                 msg = smi_info->curr_msg;
512                 smi_info->curr_msg = NULL;
513                 if (msg->rsp[2] != 0) {
514                         /* Error getting event, probably done. */
515                         msg->done(msg);
516
517                         /* Take off the event flag. */
518                         smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
519                         handle_flags(smi_info);
520                 } else {
521                         spin_lock(&smi_info->count_lock);
522                         smi_info->events++;
523                         spin_unlock(&smi_info->count_lock);
524
525                         /* Do this before we deliver the message
526                            because delivering the message releases the
527                            lock and something else can mess with the
528                            state. */
529                         handle_flags(smi_info);
530
531                         deliver_recv_msg(smi_info, msg);
532                 }
533                 break;
534         }
535
536         case SI_GETTING_MESSAGES:
537         {
538                 smi_info->curr_msg->rsp_size
539                         = smi_info->handlers->get_result(
540                                 smi_info->si_sm,
541                                 smi_info->curr_msg->rsp,
542                                 IPMI_MAX_MSG_LENGTH);
543
544                 /* Do this here because deliver_recv_msg() releases the
545                    lock, and a new message can be put in during the
546                    time the lock is released. */
547                 msg = smi_info->curr_msg;
548                 smi_info->curr_msg = NULL;
549                 if (msg->rsp[2] != 0) {
550                         /* Error getting message, probably done. */
551                         msg->done(msg);
552
553                         /* Take off the msg flag. */
554                         smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
555                         handle_flags(smi_info);
556                 } else {
557                         spin_lock(&smi_info->count_lock);
558                         smi_info->incoming_messages++;
559                         spin_unlock(&smi_info->count_lock);
560
561                         /* Do this before we deliver the message
562                            because delivering the message releases the
563                            lock and something else can mess with the
564                            state. */
565                         handle_flags(smi_info);
566
567                         deliver_recv_msg(smi_info, msg);
568                 }
569                 break;
570         }
571
572         case SI_ENABLE_INTERRUPTS1:
573         {
574                 unsigned char msg[4];
575
576                 /* We got the current global enables from the BMC, now handle them. */
577                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
578                 if (msg[2] != 0) {
579                         printk(KERN_WARNING
580                                "ipmi_si: Could not enable interrupts"
581                                ", failed get, using polled mode.\n");
582                         smi_info->si_state = SI_NORMAL;
583                 } else {
584                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
585                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
586                         msg[2] = msg[3] | 1; /* enable msg queue int */
587                         smi_info->handlers->start_transaction(
588                                 smi_info->si_sm, msg, 3);
589                         smi_info->si_state = SI_ENABLE_INTERRUPTS2;
590                 }
591                 break;
592         }
593
594         case SI_ENABLE_INTERRUPTS2:
595         {
596                 unsigned char msg[4];
597
598                 /* We got the response to setting the global enables, check it for errors. */
599                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
600                 if (msg[2] != 0) {
601                         printk(KERN_WARNING
602                                "ipmi_si: Could not enable interrupts"
603                                ", failed set, using polled mode.\n");
604                 }
605                 smi_info->si_state = SI_NORMAL;
606                 break;
607         }
608         }
609 }
610
611 /* Called on timeouts and events.  Timeouts should pass the elapsed
612    time, interrupts should pass in zero. */
613 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
614                                            int time)
615 {
616         enum si_sm_result si_sm_result;
617
618  restart:
619         /* There used to be a loop here that waited a little while
620            (around 25us) before giving up.  That turned out to be
621            pointless, the minimum delays I was seeing were in the 300us
622            range, which is far too long to wait in an interrupt.  So
623            we just run until the state machine tells us something
624            happened or it needs a delay. */
625         si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
626         time = 0;
627         while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
628         {
629                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
630         }
631
632         if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
633         {
634                 spin_lock(&smi_info->count_lock);
635                 smi_info->complete_transactions++;
636                 spin_unlock(&smi_info->count_lock);
637
638                 handle_transaction_done(smi_info);
639                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
640         }
641         else if (si_sm_result == SI_SM_HOSED)
642         {
643                 spin_lock(&smi_info->count_lock);
644                 smi_info->hosed_count++;
645                 spin_unlock(&smi_info->count_lock);
646
647                 /* Do this before return_hosed_msg(), because that
648                    releases the lock. */
649                 smi_info->si_state = SI_NORMAL;
650                 if (smi_info->curr_msg != NULL) {
651                         /* If we were handling a user message, format
652                            a response to send to the upper layer to
653                            tell it about the error. */
654                         return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
655                 }
656                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
657         }
658
659         /* We prefer handling attn over new messages. */
660         if (si_sm_result == SI_SM_ATTN)
661         {
662                 unsigned char msg[2];
663
664                 spin_lock(&smi_info->count_lock);
665                 smi_info->attentions++;
666                 spin_unlock(&smi_info->count_lock);
667
668                 /* Got an attn, send down a get message flags to see
669                    what's causing it.  It would be better to handle
670                    this in the upper layer, but due to the way
671                    interrupts work with the SMI, that's not really
672                    possible. */
673                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
674                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
675
676                 smi_info->handlers->start_transaction(
677                         smi_info->si_sm, msg, 2);
678                 smi_info->si_state = SI_GETTING_FLAGS;
679                 goto restart;
680         }
681
682         /* If we are currently idle, try to start the next message. */
683         if (si_sm_result == SI_SM_IDLE) {
684                 spin_lock(&smi_info->count_lock);
685                 smi_info->idles++;
686                 spin_unlock(&smi_info->count_lock);
687
688                 si_sm_result = start_next_msg(smi_info);
689                 if (si_sm_result != SI_SM_IDLE)
690                         goto restart;
691         }
692
693         if ((si_sm_result == SI_SM_IDLE)
694             && (atomic_read(&smi_info->req_events)))
695         {
696                 /* We are idle and the upper layer requested that I fetch
697                    events, so do so. */
698                 atomic_set(&smi_info->req_events, 0);
699
700                 smi_info->curr_msg = ipmi_alloc_smi_msg();
701                 if (!smi_info->curr_msg)
702                         goto out;
703
704                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
705                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
706                 smi_info->curr_msg->data_size = 2;
707
708                 smi_info->handlers->start_transaction(
709                         smi_info->si_sm,
710                         smi_info->curr_msg->data,
711                         smi_info->curr_msg->data_size);
712                 smi_info->si_state = SI_GETTING_EVENTS;
713                 goto restart;
714         }
715  out:
716         return si_sm_result;
717 }
718
719 static void sender(void                *send_info,
720                    struct ipmi_smi_msg *msg,
721                    int                 priority)
722 {
723         struct smi_info   *smi_info = send_info;
724         enum si_sm_result result;
725         unsigned long     flags;
726 #ifdef DEBUG_TIMING
727         struct timeval    t;
728 #endif
729
730         if (atomic_read(&smi_info->stop_operation)) {
731                 msg->rsp[0] = msg->data[0] | 4;
732                 msg->rsp[1] = msg->data[1];
733                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
734                 msg->rsp_size = 3;
735                 deliver_recv_msg(smi_info, msg);
736                 return;
737         }
738
739         spin_lock_irqsave(&(smi_info->msg_lock), flags);
740 #ifdef DEBUG_TIMING
741         do_gettimeofday(&t);
742         printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
743 #endif
744
745         if (smi_info->run_to_completion) {
746                 /* If we are running to completion, then throw it in
747                    the list and run transactions until everything is
748                    clear.  Priority doesn't matter here. */
749                 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
750
751                 /* We have to release the msg lock and claim the smi
752                    lock in this case, because of race conditions. */
753                 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
754
755                 spin_lock_irqsave(&(smi_info->si_lock), flags);
756                 result = smi_event_handler(smi_info, 0);
757                 while (result != SI_SM_IDLE) {
758                         udelay(SI_SHORT_TIMEOUT_USEC);
759                         result = smi_event_handler(smi_info,
760                                                    SI_SHORT_TIMEOUT_USEC);
761                 }
762                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
763                 return;
764         } else {
765                 if (priority > 0) {
766                         list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
767                 } else {
768                         list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
769                 }
770         }
771         spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
772
773         spin_lock_irqsave(&(smi_info->si_lock), flags);
774         if ((smi_info->si_state == SI_NORMAL)
775             && (smi_info->curr_msg == NULL))
776         {
777                 start_next_msg(smi_info);
778         }
779         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
780 }
781
782 static void set_run_to_completion(void *send_info, int i_run_to_completion)
783 {
784         struct smi_info   *smi_info = send_info;
785         enum si_sm_result result;
786         unsigned long     flags;
787
788         spin_lock_irqsave(&(smi_info->si_lock), flags);
789
790         smi_info->run_to_completion = i_run_to_completion;
791         if (i_run_to_completion) {
792                 result = smi_event_handler(smi_info, 0);
793                 while (result != SI_SM_IDLE) {
794                         udelay(SI_SHORT_TIMEOUT_USEC);
795                         result = smi_event_handler(smi_info,
796                                                    SI_SHORT_TIMEOUT_USEC);
797                 }
798         }
799
800         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
801 }
802
803 static int ipmi_thread(void *data)
804 {
805         struct smi_info *smi_info = data;
806         unsigned long flags;
807         enum si_sm_result smi_result;
808
809         set_user_nice(current, 19);
810         while (!kthread_should_stop()) {
811                 spin_lock_irqsave(&(smi_info->si_lock), flags);
812                 smi_result = smi_event_handler(smi_info, 0);
813                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
814                 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
815                         /* do nothing */
816                 }
817                 else if (smi_result == SI_SM_CALL_WITH_DELAY)
818                         schedule();
819                 else
820                         schedule_timeout_interruptible(1);
821         }
822         return 0;
823 }
824
825
826 static void poll(void *send_info)
827 {
828         struct smi_info *smi_info = send_info;
829
830         /*
831          * Make sure there is some delay in the poll loop so we can
832          * drive time forward and timeout things.
833          */
834         udelay(10);
835         smi_event_handler(smi_info, 10);
836 }
837
838 static void request_events(void *send_info)
839 {
840         struct smi_info *smi_info = send_info;
841
842         if (atomic_read(&smi_info->stop_operation))
843                 return;
844
845         atomic_set(&smi_info->req_events, 1);
846 }
847
848 static int initialized;
849
850 static void smi_timeout(unsigned long data)
851 {
852         struct smi_info   *smi_info = (struct smi_info *) data;
853         enum si_sm_result smi_result;
854         unsigned long     flags;
855         unsigned long     jiffies_now;
856         long              time_diff;
857 #ifdef DEBUG_TIMING
858         struct timeval    t;
859 #endif
860
861         if (atomic_read(&smi_info->stop_operation))
862                 return;
863
864         spin_lock_irqsave(&(smi_info->si_lock), flags);
865 #ifdef DEBUG_TIMING
866         do_gettimeofday(&t);
867         printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
868 #endif
869         jiffies_now = jiffies;
870         time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
871                      * SI_USEC_PER_JIFFY);
872         smi_result = smi_event_handler(smi_info, time_diff);
873
874         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
875
876         smi_info->last_timeout_jiffies = jiffies_now;
877
878         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
879                 /* Running with interrupts, only do long timeouts. */
880                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
881                 spin_lock_irqsave(&smi_info->count_lock, flags);
882                 smi_info->long_timeouts++;
883                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
884                 goto do_add_timer;
885         }
886
887         /* If the state machine asks for a short delay, then shorten
888            the timer timeout. */
889         if (smi_result == SI_SM_CALL_WITH_DELAY) {
890                 spin_lock_irqsave(&smi_info->count_lock, flags);
891                 smi_info->short_timeouts++;
892                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
893                 smi_info->si_timer.expires = jiffies + 1;
894         } else {
895                 spin_lock_irqsave(&smi_info->count_lock, flags);
896                 smi_info->long_timeouts++;
897                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
898                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
899         }
900
901  do_add_timer:
902         add_timer(&(smi_info->si_timer));
903 }
904
905 static irqreturn_t si_irq_handler(int irq, void *data)
906 {
907         struct smi_info *smi_info = data;
908         unsigned long   flags;
909 #ifdef DEBUG_TIMING
910         struct timeval  t;
911 #endif
912
913         spin_lock_irqsave(&(smi_info->si_lock), flags);
914
915         spin_lock(&smi_info->count_lock);
916         smi_info->interrupts++;
917         spin_unlock(&smi_info->count_lock);
918
919         if (atomic_read(&smi_info->stop_operation))
920                 goto out;
921
922 #ifdef DEBUG_TIMING
923         do_gettimeofday(&t);
924         printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
925 #endif
926         smi_event_handler(smi_info, 0);
927  out:
928         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
929         return IRQ_HANDLED;
930 }
931
932 static irqreturn_t si_bt_irq_handler(int irq, void *data)
933 {
934         struct smi_info *smi_info = data;
935         /* We need to clear the IRQ flag for the BT interface. */
936         smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
937                              IPMI_BT_INTMASK_CLEAR_IRQ_BIT
938                              | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
939         return si_irq_handler(irq, data);
940 }
941
942 static int smi_start_processing(void       *send_info,
943                                 ipmi_smi_t intf)
944 {
945         struct smi_info *new_smi = send_info;
946         int             enable = 0;
947
948         new_smi->intf = intf;
949
950         /* Set up the timer that drives the interface. */
951         setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
952         new_smi->last_timeout_jiffies = jiffies;
953         mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
954
955         /*
956          * Check if the user forcefully enabled the daemon.
957          */
958         if (new_smi->intf_num < num_force_kipmid)
959                 enable = force_kipmid[new_smi->intf_num];
960         /*
961          * The BT interface is efficient enough to not need a thread,
962          * and there is no need for a thread if we have interrupts.
963          */
964         else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
965                 enable = 1;
966
967         if (enable) {
968                 new_smi->thread = kthread_run(ipmi_thread, new_smi,
969                                               "kipmi%d", new_smi->intf_num);
970                 if (IS_ERR(new_smi->thread)) {
971                         printk(KERN_NOTICE "ipmi_si_intf: Could not start"
972                                " kernel thread due to error %ld, only using"
973                                " timers to drive the interface\n",
974                                PTR_ERR(new_smi->thread));
975                         new_smi->thread = NULL;
976                 }
977         }
978
979         return 0;
980 }
981
982 static void set_maintenance_mode(void *send_info, int enable)
983 {
984         struct smi_info   *smi_info = send_info;
985
986         if (!enable)
987                 atomic_set(&smi_info->req_events, 0);
988 }
989
990 static struct ipmi_smi_handlers handlers =
991 {
992         .owner                  = THIS_MODULE,
993         .start_processing       = smi_start_processing,
994         .sender                 = sender,
995         .request_events         = request_events,
996         .set_maintenance_mode   = set_maintenance_mode,
997         .set_run_to_completion  = set_run_to_completion,
998         .poll                   = poll,
999 };
1000
1001 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
1002    a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS */
1003
1004 static LIST_HEAD(smi_infos);
1005 static DEFINE_MUTEX(smi_infos_lock);
1006 static int smi_num; /* Used to sequence the SMIs */
1007
1008 #define DEFAULT_REGSPACING      1
1009
1010 static int           si_trydefaults = 1;
1011 static char          *si_type[SI_MAX_PARMS];
1012 #define MAX_SI_TYPE_STR 30
1013 static char          si_type_str[MAX_SI_TYPE_STR];
1014 static unsigned long addrs[SI_MAX_PARMS];
1015 static int num_addrs;
1016 static unsigned int  ports[SI_MAX_PARMS];
1017 static int num_ports;
1018 static int           irqs[SI_MAX_PARMS];
1019 static int num_irqs;
1020 static int           regspacings[SI_MAX_PARMS];
1021 static int num_regspacings;
1022 static int           regsizes[SI_MAX_PARMS];
1023 static int num_regsizes;
1024 static int           regshifts[SI_MAX_PARMS];
1025 static int num_regshifts;
1026 static int slave_addrs[SI_MAX_PARMS];
1027 static int num_slave_addrs;
1028
1029 #define IPMI_IO_ADDR_SPACE  0
1030 #define IPMI_MEM_ADDR_SPACE 1
1031 static char *addr_space_to_str[] = { "i/o", "mem" };
1032
1033 static int hotmod_handler(const char *val, struct kernel_param *kp);
1034
1035 module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
1036 MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
1037                  " Documentation/IPMI.txt in the kernel sources for the"
1038                  " gory details.");
1039
1040 module_param_named(trydefaults, si_trydefaults, bool, 0);
1041 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1042                  " default scan of the KCS and SMIC interface at the standard"
1043                  " address");
1044 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1045 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1046                  " interface separated by commas.  The types are 'kcs',"
1047                  " 'smic', and 'bt'.  For example type=kcs,bt will set"
1048                  " the first interface to kcs and the second to bt");
1049 module_param_array(addrs, long, &num_addrs, 0);
1050 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1051                  " addresses separated by commas.  Only use if an interface"
1052                  " is in memory.  Otherwise, set it to zero or leave"
1053                  " it blank.");
1054 module_param_array(ports, int, &num_ports, 0);
1055 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1056                  " addresses separated by commas.  Only use if an interface"
1057                  " is a port.  Otherwise, set it to zero or leave"
1058                  " it blank.");
1059 module_param_array(irqs, int, &num_irqs, 0);
1060 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1061                  " addresses separated by commas.  Only use if an interface"
1062                  " has an interrupt.  Otherwise, set it to zero or leave"
1063                  " it blank.");
1064 module_param_array(regspacings, int, &num_regspacings, 0);
1065 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1066                  " and each successive register used by the interface.  For"
1067                  " instance, if the start address is 0xca2 and the spacing"
1068                  " is 2, then the second address is at 0xca4.  Defaults"
1069                  " to 1.");
1070 module_param_array(regsizes, int, &num_regsizes, 0);
1071 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1072                  " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1073                  " 16-bit, 32-bit, or 64-bit register.  Use this if you"
1074                  " 16-bit, 32-bit, or 64-bit register.  Use this if"
1075                  " register.");
1076 module_param_array(regshifts, int, &num_regshifts, 0);
1077 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
1078                  " IPMI register, in bits.  For instance, if the data"
1079                  " is read from a 32-bit word and the IPMI data is in"
1080                  " bits 8-15, then the shift would be 8.");
1081 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1082 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1083                  " the controller.  Normally this is 0x20, but can be"
1084                  " overridden by this parm.  This is an array indexed"
1085                  " by interface number.");
1086 module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1087 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1088                  " disabled (0).  Normally the IPMI driver auto-detects"
1089                  " this, but the value may be overridden by this parm.");
1090 module_param(unload_when_empty, int, 0);
1091 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1092                  " specified or found, default is 1.  Setting to 0"
1093                  " is useful for hot add of devices using hotmod.");
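/*
 * Illustrative module load (parameter values here are examples only):
 *   modprobe ipmi_si type=kcs ports=0xca2
 * Each parameter is an array indexed by interface number, so two interfaces
 * could be described as, e.g., type=kcs,bt ports=0xca2,0xe4.
 */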
1094
1095
1096 static void std_irq_cleanup(struct smi_info *info)
1097 {
1098         if (info->si_type == SI_BT)
1099                 /* Disable the interrupt in the BT interface. */
1100                 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1101         free_irq(info->irq, info);
1102 }
1103
1104 static int std_irq_setup(struct smi_info *info)
1105 {
1106         int rv;
1107
1108         if (!info->irq)
1109                 return 0;
1110
1111         if (info->si_type == SI_BT) {
1112                 rv = request_irq(info->irq,
1113                                  si_bt_irq_handler,
1114                                  IRQF_DISABLED,
1115                                  DEVICE_NAME,
1116                                  info);
1117                 if (!rv)
1118                         /* Enable the interrupt in the BT interface. */
1119                         info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1120                                          IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1121         } else
1122                 rv = request_irq(info->irq,
1123                                  si_irq_handler,
1124                                  IRQF_DISABLED,
1125                                  DEVICE_NAME,
1126                                  info);
1127         if (rv) {
1128                 printk(KERN_WARNING
1129                        "ipmi_si: %s unable to claim interrupt %d,"
1130                        " running polled\n",
1131                        DEVICE_NAME, info->irq);
1132                 info->irq = 0;
1133         } else {
1134                 info->irq_cleanup = std_irq_cleanup;
1135                 printk("  Using irq %d\n", info->irq);
1136         }
1137
1138         return rv;
1139 }
1140
1141 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1142 {
1143         unsigned int addr = io->addr_data;
1144
1145         return inb(addr + (offset * io->regspacing));
1146 }
1147
1148 static void port_outb(struct si_sm_io *io, unsigned int offset,
1149                       unsigned char b)
1150 {
1151         unsigned int addr = io->addr_data;
1152
1153         outb(b, addr + (offset * io->regspacing));
1154 }
1155
1156 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1157 {
1158         unsigned int addr = io->addr_data;
1159
1160         return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1161 }
1162
1163 static void port_outw(struct si_sm_io *io, unsigned int offset,
1164                       unsigned char b)
1165 {
1166         unsigned int addr = io->addr_data;
1167
1168         outw(b << io->regshift, addr + (offset * io->regspacing));
1169 }
1170
1171 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1172 {
1173         unsigned int addr = io->addr_data;
1174
1175         return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1176 }
1177
1178 static void port_outl(struct si_sm_io *io, unsigned int offset,
1179                       unsigned char b)
1180 {
1181         unsigned int addr = io->addr_data;
1182
1183         outl(b << io->regshift, addr+(offset * io->regspacing));
1184 }
1185
1186 static void port_cleanup(struct smi_info *info)
1187 {
1188         unsigned int addr = info->io.addr_data;
1189         int          idx;
1190
1191         if (addr) {
1192                 for (idx = 0; idx < info->io_size; idx++) {
1193                         release_region(addr + idx * info->io.regspacing,
1194                                        info->io.regsize);
1195                 }
1196         }
1197 }
1198
1199 static int port_setup(struct smi_info *info)
1200 {
1201         unsigned int addr = info->io.addr_data;
1202         int          idx;
1203
1204         if (!addr)
1205                 return -ENODEV;
1206
1207         info->io_cleanup = port_cleanup;
1208
1209         /* Figure out the actual inb/inw/inl/etc routine to use based
1210            upon the register size. */
1211         switch (info->io.regsize) {
1212         case 1:
1213                 info->io.inputb = port_inb;
1214                 info->io.outputb = port_outb;
1215                 break;
1216         case 2:
1217                 info->io.inputb = port_inw;
1218                 info->io.outputb = port_outw;
1219                 break;
1220         case 4:
1221                 info->io.inputb = port_inl;
1222                 info->io.outputb = port_outl;
1223                 break;
1224         default:
1225                 printk("ipmi_si: Invalid register size: %d\n",
1226                        info->io.regsize);
1227                 return -EINVAL;
1228         }
1229
1230         /* Some BIOSes reserve disjoint I/O regions in their ACPI
1231          * tables.  This causes problems when trying to register the
1232          * entire I/O region.  Therefore we must register each I/O
1233          * port separately.
1234          */
1235         for (idx = 0; idx < info->io_size; idx++) {
1236                 if (request_region(addr + idx * info->io.regspacing,
1237                                    info->io.regsize, DEVICE_NAME) == NULL) {
1238                         /* Undo allocations */
1239                         while (idx--) {
1240                                 release_region(addr + idx * info->io.regspacing,
1241                                                info->io.regsize);
1242                         }
1243                         return -EIO;
1244                 }
1245         }
1246         return 0;
1247 }
1248
1249 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1250 {
1251         return readb((io->addr)+(offset * io->regspacing));
1252 }
1253
1254 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1255                      unsigned char b)
1256 {
1257         writeb(b, (io->addr)+(offset * io->regspacing));
1258 }
1259
1260 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1261 {
1262         return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1263                 & 0xff;
1264 }
1265
1266 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1267                      unsigned char b)
1268 {
1269         writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
1270 }
1271
1272 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1273 {
1274         return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1275                 & 0xff;
1276 }
1277
1278 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1279                      unsigned char b)
1280 {
1281         writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1282 }
1283
1284 #ifdef readq
1285 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1286 {
1287         return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
1288                 & 0xff;
1289 }
1290
1291 static void mem_outq(struct si_sm_io *io, unsigned int offset,
1292                      unsigned char b)
1293 {
1294         writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
1295 }
1296 #endif
1297
1298 static void mem_cleanup(struct smi_info *info)
1299 {
1300         unsigned long addr = info->io.addr_data;
1301         int           mapsize;
1302
1303         if (info->io.addr) {
1304                 iounmap(info->io.addr);
1305
1306                 mapsize = ((info->io_size * info->io.regspacing)
1307                            - (info->io.regspacing - info->io.regsize));
1308
1309                 release_mem_region(addr, mapsize);
1310         }
1311 }
1312
1313 static int mem_setup(struct smi_info *info)
1314 {
1315         unsigned long addr = info->io.addr_data;
1316         int           mapsize;
1317
1318         if (!addr)
1319                 return -ENODEV;
1320
1321         info->io_cleanup = mem_cleanup;
1322
1323         /* Figure out the actual readb/readw/readl/etc routine to use based
1324            upon the register size. */
1325         switch (info->io.regsize) {
1326         case 1:
1327                 info->io.inputb = intf_mem_inb;
1328                 info->io.outputb = intf_mem_outb;
1329                 break;
1330         case 2:
1331                 info->io.inputb = intf_mem_inw;
1332                 info->io.outputb = intf_mem_outw;
1333                 break;
1334         case 4:
1335                 info->io.inputb = intf_mem_inl;
1336                 info->io.outputb = intf_mem_outl;
1337                 break;
1338 #ifdef readq
1339         case 8:
1340                 info->io.inputb = mem_inq;
1341                 info->io.outputb = mem_outq;
1342                 break;
1343 #endif
1344         default:
1345                 printk("ipmi_si: Invalid register size: %d\n",
1346                        info->io.regsize);
1347                 return -EINVAL;
1348         }
1349
1350         /* Calculate the total amount of memory to claim.  This is an
1351          * unusual looking calculation, but it avoids claiming any
1352          * more memory than it has to.  It will claim everything
1353          * from the first address to the end of the last full
1354          * register. */
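        /* For example, io_size = 3, regspacing = 4, and regsize = 1 claim
         * 3*4 - (4 - 1) = 9 bytes, covering offsets 0 through 8. */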
1355         mapsize = ((info->io_size * info->io.regspacing)
1356                    - (info->io.regspacing - info->io.regsize));
1357
1358         if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1359                 return -EIO;
1360
1361         info->io.addr = ioremap(addr, mapsize);
1362         if (info->io.addr == NULL) {
1363                 release_mem_region(addr, mapsize);
1364                 return -EIO;
1365         }
1366         return 0;
1367 }
1368
1369 /*
1370  * Parms come in as <op1>[:op2[:op3...]].  ops are:
1371  *   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
1372  * Options are:
1373  *   rsp=<regspacing>
1374  *   rsi=<regsize>
1375  *   rsh=<regshift>
1376  *   irq=<irq>
1377  *   ipmb=<ipmb addr>
1378  */
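/*
 * For example, writing the (illustrative) string
 *   add,kcs,i/o,0xca2,rsp=1,irq=0
 * to /sys/module/ipmi_si/parameters/hotmod adds a KCS interface at I/O port
 * 0xca2 using the grammar above.
 */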
1379 enum hotmod_op { HM_ADD, HM_REMOVE };
1380 struct hotmod_vals {
1381         char *name;
1382         int  val;
1383 };
1384 static struct hotmod_vals hotmod_ops[] = {
1385         { "add",        HM_ADD },
1386         { "remove",     HM_REMOVE },
1387         { NULL }
1388 };
1389 static struct hotmod_vals hotmod_si[] = {
1390         { "kcs",        SI_KCS },
1391         { "smic",       SI_SMIC },
1392         { "bt",         SI_BT },
1393         { NULL }
1394 };
1395 static struct hotmod_vals hotmod_as[] = {
1396         { "mem",        IPMI_MEM_ADDR_SPACE },
1397         { "i/o",        IPMI_IO_ADDR_SPACE },
1398         { NULL }
1399 };
1400
1401 static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1402 {
1403         char *s;
1404         int  i;
1405
1406         s = strchr(*curr, ',');
1407         if (!s) {
1408                 printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
1409                 return -EINVAL;
1410         }
1411         *s = '\0';
1412         s++;
1413         for (i = 0; v[i].name; i++) {
1414                 if (strcmp(*curr, v[i].name) == 0) {
1415                         *val = v[i].val;
1416                         *curr = s;
1417                         return 0;
1418                 }
1419         }
1420
1421         printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
1422         return -EINVAL;
1423 }
1424
1425 static int check_hotmod_int_op(const char *curr, const char *option,
1426                                const char *name, int *val)
1427 {
1428         char *n;
1429
1430         if (strcmp(curr, name) == 0) {
1431                 if (!option) {
1432                         printk(KERN_WARNING PFX
1433                                "No option given for '%s'\n",
1434                                curr);
1435                         return -EINVAL;
1436                 }
1437                 *val = simple_strtoul(option, &n, 0);
1438                 if ((*n != '\0') || (*option == '\0')) {
1439                         printk(KERN_WARNING PFX
1440                                "Bad option given for '%s'\n",
1441                                curr);
1442                         return -EINVAL;
1443                 }
1444                 return 1;
1445         }
1446         return 0;
1447 }
1448
1449 static int hotmod_handler(const char *val, struct kernel_param *kp)
1450 {
1451         char *str = kstrdup(val, GFP_KERNEL);
1452         int  rv;
1453         char *next, *curr, *s, *n, *o;
1454         enum hotmod_op op;
1455         enum si_type si_type;
1456         int  addr_space;
1457         unsigned long addr;
1458         int regspacing;
1459         int regsize;
1460         int regshift;
1461         int irq;
1462         int ipmb;
1463         int ival;
1464         int len;
1465         struct smi_info *info;
1466
1467         if (!str)
1468                 return -ENOMEM;
1469
1470         /* Kill any trailing spaces, as we can get a "\n" from echo. */
1471         len = strlen(str);
1472         ival = len - 1;
1473         while ((ival >= 0) && isspace(str[ival])) {
1474                 str[ival] = '\0';
1475                 ival--;
1476         }
1477
1478         for (curr = str; curr; curr = next) {
1479                 regspacing = 1;
1480                 regsize = 1;
1481                 regshift = 0;
1482                 irq = 0;
1483                 ipmb = 0x20;
1484
1485                 next = strchr(curr, ':');
1486                 if (next) {
1487                         *next = '\0';
1488                         next++;
1489                 }
1490
1491                 rv = parse_str(hotmod_ops, &ival, "operation", &curr);
1492                 if (rv)
1493                         break;
1494                 op = ival;
1495
1496                 rv = parse_str(hotmod_si, &ival, "interface type", &curr);
1497                 if (rv)
1498                         break;
1499                 si_type = ival;
1500
1501                 rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
1502                 if (rv)
1503                         break;
1504
1505                 s = strchr(curr, ',');
1506                 if (s) {
1507                         *s = '\0';
1508                         s++;
1509                 }
1510                 addr = simple_strtoul(curr, &n, 0);
1511                 if ((*n != '\0') || (*curr == '\0')) {
1512                         printk(KERN_WARNING PFX "Invalid hotmod address"
1513                                " '%s'\n", curr);
1514                         break;
1515                 }
1516
1517                 while (s) {
1518                         curr = s;
1519                         s = strchr(curr, ',');
1520                         if (s) {
1521                                 *s = '\0';
1522                                 s++;
1523                         }
1524                         o = strchr(curr, '=');
1525                         if (o) {
1526                                 *o = '\0';
1527                                 o++;
1528                         }
1529                         rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
1530                         if (rv < 0)
1531                                 goto out;
1532                         else if (rv)
1533                                 continue;
1534                         rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
1535                         if (rv < 0)
1536                                 goto out;
1537                         else if (rv)
1538                                 continue;
1539                         rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
1540                         if (rv < 0)
1541                                 goto out;
1542                         else if (rv)
1543                                 continue;
1544                         rv = check_hotmod_int_op(curr, o, "irq", &irq);
1545                         if (rv < 0)
1546                                 goto out;
1547                         else if (rv)
1548                                 continue;
1549                         rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
1550                         if (rv < 0)
1551                                 goto out;
1552                         else if (rv)
1553                                 continue;
1554
1555                         rv = -EINVAL;
1556                         printk(KERN_WARNING PFX
1557                                "Invalid hotmod option '%s'\n",
1558                                curr);
1559                         goto out;
1560                 }
1561
1562                 if (op == HM_ADD) {
1563                         info = kzalloc(sizeof(*info), GFP_KERNEL);
1564                         if (!info) {
1565                                 rv = -ENOMEM;
1566                                 goto out;
1567                         }
1568
1569                         info->addr_source = "hotmod";
1570                         info->si_type = si_type;
1571                         info->io.addr_data = addr;
1572                         info->io.addr_type = addr_space;
1573                         if (addr_space == IPMI_MEM_ADDR_SPACE)
1574                                 info->io_setup = mem_setup;
1575                         else
1576                                 info->io_setup = port_setup;
1577
1578                         info->io.addr = NULL;
1579                         info->io.regspacing = regspacing;
1580                         if (!info->io.regspacing)
1581                                 info->io.regspacing = DEFAULT_REGSPACING;
1582                         info->io.regsize = regsize;
1583                         if (!info->io.regsize)
1584                                 info->io.regsize = DEFAULT_REGSPACING;
1585                         info->io.regshift = regshift;
1586                         info->irq = irq;
1587                         if (info->irq)
1588                                 info->irq_setup = std_irq_setup;
1589                         info->slave_addr = ipmb;
1590
1591                         try_smi_init(info);
1592                 } else {
1593                         /* remove */
1594                         struct smi_info *e, *tmp_e;
1595
1596                         mutex_lock(&smi_infos_lock);
1597                         list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
1598                                 if (e->io.addr_type != addr_space)
1599                                         continue;
1600                                 if (e->si_type != si_type)
1601                                         continue;
1602                                 if (e->io.addr_data == addr)
1603                                         cleanup_one_si(e);
1604                         }
1605                         mutex_unlock(&smi_infos_lock);
1606                 }
1607         }
1608         rv = len;
1609  out:
1610         kfree(str);
1611         return rv;
1612 }
1613
1614 static __devinit void hardcode_find_bmc(void)
1615 {
1616         int             i;
1617         struct smi_info *info;
1618
1619         for (i = 0; i < SI_MAX_PARMS; i++) {
1620                 if (!ports[i] && !addrs[i])
1621                         continue;
1622
1623                 info = kzalloc(sizeof(*info), GFP_KERNEL);
1624                 if (!info)
1625                         return;
1626
1627                 info->addr_source = "hardcoded";
1628
1629                 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1630                         info->si_type = SI_KCS;
1631                 } else if (strcmp(si_type[i], "smic") == 0) {
1632                         info->si_type = SI_SMIC;
1633                 } else if (strcmp(si_type[i], "bt") == 0) {
1634                         info->si_type = SI_BT;
1635                 } else {
1636                         printk(KERN_WARNING
1637                                "ipmi_si: Interface type specified "
1638                                "for interface %d, was invalid: %s\n",
1639                                i, si_type[i]);
1640                         kfree(info);
1641                         continue;
1642                 }
1643
1644                 if (ports[i]) {
1645                         /* An I/O port */
1646                         info->io_setup = port_setup;
1647                         info->io.addr_data = ports[i];
1648                         info->io.addr_type = IPMI_IO_ADDR_SPACE;
1649                 } else if (addrs[i]) {
1650                         /* A memory-mapped address */
1651                         info->io_setup = mem_setup;
1652                         info->io.addr_data = addrs[i];
1653                         info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1654                 } else {
1655                         printk(KERN_WARNING
1656                                "ipmi_si: Interface type specified "
1657                                "for interface %d, "
1658                                "but port and address were not set or "
1659                                "set to zero.\n", i);
1660                         kfree(info);
1661                         continue;
1662                 }
1663
1664                 info->io.addr = NULL;
1665                 info->io.regspacing = regspacings[i];
1666                 if (!info->io.regspacing)
1667                         info->io.regspacing = DEFAULT_REGSPACING;
1668                 info->io.regsize = regsizes[i];
1669                 if (!info->io.regsize)
1670                         info->io.regsize = DEFAULT_REGSPACING;
1671                 info->io.regshift = regshifts[i];
1672                 info->irq = irqs[i];
1673                 if (info->irq)
1674                         info->irq_setup = std_irq_setup;
1675
1676                 try_smi_init(info);
1677         }
1678 }
1679
1680 #ifdef CONFIG_ACPI
1681
1682 #include <linux/acpi.h>
1683
1684 /* Once we get an ACPI failure, we don't try any more, because we go
1685    through the tables sequentially.  Once we don't find a table, there
1686    are no more. */
1687 static int acpi_failure;
1688
1689 /* For GPE-type interrupts. */
1690 static u32 ipmi_acpi_gpe(void *context)
1691 {
1692         struct smi_info *smi_info = context;
1693         unsigned long   flags;
1694 #ifdef DEBUG_TIMING
1695         struct timeval t;
1696 #endif
1697
1698         spin_lock_irqsave(&(smi_info->si_lock), flags);
1699
1700         spin_lock(&smi_info->count_lock);
1701         smi_info->interrupts++;
1702         spin_unlock(&smi_info->count_lock);
1703
1704         if (atomic_read(&smi_info->stop_operation))
1705                 goto out;
1706
1707 #ifdef DEBUG_TIMING
1708         do_gettimeofday(&t);
1709         printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1710 #endif
1711         smi_event_handler(smi_info, 0);
1712  out:
1713         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1714
1715         return ACPI_INTERRUPT_HANDLED;
1716 }
1717
1718 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1719 {
1720         if (!info->irq)
1721                 return;
1722
1723         acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1724 }
1725
1726 static int acpi_gpe_irq_setup(struct smi_info *info)
1727 {
1728         acpi_status status;
1729
1730         if (!info->irq)
1731                 return 0;
1732
1733         /* FIXME - is level triggered right? */
1734         status = acpi_install_gpe_handler(NULL,
1735                                           info->irq,
1736                                           ACPI_GPE_LEVEL_TRIGGERED,
1737                                           &ipmi_acpi_gpe,
1738                                           info);
1739         if (status != AE_OK) {
1740                 printk(KERN_WARNING
1741                        "ipmi_si: %s unable to claim ACPI GPE %d,"
1742                        " running polled\n",
1743                        DEVICE_NAME, info->irq);
1744                 info->irq = 0;
1745                 return -EINVAL;
1746         } else {
1747                 info->irq_cleanup = acpi_gpe_irq_cleanup;
1748                 printk("  Using ACPI GPE %d\n", info->irq);
1749                 return 0;
1750         }
1751 }
1752
1753 /*
1754  * Defined at
1755  * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1756  */
1757 struct SPMITable {
1758         s8      Signature[4];
1759         u32     Length;
1760         u8      Revision;
1761         u8      Checksum;
1762         s8      OEMID[6];
1763         s8      OEMTableID[8];
1764         s8      OEMRevision[4];
1765         s8      CreatorID[4];
1766         s8      CreatorRevision[4];
1767         u8      InterfaceType;
1768         u8      IPMIlegacy;
1769         s16     SpecificationRevision;
1770
1771         /*
1772          * Bit 0 - SCI interrupt supported
1773          * Bit 1 - I/O APIC/SAPIC
1774          */
1775         u8      InterruptType;
1776
1777         /* If bit 0 of InterruptType is set, then this is the SCI
1778            interrupt in the GPEx_STS register. */
1779         u8      GPE;
1780
1781         s16     Reserved;
1782
1783         /* If bit 1 of InterruptType is set, then this is the I/O
1784            APIC/SAPIC interrupt. */
1785         u32     GlobalSystemInterrupt;
1786
1787         /* The actual register address. */
1788         struct acpi_generic_address addr;
1789
1790         u8      UID[4];
1791
1792         s8      spmi_id[1]; /* A '\0' terminated array starts here. */
1793 };
1794
1795 static __devinit int try_init_acpi(struct SPMITable *spmi)
1796 {
1797         struct smi_info  *info;
1798         u8               addr_space;
1799
1800         if (spmi->IPMIlegacy != 1) {
1801                 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1802                 return -ENODEV;
1803         }
1804
1805         if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1806                 addr_space = IPMI_MEM_ADDR_SPACE;
1807         else
1808                 addr_space = IPMI_IO_ADDR_SPACE;
1809
1810         info = kzalloc(sizeof(*info), GFP_KERNEL);
1811         if (!info) {
1812                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1813                 return -ENOMEM;
1814         }
1815
1816         info->addr_source = "ACPI";
1817
1818         /* Figure out the interface type. */
1819         switch (spmi->InterfaceType)
1820         {
1821         case 1: /* KCS */
1822                 info->si_type = SI_KCS;
1823                 break;
1824         case 2: /* SMIC */
1825                 info->si_type = SI_SMIC;
1826                 break;
1827         case 3: /* BT */
1828                 info->si_type = SI_BT;
1829                 break;
1830         default:
1831                 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1832                         spmi->InterfaceType);
1833                 kfree(info);
1834                 return -EIO;
1835         }
1836
1837         if (spmi->InterruptType & 1) {
1838                 /* We've got a GPE interrupt. */
1839                 info->irq = spmi->GPE;
1840                 info->irq_setup = acpi_gpe_irq_setup;
1841         } else if (spmi->InterruptType & 2) {
1842                 /* We've got an APIC/SAPIC interrupt. */
1843                 info->irq = spmi->GlobalSystemInterrupt;
1844                 info->irq_setup = std_irq_setup;
1845         } else {
1846                 /* Use the default interrupt setting. */
1847                 info->irq = 0;
1848                 info->irq_setup = NULL;
1849         }
1850
1851         if (spmi->addr.register_bit_width) {
1852                 /* A (hopefully) properly formed register bit width. */
1853                 info->io.regspacing = spmi->addr.register_bit_width / 8;
1854         } else {
1855                 info->io.regspacing = DEFAULT_REGSPACING;
1856         }
1857         info->io.regsize = info->io.regspacing;
1858         info->io.regshift = spmi->addr.register_bit_offset;
1859
1860         if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1861                 info->io_setup = mem_setup;
1862                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1863         } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1864                 info->io_setup = port_setup;
1865                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1866         } else {
1867                 kfree(info);
1868                 printk("ipmi_si: Unknown ACPI I/O Address type\n");
1869                 return -EIO;
1870         }
1871         info->io.addr_data = spmi->addr.address;
1872
1873         try_smi_init(info);
1874
1875         return 0;
1876 }
1877
1878 static __devinit void acpi_find_bmc(void)
1879 {
1880         acpi_status      status;
1881         struct SPMITable *spmi;
1882         int              i;
1883
1884         if (acpi_disabled)
1885                 return;
1886
1887         if (acpi_failure)
1888                 return;
1889
1890         for (i = 0; ; i++) {
1891                 status = acpi_get_firmware_table("SPMI", i+1,
1892                                                  ACPI_LOGICAL_ADDRESSING,
1893                                                  (struct acpi_table_header **)
1894                                                  &spmi);
1895                 if (status != AE_OK)
1896                         return;
1897
1898                 try_init_acpi(spmi);
1899         }
1900 }
1901 #endif
1902
1903 #ifdef CONFIG_DMI
1904 struct dmi_ipmi_data
1905 {
1906         u8              type;
1907         u8              addr_space;
1908         unsigned long   base_addr;
1909         u8              irq;
1910         u8              offset;
1911         u8              slave_addr;
1912 };
1913
1914 static int __devinit decode_dmi(struct dmi_header *dm,
1915                                 struct dmi_ipmi_data *dmi)
1916 {
1917         u8              *data = (u8 *)dm;
1918         unsigned long   base_addr;
1919         u8              reg_spacing;
1920         u8              len = dm->length;
1921
1922         dmi->type = data[4];
1923
1924         memcpy(&base_addr, data+8, sizeof(unsigned long));
1925         if (len >= 0x11) {
1926                 if (base_addr & 1) {
1927                         /* I/O */
1928                         base_addr &= 0xFFFE;
1929                         dmi->addr_space = IPMI_IO_ADDR_SPACE;
1930                 }
1931                 else {
1932                         /* Memory */
1933                         dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1934                 }
1935                 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1936                    is odd. */
1937                 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
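                /*
                 * Illustrative example: base_addr 0x0ca2 with bit 4 of
                 * data[0x10] set decodes to an I/O address of 0x0ca3.
                 */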
1938
1939                 dmi->irq = data[0x11];
1940
1941                 /* The top two bits of byte 0x10 hold the register spacing. */
1942                 reg_spacing = (data[0x10] & 0xC0) >> 6;
1943                 switch (reg_spacing) {
1944                 case 0x00: /* Byte boundaries */
1945                         dmi->offset = 1;
1946                         break;
1947                 case 0x01: /* 32-bit boundaries */
1948                         dmi->offset = 4;
1949                         break;
1950                 case 0x02: /* 16-byte boundaries */
1951                         dmi->offset = 16;
1952                         break;
1953                 default:
1954                         /* Some other interface, just ignore it. */
1955                         return -EIO;
1956                 }
1957         } else {
1958                 /* Old DMI spec. */
1959                 /* Note that technically, the lower bit of the base
1960                  * address should be 1 if the address is I/O and 0 if
1961                  * the address is in memory.  However, many systems get that
1962                  * wrong (and all that I have seen are I/O), so we just
1963                  * ignore that bit and assume I/O.  Systems that use
1964                  * memory should use the newer spec, anyway. */
1965                 dmi->base_addr = base_addr & 0xfffe;
1966                 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1967                 dmi->offset = 1;
1968         }
1969
1970         dmi->slave_addr = data[6];
1971
1972         return 0;
1973 }
1974
1975 static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
1976 {
1977         struct smi_info *info;
1978
1979         info = kzalloc(sizeof(*info), GFP_KERNEL);
1980         if (!info) {
1981                 printk(KERN_ERR
1982                        "ipmi_si: Could not allocate SI data\n");
1983                 return;
1984         }
1985
1986         info->addr_source = "SMBIOS";
1987
1988         switch (ipmi_data->type) {
1989         case 0x01: /* KCS */
1990                 info->si_type = SI_KCS;
1991                 break;
1992         case 0x02: /* SMIC */
1993                 info->si_type = SI_SMIC;
1994                 break;
1995         case 0x03: /* BT */
1996                 info->si_type = SI_BT;
1997                 break;
1998         default:
1999                 return;
2000         }
2001
2002         switch (ipmi_data->addr_space) {
2003         case IPMI_MEM_ADDR_SPACE:
2004                 info->io_setup = mem_setup;
2005                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2006                 break;
2007
2008         case IPMI_IO_ADDR_SPACE:
2009                 info->io_setup = port_setup;
2010                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2011                 break;
2012
2013         default:
2014                 kfree(info);
2015                 printk(KERN_WARNING
2016                        "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
2017                        ipmi_data->addr_space);
2018                 return;
2019         }
2020         info->io.addr_data = ipmi_data->base_addr;
2021
2022         info->io.regspacing = ipmi_data->offset;
2023         if (!info->io.regspacing)
2024                 info->io.regspacing = DEFAULT_REGSPACING;
2025         info->io.regsize = DEFAULT_REGSPACING;
2026         info->io.regshift = 0;
2027
2028         info->slave_addr = ipmi_data->slave_addr;
2029
2030         info->irq = ipmi_data->irq;
2031         if (info->irq)
2032                 info->irq_setup = std_irq_setup;
2033
2034         try_smi_init(info);
2035 }
2036
2037 static void __devinit dmi_find_bmc(void)
2038 {
2039         struct dmi_device    *dev = NULL;
2040         struct dmi_ipmi_data data;
2041         int                  rv;
2042
2043         while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
2044                 memset(&data, 0, sizeof(data));
2045                 rv = decode_dmi((struct dmi_header *) dev->device_data, &data);
2046                 if (!rv)
2047                         try_init_dmi(&data);
2048         }
2049 }
2050 #endif /* CONFIG_DMI */
2051
2052 #ifdef CONFIG_PCI
2053
2054 #define PCI_ERMC_CLASSCODE              0x0C0700
2055 #define PCI_ERMC_CLASSCODE_MASK         0xffffff00
2056 #define PCI_ERMC_CLASSCODE_TYPE_MASK    0xff
2057 #define PCI_ERMC_CLASSCODE_TYPE_SMIC    0x00
2058 #define PCI_ERMC_CLASSCODE_TYPE_KCS     0x01
2059 #define PCI_ERMC_CLASSCODE_TYPE_BT      0x02
2060
2061 #define PCI_HP_VENDOR_ID    0x103C
2062 #define PCI_MMC_DEVICE_ID   0x121A
2063 #define PCI_MMC_ADDR_CW     0x10
2064
2065 static void ipmi_pci_cleanup(struct smi_info *info)
2066 {
2067         struct pci_dev *pdev = info->addr_source_data;
2068
2069         pci_disable_device(pdev);
2070 }
2071
2072 static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2073                                     const struct pci_device_id *ent)
2074 {
2075         int rv;
2076         int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
2077         struct smi_info *info;
2078         int first_reg_offset = 0;
2079
2080         info = kzalloc(sizeof(*info), GFP_KERNEL);
2081         if (!info)
2082                 return -ENOMEM;
2083
2084         info->addr_source = "PCI";
2085
2086         switch (class_type) {
2087         case PCI_ERMC_CLASSCODE_TYPE_SMIC:
2088                 info->si_type = SI_SMIC;
2089                 break;
2090
2091         case PCI_ERMC_CLASSCODE_TYPE_KCS:
2092                 info->si_type = SI_KCS;
2093                 break;
2094
2095         case PCI_ERMC_CLASSCODE_TYPE_BT:
2096                 info->si_type = SI_BT;
2097                 break;
2098
2099         default:
2100                 kfree(info);
2101                 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
2102                        pci_name(pdev), class_type);
2103                 return -ENOMEM;
2104         }
2105
2106         rv = pci_enable_device(pdev);
2107         if (rv) {
2108                 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
2109                        pci_name(pdev));
2110                 kfree(info);
2111                 return rv;
2112         }
2113
2114         info->addr_source_cleanup = ipmi_pci_cleanup;
2115         info->addr_source_data = pdev;
2116
2117         if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
2118                 first_reg_offset = 1;
2119
2120         if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
2121                 info->io_setup = port_setup;
2122                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2123         } else {
2124                 info->io_setup = mem_setup;
2125                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2126         }
2127         info->io.addr_data = pci_resource_start(pdev, 0);
2128
2129         info->io.regspacing = DEFAULT_REGSPACING;
2130         info->io.regsize = DEFAULT_REGSPACING;
2131         info->io.regshift = 0;
2132
2133         info->irq = pdev->irq;
2134         if (info->irq)
2135                 info->irq_setup = std_irq_setup;
2136
2137         info->dev = &pdev->dev;
2138
2139         return try_smi_init(info);
2140 }
2141
2142 static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
2143 {
2144 }
2145
2146 #ifdef CONFIG_PM
2147 static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2148 {
2149         return 0;
2150 }
2151
2152 static int ipmi_pci_resume(struct pci_dev *pdev)
2153 {
2154         return 0;
2155 }
2156 #endif
2157
2158 static struct pci_device_id ipmi_pci_devices[] = {
2159         { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
2160         { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
             { 0, }
2161 };
2162 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2163
2164 static struct pci_driver ipmi_pci_driver = {
2165         .name =         DEVICE_NAME,
2166         .id_table =     ipmi_pci_devices,
2167         .probe =        ipmi_pci_probe,
2168         .remove =       __devexit_p(ipmi_pci_remove),
2169 #ifdef CONFIG_PM
2170         .suspend =      ipmi_pci_suspend,
2171         .resume =       ipmi_pci_resume,
2172 #endif
2173 };
2174 #endif /* CONFIG_PCI */
2175
2176
2177 static int try_get_dev_id(struct smi_info *smi_info)
2178 {
2179         unsigned char         msg[2];
2180         unsigned char         *resp;
2181         unsigned long         resp_len;
2182         enum si_sm_result     smi_result;
2183         int                   rv = 0;
2184
2185         resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
2186         if (!resp)
2187                 return -ENOMEM;
2188
2189         /* Do a Get Device ID command, since it comes back with some
2190            useful info. */
2191         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2192         msg[1] = IPMI_GET_DEVICE_ID_CMD;
2193         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2194
2195         smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
2196         for (;;)
2197         {
2198                 if (smi_result == SI_SM_CALL_WITH_DELAY ||
2199                     smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
2200                         schedule_timeout_uninterruptible(1);
2201                         smi_result = smi_info->handlers->event(
2202                                 smi_info->si_sm, 100);
2203                 }
2204                 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
2205                 {
2206                         smi_result = smi_info->handlers->event(
2207                                 smi_info->si_sm, 0);
2208                 }
2209                 else
2210                         break;
2211         }
2212         if (smi_result == SI_SM_HOSED) {
2213                 /* We couldn't get the state machine to run, so whatever's at
2214                    the port is probably not an IPMI SMI interface. */
2215                 rv = -ENODEV;
2216                 goto out;
2217         }
2218
2219         /* Otherwise, we got some data. */
2220         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2221                                                   resp, IPMI_MAX_MSG_LENGTH);
2222         if (resp_len < 14) {
2223                 /* That's odd, it should be longer. */
2224                 rv = -EINVAL;
2225                 goto out;
2226         }
2227
2228         if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
2229                 /* That's odd, it shouldn't be able to fail. */
2230                 rv = -EINVAL;
2231                 goto out;
2232         }
2233
2234         /* Record info from the get device id, in case we need it. */
2235         ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id);
2236
2237  out:
2238         kfree(resp);
2239         return rv;
2240 }
2241
2242 static int type_file_read_proc(char *page, char **start, off_t off,
2243                                int count, int *eof, void *data)
2244 {
2245         struct smi_info *smi = data;
2246
2247         return sprintf(page, "%s\n", si_to_str[smi->si_type]);
2248 }
2249
2250 static int stat_file_read_proc(char *page, char **start, off_t off,
2251                                int count, int *eof, void *data)
2252 {
2253         char            *out = (char *) page;
2254         struct smi_info *smi = data;
2255
2256         out += sprintf(out, "interrupts_enabled:    %d\n",
2257                        smi->irq && !smi->interrupt_disabled);
2258         out += sprintf(out, "short_timeouts:        %ld\n",
2259                        smi->short_timeouts);
2260         out += sprintf(out, "long_timeouts:         %ld\n",
2261                        smi->long_timeouts);
2262         out += sprintf(out, "timeout_restarts:      %ld\n",
2263                        smi->timeout_restarts);
2264         out += sprintf(out, "idles:                 %ld\n",
2265                        smi->idles);
2266         out += sprintf(out, "interrupts:            %ld\n",
2267                        smi->interrupts);
2268         out += sprintf(out, "attentions:            %ld\n",
2269                        smi->attentions);
2270         out += sprintf(out, "flag_fetches:          %ld\n",
2271                        smi->flag_fetches);
2272         out += sprintf(out, "hosed_count:           %ld\n",
2273                        smi->hosed_count);
2274         out += sprintf(out, "complete_transactions: %ld\n",
2275                        smi->complete_transactions);
2276         out += sprintf(out, "events:                %ld\n",
2277                        smi->events);
2278         out += sprintf(out, "watchdog_pretimeouts:  %ld\n",
2279                        smi->watchdog_pretimeouts);
2280         out += sprintf(out, "incoming_messages:     %ld\n",
2281                        smi->incoming_messages);
2282
2283         return out - page;
2284 }
2285
2286 static int param_read_proc(char *page, char **start, off_t off,
2287                            int count, int *eof, void *data)
2288 {
2289         struct smi_info *smi = data;
2290
2291         return sprintf(page,
2292                        "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2293                        si_to_str[smi->si_type],
2294                        addr_space_to_str[smi->io.addr_type],
2295                        smi->io.addr_data,
2296                        smi->io.regspacing,
2297                        smi->io.regsize,
2298                        smi->io.regshift,
2299                        smi->irq,
2300                        smi->slave_addr);
2301 }
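/*
 * Purely as an illustration of the format above (the exact interface-type
 * and address-space strings come from si_to_str[] and addr_space_to_str[],
 * defined earlier in this file), a default KCS interface might show up in
 * the "params" proc file as:
 *
 *   kcs,i/o,0xca2,rsp=1,rsi=1,rsh=0,irq=0,ipmb=32
 */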
2302
2303 /*
2304  * oem_data_avail_to_receive_msg_avail
2305  * @info - smi_info structure with msg_flags set
2306  *
2307  * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2308  * Returns 1 indicating need to re-run handle_flags().
2309  */
2310 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2311 {
2312         smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2313                                 RECEIVE_MSG_AVAIL);
2314         return 1;
2315 }
2316
2317 /*
2318  * setup_dell_poweredge_oem_data_handler
2319  * @info - smi_info.device_id must be populated
2320  *
2321  * Systems that match, but have firmware version < 1.40 may assert
2322  * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2323  * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
2324  * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2325  * as RECEIVE_MSG_AVAIL instead.
2326  *
2327  * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2328  * assert the OEM[012] bits, and if it did, the driver would have to
2329  * change to handle that properly, we don't actually check for the
2330  * firmware version.
2331  * Device ID = 0x20                BMC on PowerEdge 8G servers
2332  * Device Revision = 0x80
2333  * Firmware Revision1 = 0x01       BMC version 1.40
2334  * Firmware Revision2 = 0x40       BCD encoded
2335  * IPMI Version = 0x51             IPMI 1.5
2336  * Manufacturer ID = A2 02 00      Dell IANA
2337  *
2338  * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2339  * OEM0_DATA_AVAIL, which needs to be treated as RECEIVE_MSG_AVAIL.
2340  *
2341  */
2342 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
2343 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2344 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2345 #define DELL_IANA_MFR_ID 0x0002a2
2346 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2347 {
2348         struct ipmi_device_id *id = &smi_info->device_id;
2349         if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2350                 if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
2351                     id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2352                     id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2353                         smi_info->oem_data_avail_handler =
2354                                 oem_data_avail_to_receive_msg_avail;
2355                 }
2356                 else if (ipmi_version_major(id) < 1 ||
2357                          (ipmi_version_major(id) == 1 &&
2358                           ipmi_version_minor(id) < 5)) {
2359                         smi_info->oem_data_avail_handler =
2360                                 oem_data_avail_to_receive_msg_avail;
2361                 }
2362         }
2363 }
2364
2365 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2366 static void return_hosed_msg_badsize(struct smi_info *smi_info)
2367 {
2368         struct ipmi_smi_msg *msg = smi_info->curr_msg;
2369
2370         /* Make it a response */
2371         msg->rsp[0] = msg->data[0] | 4;
2372         msg->rsp[1] = msg->data[1];
2373         msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2374         msg->rsp_size = 3;
2375         smi_info->curr_msg = NULL;
2376         deliver_recv_msg(smi_info, msg);
2377 }
2378
2379 /*
2380  * dell_poweredge_bt_xaction_handler
2381  * @info - smi_info.device_id must be populated
2382  *
2383  * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2384  * not respond to a Get SDR command if the length of the data
2385  * requested is exactly 0x3A, which leads to command timeouts and no
2386  * data returned.  This intercepts such commands, and causes userspace
2387  * callers to try again with a different-sized buffer, which succeeds.
2388  */
2389
2390 #define STORAGE_NETFN 0x0A
2391 #define STORAGE_CMD_GET_SDR 0x23
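/*
 * As a concrete illustration of the check below: a BT request whose first
 * byte is STORAGE_NETFN << 2 (0x28), whose command byte is
 * STORAGE_CMD_GET_SDR (0x23), and whose requested byte count (data[7]) is
 * 0x3A gets turned into a 0xCA "cannot return requested length" response
 * instead of being sent to the BMC.
 */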
2392 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2393                                              unsigned long unused,
2394                                              void *in)
2395 {
2396         struct smi_info *smi_info = in;
2397         unsigned char *data = smi_info->curr_msg->data;
2398         unsigned int size   = smi_info->curr_msg->data_size;
2399         if (size >= 8 &&
2400             (data[0]>>2) == STORAGE_NETFN &&
2401             data[1] == STORAGE_CMD_GET_SDR &&
2402             data[7] == 0x3A) {
2403                 return_hosed_msg_badsize(smi_info);
2404                 return NOTIFY_STOP;
2405         }
2406         return NOTIFY_DONE;
2407 }
2408
2409 static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2410         .notifier_call  = dell_poweredge_bt_xaction_handler,
2411 };
2412
2413 /*
2414  * setup_dell_poweredge_bt_xaction_handler
2415  * @info - smi_info.device_id must be filled in already
2416  *
2417  * Registers the BT transaction notifier for Dell PowerEdge systems
2418  * when we know it is needed.
2419  */
2420 static void
2421 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2422 {
2423         struct ipmi_device_id *id = &smi_info->device_id;
2424         if (id->manufacturer_id == DELL_IANA_MFR_ID &&
2425             smi_info->si_type == SI_BT)
2426                 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2427 }
2428
2429 /*
2430  * setup_oem_data_handler
2431  * @info - smi_info.device_id must be filled in already
2432  *
2433  * Fills in smi_info.oem_data_avail_handler
2434  * when we know what function to use there.
2435  */
2436
2437 static void setup_oem_data_handler(struct smi_info *smi_info)
2438 {
2439         setup_dell_poweredge_oem_data_handler(smi_info);
2440 }
2441
2442 static void setup_xaction_handlers(struct smi_info *smi_info)
2443 {
2444         setup_dell_poweredge_bt_xaction_handler(smi_info);
2445 }
2446
2447 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2448 {
2449         if (smi_info->intf) {
2450                 /* The timer and thread are only running if the
2451                    interface has been started up and registered. */
2452                 if (smi_info->thread != NULL)
2453                         kthread_stop(smi_info->thread);
2454                 del_timer_sync(&smi_info->si_timer);
2455         }
2456 }
2457
2458 static __devinitdata struct ipmi_default_vals
2459 {
2460         int type;
2461         int port;
2462 } ipmi_defaults[] =
2463 {
2464         { .type = SI_KCS, .port = 0xca2 },
2465         { .type = SI_SMIC, .port = 0xca9 },
2466         { .type = SI_BT, .port = 0xe4 },
2467         { .port = 0 }
2468 };
2469
2470 static __devinit void default_find_bmc(void)
2471 {
2472         struct smi_info *info;
2473         int             i;
2474
2475         for (i = 0; ; i++) {
2476                 if (!ipmi_defaults[i].port)
2477                         break;
2478
2479                 info = kzalloc(sizeof(*info), GFP_KERNEL);
2480                 if (!info)
2481                         return;
2482
2483                 info->addr_source = NULL;
2484
2485                 info->si_type = ipmi_defaults[i].type;
2486                 info->io_setup = port_setup;
2487                 info->io.addr_data = ipmi_defaults[i].port;
2488                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2489
2490                 info->io.addr = NULL;
2491                 info->io.regspacing = DEFAULT_REGSPACING;
2492                 info->io.regsize = DEFAULT_REGSPACING;
2493                 info->io.regshift = 0;
2494
2495                 if (try_smi_init(info) == 0) {
2496                         /* Found one... */
2497                         printk(KERN_INFO "ipmi_si: Found default %s state"
2498                                " machine at %s address 0x%lx\n",
2499                                si_to_str[info->si_type],
2500                                addr_space_to_str[info->io.addr_type],
2501                                info->io.addr_data);
2502                         return;
2503                 }
2504         }
2505 }
2506
2507 static int is_new_interface(struct smi_info *info)
2508 {
2509         struct smi_info *e;
2510
2511         list_for_each_entry(e, &smi_infos, link) {
2512                 if (e->io.addr_type != info->io.addr_type)
2513                         continue;
2514                 if (e->io.addr_data == info->io.addr_data)
2515                         return 0;
2516         }
2517
2518         return 1;
2519 }
2520
2521 static int try_smi_init(struct smi_info *new_smi)
2522 {
2523         int rv;
2524
2525         if (new_smi->addr_source) {
2526                 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2527                        " machine at %s address 0x%lx, slave address 0x%x,"
2528                        " irq %d\n",
2529                        new_smi->addr_source,
2530                        si_to_str[new_smi->si_type],
2531                        addr_space_to_str[new_smi->io.addr_type],
2532                        new_smi->io.addr_data,
2533                        new_smi->slave_addr, new_smi->irq);
2534         }
2535
2536         mutex_lock(&smi_infos_lock);
2537         if (!is_new_interface(new_smi)) {
2538                 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2539                 rv = -EBUSY;
2540                 goto out_err;
2541         }
2542
2543         /* So we know not to free it unless we have allocated one. */
2544         new_smi->intf = NULL;
2545         new_smi->si_sm = NULL;
2546         new_smi->handlers = NULL;
2547
2548         switch (new_smi->si_type) {
2549         case SI_KCS:
2550                 new_smi->handlers = &kcs_smi_handlers;
2551                 break;
2552
2553         case SI_SMIC:
2554                 new_smi->handlers = &smic_smi_handlers;
2555                 break;
2556
2557         case SI_BT:
2558                 new_smi->handlers = &bt_smi_handlers;
2559                 break;
2560
2561         default:
2562                 /* No support for anything else yet. */
2563                 rv = -EIO;
2564                 goto out_err;
2565         }
2566
2567         /* Allocate the state machine's data and initialize it. */
2568         new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2569         if (!new_smi->si_sm) {
2570                 printk(" Could not allocate state machine memory\n");
2571                 rv = -ENOMEM;
2572                 goto out_err;
2573         }
2574         new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2575                                                         &new_smi->io);
2576
2577         /* Now that we know the I/O size, we can set up the I/O. */
2578         rv = new_smi->io_setup(new_smi);
2579         if (rv) {
2580                 printk(" Could not set up I/O space\n");
2581                 goto out_err;
2582         }
2583
2584         spin_lock_init(&(new_smi->si_lock));
2585         spin_lock_init(&(new_smi->msg_lock));
2586         spin_lock_init(&(new_smi->count_lock));
2587
2588         /* Do low-level detection first. */
2589         if (new_smi->handlers->detect(new_smi->si_sm)) {
2590                 if (new_smi->addr_source)
2591                         printk(KERN_INFO "ipmi_si: Interface detection"
2592                                " failed\n");
2593                 rv = -ENODEV;
2594                 goto out_err;
2595         }
2596
2597         /* Attempt a get device id command.  If it fails, we probably
2598            don't have a BMC here. */
2599         rv = try_get_dev_id(new_smi);
2600         if (rv) {
2601                 if (new_smi->addr_source)
2602                         printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2603                                " at this location\n");
2604                 goto out_err;
2605         }
2606
2607         setup_oem_data_handler(new_smi);
2608         setup_xaction_handlers(new_smi);
2609
2610         /* Try to claim any interrupts. */
2611         if (new_smi->irq_setup)
2612                 new_smi->irq_setup(new_smi);
2613
2614         INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2615         INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2616         new_smi->curr_msg = NULL;
2617         atomic_set(&new_smi->req_events, 0);
2618         new_smi->run_to_completion = 0;
2619
2620         new_smi->interrupt_disabled = 0;
2621         atomic_set(&new_smi->stop_operation, 0);
2622         new_smi->intf_num = smi_num;
2623         smi_num++;
2624
2625         /* Start clearing the flags before we enable interrupts or the
2626            timer to avoid racing with the timer. */
2627         start_clear_flags(new_smi);
2628         /* IRQ is defined to be set when non-zero. */
2629         if (new_smi->irq)
2630                 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2631
2632         if (!new_smi->dev) {
2633                 /* If we don't already have a device from something
2634                  * else (like PCI), then register a new one. */
2635                 new_smi->pdev = platform_device_alloc("ipmi_si",
2636                                                       new_smi->intf_num);
2637                 if (!new_smi->pdev) {
                             rv = -ENOMEM;
2638                         printk(KERN_ERR
2639                                "ipmi_si_intf:"
2640                                " Unable to allocate platform device\n");
2641                         goto out_err;
2642                 }
2643                 new_smi->dev = &new_smi->pdev->dev;
2644                 new_smi->dev->driver = &ipmi_driver;
2645
2646                 rv = platform_device_add(new_smi->pdev);
2647                 if (rv) {
2648                         printk(KERN_ERR
2649                                "ipmi_si_intf:"
2650                                " Unable to register system interface device:"
2651                                " %d\n",
2652                                rv);
2653                         goto out_err;
2654                 }
2655                 new_smi->dev_registered = 1;
2656         }
2657
2658         rv = ipmi_register_smi(&handlers,
2659                                new_smi,
2660                                &new_smi->device_id,
2661                                new_smi->dev,
2662                                "bmc",
2663                                new_smi->slave_addr);
2664         if (rv) {
2665                 printk(KERN_ERR
2666                        "ipmi_si: Unable to register device: error %d\n",
2667                        rv);
2668                 goto out_err_stop_timer;
2669         }
2670
2671         rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2672                                      type_file_read_proc, NULL,
2673                                      new_smi, THIS_MODULE);
2674         if (rv) {
2675                 printk(KERN_ERR
2676                        "ipmi_si: Unable to create proc entry: %d\n",
2677                        rv);
2678                 goto out_err_stop_timer;
2679         }
2680
2681         rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2682                                      stat_file_read_proc, NULL,
2683                                      new_smi, THIS_MODULE);
2684         if (rv) {
2685                 printk(KERN_ERR
2686                        "ipmi_si: Unable to create proc entry: %d\n",
2687                        rv);
2688                 goto out_err_stop_timer;
2689         }
2690
2691         rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
2692                                      param_read_proc, NULL,
2693                                      new_smi, THIS_MODULE);
2694         if (rv) {
2695                 printk(KERN_ERR
2696                        "ipmi_si: Unable to create proc entry: %d\n",
2697                        rv);
2698                 goto out_err_stop_timer;
2699         }
2700
2701         list_add_tail(&new_smi->link, &smi_infos);
2702
2703         mutex_unlock(&smi_infos_lock);
2704
2705         printk(" IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
2706
2707         return 0;
2708
2709  out_err_stop_timer:
2710         atomic_inc(&new_smi->stop_operation);
2711         wait_for_timer_and_thread(new_smi);
2712
2713  out_err:
2714         if (new_smi->intf)
2715                 ipmi_unregister_smi(new_smi->intf);
2716
2717         if (new_smi->irq_cleanup)
2718                 new_smi->irq_cleanup(new_smi);
2719
2720         /* Wait until we know we are out of any interrupt handlers
2721            that might have been running before we freed the
2722            interrupt. */
2723         synchronize_sched();
2724
2725         if (new_smi->si_sm) {
2726                 if (new_smi->handlers)
2727                         new_smi->handlers->cleanup(new_smi->si_sm);
2728                 kfree(new_smi->si_sm);
2729         }
2730         if (new_smi->addr_source_cleanup)
2731                 new_smi->addr_source_cleanup(new_smi);
2732         if (new_smi->io_cleanup)
2733                 new_smi->io_cleanup(new_smi);
2734
2735         if (new_smi->dev_registered)
2736                 platform_device_unregister(new_smi->pdev);
2737
2738         kfree(new_smi);
2739
2740         mutex_unlock(&smi_infos_lock);
2741
2742         return rv;
2743 }
2744
2745 static __devinit int init_ipmi_si(void)
2746 {
2747         int  i;
2748         char *str;
2749         int  rv;
2750
2751         if (initialized)
2752                 return 0;
2753         initialized = 1;
2754
2755         /* Register the device drivers. */
2756         rv = driver_register(&ipmi_driver);
2757         if (rv) {
2758                 printk(KERN_ERR
2759                        "init_ipmi_si: Unable to register driver: %d\n",
2760                        rv);
2761                 return rv;
2762         }
2763
2764
2765         /* Parse out the si_type string into its components. */
2766         str = si_type_str;
2767         if (*str != '\0') {
2768                 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
2769                         si_type[i] = str;
2770                         str = strchr(str, ',');
2771                         if (str) {
2772                                 *str = '\0';
2773                                 str++;
2774                         } else {
2775                                 break;
2776                         }
2777                 }
2778         }
2779
2780         printk(KERN_INFO "IPMI System Interface driver.\n");
2781
2782         hardcode_find_bmc();
2783
2784 #ifdef CONFIG_DMI
2785         dmi_find_bmc();
2786 #endif
2787
2788 #ifdef CONFIG_ACPI
2789         acpi_find_bmc();
2790 #endif
2791
2792 #ifdef CONFIG_PCI
2793         rv = pci_register_driver(&ipmi_pci_driver);
2794         if (rv) {
2795                 printk(KERN_ERR
2796                        "init_ipmi_si: Unable to register PCI driver: %d\n",
2797                        rv);
2798         }
2799 #endif
2800
2801         if (si_trydefaults) {
2802                 mutex_lock(&smi_infos_lock);
2803                 if (list_empty(&smi_infos)) {
2804                         /* No BMC was found, try defaults. */
2805                         mutex_unlock(&smi_infos_lock);
2806                         default_find_bmc();
2807                 } else {
2808                         mutex_unlock(&smi_infos_lock);
2809                 }
2810         }
2811
2812         mutex_lock(&smi_infos_lock);
2813         if (unload_when_empty && list_empty(&smi_infos)) {
2814                 mutex_unlock(&smi_infos_lock);
2815 #ifdef CONFIG_PCI
2816                 pci_unregister_driver(&ipmi_pci_driver);
2817 #endif
2818                 driver_unregister(&ipmi_driver);
2819                 printk("ipmi_si: Unable to find any System Interface(s)\n");
2820                 return -ENODEV;
2821         } else {
2822                 mutex_unlock(&smi_infos_lock);
2823                 return 0;
2824         }
2825 }
2826 module_init(init_ipmi_si);
2827
2828 static void cleanup_one_si(struct smi_info *to_clean)
2829 {
2830         int           rv;
2831         unsigned long flags;
2832
2833         if (!to_clean)
2834                 return;
2835
2836         list_del(&to_clean->link);
2837
2838         /* Tell the timer and interrupt handlers that we are shutting
2839            down. */
2840         spin_lock_irqsave(&(to_clean->si_lock), flags);
2841         spin_lock(&(to_clean->msg_lock));
2842
2843         atomic_inc(&to_clean->stop_operation);
2844
2845         if (to_clean->irq_cleanup)
2846                 to_clean->irq_cleanup(to_clean);
2847
2848         spin_unlock(&(to_clean->msg_lock));
2849         spin_unlock_irqrestore(&(to_clean->si_lock), flags);
2850
2851         /* Wait until we know we are out of any interrupt handlers
2852            that might have been running before we freed the
2853            interrupt. */
2854         synchronize_sched();
2855
2856         wait_for_timer_and_thread(to_clean);
2857
2858         /* Interrupts and timeouts are stopped, now make sure the
2859            interface is in a clean state. */
2860         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
2861                 poll(to_clean);
2862                 schedule_timeout_uninterruptible(1);
2863         }
2864
2865         rv = ipmi_unregister_smi(to_clean->intf);
2866         if (rv) {
2867                 printk(KERN_ERR
2868                        "ipmi_si: Unable to unregister device: errno=%d\n",
2869                        rv);
2870         }
2871
2872         to_clean->handlers->cleanup(to_clean->si_sm);
2873
2874         kfree(to_clean->si_sm);
2875
2876         if (to_clean->addr_source_cleanup)
2877                 to_clean->addr_source_cleanup(to_clean);
2878         if (to_clean->io_cleanup)
2879                 to_clean->io_cleanup(to_clean);
2880
2881         if (to_clean->dev_registered)
2882                 platform_device_unregister(to_clean->pdev);
2883
2884         kfree(to_clean);
2885 }
2886
2887 static __exit void cleanup_ipmi_si(void)
2888 {
2889         struct smi_info *e, *tmp_e;
2890
2891         if (!initialized)
2892                 return;
2893
2894 #ifdef CONFIG_PCI
2895         pci_unregister_driver(&ipmi_pci_driver);
2896 #endif
2897
2898         mutex_lock(&smi_infos_lock);
2899         list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
2900                 cleanup_one_si(e);
2901         mutex_unlock(&smi_infos_lock);
2902
2903         driver_unregister(&ipmi_driver);
2904 }
2905 module_exit(cleanup_ipmi_si);
2906
2907 MODULE_LICENSE("GPL");
2908 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
2909 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");