1 /*
2  * ipmi_si.c
3  *
4  * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5  * BT).
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  *
13  *  This program is free software; you can redistribute it and/or modify it
14  *  under the terms of the GNU General Public License as published by the
15  *  Free Software Foundation; either version 2 of the License, or (at your
16  *  option) any later version.
17  *
18  *
19  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
20  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
25  *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
27  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
28  *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  *  You should have received a copy of the GNU General Public License along
31  *  with this program; if not, write to the Free Software Foundation, Inc.,
32  *  675 Mass Ave, Cambridge, MA 02139, USA.
33  */
34
35 /*
36  * This file holds the "policy" for the interface to the SMI state
37  * machine.  It does the configuration, handles timers and interrupts,
38  * and drives the real SMI state machine.
39  */
40
41 #include <linux/module.h>
42 #include <linux/moduleparam.h>
43 #include <asm/system.h>
44 #include <linux/sched.h>
45 #include <linux/timer.h>
46 #include <linux/errno.h>
47 #include <linux/spinlock.h>
48 #include <linux/slab.h>
49 #include <linux/delay.h>
50 #include <linux/list.h>
51 #include <linux/pci.h>
52 #include <linux/ioport.h>
53 #include <linux/notifier.h>
54 #include <linux/mutex.h>
55 #include <linux/kthread.h>
56 #include <asm/irq.h>
57 #include <linux/interrupt.h>
58 #include <linux/rcupdate.h>
59 #include <linux/ipmi_smi.h>
60 #include <asm/io.h>
61 #include "ipmi_si_sm.h"
62 #include <linux/init.h>
63 #include <linux/dmi.h>
64 #include <linux/string.h>
65 #include <linux/ctype.h>
66
67 #define PFX "ipmi_si: "
68
69 /* Measure times between events in the driver. */
70 #undef DEBUG_TIMING
71
72 /* Call every 10 ms. */
73 #define SI_TIMEOUT_TIME_USEC    10000
74 #define SI_USEC_PER_JIFFY       (1000000/HZ)
75 #define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
76 #define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
77                                        short timeout */
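/*
 * Worked example (added for illustration, not in the original source):
 * with HZ=1000 a jiffy is 1000 usec, so SI_TIMEOUT_JIFFIES is
 * 10000/1000 = 10 jiffies (10 ms); with HZ=250 a jiffy is 4000 usec and
 * the integer division gives 10000/4000 = 2 jiffies (8 ms).
 */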
78
79 enum si_intf_state {
80         SI_NORMAL,
81         SI_GETTING_FLAGS,
82         SI_GETTING_EVENTS,
83         SI_CLEARING_FLAGS,
84         SI_CLEARING_FLAGS_THEN_SET_IRQ,
85         SI_GETTING_MESSAGES,
86         SI_ENABLE_INTERRUPTS1,
87         SI_ENABLE_INTERRUPTS2
88         /* FIXME - add watchdog stuff. */
89 };
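/*
 * Summary added for clarity (derived from the handlers below, not part
 * of the original source): the machine idles in SI_NORMAL.  An ATTN or
 * interrupt triggers a GET_MSG_FLAGS request (SI_GETTING_FLAGS), and
 * handle_flags() then moves to SI_CLEARING_FLAGS, SI_GETTING_MESSAGES
 * or SI_GETTING_EVENTS depending on which flag bits came back.  The
 * *_THEN_SET_IRQ and SI_ENABLE_INTERRUPTS* states are only used on the
 * startup path when an interrupt is being enabled.
 */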
90
91 /* Some BT-specific defines we need here. */
92 #define IPMI_BT_INTMASK_REG             2
93 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT   2
94 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT  1
95
96 enum si_type {
97     SI_KCS, SI_SMIC, SI_BT
98 };
99 static char *si_to_str[] = { "kcs", "smic", "bt" };
100
101 #define DEVICE_NAME "ipmi_si"
102
103 static struct device_driver ipmi_driver =
104 {
105         .name = DEVICE_NAME,
106         .bus = &platform_bus_type
107 };
108
109 struct smi_info
110 {
111         int                    intf_num;
112         ipmi_smi_t             intf;
113         struct si_sm_data      *si_sm;
114         struct si_sm_handlers  *handlers;
115         enum si_type           si_type;
116         spinlock_t             si_lock;
117         spinlock_t             msg_lock;
118         struct list_head       xmit_msgs;
119         struct list_head       hp_xmit_msgs;
120         struct ipmi_smi_msg    *curr_msg;
121         enum si_intf_state     si_state;
122
123         /* Used to handle the various types of I/O that can occur with
124            IPMI */
125         struct si_sm_io io;
126         int (*io_setup)(struct smi_info *info);
127         void (*io_cleanup)(struct smi_info *info);
128         int (*irq_setup)(struct smi_info *info);
129         void (*irq_cleanup)(struct smi_info *info);
130         unsigned int io_size;
131         char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
132         void (*addr_source_cleanup)(struct smi_info *info);
133         void *addr_source_data;
134
135         /* Per-OEM handler, called from handle_flags().
136            Returns 1 when handle_flags() needs to be re-run,
137            or 0 if it has set si_state itself.
138         */
139         int (*oem_data_avail_handler)(struct smi_info *smi_info);
140
141         /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
142            is set to hold the flags until we are done handling everything
143            from the flags. */
144 #define RECEIVE_MSG_AVAIL       0x01
145 #define EVENT_MSG_BUFFER_FULL   0x02
146 #define WDT_PRE_TIMEOUT_INT     0x08
147 #define OEM0_DATA_AVAIL     0x20
148 #define OEM1_DATA_AVAIL     0x40
149 #define OEM2_DATA_AVAIL     0x80
150 #define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
151                              OEM1_DATA_AVAIL | \
152                              OEM2_DATA_AVAIL)
153         unsigned char       msg_flags;
154
155         /* If set to true, this will request events the next time the
156            state machine is idle. */
157         atomic_t            req_events;
158
159         /* If true, run the state machine to completion on every send
160            call.  Generally used after a panic to make sure stuff goes
161            out. */
162         int                 run_to_completion;
163
164         /* The I/O port of an SI interface. */
165         int                 port;
166
167         /* The space between start addresses of the two ports.  For
168            instance, if the first port is 0xca2 and the spacing is 4, then
169            the second port is 0xca6. */
170         unsigned int        spacing;
171
172         /* zero if no irq; */
173         int                 irq;
174
175         /* The timer for this si. */
176         struct timer_list   si_timer;
177
178         /* The time (in jiffies) the last timeout occurred at. */
179         unsigned long       last_timeout_jiffies;
180
181         /* Used to gracefully stop the timer without race conditions. */
182         atomic_t            stop_operation;
183
184         /* The driver will disable interrupts when it gets into a
185            situation where it cannot handle messages due to lack of
186            memory.  Once that situation clears up, it will re-enable
187            interrupts. */
188         int interrupt_disabled;
189
190         /* From the get device id response... */
191         struct ipmi_device_id device_id;
192
193         /* Driver model stuff. */
194         struct device *dev;
195         struct platform_device *pdev;
196
197          /* True if we allocated the device, false if it came from
198           * someplace else (like PCI). */
199         int dev_registered;
200
201         /* Slave address, could be reported from DMI. */
202         unsigned char slave_addr;
203
204         /* Counters and things for the proc filesystem. */
205         spinlock_t count_lock;
206         unsigned long short_timeouts;
207         unsigned long long_timeouts;
208         unsigned long timeout_restarts;
209         unsigned long idles;
210         unsigned long interrupts;
211         unsigned long attentions;
212         unsigned long flag_fetches;
213         unsigned long hosed_count;
214         unsigned long complete_transactions;
215         unsigned long events;
216         unsigned long watchdog_pretimeouts;
217         unsigned long incoming_messages;
218
219         struct task_struct *thread;
220
221         struct list_head link;
222 };
223
224 #define SI_MAX_PARMS 4
225
226 static int force_kipmid[SI_MAX_PARMS];
227 static int num_force_kipmid;
228
229 static int unload_when_empty = 1;
230
231 static int try_smi_init(struct smi_info *smi);
232 static void cleanup_one_si(struct smi_info *to_clean);
233
234 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
235 static int register_xaction_notifier(struct notifier_block * nb)
236 {
237         return atomic_notifier_chain_register(&xaction_notifier_list, nb);
238 }
239
240 static void deliver_recv_msg(struct smi_info *smi_info,
241                              struct ipmi_smi_msg *msg)
242 {
243         /* Deliver the message to the upper layer with the lock
244            released. */
245         spin_unlock(&(smi_info->si_lock));
246         ipmi_smi_msg_received(smi_info->intf, msg);
247         spin_lock(&(smi_info->si_lock));
248 }
249
250 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
251 {
252         struct ipmi_smi_msg *msg = smi_info->curr_msg;
253
254         if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
255                 cCode = IPMI_ERR_UNSPECIFIED;
256         /* else use it as is */
257
258         /* Make it a response */
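        /* Note (added): msg->data[0] holds (netfn << 2), so OR-ing in 4
           bumps the netfn by one, turning the request netfn into the
           matching response netfn. */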
259         msg->rsp[0] = msg->data[0] | 4;
260         msg->rsp[1] = msg->data[1];
261         msg->rsp[2] = cCode;
262         msg->rsp_size = 3;
263
264         smi_info->curr_msg = NULL;
265         deliver_recv_msg(smi_info, msg);
266 }
267
268 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
269 {
270         int              rv;
271         struct list_head *entry = NULL;
272 #ifdef DEBUG_TIMING
273         struct timeval t;
274 #endif
275
276         /* No need to save flags, we already have interrupts off and we
277            already hold the SMI lock. */
278         spin_lock(&(smi_info->msg_lock));
279
280         /* Pick the high priority queue first. */
281         if (!list_empty(&(smi_info->hp_xmit_msgs))) {
282                 entry = smi_info->hp_xmit_msgs.next;
283         } else if (!list_empty(&(smi_info->xmit_msgs))) {
284                 entry = smi_info->xmit_msgs.next;
285         }
286
287         if (!entry) {
288                 smi_info->curr_msg = NULL;
289                 rv = SI_SM_IDLE;
290         } else {
291                 int err;
292
293                 list_del(entry);
294                 smi_info->curr_msg = list_entry(entry,
295                                                 struct ipmi_smi_msg,
296                                                 link);
297 #ifdef DEBUG_TIMING
298                 do_gettimeofday(&t);
299                 printk("**Start2: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
300 #endif
301                 err = atomic_notifier_call_chain(&xaction_notifier_list,
302                                 0, smi_info);
303                 if (err & NOTIFY_STOP_MASK) {
304                         rv = SI_SM_CALL_WITHOUT_DELAY;
305                         goto out;
306                 }
307                 err = smi_info->handlers->start_transaction(
308                         smi_info->si_sm,
309                         smi_info->curr_msg->data,
310                         smi_info->curr_msg->data_size);
311                 if (err) {
312                         return_hosed_msg(smi_info, err);
313                 }
314
315                 rv = SI_SM_CALL_WITHOUT_DELAY;
316         }
317         out:
318         spin_unlock(&(smi_info->msg_lock));
319
320         return rv;
321 }
322
323 static void start_enable_irq(struct smi_info *smi_info)
324 {
325         unsigned char msg[2];
326
327         /* If we are enabling interrupts, we have to tell the
328            BMC to use them. */
329         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
330         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
331
332         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
333         smi_info->si_state = SI_ENABLE_INTERRUPTS1;
334 }
335
336 static void start_clear_flags(struct smi_info *smi_info)
337 {
338         unsigned char msg[3];
339
340         /* Make sure the watchdog pre-timeout flag is not set at startup. */
341         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
342         msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
343         msg[2] = WDT_PRE_TIMEOUT_INT;
344
345         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
346         smi_info->si_state = SI_CLEARING_FLAGS;
347 }
348
349 /* When we have a situation where we run out of memory and cannot
350    allocate messages, we just leave them in the BMC and run the system
351    polled until we can allocate some memory.  Once we have some
352    memory, we will re-enable the interrupt. */
353 static inline void disable_si_irq(struct smi_info *smi_info)
354 {
355         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
356                 disable_irq_nosync(smi_info->irq);
357                 smi_info->interrupt_disabled = 1;
358         }
359 }
360
361 static inline void enable_si_irq(struct smi_info *smi_info)
362 {
363         if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
364                 enable_irq(smi_info->irq);
365                 smi_info->interrupt_disabled = 0;
366         }
367 }
368
369 static void handle_flags(struct smi_info *smi_info)
370 {
371  retry:
372         if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
373                 /* Watchdog pre-timeout */
374                 spin_lock(&smi_info->count_lock);
375                 smi_info->watchdog_pretimeouts++;
376                 spin_unlock(&smi_info->count_lock);
377
378                 start_clear_flags(smi_info);
379                 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
380                 spin_unlock(&(smi_info->si_lock));
381                 ipmi_smi_watchdog_pretimeout(smi_info->intf);
382                 spin_lock(&(smi_info->si_lock));
383         } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
384                 /* Messages available. */
385                 smi_info->curr_msg = ipmi_alloc_smi_msg();
386                 if (!smi_info->curr_msg) {
387                         disable_si_irq(smi_info);
388                         smi_info->si_state = SI_NORMAL;
389                         return;
390                 }
391                 enable_si_irq(smi_info);
392
393                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
394                 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
395                 smi_info->curr_msg->data_size = 2;
396
397                 smi_info->handlers->start_transaction(
398                         smi_info->si_sm,
399                         smi_info->curr_msg->data,
400                         smi_info->curr_msg->data_size);
401                 smi_info->si_state = SI_GETTING_MESSAGES;
402         } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
403                 /* Events available. */
404                 smi_info->curr_msg = ipmi_alloc_smi_msg();
405                 if (!smi_info->curr_msg) {
406                         disable_si_irq(smi_info);
407                         smi_info->si_state = SI_NORMAL;
408                         return;
409                 }
410                 enable_si_irq(smi_info);
411
412                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
413                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
414                 smi_info->curr_msg->data_size = 2;
415
416                 smi_info->handlers->start_transaction(
417                         smi_info->si_sm,
418                         smi_info->curr_msg->data,
419                         smi_info->curr_msg->data_size);
420                 smi_info->si_state = SI_GETTING_EVENTS;
421         } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
422                    smi_info->oem_data_avail_handler) {
423                 if (smi_info->oem_data_avail_handler(smi_info))
424                         goto retry;
425         } else {
426                 smi_info->si_state = SI_NORMAL;
427         }
428 }
429
430 static void handle_transaction_done(struct smi_info *smi_info)
431 {
432         struct ipmi_smi_msg *msg;
433 #ifdef DEBUG_TIMING
434         struct timeval t;
435
436         do_gettimeofday(&t);
437         printk("**Done: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
438 #endif
439         switch (smi_info->si_state) {
440         case SI_NORMAL:
441                 if (!smi_info->curr_msg)
442                         break;
443
444                 smi_info->curr_msg->rsp_size
445                         = smi_info->handlers->get_result(
446                                 smi_info->si_sm,
447                                 smi_info->curr_msg->rsp,
448                                 IPMI_MAX_MSG_LENGTH);
449
450                 /* Do this here because deliver_recv_msg() releases the
451                    lock, and a new message can be put in during the
452                    time the lock is released. */
453                 msg = smi_info->curr_msg;
454                 smi_info->curr_msg = NULL;
455                 deliver_recv_msg(smi_info, msg);
456                 break;
457
458         case SI_GETTING_FLAGS:
459         {
460                 unsigned char msg[4];
461                 unsigned int  len;
462
463                 /* We got the flags from the SMI, now handle them. */
464                 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
465                 if (msg[2] != 0) {
466                         /* Error fetching flags, just give up for
467                            now. */
468                         smi_info->si_state = SI_NORMAL;
469                 } else if (len < 4) {
470                         /* Hmm, no flags.  That's technically illegal, but
471                            don't use uninitialized data. */
472                         smi_info->si_state = SI_NORMAL;
473                 } else {
474                         smi_info->msg_flags = msg[3];
475                         handle_flags(smi_info);
476                 }
477                 break;
478         }
479
480         case SI_CLEARING_FLAGS:
481         case SI_CLEARING_FLAGS_THEN_SET_IRQ:
482         {
483                 unsigned char msg[3];
484
485                 /* We cleared the flags. */
486                 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
487                 if (msg[2] != 0) {
488                         /* Error clearing flags */
489                         printk(KERN_WARNING
490                                "ipmi_si: Error clearing flags: %2.2x\n",
491                                msg[2]);
492                 }
493                 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
494                         start_enable_irq(smi_info);
495                 else
496                         smi_info->si_state = SI_NORMAL;
497                 break;
498         }
499
500         case SI_GETTING_EVENTS:
501         {
502                 smi_info->curr_msg->rsp_size
503                         = smi_info->handlers->get_result(
504                                 smi_info->si_sm,
505                                 smi_info->curr_msg->rsp,
506                                 IPMI_MAX_MSG_LENGTH);
507
508                 /* Do this here because deliver_recv_msg() releases the
509                    lock, and a new message can be put in during the
510                    time the lock is released. */
511                 msg = smi_info->curr_msg;
512                 smi_info->curr_msg = NULL;
513                 if (msg->rsp[2] != 0) {
514                         /* Error getting event, probably done. */
515                         msg->done(msg);
516
517                         /* Take off the event flag. */
518                         smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
519                         handle_flags(smi_info);
520                 } else {
521                         spin_lock(&smi_info->count_lock);
522                         smi_info->events++;
523                         spin_unlock(&smi_info->count_lock);
524
525                         /* Do this before we deliver the message
526                            because delivering the message releases the
527                            lock and something else can mess with the
528                            state. */
529                         handle_flags(smi_info);
530
531                         deliver_recv_msg(smi_info, msg);
532                 }
533                 break;
534         }
535
536         case SI_GETTING_MESSAGES:
537         {
538                 smi_info->curr_msg->rsp_size
539                         = smi_info->handlers->get_result(
540                                 smi_info->si_sm,
541                                 smi_info->curr_msg->rsp,
542                                 IPMI_MAX_MSG_LENGTH);
543
544                 /* Do this here because deliver_recv_msg() releases the
545                    lock, and a new message can be put in during the
546                    time the lock is released. */
547                 msg = smi_info->curr_msg;
548                 smi_info->curr_msg = NULL;
549                 if (msg->rsp[2] != 0) {
550                         /* Error getting message, probably done. */
551                         msg->done(msg);
552
553                         /* Take off the msg flag. */
554                         smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
555                         handle_flags(smi_info);
556                 } else {
557                         spin_lock(&smi_info->count_lock);
558                         smi_info->incoming_messages++;
559                         spin_unlock(&smi_info->count_lock);
560
561                         /* Do this before we deliver the message
562                            because delivering the message releases the
563                            lock and something else can mess with the
564                            state. */
565                         handle_flags(smi_info);
566
567                         deliver_recv_msg(smi_info, msg);
568                 }
569                 break;
570         }
571
572         case SI_ENABLE_INTERRUPTS1:
573         {
574                 unsigned char msg[4];
575
576                 /* We got the flags from the SMI, now handle them. */
577                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
578                 if (msg[2] != 0) {
579                         printk(KERN_WARNING
580                                "ipmi_si: Could not enable interrupts"
581                                ", failed get, using polled mode.\n");
582                         smi_info->si_state = SI_NORMAL;
583                 } else {
584                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
585                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
586                         msg[2] = msg[3] | 1; /* enable msg queue int */
587                         smi_info->handlers->start_transaction(
588                                 smi_info->si_sm, msg, 3);
589                         smi_info->si_state = SI_ENABLE_INTERRUPTS2;
590                 }
591                 break;
592         }
593
594         case SI_ENABLE_INTERRUPTS2:
595         {
596                 unsigned char msg[4];
597
598                 /* We got the flags from the SMI, now handle them. */
599                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
600                 if (msg[2] != 0) {
601                         printk(KERN_WARNING
602                                "ipmi_si: Could not enable interrupts"
603                                ", failed set, using polled mode.\n");
604                 }
605                 smi_info->si_state = SI_NORMAL;
606                 break;
607         }
608         }
609 }
610
611 /* Called on timeouts and events.  Timeouts should pass the elapsed
612    time; interrupts should pass in zero. */
613 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
614                                            int time)
615 {
616         enum si_sm_result si_sm_result;
617
618  restart:
619         /* There used to be a loop here that waited a little while
620            (around 25us) before giving up.  That turned out to be
621            pointless; the minimum delays I was seeing were in the 300us
622            range, which is far too long to wait in an interrupt.  So
623            we just run until the state machine tells us something
624            happened or it needs a delay. */
625         si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
626         time = 0;
627         while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
628         {
629                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
630         }
631
632         if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
633         {
634                 spin_lock(&smi_info->count_lock);
635                 smi_info->complete_transactions++;
636                 spin_unlock(&smi_info->count_lock);
637
638                 handle_transaction_done(smi_info);
639                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
640         }
641         else if (si_sm_result == SI_SM_HOSED)
642         {
643                 spin_lock(&smi_info->count_lock);
644                 smi_info->hosed_count++;
645                 spin_unlock(&smi_info->count_lock);
646
647                 /* Do this before return_hosed_msg(), because that
648                    releases the lock. */
649                 smi_info->si_state = SI_NORMAL;
650                 if (smi_info->curr_msg != NULL) {
651                         /* If we were handling a user message, format
652                            a response to send to the upper layer to
653                            tell it about the error. */
654                         return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
655                 }
656                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
657         }
658
659         /* We prefer handling attn over new messages. */
660         if (si_sm_result == SI_SM_ATTN)
661         {
662                 unsigned char msg[2];
663
664                 spin_lock(&smi_info->count_lock);
665                 smi_info->attentions++;
666                 spin_unlock(&smi_info->count_lock);
667
668                 /* Got an attn, send down a get message flags to see
669                    what's causing it.  It would be better to handle
670                    this in the upper layer, but due to the way
671                    interrupts work with the SMI, that's not really
672                    possible. */
673                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
674                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
675
676                 smi_info->handlers->start_transaction(
677                         smi_info->si_sm, msg, 2);
678                 smi_info->si_state = SI_GETTING_FLAGS;
679                 goto restart;
680         }
681
682         /* If we are currently idle, try to start the next message. */
683         if (si_sm_result == SI_SM_IDLE) {
684                 spin_lock(&smi_info->count_lock);
685                 smi_info->idles++;
686                 spin_unlock(&smi_info->count_lock);
687
688                 si_sm_result = start_next_msg(smi_info);
689                 if (si_sm_result != SI_SM_IDLE)
690                         goto restart;
691         }
692
693         if ((si_sm_result == SI_SM_IDLE)
694             && (atomic_read(&smi_info->req_events)))
695         {
696                 /* We are idle and the upper layer requested that we fetch
697                    events, so do so. */
698                 atomic_set(&smi_info->req_events, 0);
699
700                 smi_info->curr_msg = ipmi_alloc_smi_msg();
701                 if (!smi_info->curr_msg)
702                         goto out;
703
704                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
705                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
706                 smi_info->curr_msg->data_size = 2;
707
708                 smi_info->handlers->start_transaction(
709                         smi_info->si_sm,
710                         smi_info->curr_msg->data,
711                         smi_info->curr_msg->data_size);
712                 smi_info->si_state = SI_GETTING_EVENTS;
713                 goto restart;
714         }
715  out:
716         return si_sm_result;
717 }
718
719 static void sender(void                *send_info,
720                    struct ipmi_smi_msg *msg,
721                    int                 priority)
722 {
723         struct smi_info   *smi_info = send_info;
724         enum si_sm_result result;
725         unsigned long     flags;
726 #ifdef DEBUG_TIMING
727         struct timeval    t;
728 #endif
729
730         if (atomic_read(&smi_info->stop_operation)) {
731                 msg->rsp[0] = msg->data[0] | 4;
732                 msg->rsp[1] = msg->data[1];
733                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
734                 msg->rsp_size = 3;
735                 deliver_recv_msg(smi_info, msg);
736                 return;
737         }
738
739         spin_lock_irqsave(&(smi_info->msg_lock), flags);
740 #ifdef DEBUG_TIMING
741         do_gettimeofday(&t);
742         printk("**Enqueue: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
743 #endif
744
745         if (smi_info->run_to_completion) {
746                 /* If we are running to completion, then throw it in
747                    the list and run transactions until everything is
748                    clear.  Priority doesn't matter here. */
749                 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
750
751                 /* We have to release the msg lock and claim the smi
752                    lock in this case, because of race conditions. */
753                 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
754
755                 spin_lock_irqsave(&(smi_info->si_lock), flags);
756                 result = smi_event_handler(smi_info, 0);
757                 while (result != SI_SM_IDLE) {
758                         udelay(SI_SHORT_TIMEOUT_USEC);
759                         result = smi_event_handler(smi_info,
760                                                    SI_SHORT_TIMEOUT_USEC);
761                 }
762                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
763                 return;
764         } else {
765                 if (priority > 0) {
766                         list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
767                 } else {
768                         list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
769                 }
770         }
771         spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
772
773         spin_lock_irqsave(&(smi_info->si_lock), flags);
774         if ((smi_info->si_state == SI_NORMAL)
775             && (smi_info->curr_msg == NULL))
776         {
777                 start_next_msg(smi_info);
778         }
779         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
780 }
781
782 static void set_run_to_completion(void *send_info, int i_run_to_completion)
783 {
784         struct smi_info   *smi_info = send_info;
785         enum si_sm_result result;
786         unsigned long     flags;
787
788         spin_lock_irqsave(&(smi_info->si_lock), flags);
789
790         smi_info->run_to_completion = i_run_to_completion;
791         if (i_run_to_completion) {
792                 result = smi_event_handler(smi_info, 0);
793                 while (result != SI_SM_IDLE) {
794                         udelay(SI_SHORT_TIMEOUT_USEC);
795                         result = smi_event_handler(smi_info,
796                                                    SI_SHORT_TIMEOUT_USEC);
797                 }
798         }
799
800         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
801 }
802
803 static int ipmi_thread(void *data)
804 {
805         struct smi_info *smi_info = data;
806         unsigned long flags;
807         enum si_sm_result smi_result;
808
809         set_user_nice(current, 19);
810         while (!kthread_should_stop()) {
811                 spin_lock_irqsave(&(smi_info->si_lock), flags);
812                 smi_result = smi_event_handler(smi_info, 0);
813                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
814                 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
815                         /* do nothing */
816                 }
817                 else if (smi_result == SI_SM_CALL_WITH_DELAY)
818                         schedule();
819                 else
820                         schedule_timeout_interruptible(1);
821         }
822         return 0;
823 }
824
825
826 static void poll(void *send_info)
827 {
828         struct smi_info *smi_info = send_info;
829
830         /*
831          * Make sure there is some delay in the poll loop so we can
832          * drive time forward and timeout things.
833          */
834         udelay(10);
835         smi_event_handler(smi_info, 10);
836 }
837
838 static void request_events(void *send_info)
839 {
840         struct smi_info *smi_info = send_info;
841
842         if (atomic_read(&smi_info->stop_operation))
843                 return;
844
845         atomic_set(&smi_info->req_events, 1);
846 }
847
848 static int initialized;
849
850 static void smi_timeout(unsigned long data)
851 {
852         struct smi_info   *smi_info = (struct smi_info *) data;
853         enum si_sm_result smi_result;
854         unsigned long     flags;
855         unsigned long     jiffies_now;
856         long              time_diff;
857 #ifdef DEBUG_TIMING
858         struct timeval    t;
859 #endif
860
861         if (atomic_read(&smi_info->stop_operation))
862                 return;
863
864         spin_lock_irqsave(&(smi_info->si_lock), flags);
865 #ifdef DEBUG_TIMING
866         do_gettimeofday(&t);
867         printk("**Timer: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
868 #endif
869         jiffies_now = jiffies;
870         time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
871                      * SI_USEC_PER_JIFFY);
872         smi_result = smi_event_handler(smi_info, time_diff);
873
874         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
875
876         smi_info->last_timeout_jiffies = jiffies_now;
877
878         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
879                 /* Running with interrupts, only do long timeouts. */
880                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
881                 spin_lock_irqsave(&smi_info->count_lock, flags);
882                 smi_info->long_timeouts++;
883                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
884                 goto do_add_timer;
885         }
886
887         /* If the state machine asks for a short delay, then shorten
888            the timer timeout. */
889         if (smi_result == SI_SM_CALL_WITH_DELAY) {
890                 spin_lock_irqsave(&smi_info->count_lock, flags);
891                 smi_info->short_timeouts++;
892                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
893                 smi_info->si_timer.expires = jiffies + 1;
894         } else {
895                 spin_lock_irqsave(&smi_info->count_lock, flags);
896                 smi_info->long_timeouts++;
897                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
898                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
899         }
900
901  do_add_timer:
902         add_timer(&(smi_info->si_timer));
903 }
904
905 static irqreturn_t si_irq_handler(int irq, void *data)
906 {
907         struct smi_info *smi_info = data;
908         unsigned long   flags;
909 #ifdef DEBUG_TIMING
910         struct timeval  t;
911 #endif
912
913         spin_lock_irqsave(&(smi_info->si_lock), flags);
914
915         spin_lock(&smi_info->count_lock);
916         smi_info->interrupts++;
917         spin_unlock(&smi_info->count_lock);
918
919         if (atomic_read(&smi_info->stop_operation))
920                 goto out;
921
922 #ifdef DEBUG_TIMING
923         do_gettimeofday(&t);
924         printk("**Interrupt: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
925 #endif
926         smi_event_handler(smi_info, 0);
927  out:
928         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
929         return IRQ_HANDLED;
930 }
931
932 static irqreturn_t si_bt_irq_handler(int irq, void *data)
933 {
934         struct smi_info *smi_info = data;
935         /* We need to clear the IRQ flag for the BT interface. */
936         smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
937                              IPMI_BT_INTMASK_CLEAR_IRQ_BIT
938                              | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
939         return si_irq_handler(irq, data);
940 }
941
942 static int smi_start_processing(void       *send_info,
943                                 ipmi_smi_t intf)
944 {
945         struct smi_info *new_smi = send_info;
946         int             enable = 0;
947
948         new_smi->intf = intf;
949
950         /* Set up the timer that drives the interface. */
951         setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
952         new_smi->last_timeout_jiffies = jiffies;
953         mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
954
955         /*
956          * Check if the user forcefully enabled the daemon.
957          */
958         if (new_smi->intf_num < num_force_kipmid)
959                 enable = force_kipmid[new_smi->intf_num];
960         /*
961          * The BT interface is efficient enough to not need a thread,
962          * and there is no need for a thread if we have interrupts.
963          */
964         else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
965                 enable = 1;
966
967         if (enable) {
968                 new_smi->thread = kthread_run(ipmi_thread, new_smi,
969                                               "kipmi%d", new_smi->intf_num);
970                 if (IS_ERR(new_smi->thread)) {
971                         printk(KERN_NOTICE "ipmi_si_intf: Could not start"
972                                " kernel thread due to error %ld, only using"
973                                " timers to drive the interface\n",
974                                PTR_ERR(new_smi->thread));
975                         new_smi->thread = NULL;
976                 }
977         }
978
979         return 0;
980 }
981
982 static void set_maintenance_mode(void *send_info, int enable)
983 {
984         struct smi_info   *smi_info = send_info;
985
986         if (!enable)
987                 atomic_set(&smi_info->req_events, 0);
988 }
989
990 static struct ipmi_smi_handlers handlers =
991 {
992         .owner                  = THIS_MODULE,
993         .start_processing       = smi_start_processing,
994         .sender                 = sender,
995         .request_events         = request_events,
996         .set_maintenance_mode   = set_maintenance_mode,
997         .set_run_to_completion  = set_run_to_completion,
998         .poll                   = poll,
999 };
1000
1001 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
1002    a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS */
1003
1004 static LIST_HEAD(smi_infos);
1005 static DEFINE_MUTEX(smi_infos_lock);
1006 static int smi_num; /* Used to sequence the SMIs */
1007
1008 #define DEFAULT_REGSPACING      1
1009
1010 static int           si_trydefaults = 1;
1011 static char          *si_type[SI_MAX_PARMS];
1012 #define MAX_SI_TYPE_STR 30
1013 static char          si_type_str[MAX_SI_TYPE_STR];
1014 static unsigned long addrs[SI_MAX_PARMS];
1015 static int num_addrs;
1016 static unsigned int  ports[SI_MAX_PARMS];
1017 static int num_ports;
1018 static int           irqs[SI_MAX_PARMS];
1019 static int num_irqs;
1020 static int           regspacings[SI_MAX_PARMS];
1021 static int num_regspacings;
1022 static int           regsizes[SI_MAX_PARMS];
1023 static int num_regsizes;
1024 static int           regshifts[SI_MAX_PARMS];
1025 static int num_regshifts;
1026 static int slave_addrs[SI_MAX_PARMS];
1027 static int num_slave_addrs;
1028
1029 #define IPMI_IO_ADDR_SPACE  0
1030 #define IPMI_MEM_ADDR_SPACE 1
1031 static char *addr_space_to_str[] = { "i/o", "mem" };
1032
1033 static int hotmod_handler(const char *val, struct kernel_param *kp);
1034
1035 module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
1036 MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
1037                  " Documentation/IPMI.txt in the kernel sources for the"
1038                  " gory details.");
1039
1040 module_param_named(trydefaults, si_trydefaults, bool, 0);
1041 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1042                  " default scan of the KCS and SMIC interface at the standard"
1043                  " address");
1044 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1045 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1046                  " interface separated by commas.  The types are 'kcs',"
1047                  " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
1048                  " the first interface to kcs and the second to bt");
1049 module_param_array(addrs, long, &num_addrs, 0);
1050 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1051                  " addresses separated by commas.  Only use if an interface"
1052                  " is in memory.  Otherwise, set it to zero or leave"
1053                  " it blank.");
1054 module_param_array(ports, int, &num_ports, 0);
1055 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1056                  " addresses separated by commas.  Only use if an interface"
1057                  " is a port.  Otherwise, set it to zero or leave"
1058                  " it blank.");
1059 module_param_array(irqs, int, &num_irqs, 0);
1060 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1061                  " addresses separated by commas.  Only use if an interface"
1062                  " has an interrupt.  Otherwise, set it to zero or leave"
1063                  " it blank.");
1064 module_param_array(regspacings, int, &num_regspacings, 0);
1065 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1066                  " and each successive register used by the interface.  For"
1067                  " instance, if the start address is 0xca2 and the spacing"
1068                  " is 2, then the second address is at 0xca4.  Defaults"
1069                  " to 1.");
1070 module_param_array(regsizes, int, &num_regsizes, 0);
1071 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1072                  " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1073                  " 16-bit, 32-bit, or 64-bit register.  Use this if"
1074                  " the 8-bit IPMI register has to be read from a larger"
1075                  " register.");
1076 module_param_array(regshifts, int, &num_regshifts, 0);
1077 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
1078                  " IPMI register, in bits.  For instance, if the data"
1079                  " is read from a 32-bit word and the IPMI data is in"
1080                  " bits 8-15, then the shift would be 8");
1081 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1082 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1083                  " the controller.  Normally this is 0x20, but can be"
1084                  " overridden by this parm.  This is an array indexed"
1085                  " by interface number.");
1086 module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1087 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1088                  " disabled (0).  Normally the IPMI driver auto-detects"
1089                  " this, but the value may be overridden by this parm.");
1090 module_param(unload_when_empty, int, 0);
1091 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1092                  " specified or found, default is 1.  Setting to 0"
1093                  " is useful for hot add of devices using hotmod.");
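
/*
 * Example usage (illustrative only, not from the original source): a
 * single KCS interface at I/O port 0xca2 using IRQ 5 could be forced
 * with something like:
 *
 *   modprobe ipmi_si type=kcs ports=0xca2 irqs=5
 *
 * leaving the other parameters at their defaults.
 */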
1094
1095
1096 static void std_irq_cleanup(struct smi_info *info)
1097 {
1098         if (info->si_type == SI_BT)
1099                 /* Disable the interrupt in the BT interface. */
1100                 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1101         free_irq(info->irq, info);
1102 }
1103
1104 static int std_irq_setup(struct smi_info *info)
1105 {
1106         int rv;
1107
1108         if (!info->irq)
1109                 return 0;
1110
1111         if (info->si_type == SI_BT) {
1112                 rv = request_irq(info->irq,
1113                                  si_bt_irq_handler,
1114                                  IRQF_DISABLED,
1115                                  DEVICE_NAME,
1116                                  info);
1117                 if (!rv)
1118                         /* Enable the interrupt in the BT interface. */
1119                         info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1120                                          IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1121         } else
1122                 rv = request_irq(info->irq,
1123                                  si_irq_handler,
1124                                  IRQF_DISABLED,
1125                                  DEVICE_NAME,
1126                                  info);
1127         if (rv) {
1128                 printk(KERN_WARNING
1129                        "ipmi_si: %s unable to claim interrupt %d,"
1130                        " running polled\n",
1131                        DEVICE_NAME, info->irq);
1132                 info->irq = 0;
1133         } else {
1134                 info->irq_cleanup = std_irq_cleanup;
1135                 printk("  Using irq %d\n", info->irq);
1136         }
1137
1138         return rv;
1139 }
1140
1141 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1142 {
1143         unsigned int addr = io->addr_data;
1144
1145         return inb(addr + (offset * io->regspacing));
1146 }
1147
1148 static void port_outb(struct si_sm_io *io, unsigned int offset,
1149                       unsigned char b)
1150 {
1151         unsigned int addr = io->addr_data;
1152
1153         outb(b, addr + (offset * io->regspacing));
1154 }
1155
1156 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1157 {
1158         unsigned int addr = io->addr_data;
1159
1160         return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1161 }
1162
1163 static void port_outw(struct si_sm_io *io, unsigned int offset,
1164                       unsigned char b)
1165 {
1166         unsigned int addr = io->addr_data;
1167
1168         outw(b << io->regshift, addr + (offset * io->regspacing));
1169 }
1170
1171 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1172 {
1173         unsigned int addr = io->addr_data;
1174
1175         return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1176 }
1177
1178 static void port_outl(struct si_sm_io *io, unsigned int offset,
1179                       unsigned char b)
1180 {
1181         unsigned int addr = io->addr_data;
1182
1183         outl(b << io->regshift, addr+(offset * io->regspacing));
1184 }
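
/*
 * Note added for clarity (describing the accessors above, not part of
 * the original source): for 2- and 4-byte register sizes the 8-bit IPMI
 * value is recovered on reads by shifting right by io->regshift and
 * masking with 0xff, and is placed back into the same bit position on
 * writes by shifting left by io->regshift.
 */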
1185
1186 static void port_cleanup(struct smi_info *info)
1187 {
1188         unsigned int addr = info->io.addr_data;
1189         int          idx;
1190
1191         if (addr) {
1192                 for (idx = 0; idx < info->io_size; idx++) {
1193                         release_region(addr + idx * info->io.regspacing,
1194                                        info->io.regsize);
1195                 }
1196         }
1197 }
1198
1199 static int port_setup(struct smi_info *info)
1200 {
1201         unsigned int addr = info->io.addr_data;
1202         int          idx;
1203
1204         if (!addr)
1205                 return -ENODEV;
1206
1207         info->io_cleanup = port_cleanup;
1208
1209         /* Figure out the actual inb/inw/inl/etc routine to use based
1210            upon the register size. */
1211         switch (info->io.regsize) {
1212         case 1:
1213                 info->io.inputb = port_inb;
1214                 info->io.outputb = port_outb;
1215                 break;
1216         case 2:
1217                 info->io.inputb = port_inw;
1218                 info->io.outputb = port_outw;
1219                 break;
1220         case 4:
1221                 info->io.inputb = port_inl;
1222                 info->io.outputb = port_outl;
1223                 break;
1224         default:
1225                 printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
1226                        info->io.regsize);
1227                 return -EINVAL;
1228         }
1229
1230         /* Some BIOSes reserve disjoint I/O regions in their ACPI
1231          * tables.  This causes problems when trying to register the
1232          * entire I/O region.  Therefore we must register each I/O
1233          * port separately.
1234          */
1235         for (idx = 0; idx < info->io_size; idx++) {
1236                 if (request_region(addr + idx * info->io.regspacing,
1237                                    info->io.regsize, DEVICE_NAME) == NULL) {
1238                         /* Undo allocations */
1239                         while (idx--) {
1240                                 release_region(addr + idx * info->io.regspacing,
1241                                                info->io.regsize);
1242                         }
1243                         return -EIO;
1244                 }
1245         }
1246         return 0;
1247 }
1248
1249 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1250 {
1251         return readb((io->addr)+(offset * io->regspacing));
1252 }
1253
1254 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1255                      unsigned char b)
1256 {
1257         writeb(b, (io->addr)+(offset * io->regspacing));
1258 }
1259
1260 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1261 {
1262         return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1263                 & 0xff;
1264 }
1265
1266 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1267                      unsigned char b)
1268 {
1269         writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
1270 }
1271
1272 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1273 {
1274         return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1275                 & 0xff;
1276 }
1277
1278 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1279                      unsigned char b)
1280 {
1281         writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1282 }
1283
1284 #ifdef readq
1285 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1286 {
1287         return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
1288                 & 0xff;
1289 }
1290
1291 static void mem_outq(struct si_sm_io *io, unsigned int offset,
1292                      unsigned char b)
1293 {
1294         writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
1295 }
1296 #endif
1297
1298 static void mem_cleanup(struct smi_info *info)
1299 {
1300         unsigned long addr = info->io.addr_data;
1301         int           mapsize;
1302
1303         if (info->io.addr) {
1304                 iounmap(info->io.addr);
1305
1306                 mapsize = ((info->io_size * info->io.regspacing)
1307                            - (info->io.regspacing - info->io.regsize));
1308
1309                 release_mem_region(addr, mapsize);
1310         }
1311 }
1312
1313 static int mem_setup(struct smi_info *info)
1314 {
1315         unsigned long addr = info->io.addr_data;
1316         int           mapsize;
1317
1318         if (!addr)
1319                 return -ENODEV;
1320
1321         info->io_cleanup = mem_cleanup;
1322
1323         /* Figure out the actual readb/readw/readl/etc routine to use based
1324            upon the register size. */
1325         switch (info->io.regsize) {
1326         case 1:
1327                 info->io.inputb = intf_mem_inb;
1328                 info->io.outputb = intf_mem_outb;
1329                 break;
1330         case 2:
1331                 info->io.inputb = intf_mem_inw;
1332                 info->io.outputb = intf_mem_outw;
1333                 break;
1334         case 4:
1335                 info->io.inputb = intf_mem_inl;
1336                 info->io.outputb = intf_mem_outl;
1337                 break;
1338 #ifdef readq
1339         case 8:
1340                 info->io.inputb = mem_inq;
1341                 info->io.outputb = mem_outq;
1342                 break;
1343 #endif
1344         default:
1345                 printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
1346                        info->io.regsize);
1347                 return -EINVAL;
1348         }
1349
1350         /* Calculate the total amount of memory to claim.  This is an
1351          * unusual looking calculation, but it avoids claiming any
1352          * more memory than it has to.  It will claim everything
1353          * between the first address to the end of the last full
1354          * register. */
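        /* Worked example (added for illustration): with io_size = 4,
           regspacing = 4 and regsize = 1 this claims 4*4 - (4-1) = 13
           bytes, i.e. from the first register through the last byte of
           the last register. */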
1355         mapsize = ((info->io_size * info->io.regspacing)
1356                    - (info->io.regspacing - info->io.regsize));
1357
1358         if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1359                 return -EIO;
1360
1361         info->io.addr = ioremap(addr, mapsize);
1362         if (info->io.addr == NULL) {
1363                 release_mem_region(addr, mapsize);
1364                 return -EIO;
1365         }
1366         return 0;
1367 }
1368
1369 /*
1370  * Parms come in as <op1>[:op2[:op3...]].  ops are:
1371  *   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
1372  * Options are:
1373  *   rsp=<regspacing>
1374  *   rsi=<regsize>
1375  *   rsh=<regshift>
1376  *   irq=<irq>
1377  *   ipmb=<ipmb addr>
1378  */
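
/*
 * Example (illustrative, based on the option format above): a KCS
 * interface at I/O address 0xca2 could be added at run time with
 *
 *   echo "add,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod
 *
 * and removed again by writing the matching "remove,..." line.
 */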
1379 enum hotmod_op { HM_ADD, HM_REMOVE };
1380 struct hotmod_vals {
1381         char *name;
1382         int  val;
1383 };
1384 static struct hotmod_vals hotmod_ops[] = {
1385         { "add",        HM_ADD },
1386         { "remove",     HM_REMOVE },
1387         { NULL }
1388 };
1389 static struct hotmod_vals hotmod_si[] = {
1390         { "kcs",        SI_KCS },
1391         { "smic",       SI_SMIC },
1392         { "bt",         SI_BT },
1393         { NULL }
1394 };
1395 static struct hotmod_vals hotmod_as[] = {
1396         { "mem",        IPMI_MEM_ADDR_SPACE },
1397         { "i/o",        IPMI_IO_ADDR_SPACE },
1398         { NULL }
1399 };
1400
1401 static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1402 {
1403         char *s;
1404         int  i;
1405
1406         s = strchr(*curr, ',');
1407         if (!s) {
1408                 printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
1409                 return -EINVAL;
1410         }
1411         *s = '\0';
1412         s++;
1413         for (i = 0; v[i].name; i++) {
1414                 if (strcmp(*curr, v[i].name) == 0) {
1415                         *val = v[i].val;
1416                         *curr = s;
1417                         return 0;
1418                 }
1419         }
1420
1421         printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
1422         return -EINVAL;
1423 }
1424
1425 static int check_hotmod_int_op(const char *curr, const char *option,
1426                                const char *name, int *val)
1427 {
1428         char *n;
1429
1430         if (strcmp(curr, name) == 0) {
1431                 if (!option) {
1432                         printk(KERN_WARNING PFX
1433                                "No option given for '%s'\n",
1434                                curr);
1435                         return -EINVAL;
1436                 }
1437                 *val = simple_strtoul(option, &n, 0);
1438                 if ((*n != '\0') || (*option == '\0')) {
1439                         printk(KERN_WARNING PFX
1440                                "Bad option given for '%s'\n",
1441                                curr);
1442                         return -EINVAL;
1443                 }
1444                 return 1;
1445         }
1446         return 0;
1447 }
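
/*
 * Illustrative calls (hypothetical values): with curr = "rsp" and
 * option = "4", check_hotmod_int_op(curr, option, "rsp", &regspacing)
 * stores 4 in regspacing and returns 1 (option consumed).  A
 * non-matching name returns 0, and a missing or malformed number
 * returns -EINVAL.
 */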
1448
1449 static int hotmod_handler(const char *val, struct kernel_param *kp)
1450 {
1451         char *str = kstrdup(val, GFP_KERNEL);
1452         int  rv;
1453         char *next, *curr, *s, *n, *o;
1454         enum hotmod_op op;
1455         enum si_type si_type;
1456         int  addr_space;
1457         unsigned long addr;
1458         int regspacing;
1459         int regsize;
1460         int regshift;
1461         int irq;
1462         int ipmb;
1463         int ival;
1464         int len;
1465         struct smi_info *info;
1466
1467         if (!str)
1468                 return -ENOMEM;
1469
1470         /* Kill any trailing spaces, as we can get a "\n" from echo. */
1471         len = strlen(str);
1472         ival = len - 1;
1473         while ((ival >= 0) && isspace(str[ival])) {
1474                 str[ival] = '\0';
1475                 ival--;
1476         }
1477
1478         for (curr = str; curr; curr = next) {
1479                 regspacing = 1;
1480                 regsize = 1;
1481                 regshift = 0;
1482                 irq = 0;
1483                 ipmb = 0x20;
1484
1485                 next = strchr(curr, ':');
1486                 if (next) {
1487                         *next = '\0';
1488                         next++;
1489                 }
1490
1491                 rv = parse_str(hotmod_ops, &ival, "operation", &curr);
1492                 if (rv)
1493                         break;
1494                 op = ival;
1495
1496                 rv = parse_str(hotmod_si, &ival, "interface type", &curr);
1497                 if (rv)
1498                         break;
1499                 si_type = ival;
1500
1501                 rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
1502                 if (rv)
1503                         break;
1504
1505                 s = strchr(curr, ',');
1506                 if (s) {
1507                         *s = '\0';
1508                         s++;
1509                 }
1510                 addr = simple_strtoul(curr, &n, 0);
1511                 if ((*n != '\0') || (*curr == '\0')) {
1512                         printk(KERN_WARNING PFX "Invalid hotmod address"
1513                                " '%s'\n", curr);
1514                         break;
1515                 }
1516
1517                 while (s) {
1518                         curr = s;
1519                         s = strchr(curr, ',');
1520                         if (s) {
1521                                 *s = '\0';
1522                                 s++;
1523                         }
1524                         o = strchr(curr, '=');
1525                         if (o) {
1526                                 *o = '\0';
1527                                 o++;
1528                         }
1529                         rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
1530                         if (rv < 0)
1531                                 goto out;
1532                         else if (rv)
1533                                 continue;
1534                         rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
1535                         if (rv < 0)
1536                                 goto out;
1537                         else if (rv)
1538                                 continue;
1539                         rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
1540                         if (rv < 0)
1541                                 goto out;
1542                         else if (rv)
1543                                 continue;
1544                         rv = check_hotmod_int_op(curr, o, "irq", &irq);
1545                         if (rv < 0)
1546                                 goto out;
1547                         else if (rv)
1548                                 continue;
1549                         rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
1550                         if (rv < 0)
1551                                 goto out;
1552                         else if (rv)
1553                                 continue;
1554
1555                         rv = -EINVAL;
1556                         printk(KERN_WARNING PFX
1557                                "Invalid hotmod option '%s'\n",
1558                                curr);
1559                         goto out;
1560                 }
1561
1562                 if (op == HM_ADD) {
1563                         info = kzalloc(sizeof(*info), GFP_KERNEL);
1564                         if (!info) {
1565                                 rv = -ENOMEM;
1566                                 goto out;
1567                         }
1568
1569                         info->addr_source = "hotmod";
1570                         info->si_type = si_type;
1571                         info->io.addr_data = addr;
1572                         info->io.addr_type = addr_space;
1573                         if (addr_space == IPMI_MEM_ADDR_SPACE)
1574                                 info->io_setup = mem_setup;
1575                         else
1576                                 info->io_setup = port_setup;
1577
1578                         info->io.addr = NULL;
1579                         info->io.regspacing = regspacing;
1580                         if (!info->io.regspacing)
1581                                 info->io.regspacing = DEFAULT_REGSPACING;
1582                         info->io.regsize = regsize;
1583                         if (!info->io.regsize)
1584                                 info->io.regsize = DEFAULT_REGSPACING;
1585                         info->io.regshift = regshift;
1586                         info->irq = irq;
1587                         if (info->irq)
1588                                 info->irq_setup = std_irq_setup;
1589                         info->slave_addr = ipmb;
1590
1591                         try_smi_init(info);
1592                 } else {
1593                         /* remove */
1594                         struct smi_info *e, *tmp_e;
1595
1596                         mutex_lock(&smi_infos_lock);
1597                         list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
1598                                 if (e->io.addr_type != addr_space)
1599                                         continue;
1600                                 if (e->si_type != si_type)
1601                                         continue;
1602                                 if (e->io.addr_data == addr)
1603                                         cleanup_one_si(e);
1604                         }
1605                         mutex_unlock(&smi_infos_lock);
1606                 }
1607         }
1608         rv = len;
1609  out:
1610         kfree(str);
1611         return rv;
1612 }
1613
1614 static __devinit void hardcode_find_bmc(void)
1615 {
1616         int             i;
1617         struct smi_info *info;
1618
1619         for (i = 0; i < SI_MAX_PARMS; i++) {
1620                 if (!ports[i] && !addrs[i])
1621                         continue;
1622
1623                 info = kzalloc(sizeof(*info), GFP_KERNEL);
1624                 if (!info)
1625                         return;
1626
1627                 info->addr_source = "hardcoded";
1628
1629                 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1630                         info->si_type = SI_KCS;
1631                 } else if (strcmp(si_type[i], "smic") == 0) {
1632                         info->si_type = SI_SMIC;
1633                 } else if (strcmp(si_type[i], "bt") == 0) {
1634                         info->si_type = SI_BT;
1635                 } else {
1636                         printk(KERN_WARNING
1637                                "ipmi_si: Interface type specified "
1638                                "for interface %d was invalid: %s\n",
1639                                i, si_type[i]);
1640                         kfree(info);
1641                         continue;
1642                 }
1643
1644                 if (ports[i]) {
1645                         /* An I/O port */
1646                         info->io_setup = port_setup;
1647                         info->io.addr_data = ports[i];
1648                         info->io.addr_type = IPMI_IO_ADDR_SPACE;
1649                 } else if (addrs[i]) {
1650                         /* A memory port */
1651                         info->io_setup = mem_setup;
1652                         info->io.addr_data = addrs[i];
1653                         info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1654                 } else {
1655                         printk(KERN_WARNING
1656                                "ipmi_si: Interface type specified "
1657                                "for interface %d, "
1658                                "but port and address were not set or "
1659                                "set to zero.\n", i);
1660                         kfree(info);
1661                         continue;
1662                 }
1663
1664                 info->io.addr = NULL;
1665                 info->io.regspacing = regspacings[i];
1666                 if (!info->io.regspacing)
1667                         info->io.regspacing = DEFAULT_REGSPACING;
1668                 info->io.regsize = regsizes[i];
1669                 if (!info->io.regsize)
1670                         info->io.regsize = DEFAULT_REGSPACING;
1671                 info->io.regshift = regshifts[i];
1672                 info->irq = irqs[i];
1673                 if (info->irq)
1674                         info->irq_setup = std_irq_setup;
1675
1676                 try_smi_init(info);
1677         }
1678 }
1679
1680 #ifdef CONFIG_ACPI
1681
1682 #include <linux/acpi.h>
1683
1684 /* Once we get an ACPI failure, we don't try any more, because we go
1685    through the tables sequentially.  Once we don't find a table, there
1686    are no more. */
1687 static int acpi_failure;
1688
1689 /* For GPE-type interrupts. */
1690 static u32 ipmi_acpi_gpe(void *context)
1691 {
1692         struct smi_info *smi_info = context;
1693         unsigned long   flags;
1694 #ifdef DEBUG_TIMING
1695         struct timeval t;
1696 #endif
1697
1698         spin_lock_irqsave(&(smi_info->si_lock), flags);
1699
1700         spin_lock(&smi_info->count_lock);
1701         smi_info->interrupts++;
1702         spin_unlock(&smi_info->count_lock);
1703
1704         if (atomic_read(&smi_info->stop_operation))
1705                 goto out;
1706
1707 #ifdef DEBUG_TIMING
1708         do_gettimeofday(&t);
1709         printk("**ACPI_GPE: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
1710 #endif
1711         smi_event_handler(smi_info, 0);
1712  out:
1713         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1714
1715         return ACPI_INTERRUPT_HANDLED;
1716 }
1717
1718 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1719 {
1720         if (!info->irq)
1721                 return;
1722
1723         acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1724 }
1725
1726 static int acpi_gpe_irq_setup(struct smi_info *info)
1727 {
1728         acpi_status status;
1729
1730         if (!info->irq)
1731                 return 0;
1732
1733         /* FIXME - is level triggered right? */
1734         status = acpi_install_gpe_handler(NULL,
1735                                           info->irq,
1736                                           ACPI_GPE_LEVEL_TRIGGERED,
1737                                           &ipmi_acpi_gpe,
1738                                           info);
1739         if (status != AE_OK) {
1740                 printk(KERN_WARNING
1741                        "ipmi_si: %s unable to claim ACPI GPE %d,"
1742                        " running polled\n",
1743                        DEVICE_NAME, info->irq);
1744                 info->irq = 0;
1745                 return -EINVAL;
1746         } else {
1747                 info->irq_cleanup = acpi_gpe_irq_cleanup;
1748                 printk("  Using ACPI GPE %d\n", info->irq);
1749                 return 0;
1750         }
1751 }
1752
1753 /*
1754  * Defined at
1755  * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1756  */
1757 struct SPMITable {
1758         s8      Signature[4];
1759         u32     Length;
1760         u8      Revision;
1761         u8      Checksum;
1762         s8      OEMID[6];
1763         s8      OEMTableID[8];
1764         s8      OEMRevision[4];
1765         s8      CreatorID[4];
1766         s8      CreatorRevision[4];
1767         u8      InterfaceType;
1768         u8      IPMIlegacy;
1769         s16     SpecificationRevision;
1770
1771         /*
1772          * Bit 0 - SCI interrupt supported
1773          * Bit 1 - I/O APIC/SAPIC
1774          */
1775         u8      InterruptType;
1776
1777         /* If bit 0 of InterruptType is set, then this is the SCI
1778            interrupt in the GPEx_STS register. */
1779         u8      GPE;
1780
1781         s16     Reserved;
1782
1783         /* If bit 1 of InterruptType is set, then this is the I/O
1784            APIC/SAPIC interrupt. */
1785         u32     GlobalSystemInterrupt;
1786
1787         /* The actual register address. */
1788         struct acpi_generic_address addr;
1789
1790         u8      UID[4];
1791
1792         s8      spmi_id[1]; /* A '\0' terminated array starts here. */
1793 };
1794
1795 static __devinit int try_init_acpi(struct SPMITable *spmi)
1796 {
1797         struct smi_info  *info;
1798         u8               addr_space;
1799
1800         if (spmi->IPMIlegacy != 1) {
1801                 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1802                 return -ENODEV;
1803         }
1804
1805         if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1806                 addr_space = IPMI_MEM_ADDR_SPACE;
1807         else
1808                 addr_space = IPMI_IO_ADDR_SPACE;
1809
1810         info = kzalloc(sizeof(*info), GFP_KERNEL);
1811         if (!info) {
1812                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1813                 return -ENOMEM;
1814         }
1815
1816         info->addr_source = "ACPI";
1817
1818         /* Figure out the interface type. */
1819         switch (spmi->InterfaceType)
1820         {
1821         case 1: /* KCS */
1822                 info->si_type = SI_KCS;
1823                 break;
1824         case 2: /* SMIC */
1825                 info->si_type = SI_SMIC;
1826                 break;
1827         case 3: /* BT */
1828                 info->si_type = SI_BT;
1829                 break;
1830         default:
1831                 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1832                         spmi->InterfaceType);
1833                 kfree(info);
1834                 return -EIO;
1835         }
1836
1837         if (spmi->InterruptType & 1) {
1838                 /* We've got a GPE interrupt. */
1839                 info->irq = spmi->GPE;
1840                 info->irq_setup = acpi_gpe_irq_setup;
1841         } else if (spmi->InterruptType & 2) {
1842                 /* We've got an APIC/SAPIC interrupt. */
1843                 info->irq = spmi->GlobalSystemInterrupt;
1844                 info->irq_setup = std_irq_setup;
1845         } else {
1846                 /* Use the default interrupt setting. */
1847                 info->irq = 0;
1848                 info->irq_setup = NULL;
1849         }
1850
1851         if (spmi->addr.bit_width) {
1852                 /* A (hopefully) properly formed register bit width. */
1853                 info->io.regspacing = spmi->addr.bit_width / 8;
1854         } else {
1855                 info->io.regspacing = DEFAULT_REGSPACING;
1856         }
1857         info->io.regsize = info->io.regspacing;
1858         info->io.regshift = spmi->addr.bit_offset;
1859
1860         if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1861                 info->io_setup = mem_setup;
1862                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1863         } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1864                 info->io_setup = port_setup;
1865                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1866         } else {
1867                 kfree(info);
1868                 printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n");
1869                 return -EIO;
1870         }
1871         info->io.addr_data = spmi->addr.address;
1872
1873         try_smi_init(info);
1874
1875         return 0;
1876 }
1877
1878 static __devinit void acpi_find_bmc(void)
1879 {
1880         acpi_status      status;
1881         struct SPMITable *spmi;
1882         int              i;
1883
1884         if (acpi_disabled)
1885                 return;
1886
1887         if (acpi_failure)
1888                 return;
1889
1890         for (i = 0; ; i++) {
1891                 status = acpi_get_table(ACPI_SIG_SPMI, i+1,
1892                                         (struct acpi_table_header **)&spmi);
1893                 if (status != AE_OK)
1894                         return;
1895
1896                 try_init_acpi(spmi);
1897         }
1898 }
1899 #endif
1900
1901 #ifdef CONFIG_DMI
1902 struct dmi_ipmi_data
1903 {
1904         u8              type;
1905         u8              addr_space;
1906         unsigned long   base_addr;
1907         u8              irq;
1908         u8              offset;
1909         u8              slave_addr;
1910 };
1911
1912 static int __devinit decode_dmi(struct dmi_header *dm,
1913                                 struct dmi_ipmi_data *dmi)
1914 {
1915         u8              *data = (u8 *)dm;
1916         unsigned long   base_addr;
1917         u8              reg_spacing;
1918         u8              len = dm->length;
1919
1920         dmi->type = data[4];
1921
1922         memcpy(&base_addr, data+8, sizeof(unsigned long));
1923         if (len >= 0x11) {
1924                 if (base_addr & 1) {
1925                         /* I/O */
1926                         base_addr &= 0xFFFE;
1927                         dmi->addr_space = IPMI_IO_ADDR_SPACE;
1928                 }
1929                 else {
1930                         /* Memory */
1931                         dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1932                 }
1933                 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1934                    is odd. */
1935                 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1936
1937                 dmi->irq = data[0x11];
1938
1939                 /* The top two bits of byte 0x10 hold the register spacing. */
1940                 reg_spacing = (data[0x10] & 0xC0) >> 6;
1941                 switch (reg_spacing) {
1942                 case 0x00: /* Byte boundaries */
1943                         dmi->offset = 1;
1944                         break;
1945                 case 0x01: /* 32-bit boundaries */
1946                         dmi->offset = 4;
1947                         break;
1948                 case 0x02: /* 16-byte boundaries */
1949                         dmi->offset = 16;
1950                         break;
1951                 default:
1952                         /* Some other interface, just ignore it. */
1953                         return -EIO;
1954                 }
1955         } else {
1956                 /* Old DMI spec. */
1957                 /* Note that technically, the lower bit of the base
1958                  * address should be 1 if the address is I/O and 0 if
1959                  * the address is in memory.  Many systems get that
1960                  * wrong (and all that I have seen are I/O), so we just
1961                  * ignore that bit and assume I/O.  Systems that use
1962                  * memory should use the newer spec, anyway. */
1963                 dmi->base_addr = base_addr & 0xfffe;
1964                 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1965                 dmi->offset = 1;
1966         }
1967
1968         dmi->slave_addr = data[6];
1969
1970         return 0;
1971 }
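
/*
 * Worked example (illustrative only): for an SMBIOS IPMI record whose
 * base address field is 0xCA3, bit 0 is set, so the interface lives in
 * I/O space at 0xCA2.  If byte 0x10 of the record is 0x50, bit 4
 * restores the odd low address bit (giving 0xCA3 again) and the top two
 * bits (0x01) select 32-bit register spacing, so dmi->offset becomes 4.
 */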
1972
1973 static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
1974 {
1975         struct smi_info *info;
1976
1977         info = kzalloc(sizeof(*info), GFP_KERNEL);
1978         if (!info) {
1979                 printk(KERN_ERR
1980                        "ipmi_si: Could not allocate SI data\n");
1981                 return;
1982         }
1983
1984         info->addr_source = "SMBIOS";
1985
1986         switch (ipmi_data->type) {
1987         case 0x01: /* KCS */
1988                 info->si_type = SI_KCS;
1989                 break;
1990         case 0x02: /* SMIC */
1991                 info->si_type = SI_SMIC;
1992                 break;
1993         case 0x03: /* BT */
1994                 info->si_type = SI_BT;
1995                 break;
1996         default:
1997                 return;
1998         }
1999
2000         switch (ipmi_data->addr_space) {
2001         case IPMI_MEM_ADDR_SPACE:
2002                 info->io_setup = mem_setup;
2003                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2004                 break;
2005
2006         case IPMI_IO_ADDR_SPACE:
2007                 info->io_setup = port_setup;
2008                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2009                 break;
2010
2011         default:
2012                 kfree(info);
2013                 printk(KERN_WARNING
2014                        "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
2015                        ipmi_data->addr_space);
2016                 return;
2017         }
2018         info->io.addr_data = ipmi_data->base_addr;
2019
2020         info->io.regspacing = ipmi_data->offset;
2021         if (!info->io.regspacing)
2022                 info->io.regspacing = DEFAULT_REGSPACING;
2023         info->io.regsize = DEFAULT_REGSPACING;
2024         info->io.regshift = 0;
2025
2026         info->slave_addr = ipmi_data->slave_addr;
2027
2028         info->irq = ipmi_data->irq;
2029         if (info->irq)
2030                 info->irq_setup = std_irq_setup;
2031
2032         try_smi_init(info);
2033 }
2034
2035 static void __devinit dmi_find_bmc(void)
2036 {
2037         struct dmi_device    *dev = NULL;
2038         struct dmi_ipmi_data data;
2039         int                  rv;
2040
2041         while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
2042                 memset(&data, 0, sizeof(data));
2043                 rv = decode_dmi((struct dmi_header *) dev->device_data, &data);
2044                 if (!rv)
2045                         try_init_dmi(&data);
2046         }
2047 }
2048 #endif /* CONFIG_DMI */
2049
2050 #ifdef CONFIG_PCI
2051
2052 #define PCI_ERMC_CLASSCODE              0x0C0700
2053 #define PCI_ERMC_CLASSCODE_MASK         0xffffff00
2054 #define PCI_ERMC_CLASSCODE_TYPE_MASK    0xff
2055 #define PCI_ERMC_CLASSCODE_TYPE_SMIC    0x00
2056 #define PCI_ERMC_CLASSCODE_TYPE_KCS     0x01
2057 #define PCI_ERMC_CLASSCODE_TYPE_BT      0x02
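
/*
 * Background note (added for clarity; based on the PCI class code
 * assignments, not on the original comments): 0x0C07 is the serial-bus
 * subclass reserved for IPMI controllers, and the low "programming
 * interface" byte encodes the register interface, which is why
 * ipmi_pci_probe() maps 0x00/0x01/0x02 onto SMIC, KCS and BT below.
 */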
2058
2059 #define PCI_HP_VENDOR_ID    0x103C
2060 #define PCI_MMC_DEVICE_ID   0x121A
2061 #define PCI_MMC_ADDR_CW     0x10
2062
2063 static void ipmi_pci_cleanup(struct smi_info *info)
2064 {
2065         struct pci_dev *pdev = info->addr_source_data;
2066
2067         pci_disable_device(pdev);
2068 }
2069
2070 static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2071                                     const struct pci_device_id *ent)
2072 {
2073         int rv;
2074         int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
2075         struct smi_info *info;
2076         int first_reg_offset = 0;
2077
2078         info = kzalloc(sizeof(*info), GFP_KERNEL);
2079         if (!info)
2080                 return -ENOMEM;
2081
2082         info->addr_source = "PCI";
2083
2084         switch (class_type) {
2085         case PCI_ERMC_CLASSCODE_TYPE_SMIC:
2086                 info->si_type = SI_SMIC;
2087                 break;
2088
2089         case PCI_ERMC_CLASSCODE_TYPE_KCS:
2090                 info->si_type = SI_KCS;
2091                 break;
2092
2093         case PCI_ERMC_CLASSCODE_TYPE_BT:
2094                 info->si_type = SI_BT;
2095                 break;
2096
2097         default:
2098                 kfree(info);
2099                 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
2100                        pci_name(pdev), class_type);
2101                 return -ENOMEM;
2102         }
2103
2104         rv = pci_enable_device(pdev);
2105         if (rv) {
2106                 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
2107                        pci_name(pdev));
2108                 kfree(info);
2109                 return rv;
2110         }
2111
2112         info->addr_source_cleanup = ipmi_pci_cleanup;
2113         info->addr_source_data = pdev;
2114
2115         if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
2116                 first_reg_offset = 1;
2117
2118         if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
2119                 info->io_setup = port_setup;
2120                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2121         } else {
2122                 info->io_setup = mem_setup;
2123                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2124         }
2125         info->io.addr_data = pci_resource_start(pdev, 0);
2126
2127         info->io.regspacing = DEFAULT_REGSPACING;
2128         info->io.regsize = DEFAULT_REGSPACING;
2129         info->io.regshift = 0;
2130
2131         info->irq = pdev->irq;
2132         if (info->irq)
2133                 info->irq_setup = std_irq_setup;
2134
2135         info->dev = &pdev->dev;
2136
2137         return try_smi_init(info);
2138 }
2139
2140 static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
2141 {
2142 }
2143
2144 #ifdef CONFIG_PM
2145 static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2146 {
2147         return 0;
2148 }
2149
2150 static int ipmi_pci_resume(struct pci_dev *pdev)
2151 {
2152         return 0;
2153 }
2154 #endif
2155
2156 static struct pci_device_id ipmi_pci_devices[] = {
2157         { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
2158         { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
             { 0, }  /* terminating entry */
2159 };
2160 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2161
2162 static struct pci_driver ipmi_pci_driver = {
2163         .name =         DEVICE_NAME,
2164         .id_table =     ipmi_pci_devices,
2165         .probe =        ipmi_pci_probe,
2166         .remove =       __devexit_p(ipmi_pci_remove),
2167 #ifdef CONFIG_PM
2168         .suspend =      ipmi_pci_suspend,
2169         .resume =       ipmi_pci_resume,
2170 #endif
2171 };
2172 #endif /* CONFIG_PCI */
2173
2174
2175 static int try_get_dev_id(struct smi_info *smi_info)
2176 {
2177         unsigned char         msg[2];
2178         unsigned char         *resp;
2179         unsigned long         resp_len;
2180         enum si_sm_result     smi_result;
2181         int                   rv = 0;
2182
2183         resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
2184         if (!resp)
2185                 return -ENOMEM;
2186
2187         /* Do a Get Device ID command, since it comes back with some
2188            useful info. */
2189         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2190         msg[1] = IPMI_GET_DEVICE_ID_CMD;
2191         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2192
2193         smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
2194         for (;;)
2195         {
2196                 if (smi_result == SI_SM_CALL_WITH_DELAY ||
2197                     smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
2198                         schedule_timeout_uninterruptible(1);
2199                         smi_result = smi_info->handlers->event(
2200                                 smi_info->si_sm, 100);
2201                 }
2202                 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
2203                 {
2204                         smi_result = smi_info->handlers->event(
2205                                 smi_info->si_sm, 0);
2206                 }
2207                 else
2208                         break;
2209         }
2210         if (smi_result == SI_SM_HOSED) {
2211                 /* We couldn't get the state machine to run, so whatever's at
2212                    the port is probably not an IPMI SMI interface. */
2213                 rv = -ENODEV;
2214                 goto out;
2215         }
2216
2217         /* Otherwise, we got some data. */
2218         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2219                                                   resp, IPMI_MAX_MSG_LENGTH);
2220         if (resp_len < 14) {
2221                 /* That's odd, it should be longer. */
2222                 rv = -EINVAL;
2223                 goto out;
2224         }
2225
2226         if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
2227                 /* That's odd, it shouldn't be able to fail. */
2228                 rv = -EINVAL;
2229                 goto out;
2230         }
2231
2232         /* Record info from the get device id, in case we need it. */
2233         ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id);
2234
2235  out:
2236         kfree(resp);
2237         return rv;
2238 }
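
/*
 * Illustrative transaction (byte values per the IPMI specification, for
 * reference only): the request above is two bytes,
 * IPMI_NETFN_APP_REQUEST << 2 followed by IPMI_GET_DEVICE_ID_CMD
 * (0x18 0x01), and a good response carries the netfn/lun byte, the
 * echoed command in resp[1], a zero completion code in resp[2], and the
 * device id payload from resp[3] onward, which is what gets handed to
 * ipmi_demangle_device_id().
 */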
2239
2240 static int type_file_read_proc(char *page, char **start, off_t off,
2241                                int count, int *eof, void *data)
2242 {
2243         struct smi_info *smi = data;
2244
2245         return sprintf(page, "%s\n", si_to_str[smi->si_type]);
2246 }
2247
2248 static int stat_file_read_proc(char *page, char **start, off_t off,
2249                                int count, int *eof, void *data)
2250 {
2251         char            *out = (char *) page;
2252         struct smi_info *smi = data;
2253
2254         out += sprintf(out, "interrupts_enabled:    %d\n",
2255                        smi->irq && !smi->interrupt_disabled);
2256         out += sprintf(out, "short_timeouts:        %ld\n",
2257                        smi->short_timeouts);
2258         out += sprintf(out, "long_timeouts:         %ld\n",
2259                        smi->long_timeouts);
2260         out += sprintf(out, "timeout_restarts:      %ld\n",
2261                        smi->timeout_restarts);
2262         out += sprintf(out, "idles:                 %ld\n",
2263                        smi->idles);
2264         out += sprintf(out, "interrupts:            %ld\n",
2265                        smi->interrupts);
2266         out += sprintf(out, "attentions:            %ld\n",
2267                        smi->attentions);
2268         out += sprintf(out, "flag_fetches:          %ld\n",
2269                        smi->flag_fetches);
2270         out += sprintf(out, "hosed_count:           %ld\n",
2271                        smi->hosed_count);
2272         out += sprintf(out, "complete_transactions: %ld\n",
2273                        smi->complete_transactions);
2274         out += sprintf(out, "events:                %ld\n",
2275                        smi->events);
2276         out += sprintf(out, "watchdog_pretimeouts:  %ld\n",
2277                        smi->watchdog_pretimeouts);
2278         out += sprintf(out, "incoming_messages:     %ld\n",
2279                        smi->incoming_messages);
2280
2281         return out - page;
2282 }
2283
2284 static int param_read_proc(char *page, char **start, off_t off,
2285                            int count, int *eof, void *data)
2286 {
2287         struct smi_info *smi = data;
2288
2289         return sprintf(page,
2290                        "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2291                        si_to_str[smi->si_type],
2292                        addr_space_to_str[smi->io.addr_type],
2293                        smi->io.addr_data,
2294                        smi->io.regspacing,
2295                        smi->io.regsize,
2296                        smi->io.regshift,
2297                        smi->irq,
2298                        smi->slave_addr);
2299 }
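
/*
 * Example of a line produced by the format above (illustrative, for a
 * default KCS port with no IRQ and the usual 0x20 slave address):
 *
 *     kcs,i/o,0xca2,rsp=1,rsi=1,rsh=0,irq=0,ipmb=32
 *
 * Note that the slave address comes out in decimal because of the %d
 * conversion.
 */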
2300
2301 /*
2302  * oem_data_avail_to_receive_msg_avail
2303  * @info - smi_info structure with msg_flags set
2304  *
2305  * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2306  * Returns 1 indicating need to re-run handle_flags().
2307  */
2308 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2309 {
2310         smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2311                                 RECEIVE_MSG_AVAIL);
2312         return 1;
2313 }
2314
2315 /*
2316  * setup_dell_poweredge_oem_data_handler
2317  * @info - smi_info.device_id must be populated
2318  *
2319  * Systems that match, but have firmware version < 1.40 may assert
2320  * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2321  * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
2322  * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2323  * as RECEIVE_MSG_AVAIL instead.
2324  *
2325  * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2326  * assert the OEM[012] bits, and if it did, the driver would have to
2327  * change to handle that properly, we don't actually check for the
2328  * firmware version.
2329  * Device ID = 0x20                BMC on PowerEdge 8G servers
2330  * Device Revision = 0x80
2331  * Firmware Revision1 = 0x01       BMC version 1.40
2332  * Firmware Revision2 = 0x40       BCD encoded
2333  * IPMI Version = 0x51             IPMI 1.5
2334  * Manufacturer ID = A2 02 00      Dell IANA
2335  *
2336  * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2337  * OEM0_DATA_AVAIL, which needs to be treated as RECEIVE_MSG_AVAIL.
2338  *
2339  */
2340 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
2341 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2342 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2343 #define DELL_IANA_MFR_ID 0x0002a2
2344 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2345 {
2346         struct ipmi_device_id *id = &smi_info->device_id;
2347         if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2348                 if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
2349                     id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2350                     id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2351                         smi_info->oem_data_avail_handler =
2352                                 oem_data_avail_to_receive_msg_avail;
2353                 }
2354                 else if (ipmi_version_major(id) < 1 ||
2355                          (ipmi_version_major(id) == 1 &&
2356                           ipmi_version_minor(id) < 5)) {
2357                         smi_info->oem_data_avail_handler =
2358                                 oem_data_avail_to_receive_msg_avail;
2359                 }
2360         }
2361 }
2362
2363 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2364 static void return_hosed_msg_badsize(struct smi_info *smi_info)
2365 {
2366         struct ipmi_smi_msg *msg = smi_info->curr_msg;
2367
2368         /* Make it a response. */
2369         msg->rsp[0] = msg->data[0] | 4;
2370         msg->rsp[1] = msg->data[1];
2371         msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2372         msg->rsp_size = 3;
2373         smi_info->curr_msg = NULL;
2374         deliver_recv_msg(smi_info, msg);
2375 }
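
/*
 * Illustrative note on the "| 4" above: the netfn occupies the upper
 * six bits of the first message byte, and a response netfn is the
 * request netfn plus one, so OR-ing in 4 (1 << 2) converts, e.g., a
 * storage request byte of 0x28 into the response byte 0x2c.  The reply
 * then carries the echoed command and the 0xCA completion code.
 */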
2376
2377 /*
2378  * dell_poweredge_bt_xaction_handler
2379  * @info - smi_info.device_id must be populated
2380  *
2381  * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2382  * not respond to a Get SDR command if the length of the data
2383  * requested is exactly 0x3A, which leads to command timeouts and no
2384  * data returned.  This intercepts such commands, and causes userspace
2385  * callers to try again with a different-sized buffer, which succeeds.
2386  */
2387
2388 #define STORAGE_NETFN 0x0A
2389 #define STORAGE_CMD_GET_SDR 0x23
2390 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2391                                              unsigned long unused,
2392                                              void *in)
2393 {
2394         struct smi_info *smi_info = in;
2395         unsigned char *data = smi_info->curr_msg->data;
2396         unsigned int size   = smi_info->curr_msg->data_size;
2397         if (size >= 8 &&
2398             (data[0]>>2) == STORAGE_NETFN &&
2399             data[1] == STORAGE_CMD_GET_SDR &&
2400             data[7] == 0x3A) {
2401                 return_hosed_msg_badsize(smi_info);
2402                 return NOTIFY_STOP;
2403         }
2404         return NOTIFY_DONE;
2405 }
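
/*
 * Illustrative match (values implied by the checks above): a request
 * whose first byte is 0x28 (storage netfn, LUN 0), whose command byte
 * is 0x23 (Get SDR), and whose byte 7 (the requested length) is 0x3A
 * is intercepted and answered with the "cannot return requested length"
 * response instead of being sent to the BMC.
 */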
2406
2407 static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2408         .notifier_call  = dell_poweredge_bt_xaction_handler,
2409 };
2410
2411 /*
2412  * setup_dell_poweredge_bt_xaction_handler
2413  * @info - smi_info.device_id must be filled in already
2414  *
2415  * Registers the BT transaction notifier for Dell PowerEdge
2416  * systems that need it, once we know the device id.
2417  */
2418 static void
2419 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2420 {
2421         struct ipmi_device_id *id = &smi_info->device_id;
2422         if (id->manufacturer_id == DELL_IANA_MFR_ID &&
2423             smi_info->si_type == SI_BT)
2424                 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2425 }
2426
2427 /*
2428  * setup_oem_data_handler
2429  * @info - smi_info.device_id must be filled in already
2430  *
2431  * Fills in smi_info.oem_data_avail_handler
2432  * when we know what function to use there.
2433  */
2434
2435 static void setup_oem_data_handler(struct smi_info *smi_info)
2436 {
2437         setup_dell_poweredge_oem_data_handler(smi_info);
2438 }
2439
2440 static void setup_xaction_handlers(struct smi_info *smi_info)
2441 {
2442         setup_dell_poweredge_bt_xaction_handler(smi_info);
2443 }
2444
2445 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2446 {
2447         if (smi_info->intf) {
2448                 /* The timer and thread are only running if the
2449                    interface has been started up and registered. */
2450                 if (smi_info->thread != NULL)
2451                         kthread_stop(smi_info->thread);
2452                 del_timer_sync(&smi_info->si_timer);
2453         }
2454 }
2455
2456 static __devinitdata struct ipmi_default_vals
2457 {
2458         int type;
2459         int port;
2460 } ipmi_defaults[] =
2461 {
2462         { .type = SI_KCS, .port = 0xca2 },
2463         { .type = SI_SMIC, .port = 0xca9 },
2464         { .type = SI_BT, .port = 0xe4 },
2465         { .port = 0 }
2466 };
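
/*
 * Note added for clarity: these ports match the legacy default system
 * interface addresses commonly given for KCS (0xca2), SMIC (0xca9) and
 * BT (0xe4); the zero port entry terminates the table for the scan in
 * default_find_bmc() below.
 */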
2467
2468 static __devinit void default_find_bmc(void)
2469 {
2470         struct smi_info *info;
2471         int             i;
2472
2473         for (i = 0; ; i++) {
2474                 if (!ipmi_defaults[i].port)
2475                         break;
2476
2477                 info = kzalloc(sizeof(*info), GFP_KERNEL);
2478                 if (!info)
2479                         return;
2480
2481                 info->addr_source = NULL;
2482
2483                 info->si_type = ipmi_defaults[i].type;
2484                 info->io_setup = port_setup;
2485                 info->io.addr_data = ipmi_defaults[i].port;
2486                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2487
2488                 info->io.addr = NULL;
2489                 info->io.regspacing = DEFAULT_REGSPACING;
2490                 info->io.regsize = DEFAULT_REGSPACING;
2491                 info->io.regshift = 0;
2492
2493                 if (try_smi_init(info) == 0) {
2494                         /* Found one... */
2495                         printk(KERN_INFO "ipmi_si: Found default %s state"
2496                                " machine at %s address 0x%lx\n",
2497                                si_to_str[info->si_type],
2498                                addr_space_to_str[info->io.addr_type],
2499                                info->io.addr_data);
2500                         return;
2501                 }
2502         }
2503 }
2504
2505 static int is_new_interface(struct smi_info *info)
2506 {
2507         struct smi_info *e;
2508
2509         list_for_each_entry(e, &smi_infos, link) {
2510                 if (e->io.addr_type != info->io.addr_type)
2511                         continue;
2512                 if (e->io.addr_data == info->io.addr_data)
2513                         return 0;
2514         }
2515
2516         return 1;
2517 }
2518
2519 static int try_smi_init(struct smi_info *new_smi)
2520 {
2521         int rv;
2522
2523         if (new_smi->addr_source) {
2524                 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2525                        " machine at %s address 0x%lx, slave address 0x%x,"
2526                        " irq %d\n",
2527                        new_smi->addr_source,
2528                        si_to_str[new_smi->si_type],
2529                        addr_space_to_str[new_smi->io.addr_type],
2530                        new_smi->io.addr_data,
2531                        new_smi->slave_addr, new_smi->irq);
2532         }
2533
2534         mutex_lock(&smi_infos_lock);
2535         if (!is_new_interface(new_smi)) {
2536                 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2537                 rv = -EBUSY;
2538                 goto out_err;
2539         }
2540
2541         /* So we know not to free it unless we have allocated one. */
2542         new_smi->intf = NULL;
2543         new_smi->si_sm = NULL;
2544         new_smi->handlers = NULL;
2545
2546         switch (new_smi->si_type) {
2547         case SI_KCS:
2548                 new_smi->handlers = &kcs_smi_handlers;
2549                 break;
2550
2551         case SI_SMIC:
2552                 new_smi->handlers = &smic_smi_handlers;
2553                 break;
2554
2555         case SI_BT:
2556                 new_smi->handlers = &bt_smi_handlers;
2557                 break;
2558
2559         default:
2560                 /* No support for anything else yet. */
2561                 rv = -EIO;
2562                 goto out_err;
2563         }
2564
2565         /* Allocate the state machine's data and initialize it. */
2566         new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2567         if (!new_smi->si_sm) {
2568                 printk(" Could not allocate state machine memory\n");
2569                 rv = -ENOMEM;
2570                 goto out_err;
2571         }
2572         new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2573                                                         &new_smi->io);
2574
2575         /* Now that we know the I/O size, we can set up the I/O. */
2576         rv = new_smi->io_setup(new_smi);
2577         if (rv) {
2578                 printk(" Could not set up I/O space\n");
2579                 goto out_err;
2580         }
2581
2582         spin_lock_init(&(new_smi->si_lock));
2583         spin_lock_init(&(new_smi->msg_lock));
2584         spin_lock_init(&(new_smi->count_lock));
2585
2586         /* Do low-level detection first. */
2587         if (new_smi->handlers->detect(new_smi->si_sm)) {
2588                 if (new_smi->addr_source)
2589                         printk(KERN_INFO "ipmi_si: Interface detection"
2590                                " failed\n");
2591                 rv = -ENODEV;
2592                 goto out_err;
2593         }
2594
2595         /* Attempt a get device id command.  If it fails, we probably
2596            don't have a BMC here. */
2597         rv = try_get_dev_id(new_smi);
2598         if (rv) {
2599                 if (new_smi->addr_source)
2600                         printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2601                                " at this location\n");
2602                 goto out_err;
2603         }
2604
2605         setup_oem_data_handler(new_smi);
2606         setup_xaction_handlers(new_smi);
2607
2608         /* Try to claim any interrupts. */
2609         if (new_smi->irq_setup)
2610                 new_smi->irq_setup(new_smi);
2611
2612         INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2613         INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2614         new_smi->curr_msg = NULL;
2615         atomic_set(&new_smi->req_events, 0);
2616         new_smi->run_to_completion = 0;
2617
2618         new_smi->interrupt_disabled = 0;
2619         atomic_set(&new_smi->stop_operation, 0);
2620         new_smi->intf_num = smi_num;
2621         smi_num++;
2622
2623         /* Start clearing the flags before we enable interrupts or the
2624            timer to avoid racing with the timer. */
2625         start_clear_flags(new_smi);
2626         /* IRQ is defined to be set when non-zero. */
2627         if (new_smi->irq)
2628                 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2629
2630         if (!new_smi->dev) {
2631                 /* If we don't already have a device from something
2632                  * else (like PCI), then register a new one. */
2633                 new_smi->pdev = platform_device_alloc("ipmi_si",
2634                                                       new_smi->intf_num);
2635                 if (!new_smi->pdev) {
                             rv = -ENOMEM;
2636                         printk(KERN_ERR
2637                                "ipmi_si_intf:"
2638                                " Unable to allocate platform device\n");
2639                         goto out_err;
2640                 }
2641                 new_smi->dev = &new_smi->pdev->dev;
2642                 new_smi->dev->driver = &ipmi_driver;
2643
2644                 rv = platform_device_add(new_smi->pdev);
2645                 if (rv) {
2646                         printk(KERN_ERR
2647                                "ipmi_si_intf:"
2648                                " Unable to register system interface device:"
2649                                " %d\n",
2650                                rv);
2651                         goto out_err;
2652                 }
2653                 new_smi->dev_registered = 1;
2654         }
2655
2656         rv = ipmi_register_smi(&handlers,
2657                                new_smi,
2658                                &new_smi->device_id,
2659                                new_smi->dev,
2660                                "bmc",
2661                                new_smi->slave_addr);
2662         if (rv) {
2663                 printk(KERN_ERR
2664                        "ipmi_si: Unable to register device: error %d\n",
2665                        rv);
2666                 goto out_err_stop_timer;
2667         }
2668
2669         rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2670                                      type_file_read_proc, NULL,
2671                                      new_smi, THIS_MODULE);
2672         if (rv) {
2673                 printk(KERN_ERR
2674                        "ipmi_si: Unable to create proc entry: %d\n",
2675                        rv);
2676                 goto out_err_stop_timer;
2677         }
2678
2679         rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2680                                      stat_file_read_proc, NULL,
2681                                      new_smi, THIS_MODULE);
2682         if (rv) {
2683                 printk(KERN_ERR
2684                        "ipmi_si: Unable to create proc entry: %d\n",
2685                        rv);
2686                 goto out_err_stop_timer;
2687         }
2688
2689         rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
2690                                      param_read_proc, NULL,
2691                                      new_smi, THIS_MODULE);
2692         if (rv) {
2693                 printk(KERN_ERR
2694                        "ipmi_si: Unable to create proc entry: %d\n",
2695                        rv);
2696                 goto out_err_stop_timer;
2697         }
2698
2699         list_add_tail(&new_smi->link, &smi_infos);
2700
2701         mutex_unlock(&smi_infos_lock);
2702
2703         printk(" IPMI %s interface initialized\n", si_to_str[new_smi->si_type]);
2704
2705         return 0;
2706
2707  out_err_stop_timer:
2708         atomic_inc(&new_smi->stop_operation);
2709         wait_for_timer_and_thread(new_smi);
2710
2711  out_err:
2712         if (new_smi->intf)
2713                 ipmi_unregister_smi(new_smi->intf);
2714
2715         if (new_smi->irq_cleanup)
2716                 new_smi->irq_cleanup(new_smi);
2717
2718         /* Wait until we know that we are out of any interrupt
2719            handlers that might have been running before we freed
2720            the interrupt. */
2721         synchronize_sched();
2722
2723         if (new_smi->si_sm) {
2724                 if (new_smi->handlers)
2725                         new_smi->handlers->cleanup(new_smi->si_sm);
2726                 kfree(new_smi->si_sm);
2727         }
2728         if (new_smi->addr_source_cleanup)
2729                 new_smi->addr_source_cleanup(new_smi);
2730         if (new_smi->io_cleanup)
2731                 new_smi->io_cleanup(new_smi);
2732
2733         if (new_smi->dev_registered)
2734                 platform_device_unregister(new_smi->pdev);
2735
2736         kfree(new_smi);
2737
2738         mutex_unlock(&smi_infos_lock);
2739
2740         return rv;
2741 }
2742
2743 static __devinit int init_ipmi_si(void)
2744 {
2745         int  i;
2746         char *str;
2747         int  rv;
2748
2749         if (initialized)
2750                 return 0;
2751         initialized = 1;
2752
2753         /* Register the device drivers. */
2754         rv = driver_register(&ipmi_driver);
2755         if (rv) {
2756                 printk(KERN_ERR
2757                        "init_ipmi_si: Unable to register driver: %d\n",
2758                        rv);
2759                 return rv;
2760         }
2761
2762
2763         /* Parse out the si_type string into its components. */
2764         str = si_type_str;
2765         if (*str != '\0') {
2766                 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
2767                         si_type[i] = str;
2768                         str = strchr(str, ',');
2769                         if (str) {
2770                                 *str = '\0';
2771                                 str++;
2772                         } else {
2773                                 break;
2774                         }
2775                 }
2776         }
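                /*
                 * Illustrative example (assuming si_type_str is filled in
                 * from the driver's "type" module parameter): a value of
                 * "kcs,bt" is split in place so that si_type[0] = "kcs"
                 * and si_type[1] = "bt", each applying to the
                 * correspondingly numbered hard-coded interface.
                 */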
2777
2778         printk(KERN_INFO "IPMI System Interface driver.\n");
2779
2780         hardcode_find_bmc();
2781
2782 #ifdef CONFIG_DMI
2783         dmi_find_bmc();
2784 #endif
2785
2786 #ifdef CONFIG_ACPI
2787         acpi_find_bmc();
2788 #endif
2789
2790 #ifdef CONFIG_PCI
2791         rv = pci_register_driver(&ipmi_pci_driver);
2792         if (rv) {
2793                 printk(KERN_ERR
2794                        "init_ipmi_si: Unable to register PCI driver: %d\n",
2795                        rv);
2796         }
2797 #endif
2798
2799         if (si_trydefaults) {
2800                 mutex_lock(&smi_infos_lock);
2801                 if (list_empty(&smi_infos)) {
2802                         /* No BMC was found, try defaults. */
2803                         mutex_unlock(&smi_infos_lock);
2804                         default_find_bmc();
2805                 } else {
2806                         mutex_unlock(&smi_infos_lock);
2807                 }
2808         }
2809
2810         mutex_lock(&smi_infos_lock);
2811         if (unload_when_empty && list_empty(&smi_infos)) {
2812                 mutex_unlock(&smi_infos_lock);
2813 #ifdef CONFIG_PCI
2814                 pci_unregister_driver(&ipmi_pci_driver);
2815 #endif
2816                 driver_unregister(&ipmi_driver);
2817                 printk(KERN_WARNING "ipmi_si: Unable to find any System Interface(s)\n");
2818                 return -ENODEV;
2819         } else {
2820                 mutex_unlock(&smi_infos_lock);
2821                 return 0;
2822         }
2823 }
2824 module_init(init_ipmi_si);
2825
2826 static void cleanup_one_si(struct smi_info *to_clean)
2827 {
2828         int           rv;
2829         unsigned long flags;
2830
2831         if (!to_clean)
2832                 return;
2833
2834         list_del(&to_clean->link);
2835
2836         /* Tell the timer and interrupt handlers that we are shutting
2837            down. */
2838         spin_lock_irqsave(&(to_clean->si_lock), flags);
2839         spin_lock(&(to_clean->msg_lock));
2840
2841         atomic_inc(&to_clean->stop_operation);
2842
2843         if (to_clean->irq_cleanup)
2844                 to_clean->irq_cleanup(to_clean);
2845
2846         spin_unlock(&(to_clean->msg_lock));
2847         spin_unlock_irqrestore(&(to_clean->si_lock), flags);
2848
2849         /* Wait until we know that we are out of any interrupt
2850            handlers that might have been running before we freed
2851            the interrupt. */
2852         synchronize_sched();
2853
2854         wait_for_timer_and_thread(to_clean);
2855
2856         /* Interrupts and timeouts are stopped, now make sure the
2857            interface is in a clean state. */
2858         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
2859                 poll(to_clean);
2860                 schedule_timeout_uninterruptible(1);
2861         }
2862
2863         rv = ipmi_unregister_smi(to_clean->intf);
2864         if (rv) {
2865                 printk(KERN_ERR
2866                        "ipmi_si: Unable to unregister device: errno=%d\n",
2867                        rv);
2868         }
2869
2870         to_clean->handlers->cleanup(to_clean->si_sm);
2871
2872         kfree(to_clean->si_sm);
2873
2874         if (to_clean->addr_source_cleanup)
2875                 to_clean->addr_source_cleanup(to_clean);
2876         if (to_clean->io_cleanup)
2877                 to_clean->io_cleanup(to_clean);
2878
2879         if (to_clean->dev_registered)
2880                 platform_device_unregister(to_clean->pdev);
2881
2882         kfree(to_clean);
2883 }
2884
2885 static __exit void cleanup_ipmi_si(void)
2886 {
2887         struct smi_info *e, *tmp_e;
2888
2889         if (!initialized)
2890                 return;
2891
2892 #ifdef CONFIG_PCI
2893         pci_unregister_driver(&ipmi_pci_driver);
2894 #endif
2895
2896         mutex_lock(&smi_infos_lock);
2897         list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
2898                 cleanup_one_si(e);
2899         mutex_unlock(&smi_infos_lock);
2900
2901         driver_unregister(&ipmi_driver);
2902 }
2903 module_exit(cleanup_ipmi_si);
2904
2905 MODULE_LICENSE("GPL");
2906 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
2907 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");