/*
 * ipmi_si.c
 *
 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
 * BT).
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * This file holds the "policy" for the interface to the SMI state
 * machine.  It does the configuration, handles timers and interrupts,
 * and drives the real SMI state machine.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <asm/system.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/irq.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/ipmi_smi.h>
#include <asm/io.h>
#include "ipmi_si_sm.h"
#include <linux/init.h>
#include <linux/dmi.h>

/* Measure times between events in the driver. */
#undef DEBUG_TIMING

/* Call every 10 ms. */
#define SI_TIMEOUT_TIME_USEC    10000
#define SI_USEC_PER_JIFFY       (1000000/HZ)
#define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
                                      short timeout */
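/* For example, with HZ=1000 a jiffy is 1000 usec, so SI_TIMEOUT_JIFFIES
   works out to 10 jiffies per 10 ms poll; with HZ=100 it is a single
   jiffy.  (Illustrative arithmetic only; the real values depend on the
   kernel's HZ setting.) */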

enum si_intf_state {
        SI_NORMAL,
        SI_GETTING_FLAGS,
        SI_GETTING_EVENTS,
        SI_CLEARING_FLAGS,
        SI_CLEARING_FLAGS_THEN_SET_IRQ,
        SI_GETTING_MESSAGES,
        SI_ENABLE_INTERRUPTS1,
        SI_ENABLE_INTERRUPTS2
        /* FIXME - add watchdog stuff. */
};

/* Some BT-specific defines we need here. */
#define IPMI_BT_INTMASK_REG             2
#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT   2
#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT  1

enum si_type {
    SI_KCS, SI_SMIC, SI_BT
};
static char *si_to_str[] = { "KCS", "SMIC", "BT" };

#define DEVICE_NAME "ipmi_si"

static struct device_driver ipmi_driver =
{
        .name = DEVICE_NAME,
        .bus = &platform_bus_type
};

struct smi_info
{
        int                    intf_num;
        ipmi_smi_t             intf;
        struct si_sm_data      *si_sm;
        struct si_sm_handlers  *handlers;
        enum si_type           si_type;
        spinlock_t             si_lock;
        spinlock_t             msg_lock;
        struct list_head       xmit_msgs;
        struct list_head       hp_xmit_msgs;
        struct ipmi_smi_msg    *curr_msg;
        enum si_intf_state     si_state;

        /* Used to handle the various types of I/O that can occur with
           IPMI */
        struct si_sm_io io;
        int (*io_setup)(struct smi_info *info);
        void (*io_cleanup)(struct smi_info *info);
        int (*irq_setup)(struct smi_info *info);
        void (*irq_cleanup)(struct smi_info *info);
        unsigned int io_size;
        char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
        void (*addr_source_cleanup)(struct smi_info *info);
        void *addr_source_data;

        /* Per-OEM handler, called from handle_flags().
           Returns 1 when handle_flags() needs to be re-run
           or 0 indicating it set si_state itself.
        */
        int (*oem_data_avail_handler)(struct smi_info *smi_info);

        /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
           is set to hold the flags until we are done handling everything
           from the flags. */
#define RECEIVE_MSG_AVAIL       0x01
#define EVENT_MSG_BUFFER_FULL   0x02
#define WDT_PRE_TIMEOUT_INT     0x08
#define OEM0_DATA_AVAIL     0x20
#define OEM1_DATA_AVAIL     0x40
#define OEM2_DATA_AVAIL     0x80
#define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
                             OEM1_DATA_AVAIL | \
                             OEM2_DATA_AVAIL)
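        /* These bit positions mirror the flag byte returned by the
           IPMI Get Message Flags command: bit 0 = receive message
           available, bit 1 = event message buffer full, bit 3 =
           watchdog pre-timeout, bits 5-7 = OEM-specific. */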
        unsigned char       msg_flags;

        /* If set to true, this will request events the next time the
           state machine is idle. */
        atomic_t            req_events;

        /* If true, run the state machine to completion on every send
           call.  Generally used after a panic to make sure stuff goes
           out. */
        int                 run_to_completion;

        /* The I/O port of an SI interface. */
        int                 port;

        /* The space between start addresses of the two ports.  For
           instance, if the first port is 0xca2 and the spacing is 4, then
           the second port is 0xca6. */
        unsigned int        spacing;

        /* zero if no irq; */
        int                 irq;

        /* The timer for this si. */
        struct timer_list   si_timer;

        /* The time (in jiffies) the last timeout occurred at. */
        unsigned long       last_timeout_jiffies;

        /* Used to gracefully stop the timer without race conditions. */
        atomic_t            stop_operation;

        /* The driver will disable interrupts when it gets into a
           situation where it cannot handle messages due to lack of
           memory.  Once that situation clears up, it will re-enable
           interrupts. */
        int interrupt_disabled;

        /* From the get device id response... */
        struct ipmi_device_id device_id;

        /* Driver model stuff. */
        struct device *dev;
        struct platform_device *pdev;

         /* True if we allocated the device, false if it came from
          * someplace else (like PCI). */
        int dev_registered;

        /* Slave address, could be reported from DMI. */
        unsigned char slave_addr;

        /* Counters and things for the proc filesystem. */
        spinlock_t count_lock;
        unsigned long short_timeouts;
        unsigned long long_timeouts;
        unsigned long timeout_restarts;
        unsigned long idles;
        unsigned long interrupts;
        unsigned long attentions;
        unsigned long flag_fetches;
        unsigned long hosed_count;
        unsigned long complete_transactions;
        unsigned long events;
        unsigned long watchdog_pretimeouts;
        unsigned long incoming_messages;

        struct task_struct *thread;

        struct list_head link;
};

#define SI_MAX_PARMS 4

static int force_kipmid[SI_MAX_PARMS];
static int num_force_kipmid;

static int try_smi_init(struct smi_info *smi);

static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
static int register_xaction_notifier(struct notifier_block * nb)
{
        return atomic_notifier_chain_register(&xaction_notifier_list, nb);
}

static void deliver_recv_msg(struct smi_info *smi_info,
                             struct ipmi_smi_msg *msg)
{
        /* Deliver the message to the upper layer with the lock
           released. */
        spin_unlock(&(smi_info->si_lock));
        ipmi_smi_msg_received(smi_info->intf, msg);
        spin_lock(&(smi_info->si_lock));
}

static void return_hosed_msg(struct smi_info *smi_info)
{
        struct ipmi_smi_msg *msg = smi_info->curr_msg;

        /* Make it a response */
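        /* Requests carry an (even) netfn in the top six bits of byte 0;
           OR-ing in bit 2 bumps the netfn by one, turning the request
           netfn into the matching response netfn. */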
        msg->rsp[0] = msg->data[0] | 4;
        msg->rsp[1] = msg->data[1];
        msg->rsp[2] = 0xFF; /* Unknown error. */
        msg->rsp_size = 3;

        smi_info->curr_msg = NULL;
        deliver_recv_msg(smi_info, msg);
}

static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
        int              rv;
        struct list_head *entry = NULL;
#ifdef DEBUG_TIMING
        struct timeval t;
#endif

        /* No need to save flags, we already have interrupts off and we
           already hold the SMI lock. */
        spin_lock(&(smi_info->msg_lock));

        /* Pick the high priority queue first. */
        if (!list_empty(&(smi_info->hp_xmit_msgs))) {
                entry = smi_info->hp_xmit_msgs.next;
        } else if (!list_empty(&(smi_info->xmit_msgs))) {
                entry = smi_info->xmit_msgs.next;
        }

        if (!entry) {
                smi_info->curr_msg = NULL;
                rv = SI_SM_IDLE;
        } else {
                int err;

                list_del(entry);
                smi_info->curr_msg = list_entry(entry,
                                                struct ipmi_smi_msg,
                                                link);
#ifdef DEBUG_TIMING
                do_gettimeofday(&t);
                printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
                err = atomic_notifier_call_chain(&xaction_notifier_list,
                                0, smi_info);
                if (err & NOTIFY_STOP_MASK) {
                        rv = SI_SM_CALL_WITHOUT_DELAY;
                        goto out;
                }
                err = smi_info->handlers->start_transaction(
                        smi_info->si_sm,
                        smi_info->curr_msg->data,
                        smi_info->curr_msg->data_size);
                if (err) {
                        return_hosed_msg(smi_info);
                }

                rv = SI_SM_CALL_WITHOUT_DELAY;
        }
        out:
        spin_unlock(&(smi_info->msg_lock));

        return rv;
}

static void start_enable_irq(struct smi_info *smi_info)
{
        unsigned char msg[2];

        /* If we are enabling interrupts, we have to tell the
           BMC to use them. */
        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;

        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
        smi_info->si_state = SI_ENABLE_INTERRUPTS1;
}

static void start_clear_flags(struct smi_info *smi_info)
{
        unsigned char msg[3];

        /* Make sure the watchdog pre-timeout flag is not set at startup. */
        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
        msg[2] = WDT_PRE_TIMEOUT_INT;

        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
        smi_info->si_state = SI_CLEARING_FLAGS;
}

/* When we have a situation where we run out of memory and cannot
   allocate messages, we just leave them in the BMC and run the system
   polled until we can allocate some memory.  Once we have some
   memory, we will re-enable the interrupt. */
static inline void disable_si_irq(struct smi_info *smi_info)
{
        if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
                disable_irq_nosync(smi_info->irq);
                smi_info->interrupt_disabled = 1;
        }
}

static inline void enable_si_irq(struct smi_info *smi_info)
{
        if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
                enable_irq(smi_info->irq);
                smi_info->interrupt_disabled = 0;
        }
}

static void handle_flags(struct smi_info *smi_info)
{
 retry:
        if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
                /* Watchdog pre-timeout */
                spin_lock(&smi_info->count_lock);
                smi_info->watchdog_pretimeouts++;
                spin_unlock(&smi_info->count_lock);

                start_clear_flags(smi_info);
                smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
                spin_unlock(&(smi_info->si_lock));
                ipmi_smi_watchdog_pretimeout(smi_info->intf);
                spin_lock(&(smi_info->si_lock));
        } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
                /* Messages available. */
                smi_info->curr_msg = ipmi_alloc_smi_msg();
                if (!smi_info->curr_msg) {
                        disable_si_irq(smi_info);
                        smi_info->si_state = SI_NORMAL;
                        return;
                }
                enable_si_irq(smi_info);

                smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
                smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
                smi_info->curr_msg->data_size = 2;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm,
                        smi_info->curr_msg->data,
                        smi_info->curr_msg->data_size);
                smi_info->si_state = SI_GETTING_MESSAGES;
        } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
                /* Events available. */
                smi_info->curr_msg = ipmi_alloc_smi_msg();
                if (!smi_info->curr_msg) {
                        disable_si_irq(smi_info);
                        smi_info->si_state = SI_NORMAL;
                        return;
                }
                enable_si_irq(smi_info);

                smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
                smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
                smi_info->curr_msg->data_size = 2;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm,
                        smi_info->curr_msg->data,
                        smi_info->curr_msg->data_size);
                smi_info->si_state = SI_GETTING_EVENTS;
        } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
                   smi_info->oem_data_avail_handler) {
                if (smi_info->oem_data_avail_handler(smi_info))
                        goto retry;
        } else {
                smi_info->si_state = SI_NORMAL;
        }
}

static void handle_transaction_done(struct smi_info *smi_info)
{
        struct ipmi_smi_msg *msg;
#ifdef DEBUG_TIMING
        struct timeval t;

        do_gettimeofday(&t);
        printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
        switch (smi_info->si_state) {
        case SI_NORMAL:
                if (!smi_info->curr_msg)
                        break;

                smi_info->curr_msg->rsp_size
                        = smi_info->handlers->get_result(
                                smi_info->si_sm,
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);

                /* Do this here because deliver_recv_msg() releases the
                   lock, and a new message can be put in during the
                   time the lock is released. */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                deliver_recv_msg(smi_info, msg);
                break;

        case SI_GETTING_FLAGS:
        {
                unsigned char msg[4];
                unsigned int  len;

                /* We got the flags from the SMI, now handle them. */
                len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
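                /* In the returned buffer, msg[2] is the IPMI completion
                   code and msg[3] is the flag byte itself. */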
                if (msg[2] != 0) {
                        /* Error fetching flags, just give up for
                           now. */
                        smi_info->si_state = SI_NORMAL;
                } else if (len < 4) {
                        /* Hmm, no flags.  That's technically illegal, but
                           don't use uninitialized data. */
                        smi_info->si_state = SI_NORMAL;
                } else {
                        smi_info->msg_flags = msg[3];
                        handle_flags(smi_info);
                }
                break;
        }

        case SI_CLEARING_FLAGS:
        case SI_CLEARING_FLAGS_THEN_SET_IRQ:
        {
                unsigned char msg[3];

                /* We cleared the flags. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
                if (msg[2] != 0) {
                        /* Error clearing flags */
                        printk(KERN_WARNING
                               "ipmi_si: Error clearing flags: %2.2x\n",
                               msg[2]);
                }
                if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
                        start_enable_irq(smi_info);
                else
                        smi_info->si_state = SI_NORMAL;
                break;
        }

        case SI_GETTING_EVENTS:
        {
                smi_info->curr_msg->rsp_size
                        = smi_info->handlers->get_result(
                                smi_info->si_sm,
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);

                /* Do this here because deliver_recv_msg() releases the
                   lock, and a new message can be put in during the
                   time the lock is released. */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                if (msg->rsp[2] != 0) {
                        /* Error getting event, probably done. */
                        msg->done(msg);

                        /* Take off the event flag. */
                        smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
                        handle_flags(smi_info);
                } else {
                        spin_lock(&smi_info->count_lock);
                        smi_info->events++;
                        spin_unlock(&smi_info->count_lock);

                        /* Do this before we deliver the message
                           because delivering the message releases the
                           lock and something else can mess with the
                           state. */
                        handle_flags(smi_info);

                        deliver_recv_msg(smi_info, msg);
                }
                break;
        }

        case SI_GETTING_MESSAGES:
        {
                smi_info->curr_msg->rsp_size
                        = smi_info->handlers->get_result(
                                smi_info->si_sm,
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);

                /* Do this here because deliver_recv_msg() releases the
                   lock, and a new message can be put in during the
                   time the lock is released. */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                if (msg->rsp[2] != 0) {
                        /* Error getting event, probably done. */
                        msg->done(msg);

                        /* Take off the msg flag. */
                        smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
                        handle_flags(smi_info);
                } else {
                        spin_lock(&smi_info->count_lock);
                        smi_info->incoming_messages++;
                        spin_unlock(&smi_info->count_lock);

                        /* Do this before we deliver the message
                           because delivering the message releases the
                           lock and something else can mess with the
                           state. */
                        handle_flags(smi_info);

                        deliver_recv_msg(smi_info, msg);
                }
                break;
        }

        case SI_ENABLE_INTERRUPTS1:
        {
                unsigned char msg[4];

                /* We got the flags from the SMI, now handle them. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
                        printk(KERN_WARNING
                               "ipmi_si: Could not enable interrupts"
                               ", failed get, using polled mode.\n");
                        smi_info->si_state = SI_NORMAL;
                } else {
                        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                        msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
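                        /* msg[3] still holds the enables byte returned
                           by the get; bit 0 is the receive message
                           queue interrupt enable in the IPMI Global
                           Enables, so OR it in and write the result
                           back. */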
                        msg[2] = msg[3] | 1; /* enable msg queue int */
                        smi_info->handlers->start_transaction(
                                smi_info->si_sm, msg, 3);
                        smi_info->si_state = SI_ENABLE_INTERRUPTS2;
                }
                break;
        }

        case SI_ENABLE_INTERRUPTS2:
        {
                unsigned char msg[4];

                /* We got the flags from the SMI, now handle them. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
                        printk(KERN_WARNING
                               "ipmi_si: Could not enable interrupts"
                               ", failed set, using polled mode.\n");
                }
                smi_info->si_state = SI_NORMAL;
                break;
        }
        }
}

/* Called on timeouts and events.  Timeouts should pass the elapsed
   time, interrupts should pass in zero. */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                                           int time)
{
        enum si_sm_result si_sm_result;

 restart:
        /* There used to be a loop here that waited a little while
           (around 25us) before giving up.  That turned out to be
           pointless, the minimum delays I was seeing were in the 300us
           range, which is far too long to wait in an interrupt.  So
           we just run until the state machine tells us something
           happened or it needs a delay. */
        si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
        time = 0;
        while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
        {
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
        }

        if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
        {
                spin_lock(&smi_info->count_lock);
                smi_info->complete_transactions++;
                spin_unlock(&smi_info->count_lock);

                handle_transaction_done(smi_info);
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
        }
        else if (si_sm_result == SI_SM_HOSED)
        {
                spin_lock(&smi_info->count_lock);
                smi_info->hosed_count++;
                spin_unlock(&smi_info->count_lock);

                /* Do this before return_hosed_msg(), because that
                   releases the lock. */
                smi_info->si_state = SI_NORMAL;
                if (smi_info->curr_msg != NULL) {
                        /* If we were handling a user message, format
                           a response to send to the upper layer to
                           tell it about the error. */
                        return_hosed_msg(smi_info);
                }
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
        }

        /* We prefer handling attn over new messages. */
        if (si_sm_result == SI_SM_ATTN)
        {
                unsigned char msg[2];

                spin_lock(&smi_info->count_lock);
                smi_info->attentions++;
                spin_unlock(&smi_info->count_lock);

                /* Got an attn, send down a get message flags to see
                   what's causing it.  It would be better to handle
                   this in the upper layer, but due to the way
                   interrupts work with the SMI, that's not really
                   possible. */
                msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                msg[1] = IPMI_GET_MSG_FLAGS_CMD;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm, msg, 2);
                smi_info->si_state = SI_GETTING_FLAGS;
                goto restart;
        }

        /* If we are currently idle, try to start the next message. */
        if (si_sm_result == SI_SM_IDLE) {
                spin_lock(&smi_info->count_lock);
                smi_info->idles++;
                spin_unlock(&smi_info->count_lock);

                si_sm_result = start_next_msg(smi_info);
                if (si_sm_result != SI_SM_IDLE)
                        goto restart;
        }

        if ((si_sm_result == SI_SM_IDLE)
            && (atomic_read(&smi_info->req_events)))
        {
                /* We are idle and the upper layer requested that I fetch
                   events, so do so. */
                unsigned char msg[2];

                spin_lock(&smi_info->count_lock);
                smi_info->flag_fetches++;
                spin_unlock(&smi_info->count_lock);

                atomic_set(&smi_info->req_events, 0);
                msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                msg[1] = IPMI_GET_MSG_FLAGS_CMD;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm, msg, 2);
                smi_info->si_state = SI_GETTING_FLAGS;
                goto restart;
        }

        return si_sm_result;
}

static void sender(void                *send_info,
                   struct ipmi_smi_msg *msg,
                   int                 priority)
{
        struct smi_info   *smi_info = send_info;
        enum si_sm_result result;
        unsigned long     flags;
#ifdef DEBUG_TIMING
        struct timeval    t;
#endif

        spin_lock_irqsave(&(smi_info->msg_lock), flags);
#ifdef DEBUG_TIMING
        do_gettimeofday(&t);
        printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif

        if (smi_info->run_to_completion) {
                /* If we are running to completion, then throw it in
                   the list and run transactions until everything is
                   clear.  Priority doesn't matter here. */
                list_add_tail(&(msg->link), &(smi_info->xmit_msgs));

                /* We have to release the msg lock and claim the smi
                   lock in this case, because of race conditions. */
                spin_unlock_irqrestore(&(smi_info->msg_lock), flags);

                spin_lock_irqsave(&(smi_info->si_lock), flags);
                result = smi_event_handler(smi_info, 0);
                while (result != SI_SM_IDLE) {
                        udelay(SI_SHORT_TIMEOUT_USEC);
                        result = smi_event_handler(smi_info,
                                                   SI_SHORT_TIMEOUT_USEC);
                }
                spin_unlock_irqrestore(&(smi_info->si_lock), flags);
                return;
        } else {
                if (priority > 0) {
                        list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
                } else {
                        list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
                }
        }
        spin_unlock_irqrestore(&(smi_info->msg_lock), flags);

        spin_lock_irqsave(&(smi_info->si_lock), flags);
        if ((smi_info->si_state == SI_NORMAL)
            && (smi_info->curr_msg == NULL))
        {
                start_next_msg(smi_info);
        }
        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}

static void set_run_to_completion(void *send_info, int i_run_to_completion)
{
        struct smi_info   *smi_info = send_info;
        enum si_sm_result result;
        unsigned long     flags;

        spin_lock_irqsave(&(smi_info->si_lock), flags);

        smi_info->run_to_completion = i_run_to_completion;
        if (i_run_to_completion) {
                result = smi_event_handler(smi_info, 0);
                while (result != SI_SM_IDLE) {
                        udelay(SI_SHORT_TIMEOUT_USEC);
                        result = smi_event_handler(smi_info,
                                                   SI_SHORT_TIMEOUT_USEC);
                }
        }

        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}

static int ipmi_thread(void *data)
{
        struct smi_info *smi_info = data;
        unsigned long flags;
        enum si_sm_result smi_result;

        set_user_nice(current, 19);
        while (!kthread_should_stop()) {
                spin_lock_irqsave(&(smi_info->si_lock), flags);
                smi_result = smi_event_handler(smi_info, 0);
                spin_unlock_irqrestore(&(smi_info->si_lock), flags);
                if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
                        /* do nothing */
                }
                else if (smi_result == SI_SM_CALL_WITH_DELAY)
                        schedule();
                else
                        schedule_timeout_interruptible(1);
        }
        return 0;
}


static void poll(void *send_info)
{
        struct smi_info *smi_info = send_info;

        smi_event_handler(smi_info, 0);
}

static void request_events(void *send_info)
{
        struct smi_info *smi_info = send_info;

        atomic_set(&smi_info->req_events, 1);
}

static int initialized = 0;

static void smi_timeout(unsigned long data)
{
        struct smi_info   *smi_info = (struct smi_info *) data;
        enum si_sm_result smi_result;
        unsigned long     flags;
        unsigned long     jiffies_now;
        long              time_diff;
#ifdef DEBUG_TIMING
        struct timeval    t;
#endif

        if (atomic_read(&smi_info->stop_operation))
                return;

        spin_lock_irqsave(&(smi_info->si_lock), flags);
#ifdef DEBUG_TIMING
        do_gettimeofday(&t);
        printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
        jiffies_now = jiffies;
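        /* Convert the jiffies elapsed since the last run into
           microseconds; smi_event_handler() takes its time argument
           in usec. */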
        time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
                     * SI_USEC_PER_JIFFY);
        smi_result = smi_event_handler(smi_info, time_diff);

        spin_unlock_irqrestore(&(smi_info->si_lock), flags);

        smi_info->last_timeout_jiffies = jiffies_now;

        if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
                /* Running with interrupts, only do long timeouts. */
                smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->long_timeouts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
                goto do_add_timer;
        }

        /* If the state machine asks for a short delay, then shorten
           the timer timeout. */
        if (smi_result == SI_SM_CALL_WITH_DELAY) {
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->short_timeouts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
                smi_info->si_timer.expires = jiffies + 1;
        } else {
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->long_timeouts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
                smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
        }

 do_add_timer:
        add_timer(&(smi_info->si_timer));
}

static irqreturn_t si_irq_handler(int irq, void *data)
{
        struct smi_info *smi_info = data;
        unsigned long   flags;
#ifdef DEBUG_TIMING
        struct timeval  t;
#endif

        spin_lock_irqsave(&(smi_info->si_lock), flags);

        spin_lock(&smi_info->count_lock);
        smi_info->interrupts++;
        spin_unlock(&smi_info->count_lock);

        if (atomic_read(&smi_info->stop_operation))
                goto out;

#ifdef DEBUG_TIMING
        do_gettimeofday(&t);
        printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
        smi_event_handler(smi_info, 0);
 out:
        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
        return IRQ_HANDLED;
}

static irqreturn_t si_bt_irq_handler(int irq, void *data)
{
        struct smi_info *smi_info = data;
        /* We need to clear the IRQ flag for the BT interface. */
        smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
                             IPMI_BT_INTMASK_CLEAR_IRQ_BIT
                             | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
        return si_irq_handler(irq, data);
}

static int smi_start_processing(void       *send_info,
                                ipmi_smi_t intf)
{
        struct smi_info *new_smi = send_info;
        int             enable = 0;

        new_smi->intf = intf;

        /* Set up the timer that drives the interface. */
        setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
        new_smi->last_timeout_jiffies = jiffies;
        mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);

        /*
         * Check if the user forcefully enabled the daemon.
         */
        if (new_smi->intf_num < num_force_kipmid)
                enable = force_kipmid[new_smi->intf_num];
        /*
         * The BT interface is efficient enough to not need a thread,
         * and there is no need for a thread if we have interrupts.
         */
        else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
                enable = 1;

        if (enable) {
                new_smi->thread = kthread_run(ipmi_thread, new_smi,
                                              "kipmi%d", new_smi->intf_num);
                if (IS_ERR(new_smi->thread)) {
                        printk(KERN_NOTICE "ipmi_si_intf: Could not start"
                               " kernel thread due to error %ld, only using"
                               " timers to drive the interface\n",
                               PTR_ERR(new_smi->thread));
                        new_smi->thread = NULL;
                }
        }

        return 0;
}

static struct ipmi_smi_handlers handlers =
{
        .owner                  = THIS_MODULE,
        .start_processing       = smi_start_processing,
        .sender                 = sender,
        .request_events         = request_events,
        .set_run_to_completion  = set_run_to_completion,
        .poll                   = poll,
};

/* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
   a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS */

static LIST_HEAD(smi_infos);
static DEFINE_MUTEX(smi_infos_lock);
static int smi_num; /* Used to sequence the SMIs */

#define DEFAULT_REGSPACING      1

static int           si_trydefaults = 1;
static char          *si_type[SI_MAX_PARMS];
#define MAX_SI_TYPE_STR 30
static char          si_type_str[MAX_SI_TYPE_STR];
static unsigned long addrs[SI_MAX_PARMS];
static int num_addrs;
static unsigned int  ports[SI_MAX_PARMS];
static int num_ports;
static int           irqs[SI_MAX_PARMS];
static int num_irqs;
static int           regspacings[SI_MAX_PARMS];
static int num_regspacings = 0;
static int           regsizes[SI_MAX_PARMS];
static int num_regsizes = 0;
static int           regshifts[SI_MAX_PARMS];
static int num_regshifts = 0;
static int slave_addrs[SI_MAX_PARMS];
static int num_slave_addrs = 0;


module_param_named(trydefaults, si_trydefaults, bool, 0);
MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
                 " default scan of the KCS and SMIC interface at the standard"
                 " address");
module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
MODULE_PARM_DESC(type, "Defines the type of each interface, each"
                 " interface separated by commas.  The types are 'kcs',"
                 " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
                 " the first interface to kcs and the second to bt");
module_param_array(addrs, long, &num_addrs, 0);
MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
                 " addresses separated by commas.  Only use if an interface"
                 " is in memory.  Otherwise, set it to zero or leave"
                 " it blank.");
module_param_array(ports, int, &num_ports, 0);
MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
                 " addresses separated by commas.  Only use if an interface"
                 " is a port.  Otherwise, set it to zero or leave"
                 " it blank.");
module_param_array(irqs, int, &num_irqs, 0);
MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
                 " addresses separated by commas.  Only use if an interface"
                 " has an interrupt.  Otherwise, set it to zero or leave"
                 " it blank.");
module_param_array(regspacings, int, &num_regspacings, 0);
MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
                 " and each successive register used by the interface.  For"
                 " instance, if the start address is 0xca2 and the spacing"
                 " is 2, then the second address is at 0xca4.  Defaults"
                 " to 1.");
module_param_array(regsizes, int, &num_regsizes, 0);
MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
                 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
                 " 16-bit, 32-bit, or 64-bit register.  Use this if the"
                 " 8-bit IPMI register has to be read from a larger"
                 " register.");
module_param_array(regshifts, int, &num_regshifts, 0);
MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
                 " IPMI register, in bits.  For instance, if the data"
                 " is read from a 32-bit word and the IPMI data is in"
                 " bits 8-15, then the shift would be 8.");
module_param_array(slave_addrs, int, &num_slave_addrs, 0);
MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
                 " the controller.  Normally this is 0x20, but can be"
                 " overridden by this parm.  This is an array indexed"
                 " by interface number.");
module_param_array(force_kipmid, int, &num_force_kipmid, 0);
MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
                 " disabled (0).  Normally the IPMI driver auto-detects"
                 " this, but the value may be overridden by this parm.");


#define IPMI_IO_ADDR_SPACE  0
#define IPMI_MEM_ADDR_SPACE 1
static char *addr_space_to_str[] = { "I/O", "memory" };

static void std_irq_cleanup(struct smi_info *info)
{
        if (info->si_type == SI_BT)
                /* Disable the interrupt in the BT interface. */
                info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
        free_irq(info->irq, info);
}

static int std_irq_setup(struct smi_info *info)
{
        int rv;

        if (!info->irq)
                return 0;

        if (info->si_type == SI_BT) {
                rv = request_irq(info->irq,
                                 si_bt_irq_handler,
                                 IRQF_DISABLED,
                                 DEVICE_NAME,
                                 info);
                if (!rv)
                        /* Enable the interrupt in the BT interface. */
                        info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
                                         IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
        } else
                rv = request_irq(info->irq,
                                 si_irq_handler,
                                 IRQF_DISABLED,
                                 DEVICE_NAME,
                                 info);
        if (rv) {
                printk(KERN_WARNING
                       "ipmi_si: %s unable to claim interrupt %d,"
                       " running polled\n",
                       DEVICE_NAME, info->irq);
                info->irq = 0;
        } else {
                info->irq_cleanup = std_irq_cleanup;
                printk("  Using irq %d\n", info->irq);
        }

        return rv;
}

static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
{
        unsigned int addr = io->addr_data;

        return inb(addr + (offset * io->regspacing));
}

static void port_outb(struct si_sm_io *io, unsigned int offset,
                      unsigned char b)
{
        unsigned int addr = io->addr_data;

        outb(b, addr + (offset * io->regspacing));
}

static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
{
        unsigned int addr = io->addr_data;

        return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
}

static void port_outw(struct si_sm_io *io, unsigned int offset,
                      unsigned char b)
{
        unsigned int addr = io->addr_data;

        outw(b << io->regshift, addr + (offset * io->regspacing));
}

static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
{
        unsigned int addr = io->addr_data;

        return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
}

static void port_outl(struct si_sm_io *io, unsigned int offset,
                      unsigned char b)
{
        unsigned int addr = io->addr_data;

        outl(b << io->regshift, addr+(offset * io->regspacing));
}

static void port_cleanup(struct smi_info *info)
{
        unsigned int addr = info->io.addr_data;
        int          idx;

        if (addr) {
                for (idx = 0; idx < info->io_size; idx++) {
                        release_region(addr + idx * info->io.regspacing,
                                       info->io.regsize);
                }
        }
}

static int port_setup(struct smi_info *info)
{
        unsigned int addr = info->io.addr_data;
        int          idx;

        if (!addr)
                return -ENODEV;

        info->io_cleanup = port_cleanup;

        /* Figure out the actual inb/inw/inl/etc routine to use based
           upon the register size. */
        switch (info->io.regsize) {
        case 1:
                info->io.inputb = port_inb;
                info->io.outputb = port_outb;
                break;
        case 2:
                info->io.inputb = port_inw;
                info->io.outputb = port_outw;
                break;
        case 4:
                info->io.inputb = port_inl;
                info->io.outputb = port_outl;
                break;
        default:
                printk("ipmi_si: Invalid register size: %d\n",
                       info->io.regsize);
                return -EINVAL;
        }

        /* Some BIOSes reserve disjoint I/O regions in their ACPI
         * tables.  This causes problems when trying to register the
         * entire I/O region.  Therefore we must register each I/O
         * port separately.
         */
        for (idx = 0; idx < info->io_size; idx++) {
                if (request_region(addr + idx * info->io.regspacing,
                                   info->io.regsize, DEVICE_NAME) == NULL) {
                        /* Undo allocations */
                        while (idx--) {
                                release_region(addr + idx * info->io.regspacing,
                                               info->io.regsize);
                        }
                        return -EIO;
                }
        }
        return 0;
}

static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
{
        return readb((io->addr)+(offset * io->regspacing));
}

static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
                     unsigned char b)
{
        writeb(b, (io->addr)+(offset * io->regspacing));
}

static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
{
        return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
                & 0xff;
}

static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
                     unsigned char b)
{
        writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
}

static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
{
        return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
                & 0xff;
}

static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
                     unsigned char b)
{
        writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
}

#ifdef readq
static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
{
        return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
                & 0xff;
}

static void mem_outq(struct si_sm_io *io, unsigned int offset,
                     unsigned char b)
{
        writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
}
#endif

static void mem_cleanup(struct smi_info *info)
{
        unsigned long addr = info->io.addr_data;
        int           mapsize;

        if (info->io.addr) {
                iounmap(info->io.addr);

                mapsize = ((info->io_size * info->io.regspacing)
                           - (info->io.regspacing - info->io.regsize));

                release_mem_region(addr, mapsize);
        }
}

static int mem_setup(struct smi_info *info)
{
        unsigned long addr = info->io.addr_data;
        int           mapsize;

        if (!addr)
                return -ENODEV;

        info->io_cleanup = mem_cleanup;

        /* Figure out the actual readb/readw/readl/etc routine to use based
           upon the register size. */
        switch (info->io.regsize) {
        case 1:
                info->io.inputb = intf_mem_inb;
                info->io.outputb = intf_mem_outb;
                break;
        case 2:
                info->io.inputb = intf_mem_inw;
                info->io.outputb = intf_mem_outw;
                break;
        case 4:
                info->io.inputb = intf_mem_inl;
                info->io.outputb = intf_mem_outl;
                break;
#ifdef readq
        case 8:
                info->io.inputb = mem_inq;
                info->io.outputb = mem_outq;
                break;
#endif
        default:
                printk("ipmi_si: Invalid register size: %d\n",
                       info->io.regsize);
                return -EINVAL;
        }

        /* Calculate the total amount of memory to claim.  This is an
         * unusual looking calculation, but it avoids claiming any
         * more memory than it has to.  It will claim everything
         * between the first address to the end of the last full
         * register. */
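        /* For example, io_size = 2, regspacing = 4 and regsize = 1
         * claims 2*4 - (4-1) = 5 bytes: the first register, the
         * three-byte gap, and the second register. */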
        mapsize = ((info->io_size * info->io.regspacing)
                   - (info->io.regspacing - info->io.regsize));

        if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
                return -EIO;

        info->io.addr = ioremap(addr, mapsize);
        if (info->io.addr == NULL) {
                release_mem_region(addr, mapsize);
                return -EIO;
        }
        return 0;
}


static __devinit void hardcode_find_bmc(void)
{
        int             i;
        struct smi_info *info;

        for (i = 0; i < SI_MAX_PARMS; i++) {
                if (!ports[i] && !addrs[i])
                        continue;

                info = kzalloc(sizeof(*info), GFP_KERNEL);
                if (!info)
                        return;

                info->addr_source = "hardcoded";

                if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
                        info->si_type = SI_KCS;
                } else if (strcmp(si_type[i], "smic") == 0) {
                        info->si_type = SI_SMIC;
                } else if (strcmp(si_type[i], "bt") == 0) {
                        info->si_type = SI_BT;
                } else {
                        printk(KERN_WARNING
                               "ipmi_si: Interface type specified "
                               "for interface %d, was invalid: %s\n",
                               i, si_type[i]);
                        kfree(info);
                        continue;
                }

                if (ports[i]) {
                        /* An I/O port */
                        info->io_setup = port_setup;
                        info->io.addr_data = ports[i];
                        info->io.addr_type = IPMI_IO_ADDR_SPACE;
                } else if (addrs[i]) {
                        /* A memory port */
                        info->io_setup = mem_setup;
                        info->io.addr_data = addrs[i];
                        info->io.addr_type = IPMI_MEM_ADDR_SPACE;
                } else {
                        printk(KERN_WARNING
                               "ipmi_si: Interface type specified "
                               "for interface %d, "
                               "but port and address were not set or "
                               "set to zero.\n", i);
                        kfree(info);
                        continue;
                }

                info->io.addr = NULL;
                info->io.regspacing = regspacings[i];
                if (!info->io.regspacing)
                        info->io.regspacing = DEFAULT_REGSPACING;
                info->io.regsize = regsizes[i];
                if (!info->io.regsize)
                        info->io.regsize = DEFAULT_REGSPACING;
                info->io.regshift = regshifts[i];
                info->irq = irqs[i];
                if (info->irq)
                        info->irq_setup = std_irq_setup;

                try_smi_init(info);
        }
}
1386
1387 #ifdef CONFIG_ACPI
1388
1389 #include <linux/acpi.h>
1390
1391 /* Once we get an ACPI failure, we don't try any more, because we go
1392    through the tables sequentially.  Once we don't find a table, there
1393    are no more. */
1394 static int acpi_failure = 0;
1395
1396 /* For GPE-type interrupts. */
1397 static u32 ipmi_acpi_gpe(void *context)
1398 {
1399         struct smi_info *smi_info = context;
1400         unsigned long   flags;
1401 #ifdef DEBUG_TIMING
1402         struct timeval t;
1403 #endif
1404
1405         spin_lock_irqsave(&(smi_info->si_lock), flags);
1406
1407         spin_lock(&smi_info->count_lock);
1408         smi_info->interrupts++;
1409         spin_unlock(&smi_info->count_lock);
1410
1411         if (atomic_read(&smi_info->stop_operation))
1412                 goto out;
1413
1414 #ifdef DEBUG_TIMING
1415         do_gettimeofday(&t);
1416         printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1417 #endif
1418         smi_event_handler(smi_info, 0);
1419  out:
1420         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1421
1422         return ACPI_INTERRUPT_HANDLED;
1423 }
1424
1425 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1426 {
1427         if (!info->irq)
1428                 return;
1429
1430         acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1431 }
1432
1433 static int acpi_gpe_irq_setup(struct smi_info *info)
1434 {
1435         acpi_status status;
1436
1437         if (!info->irq)
1438                 return 0;
1439
1440         /* FIXME - is level triggered right? */
1441         status = acpi_install_gpe_handler(NULL,
1442                                           info->irq,
1443                                           ACPI_GPE_LEVEL_TRIGGERED,
1444                                           &ipmi_acpi_gpe,
1445                                           info);
1446         if (status != AE_OK) {
1447                 printk(KERN_WARNING
1448                        "ipmi_si: Unable to claim ACPI GPE %d,"
1449                        " running polled\n",
1450                        info->irq);
1451                 info->irq = 0;
1452                 return -EINVAL;
1453         } else {
1454                 info->irq_cleanup = acpi_gpe_irq_cleanup;
1455                 printk("  Using ACPI GPE %d\n", info->irq);
1456                 return 0;
1457         }
1458 }
1459
1460 /*
1461  * Defined at
1462  * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1463  */
1464 struct SPMITable {
1465         s8      Signature[4];
1466         u32     Length;
1467         u8      Revision;
1468         u8      Checksum;
1469         s8      OEMID[6];
1470         s8      OEMTableID[8];
1471         s8      OEMRevision[4];
1472         s8      CreatorID[4];
1473         s8      CreatorRevision[4];
1474         u8      InterfaceType;
1475         u8      IPMIlegacy;
1476         s16     SpecificationRevision;
1477
1478         /*
1479          * Bit 0 - SCI interrupt supported
1480          * Bit 1 - I/O APIC/SAPIC
1481          */
1482         u8      InterruptType;
1483
1484         /* If bit 0 of InterruptType is set, then this is the SCI
1485            interrupt in the GPEx_STS register. */
1486         u8      GPE;
1487
1488         s16     Reserved;
1489
1490         /* If bit 1 of InterruptType is set, then this is the I/O
1491            APIC/SAPIC interrupt. */
1492         u32     GlobalSystemInterrupt;
1493
1494         /* The actual register address. */
1495         struct acpi_generic_address addr;
1496
1497         u8      UID[4];
1498
1499         s8      spmi_id[1]; /* A '\0' terminated array starts here. */
1500 };
1501
1502 static __devinit int try_init_acpi(struct SPMITable *spmi)
1503 {
1504         struct smi_info  *info;
1505         char             *io_type;
1506         u8               addr_space;
1507
1508         if (spmi->IPMIlegacy != 1) {
1509             printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1510             return -ENODEV;
1511         }
1512
1513         if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1514                 addr_space = IPMI_MEM_ADDR_SPACE;
1515         else
1516                 addr_space = IPMI_IO_ADDR_SPACE;
1517
1518         info = kzalloc(sizeof(*info), GFP_KERNEL);
1519         if (!info) {
1520                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1521                 return -ENOMEM;
1522         }
1523
1524         info->addr_source = "ACPI";
1525
1526         /* Figure out the interface type. */
1527         switch (spmi->InterfaceType)
1528         {
1529         case 1: /* KCS */
1530                 info->si_type = SI_KCS;
1531                 break;
1532         case 2: /* SMIC */
1533                 info->si_type = SI_SMIC;
1534                 break;
1535         case 3: /* BT */
1536                 info->si_type = SI_BT;
1537                 break;
1538         default:
1539                 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1540                         spmi->InterfaceType);
1541                 kfree(info);
1542                 return -EIO;
1543         }
1544
1545         if (spmi->InterruptType & 1) {
1546                 /* We've got a GPE interrupt. */
1547                 info->irq = spmi->GPE;
1548                 info->irq_setup = acpi_gpe_irq_setup;
1549         } else if (spmi->InterruptType & 2) {
1550                 /* We've got an APIC/SAPIC interrupt. */
1551                 info->irq = spmi->GlobalSystemInterrupt;
1552                 info->irq_setup = std_irq_setup;
1553         } else {
1554                 /* Use the default interrupt setting. */
1555                 info->irq = 0;
1556                 info->irq_setup = NULL;
1557         }
1558
1559         if (spmi->addr.register_bit_width) {
1560                 /* A (hopefully) properly formed register bit width. */
1561                 info->io.regspacing = spmi->addr.register_bit_width / 8;
1562         } else {
1563                 info->io.regspacing = DEFAULT_REGSPACING;
1564         }
1565         info->io.regsize = info->io.regspacing;
1566         info->io.regshift = spmi->addr.register_bit_offset;
1567
1568         if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1569                 io_type = "memory";
1570                 info->io_setup = mem_setup;
1571                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1572         } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1573                 io_type = "I/O";
1574                 info->io_setup = port_setup;
1575                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1576         } else {
1577                 kfree(info);
1578                 printk(KERN_INFO "ipmi_si: Unknown ACPI I/O Address type\n");
1579                 return -EIO;
1580         }
1581         info->io.addr_data = spmi->addr.address;
1582
1583         try_smi_init(info);
1584
1585         return 0;
1586 }
1587
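     /* Scan the firmware-provided SPMI tables (there may be more than
        one) and try to set up an interface for each of them. */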
1588 static __devinit void acpi_find_bmc(void)
1589 {
1590         acpi_status      status;
1591         struct SPMITable *spmi;
1592         int              i;
1593
1594         if (acpi_disabled)
1595                 return;
1596
1597         if (acpi_failure)
1598                 return;
1599
1600         for (i = 0; ; i++) {
1601                 status = acpi_get_firmware_table("SPMI", i+1,
1602                                                  ACPI_LOGICAL_ADDRESSING,
1603                                                  (struct acpi_table_header **)
1604                                                  &spmi);
1605                 if (status != AE_OK)
1606                         return;
1607
1608                 try_init_acpi(spmi);
1609         }
1610 }
1611 #endif
1612
1613 #ifdef CONFIG_DMI
1614 struct dmi_ipmi_data
1615 {
1616         u8              type;
1617         u8              addr_space;
1618         unsigned long   base_addr;
1619         u8              irq;
1620         u8              offset;
1621         u8              slave_addr;
1622 };
1623
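     /* Decode an SMBIOS IPMI Device Information record into a
        dmi_ipmi_data structure.  Byte 4 holds the interface type, the
        base address starts at byte 8, and byte 0x10 holds the base
        address modifier (register spacing and the address LS bit).
        Returns 0 on success, -EIO for register spacings we don't
        handle. */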
1624 static int __devinit decode_dmi(struct dmi_header *dm,
1625                                 struct dmi_ipmi_data *dmi)
1626 {
1627         u8              *data = (u8 *)dm;
1628         unsigned long   base_addr;
1629         u8              reg_spacing;
1630         u8              len = dm->length;
1631
1632         dmi->type = data[4];
1633
1634         memcpy(&base_addr, data+8, sizeof(unsigned long));
1635         if (len >= 0x11) {
1636                 if (base_addr & 1) {
1637                         /* I/O */
1638                         base_addr &= 0xFFFE;
1639                         dmi->addr_space = IPMI_IO_ADDR_SPACE;
1640                 }
1641                 else {
1642                         /* Memory */
1643                         dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1644                 }
1645                 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1646                    is odd. */
1647                 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1648
1649                 dmi->irq = data[0x11];
1650
1651                 /* The top two bits of byte 0x10 hold the register spacing. */
1652                 reg_spacing = (data[0x10] & 0xC0) >> 6;
1653                 switch (reg_spacing) {
1654                 case 0x00: /* Byte boundaries */
1655                     dmi->offset = 1;
1656                     break;
1657                 case 0x01: /* 32-bit boundaries */
1658                     dmi->offset = 4;
1659                     break;
1660                 case 0x02: /* 16-byte boundaries */
1661                     dmi->offset = 16;
1662                     break;
1663                 default:
1664                     /* Some other interface, just ignore it. */
1665                     return -EIO;
1666                 }
1667         } else {
1668                 /* Old DMI spec. */
1669                 /* Note that technically, the lower bit of the base
1670                  * address should be 1 if the address is I/O and 0 if
1671                  * the address is in memory.  So many systems get that
1672                  * wrong (and all that I have seen are I/O) so we just
1673                  * ignore that bit and assume I/O.  Systems that use
1674                  * memory should use the newer spec, anyway. */
1675                 dmi->base_addr = base_addr & 0xfffe;
1676                 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1677                 dmi->offset = 1;
1678         }
1679
1680         dmi->slave_addr = data[6];
1681
1682         return 0;
1683 }
1684
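     /* Build an smi_info from a decoded SMBIOS record and hand it to
        try_smi_init(). */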
1685 static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
1686 {
1687         struct smi_info *info;
1688
1689         info = kzalloc(sizeof(*info), GFP_KERNEL);
1690         if (!info) {
1691                 printk(KERN_ERR
1692                        "ipmi_si: Could not allocate SI data\n");
1693                 return;
1694         }
1695
1696         info->addr_source = "SMBIOS";
1697
1698         switch (ipmi_data->type) {
1699         case 0x01: /* KCS */
1700                 info->si_type = SI_KCS;
1701                 break;
1702         case 0x02: /* SMIC */
1703                 info->si_type = SI_SMIC;
1704                 break;
1705         case 0x03: /* BT */
1706                 info->si_type = SI_BT;
1707                 break;
1708         default:
1709                 return;
1710         }
1711
1712         switch (ipmi_data->addr_space) {
1713         case IPMI_MEM_ADDR_SPACE:
1714                 info->io_setup = mem_setup;
1715                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1716                 break;
1717
1718         case IPMI_IO_ADDR_SPACE:
1719                 info->io_setup = port_setup;
1720                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1721                 break;
1722
1723         default:
1724                 kfree(info);
1725                 printk(KERN_WARNING
1726                        "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
1727                        ipmi_data->addr_space);
1728                 return;
1729         }
1730         info->io.addr_data = ipmi_data->base_addr;
1731
1732         info->io.regspacing = ipmi_data->offset;
1733         if (!info->io.regspacing)
1734                 info->io.regspacing = DEFAULT_REGSPACING;
1735         info->io.regsize = DEFAULT_REGSPACING;
1736         info->io.regshift = 0;
1737
1738         info->slave_addr = ipmi_data->slave_addr;
1739
1740         info->irq = ipmi_data->irq;
1741         if (info->irq)
1742                 info->irq_setup = std_irq_setup;
1743
1744         try_smi_init(info);
1745 }
1746
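     /* Walk the DMI device list for IPMI entries and set up each one
        we can decode. */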
1747 static void __devinit dmi_find_bmc(void)
1748 {
1749         struct dmi_device    *dev = NULL;
1750         struct dmi_ipmi_data data;
1751         int                  rv;
1752
1753         while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
1754                 memset(&data, 0, sizeof(data));
1755                 rv = decode_dmi((struct dmi_header *) dev->device_data, &data);
1756                 if (!rv)
1757                         try_init_dmi(&data);
1758         }
1759 }
1760 #endif /* CONFIG_DMI */
1761
1762 #ifdef CONFIG_PCI
1763
1764 #define PCI_ERMC_CLASSCODE              0x0C0700
1765 #define PCI_ERMC_CLASSCODE_MASK         0xffffff00
1766 #define PCI_ERMC_CLASSCODE_TYPE_MASK    0xff
1767 #define PCI_ERMC_CLASSCODE_TYPE_SMIC    0x00
1768 #define PCI_ERMC_CLASSCODE_TYPE_KCS     0x01
1769 #define PCI_ERMC_CLASSCODE_TYPE_BT      0x02
1770
1771 #define PCI_HP_VENDOR_ID    0x103C
1772 #define PCI_MMC_DEVICE_ID   0x121A
1773 #define PCI_MMC_ADDR_CW     0x10
1774
1775 static void ipmi_pci_cleanup(struct smi_info *info)
1776 {
1777         struct pci_dev *pdev = info->addr_source_data;
1778
1779         pci_disable_device(pdev);
1780 }
1781
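     /* Probe a PCI device whose class code identifies it as an IPMI
        system interface; the low byte (the programming interface)
        selects SMIC, KCS, or BT. */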
1782 static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
1783                                     const struct pci_device_id *ent)
1784 {
1785         int rv;
1786         int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
1787         struct smi_info *info;
1788         int first_reg_offset = 0;
1789
1790         info = kzalloc(sizeof(*info), GFP_KERNEL);
1791         if (!info)
1792                 return -ENOMEM;
1793
1794         info->addr_source = "PCI";
1795
1796         switch (class_type) {
1797         case PCI_ERMC_CLASSCODE_TYPE_SMIC:
1798                 info->si_type = SI_SMIC;
1799                 break;
1800
1801         case PCI_ERMC_CLASSCODE_TYPE_KCS:
1802                 info->si_type = SI_KCS;
1803                 break;
1804
1805         case PCI_ERMC_CLASSCODE_TYPE_BT:
1806                 info->si_type = SI_BT;
1807                 break;
1808
1809         default:
1810                 kfree(info);
1811                 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
1812                        pci_name(pdev), class_type);
1813                 return -ENOMEM;
1814         }
1815
1816         rv = pci_enable_device(pdev);
1817         if (rv) {
1818                 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
1819                        pci_name(pdev));
1820                 kfree(info);
1821                 return rv;
1822         }
1823
1824         info->addr_source_cleanup = ipmi_pci_cleanup;
1825         info->addr_source_data = pdev;
1826
1827         if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
1828                 first_reg_offset = 1;
1829
1830         if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
1831                 info->io_setup = port_setup;
1832                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1833         } else {
1834                 info->io_setup = mem_setup;
1835                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1836         }
1837         info->io.addr_data = pci_resource_start(pdev, 0);
1838
1839         info->io.regspacing = DEFAULT_REGSPACING;
1840         info->io.regsize = DEFAULT_REGSPACING;
1841         info->io.regshift = 0;
1842
1843         info->irq = pdev->irq;
1844         if (info->irq)
1845                 info->irq_setup = std_irq_setup;
1846
1847         info->dev = &pdev->dev;
1848
1849         return try_smi_init(info);
1850 }
1851
1852 static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
1853 {
1854 }
1855
1856 #ifdef CONFIG_PM
1857 static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1858 {
1859         return 0;
1860 }
1861
1862 static int ipmi_pci_resume(struct pci_dev *pdev)
1863 {
1864         return 0;
1865 }
1866 #endif
1867
1868 static struct pci_device_id ipmi_pci_devices[] = {
1869         { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
1870         { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
             { 0, }  /* Terminating entry; the PCI core stops at an all-zero ID. */
1871 };
1872 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
1873
1874 static struct pci_driver ipmi_pci_driver = {
1875         .name =         DEVICE_NAME,
1876         .id_table =     ipmi_pci_devices,
1877         .probe =        ipmi_pci_probe,
1878         .remove =       __devexit_p(ipmi_pci_remove),
1879 #ifdef CONFIG_PM
1880         .suspend =      ipmi_pci_suspend,
1881         .resume =       ipmi_pci_resume,
1882 #endif
1883 };
1884 #endif /* CONFIG_PCI */
1885
1886
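     /* Issue a Get Device ID command by driving the state machine
        directly (polled), since no message handler is attached yet.
        This verifies that a BMC is really present and records its
        device ID for later use. */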
1887 static int try_get_dev_id(struct smi_info *smi_info)
1888 {
1889         unsigned char         msg[2];
1890         unsigned char         *resp;
1891         unsigned long         resp_len;
1892         enum si_sm_result     smi_result;
1893         int                   rv = 0;
1894
1895         resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1896         if (!resp)
1897                 return -ENOMEM;
1898
1899         /* Do a Get Device ID command, since it comes back with some
1900            useful info. */
1901         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1902         msg[1] = IPMI_GET_DEVICE_ID_CMD;
1903         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1904
1905         smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
1906         for (;;)
1907         {
1908                 if (smi_result == SI_SM_CALL_WITH_DELAY ||
1909                     smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
1910                         schedule_timeout_uninterruptible(1);
1911                         smi_result = smi_info->handlers->event(
1912                                 smi_info->si_sm, 100);
1913                 }
1914                 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
1915                 {
1916                         smi_result = smi_info->handlers->event(
1917                                 smi_info->si_sm, 0);
1918                 }
1919                 else
1920                         break;
1921         }
1922         if (smi_result == SI_SM_HOSED) {
1923                 /* We couldn't get the state machine to run, so whatever's at
1924                    the port is probably not an IPMI SMI interface. */
1925                 rv = -ENODEV;
1926                 goto out;
1927         }
1928
1929         /* Otherwise, we got some data. */
1930         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1931                                                   resp, IPMI_MAX_MSG_LENGTH);
1932         if (resp_len < 14) {
1933                 /* That's odd, it should be longer. */
1934                 rv = -EINVAL;
1935                 goto out;
1936         }
1937
1938         if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
1939                 /* That's odd, it shouldn't be able to fail. */
1940                 rv = -EINVAL;
1941                 goto out;
1942         }
1943
1944         /* Record info from the get device id, in case we need it. */
1945         ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id);
1946
1947  out:
1948         kfree(resp);
1949         return rv;
1950 }
1951
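     /* Read handlers for the "type" and "si_stats" proc entries
        created in try_smi_init(). */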
1952 static int type_file_read_proc(char *page, char **start, off_t off,
1953                                int count, int *eof, void *data)
1954 {
1955         char            *out = (char *) page;
1956         struct smi_info *smi = data;
1957
1958         switch (smi->si_type) {
1959             case SI_KCS:
1960                 return sprintf(out, "kcs\n");
1961             case SI_SMIC:
1962                 return sprintf(out, "smic\n");
1963             case SI_BT:
1964                 return sprintf(out, "bt\n");
1965             default:
1966                 return 0;
1967         }
1968 }
1969
1970 static int stat_file_read_proc(char *page, char **start, off_t off,
1971                                int count, int *eof, void *data)
1972 {
1973         char            *out = (char *) page;
1974         struct smi_info *smi = data;
1975
1976         out += sprintf(out, "interrupts_enabled:    %d\n",
1977                        smi->irq && !smi->interrupt_disabled);
1978         out += sprintf(out, "short_timeouts:        %ld\n",
1979                        smi->short_timeouts);
1980         out += sprintf(out, "long_timeouts:         %ld\n",
1981                        smi->long_timeouts);
1982         out += sprintf(out, "timeout_restarts:      %ld\n",
1983                        smi->timeout_restarts);
1984         out += sprintf(out, "idles:                 %ld\n",
1985                        smi->idles);
1986         out += sprintf(out, "interrupts:            %ld\n",
1987                        smi->interrupts);
1988         out += sprintf(out, "attentions:            %ld\n",
1989                        smi->attentions);
1990         out += sprintf(out, "flag_fetches:          %ld\n",
1991                        smi->flag_fetches);
1992         out += sprintf(out, "hosed_count:           %ld\n",
1993                        smi->hosed_count);
1994         out += sprintf(out, "complete_transactions: %ld\n",
1995                        smi->complete_transactions);
1996         out += sprintf(out, "events:                %ld\n",
1997                        smi->events);
1998         out += sprintf(out, "watchdog_pretimeouts:  %ld\n",
1999                        smi->watchdog_pretimeouts);
2000         out += sprintf(out, "incoming_messages:     %ld\n",
2001                        smi->incoming_messages);
2002
2003         return (out - ((char *) page));
2004 }
2005
2006 /*
2007  * oem_data_avail_to_receive_msg_avail
2008  * @info - smi_info structure with msg_flags set
2009  *
2010  * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2011  * Returns 1 indicating need to re-run handle_flags().
2012  */
2013 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2014 {
2015         smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2016                                 RECEIVE_MSG_AVAIL);
2017         return 1;
2018 }
2019
2020 /*
2021  * setup_dell_poweredge_oem_data_handler
2022  * @info - smi_info.device_id must be populated
2023  *
2024  * Systems that match but have a firmware version < 1.40 may assert
2025  * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2026  * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
2027  * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2028  * as RECEIVE_MSG_AVAIL instead.
2029  *
2030  * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2031  * asserts the OEM[012] bits, and if it did, the driver would have to
2032  * change to handle that properly, we don't actually check for the
2033  * firmware version.
2034  * Device ID = 0x20                BMC on PowerEdge 8G servers
2035  * Device Revision = 0x80
2036  * Firmware Revision1 = 0x01       BMC version 1.40
2037  * Firmware Revision2 = 0x40       BCD encoded
2038  * IPMI Version = 0x51             IPMI 1.5
2039  * Manufacturer ID = A2 02 00      Dell IANA
2040  *
2041  * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2042  * OEM0_DATA_AVAIL, which needs to be treated as RECEIVE_MSG_AVAIL.
2043  *
2044  */
2045 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
2046 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2047 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2048 #define DELL_IANA_MFR_ID 0x0002a2
2049 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2050 {
2051         struct ipmi_device_id *id = &smi_info->device_id;
2052         if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2053                 if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
2054                     id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2055                     id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2056                         smi_info->oem_data_avail_handler =
2057                                 oem_data_avail_to_receive_msg_avail;
2058                 }
2059                 else if (ipmi_version_major(id) < 1 ||
2060                          (ipmi_version_major(id) == 1 &&
2061                           ipmi_version_minor(id) < 5)) {
2062                         smi_info->oem_data_avail_handler =
2063                                 oem_data_avail_to_receive_msg_avail;
2064                 }
2065         }
2066 }
2067
2068 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2069 static void return_hosed_msg_badsize(struct smi_info *smi_info)
2070 {
2071         struct ipmi_smi_msg *msg = smi_info->curr_msg;
2072
2073         /* Make it a response. */
2074         msg->rsp[0] = msg->data[0] | 4;
2075         msg->rsp[1] = msg->data[1];
2076         msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2077         msg->rsp_size = 3;
2078         smi_info->curr_msg = NULL;
2079         deliver_recv_msg(smi_info, msg);
2080 }
2081
2082 /*
2083  * dell_poweredge_bt_xaction_handler
2084  * @info - smi_info.device_id must be populated
2085  *
2086  * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2087  * not respond to a Get SDR command if the length of the data
2088  * requested is exactly 0x3A, which leads to command timeouts and no
2089  * data returned.  This intercepts such commands, and causes userspace
2090  * callers to try again with a different-sized buffer, which succeeds.
2091  */
2092
2093 #define STORAGE_NETFN 0x0A
2094 #define STORAGE_CMD_GET_SDR 0x23
2095 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2096                                              unsigned long unused,
2097                                              void *in)
2098 {
2099         struct smi_info *smi_info = in;
2100         unsigned char *data = smi_info->curr_msg->data;
2101         unsigned int size   = smi_info->curr_msg->data_size;
2102         if (size >= 8 &&
2103             (data[0]>>2) == STORAGE_NETFN &&
2104             data[1] == STORAGE_CMD_GET_SDR &&
2105             data[7] == 0x3A) {
2106                 return_hosed_msg_badsize(smi_info);
2107                 return NOTIFY_STOP;
2108         }
2109         return NOTIFY_DONE;
2110 }
2111
2112 static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2113         .notifier_call  = dell_poweredge_bt_xaction_handler,
2114 };
2115
2116 /*
2117  * setup_dell_poweredge_bt_xaction_handler
2118  * @info - smi_info.device_id must be filled in already
2119  *
2120  * Registers dell_poweredge_bt_xaction_notifier when the device ID
2121  * says this is a Dell system using the BT interface.
2122  */
2123 static void
2124 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2125 {
2126         struct ipmi_device_id *id = &smi_info->device_id;
2127         if (id->manufacturer_id == DELL_IANA_MFR_ID &&
2128             smi_info->si_type == SI_BT)
2129                 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2130 }
2131
2132 /*
2133  * setup_oem_data_handler
2134  * @info - smi_info.device_id must be filled in already
2135  *
2136  * Fills in smi_info.oem_data_avail_handler
2137  * when we know what function to use there.
2138  */
2139
2140 static void setup_oem_data_handler(struct smi_info *smi_info)
2141 {
2142         setup_dell_poweredge_oem_data_handler(smi_info);
2143 }
2144
2145 static void setup_xaction_handlers(struct smi_info *smi_info)
2146 {
2147         setup_dell_poweredge_bt_xaction_handler(smi_info);
2148 }
2149
2150 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2151 {
2152         if (smi_info->intf) {
2153                 /* The timer and thread are only running if the
2154                    interface has been started up and registered. */
2155                 if (smi_info->thread != NULL)
2156                         kthread_stop(smi_info->thread);
2157                 del_timer_sync(&smi_info->si_timer);
2158         }
2159 }
2160
2161 static __devinitdata struct ipmi_default_vals
2162 {
2163         int type;
2164         int port;
2165 } ipmi_defaults[] =
2166 {
2167         { .type = SI_KCS, .port = 0xca2 },
2168         { .type = SI_SMIC, .port = 0xca9 },
2169         { .type = SI_BT, .port = 0xe4 },
2170         { .port = 0 }
2171 };
2172
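     /* Probe the well-known legacy I/O port for each interface type,
        stopping at the first one that initializes.  Used as a last
        resort when no other discovery method found a BMC. */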
2173 static __devinit void default_find_bmc(void)
2174 {
2175         struct smi_info *info;
2176         int             i;
2177
2178         for (i = 0; ; i++) {
2179                 if (!ipmi_defaults[i].port)
2180                         break;
2181
2182                 info = kzalloc(sizeof(*info), GFP_KERNEL);
2183                 if (!info)
2184                         return;
2185
2186                 info->addr_source = NULL;
2187
2188                 info->si_type = ipmi_defaults[i].type;
2189                 info->io_setup = port_setup;
2190                 info->io.addr_data = ipmi_defaults[i].port;
2191                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2192
2193                 info->io.addr = NULL;
2194                 info->io.regspacing = DEFAULT_REGSPACING;
2195                 info->io.regsize = DEFAULT_REGSPACING;
2196                 info->io.regshift = 0;
2197
2198                 if (try_smi_init(info) == 0) {
2199                         /* Found one... */
2200                         printk(KERN_INFO "ipmi_si: Found default %s state"
2201                                " machine at %s address 0x%lx\n",
2202                                si_to_str[info->si_type],
2203                                addr_space_to_str[info->io.addr_type],
2204                                info->io.addr_data);
2205                         return;
2206                 }
2207         }
2208 }
2209
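     /* Return 1 if no registered interface already uses this address
        type and address, 0 if this would be a duplicate. */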
2210 static int is_new_interface(struct smi_info *info)
2211 {
2212         struct smi_info *e;
2213
2214         list_for_each_entry(e, &smi_infos, link) {
2215                 if (e->io.addr_type != info->io.addr_type)
2216                         continue;
2217                 if (e->io.addr_data == info->io.addr_data)
2218                         return 0;
2219         }
2220
2221         return 1;
2222 }
2223
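     /* Bring up a single interface: pick the state machine, set up the
        I/O, detect the hardware, fetch the device ID, claim any IRQ,
        then register with the message handler and create the proc
        entries.  On failure everything allocated here is torn down and
        new_smi is freed. */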
2224 static int try_smi_init(struct smi_info *new_smi)
2225 {
2226         int rv;
2227
2228         if (new_smi->addr_source) {
2229                 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2230                        " machine at %s address 0x%lx, slave address 0x%x,"
2231                        " irq %d\n",
2232                        new_smi->addr_source,
2233                        si_to_str[new_smi->si_type],
2234                        addr_space_to_str[new_smi->io.addr_type],
2235                        new_smi->io.addr_data,
2236                        new_smi->slave_addr, new_smi->irq);
2237         }
2238
2239         mutex_lock(&smi_infos_lock);
2240         if (!is_new_interface(new_smi)) {
2241                 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2242                 rv = -EBUSY;
2243                 goto out_err;
2244         }
2245
2246         /* So we know not to free it unless we have allocated one. */
2247         new_smi->intf = NULL;
2248         new_smi->si_sm = NULL;
2249         new_smi->handlers = NULL;
2250
2251         switch (new_smi->si_type) {
2252         case SI_KCS:
2253                 new_smi->handlers = &kcs_smi_handlers;
2254                 break;
2255
2256         case SI_SMIC:
2257                 new_smi->handlers = &smic_smi_handlers;
2258                 break;
2259
2260         case SI_BT:
2261                 new_smi->handlers = &bt_smi_handlers;
2262                 break;
2263
2264         default:
2265                 /* No support for anything else yet. */
2266                 rv = -EIO;
2267                 goto out_err;
2268         }
2269
2270         /* Allocate the state machine's data and initialize it. */
2271         new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2272         if (!new_smi->si_sm) {
2273                 printk(" Could not allocate state machine memory\n");
2274                 rv = -ENOMEM;
2275                 goto out_err;
2276         }
2277         new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2278                                                         &new_smi->io);
2279
2280         /* Now that we know the I/O size, we can set up the I/O. */
2281         rv = new_smi->io_setup(new_smi);
2282         if (rv) {
2283                 printk(" Could not set up I/O space\n");
2284                 goto out_err;
2285         }
2286
2287         spin_lock_init(&(new_smi->si_lock));
2288         spin_lock_init(&(new_smi->msg_lock));
2289         spin_lock_init(&(new_smi->count_lock));
2290
2291         /* Do low-level detection first. */
2292         if (new_smi->handlers->detect(new_smi->si_sm)) {
2293                 if (new_smi->addr_source)
2294                         printk(KERN_INFO "ipmi_si: Interface detection"
2295                                " failed\n");
2296                 rv = -ENODEV;
2297                 goto out_err;
2298         }
2299
2300         /* Attempt a get device id command.  If it fails, we probably
2301            don't have a BMC here. */
2302         rv = try_get_dev_id(new_smi);
2303         if (rv) {
2304                 if (new_smi->addr_source)
2305                         printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2306                                " at this location\n");
2307                 goto out_err;
2308         }
2309
2310         setup_oem_data_handler(new_smi);
2311         setup_xaction_handlers(new_smi);
2312
2313         /* Try to claim any interrupts. */
2314         if (new_smi->irq_setup)
2315                 new_smi->irq_setup(new_smi);
2316
2317         INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2318         INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2319         new_smi->curr_msg = NULL;
2320         atomic_set(&new_smi->req_events, 0);
2321         new_smi->run_to_completion = 0;
2322
2323         new_smi->interrupt_disabled = 0;
2324         atomic_set(&new_smi->stop_operation, 0);
2325         new_smi->intf_num = smi_num;
2326         smi_num++;
2327
2328         /* Start clearing the flags before we enable interrupts or the
2329            timer to avoid racing with the timer. */
2330         start_clear_flags(new_smi);
2331         /* IRQ is defined to be set when non-zero. */
2332         if (new_smi->irq)
2333                 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2334
2335         if (!new_smi->dev) {
2336                 /* If we don't already have a device from something
2337                  * else (like PCI), then register a new one. */
2338                 new_smi->pdev = platform_device_alloc("ipmi_si",
2339                                                       new_smi->intf_num);
2340                 if (!new_smi->pdev) {
2341                         rv = -ENOMEM;
2342                         printk(KERN_ERR "ipmi_si_intf:"
2343                                " Unable to allocate platform device\n");
2344                         goto out_err;
2345                 }
2346                 new_smi->dev = &new_smi->pdev->dev;
2347                 new_smi->dev->driver = &ipmi_driver;
2348
2349                 rv = platform_device_register(new_smi->pdev);
2350                 if (rv) {
2351                         printk(KERN_ERR
2352                                "ipmi_si_intf:"
2353                                " Unable to register system interface device:"
2354                                " %d\n",
2355                                rv);
2356                         goto out_err;
2357                 }
2358                 new_smi->dev_registered = 1;
2359         }
2360
2361         rv = ipmi_register_smi(&handlers,
2362                                new_smi,
2363                                &new_smi->device_id,
2364                                new_smi->dev,
2365                                new_smi->slave_addr);
2366         if (rv) {
2367                 printk(KERN_ERR
2368                        "ipmi_si: Unable to register device: error %d\n",
2369                        rv);
2370                 goto out_err_stop_timer;
2371         }
2372
2373         rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2374                                      type_file_read_proc, NULL,
2375                                      new_smi, THIS_MODULE);
2376         if (rv) {
2377                 printk(KERN_ERR
2378                        "ipmi_si: Unable to create proc entry: %d\n",
2379                        rv);
2380                 goto out_err_stop_timer;
2381         }
2382
2383         rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2384                                      stat_file_read_proc, NULL,
2385                                      new_smi, THIS_MODULE);
2386         if (rv) {
2387                 printk(KERN_ERR
2388                        "ipmi_si: Unable to create proc entry: %d\n",
2389                        rv);
2390                 goto out_err_stop_timer;
2391         }
2392
2393         list_add_tail(&new_smi->link, &smi_infos);
2394
2395         mutex_unlock(&smi_infos_lock);
2396
2397         printk(" IPMI %s interface initialized\n", si_to_str[new_smi->si_type]);
2398
2399         return 0;
2400
2401  out_err_stop_timer:
2402         atomic_inc(&new_smi->stop_operation);
2403         wait_for_timer_and_thread(new_smi);
2404
2405  out_err:
2406         if (new_smi->intf)
2407                 ipmi_unregister_smi(new_smi->intf);
2408
2409         if (new_smi->irq_cleanup)
2410                 new_smi->irq_cleanup(new_smi);
2411
2412         /* Wait until we know that we are out of any interrupt
2413            handlers that might have been running before we freed the
2414            interrupt. */
2415         synchronize_sched();
2416
2417         if (new_smi->si_sm) {
2418                 if (new_smi->handlers)
2419                         new_smi->handlers->cleanup(new_smi->si_sm);
2420                 kfree(new_smi->si_sm);
2421         }
2422         if (new_smi->addr_source_cleanup)
2423                 new_smi->addr_source_cleanup(new_smi);
2424         if (new_smi->io_cleanup)
2425                 new_smi->io_cleanup(new_smi);
2426
2427         if (new_smi->dev_registered)
2428                 platform_device_unregister(new_smi->pdev);
2429
2430         kfree(new_smi);
2431
2432         mutex_unlock(&smi_infos_lock);
2433
2434         return rv;
2435 }
2436
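     /* Module init: register the driver, parse the si_type_str module
        parameter, then run the hardcoded, DMI, ACPI, PCI, and default
        discovery methods.  Fails with -ENODEV if nothing is found. */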
2437 static __devinit int init_ipmi_si(void)
2438 {
2439         int  i;
2440         char *str;
2441         int  rv;
2442
2443         if (initialized)
2444                 return 0;
2445         initialized = 1;
2446
2447         /* Register the device drivers. */
2448         rv = driver_register(&ipmi_driver);
2449         if (rv) {
2450                 printk(KERN_ERR
2451                        "init_ipmi_si: Unable to register driver: %d\n",
2452                        rv);
2453                 return rv;
2454         }
2455
2456
2457         /* Parse out the si_type string into its components. */
2458         str = si_type_str;
2459         if (*str != '\0') {
2460                 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
2461                         si_type[i] = str;
2462                         str = strchr(str, ',');
2463                         if (str) {
2464                                 *str = '\0';
2465                                 str++;
2466                         } else {
2467                                 break;
2468                         }
2469                 }
2470         }
2471
2472         printk(KERN_INFO "IPMI System Interface driver.\n");
2473
2474         hardcode_find_bmc();
2475
2476 #ifdef CONFIG_DMI
2477         dmi_find_bmc();
2478 #endif
2479
2480 #ifdef CONFIG_ACPI
2481         if (si_trydefaults)
2482                 acpi_find_bmc();
2483 #endif
2484
2485 #ifdef CONFIG_PCI
2486         pci_module_init(&ipmi_pci_driver);
2487 #endif
2488
2489         if (si_trydefaults) {
2490                 mutex_lock(&smi_infos_lock);
2491                 if (list_empty(&smi_infos)) {
2492                         /* No BMC was found, try defaults. */
2493                         mutex_unlock(&smi_infos_lock);
2494                         default_find_bmc();
2495                 } else {
2496                         mutex_unlock(&smi_infos_lock);
2497                 }
2498         }
2499
2500         mutex_lock(&smi_infos_lock);
2501         if (list_empty(&smi_infos)) {
2502                 mutex_unlock(&smi_infos_lock);
2503 #ifdef CONFIG_PCI
2504                 pci_unregister_driver(&ipmi_pci_driver);
2505 #endif
2506                 driver_unregister(&ipmi_driver);
2507                 printk(KERN_WARNING "ipmi_si: Unable to find any System Interface(s)\n");
2508                 return -ENODEV;
2509         } else {
2510                 mutex_unlock(&smi_infos_lock);
2511                 return 0;
2512         }
2513 }
2514 module_init(init_ipmi_si);
2515
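     /* Tear down one interface: stop the interrupt, timer, and thread,
        let any transaction in progress finish, then unregister and
        free everything that try_smi_init() set up. */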
2516 static void __devexit cleanup_one_si(struct smi_info *to_clean)
2517 {
2518         int           rv;
2519         unsigned long flags;
2520
2521         if (!to_clean)
2522                 return;
2523
2524         list_del(&to_clean->link);
2525
2526         /* Tell the timer and interrupt handlers that we are shutting
2527            down. */
2528         spin_lock_irqsave(&(to_clean->si_lock), flags);
2529         spin_lock(&(to_clean->msg_lock));
2530
2531         atomic_inc(&to_clean->stop_operation);
2532
2533         if (to_clean->irq_cleanup)
2534                 to_clean->irq_cleanup(to_clean);
2535
2536         spin_unlock(&(to_clean->msg_lock));
2537         spin_unlock_irqrestore(&(to_clean->si_lock), flags);
2538
2539         /* Wait until we know that we are out of any interrupt
2540            handlers that might have been running before we freed the
2541            interrupt. */
2542         synchronize_sched();
2543
2544         wait_for_timer_and_thread(to_clean);
2545
2546         /* Interrupts and timeouts are stopped, now make sure the
2547            interface is in a clean state. */
2548         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
2549                 poll(to_clean);
2550                 schedule_timeout_uninterruptible(1);
2551         }
2552
2553         rv = ipmi_unregister_smi(to_clean->intf);
2554         if (rv) {
2555                 printk(KERN_ERR
2556                        "ipmi_si: Unable to unregister device: errno=%d\n",
2557                        rv);
2558         }
2559
2560         to_clean->handlers->cleanup(to_clean->si_sm);
2561
2562         kfree(to_clean->si_sm);
2563
2564         if (to_clean->addr_source_cleanup)
2565                 to_clean->addr_source_cleanup(to_clean);
2566         if (to_clean->io_cleanup)
2567                 to_clean->io_cleanup(to_clean);
2568
2569         if (to_clean->dev_registered)
2570                 platform_device_unregister(to_clean->pdev);
2571
2572         kfree(to_clean);
2573 }
2574
2575 static __exit void cleanup_ipmi_si(void)
2576 {
2577         struct smi_info *e, *tmp_e;
2578
2579         if (!initialized)
2580                 return;
2581
2582 #ifdef CONFIG_PCI
2583         pci_unregister_driver(&ipmi_pci_driver);
2584 #endif
2585
2586         mutex_lock(&smi_infos_lock);
2587         list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
2588                 cleanup_one_si(e);
2589         mutex_unlock(&smi_infos_lock);
2590
2591         driver_unregister(&ipmi_driver);
2592 }
2593 module_exit(cleanup_ipmi_si);
2594
2595 MODULE_LICENSE("GPL");
2596 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
2597 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");