[PATCH] ipmi: add full sysfs support
[linux-2.6.git] / drivers / char / ipmi / ipmi_si_intf.c
1 /*
2  * ipmi_si.c
3  *
4  * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5  * BT).
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  *
13  *  This program is free software; you can redistribute it and/or modify it
14  *  under the terms of the GNU General Public License as published by the
15  *  Free Software Foundation; either version 2 of the License, or (at your
16  *  option) any later version.
17  *
18  *
19  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
20  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
25  *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
27  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
28  *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  *  You should have received a copy of the GNU General Public License along
31  *  with this program; if not, write to the Free Software Foundation, Inc.,
32  *  675 Mass Ave, Cambridge, MA 02139, USA.
33  */
34
35 /*
36  * This file holds the "policy" for the interface to the SMI state
37  * machine.  It does the configuration, handles timers and interrupts,
38  * and drives the real SMI state machine.
39  */
40
41 #include <linux/config.h>
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <asm/system.h>
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/errno.h>
48 #include <linux/spinlock.h>
49 #include <linux/slab.h>
50 #include <linux/delay.h>
51 #include <linux/list.h>
52 #include <linux/pci.h>
53 #include <linux/ioport.h>
54 #include <linux/notifier.h>
55 #include <linux/mutex.h>
56 #include <linux/kthread.h>
57 #include <asm/irq.h>
58 #ifdef CONFIG_HIGH_RES_TIMERS
59 #include <linux/hrtime.h>
60 # if defined(schedule_next_int)
61 /* Old high-res timer code, do translations. */
62 #  define get_arch_cycles(a) quick_update_jiffies_sub(a)
63 #  define arch_cycles_per_jiffy cycles_per_jiffies
64 # endif
65 static inline void add_usec_to_timer(struct timer_list *t, long v)
66 {
67         t->arch_cycle_expires += nsec_to_arch_cycle(v * 1000);
68         while (t->arch_cycle_expires >= arch_cycles_per_jiffy)
69         {
70                 t->expires++;
71                 t->arch_cycle_expires -= arch_cycles_per_jiffy;
72         }
73 }
74 #endif
75 #include <linux/interrupt.h>
76 #include <linux/rcupdate.h>
77 #include <linux/ipmi_smi.h>
78 #include <asm/io.h>
79 #include "ipmi_si_sm.h"
80 #include <linux/init.h>
81 #include <linux/dmi.h>
82
83 /* Measure times between events in the driver. */
84 #undef DEBUG_TIMING
85
86 /* Call every 10 ms. */
87 #define SI_TIMEOUT_TIME_USEC    10000
88 #define SI_USEC_PER_JIFFY       (1000000/HZ)
89 #define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
90 #define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM request a
91                                        short timeout */
92
/* What response the driver is currently waiting on from the BMC;
   driven by handle_transaction_done(). */
93 enum si_intf_state {
94         SI_NORMAL,
95         SI_GETTING_FLAGS,
96         SI_GETTING_EVENTS,
97         SI_CLEARING_FLAGS,
98         SI_CLEARING_FLAGS_THEN_SET_IRQ,
99         SI_GETTING_MESSAGES,
100         SI_ENABLE_INTERRUPTS1,
101         SI_ENABLE_INTERRUPTS2
102         /* FIXME - add watchdog stuff. */
103 };
104
105 /* Some BT-specific defines we need here. */
106 #define IPMI_BT_INTMASK_REG             2
107 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT   2
108 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT  1
109
/* The three supported system-interface flavors. */
110 enum si_type {
111     SI_KCS, SI_SMIC, SI_BT
112 };
/* Printable names, indexed by enum si_type. */
113 static char *si_to_str[] = { "KCS", "SMIC", "BT" };
114
115 #define DEVICE_NAME "ipmi_si"
116
/* Driver-model driver object; interfaces register against the
   platform bus under this name. */
117 static struct device_driver ipmi_driver =
118 {
119         .name = DEVICE_NAME,
120         .bus = &platform_bus_type
121 };
122
/* Per-interface state for one system interface (KCS, SMIC, or BT). */
123 struct smi_info
124 {
125         int                    intf_num;
126         ipmi_smi_t             intf;
127         struct si_sm_data      *si_sm;
128         struct si_sm_handlers  *handlers;
129         enum si_type           si_type;
        /* si_lock protects the state machine and si_state; msg_lock
           protects the two transmit queues below. */
130         spinlock_t             si_lock;
131         spinlock_t             msg_lock;
        /* Normal and high-priority queues of outgoing ipmi_smi_msg,
           linked through msg->link. */
132         struct list_head       xmit_msgs;
133         struct list_head       hp_xmit_msgs;
        /* Message whose transaction is currently in flight, or NULL. */
134         struct ipmi_smi_msg    *curr_msg;
135         enum si_intf_state     si_state;
136
137         /* Used to handle the various types of I/O that can occur with
138            IPMI */
139         struct si_sm_io io;
140         int (*io_setup)(struct smi_info *info);
141         void (*io_cleanup)(struct smi_info *info);
142         int (*irq_setup)(struct smi_info *info);
143         void (*irq_cleanup)(struct smi_info *info);
144         unsigned int io_size;
145         char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
146         void (*addr_source_cleanup)(struct smi_info *info);
147         void *addr_source_data;
148
149         /* Per-OEM handler, called from handle_flags().
150            Returns 1 when handle_flags() needs to be re-run
151            or 0 indicating it set si_state itself.
152         */
153         int (*oem_data_avail_handler)(struct smi_info *smi_info);
154
155         /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
156            is set to hold the flags until we are done handling everything
157            from the flags. */
158 #define RECEIVE_MSG_AVAIL       0x01
159 #define EVENT_MSG_BUFFER_FULL   0x02
160 #define WDT_PRE_TIMEOUT_INT     0x08
161 #define OEM0_DATA_AVAIL     0x20
162 #define OEM1_DATA_AVAIL     0x40
163 #define OEM2_DATA_AVAIL     0x80
164 #define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
165                              OEM1_DATA_AVAIL | \
166                              OEM2_DATA_AVAIL)
167         unsigned char       msg_flags;
168
169         /* If set to true, this will request events the next time the
170            state machine is idle. */
171         atomic_t            req_events;
172
173         /* If true, run the state machine to completion on every send
174            call.  Generally used after a panic to make sure stuff goes
175            out. */
176         int                 run_to_completion;
177
178         /* The I/O port of an SI interface. */
179         int                 port;
180
181         /* The space between start addresses of the two ports.  For
182            instance, if the first port is 0xca2 and the spacing is 4, then
183            the second port is 0xca6. */
184         unsigned int        spacing;
185
186         /* zero if no irq; */
187         int                 irq;
188
189         /* The timer for this si. */
190         struct timer_list   si_timer;
191
192         /* The time (in jiffies) the last timeout occurred at. */
193         unsigned long       last_timeout_jiffies;
194
195         /* Used to gracefully stop the timer without race conditions. */
196         atomic_t            stop_operation;
197
198         /* The driver will disable interrupts when it gets into a
199            situation where it cannot handle messages due to lack of
200            memory.  Once that situation clears up, it will re-enable
201            interrupts. */
202         int interrupt_disabled;
203
204         /* From the get device id response... */
205         struct ipmi_device_id device_id;
206
207         /* Driver model stuff. */
208         struct device *dev;
209         struct platform_device *pdev;
210
211          /* True if we allocated the device, false if it came from
212           * someplace else (like PCI). */
213         int dev_registered;
214
215         /* Slave address, could be reported from DMI. */
216         unsigned char slave_addr;
217
218         /* Counters and things for the proc filesystem. */
        /* count_lock protects all of the statistics below. */
219         spinlock_t count_lock;
220         unsigned long short_timeouts;
221         unsigned long long_timeouts;
222         unsigned long timeout_restarts;
223         unsigned long idles;
224         unsigned long interrupts;
225         unsigned long attentions;
226         unsigned long flag_fetches;
227         unsigned long hosed_count;
228         unsigned long complete_transactions;
229         unsigned long events;
230         unsigned long watchdog_pretimeouts;
231         unsigned long incoming_messages;
232
        /* Kernel thread that polls the state machine (see ipmi_thread). */
233         struct task_struct *thread;
234
        /* Entry in the driver's list of interfaces. */
235         struct list_head link;
236 };
237
238 static int try_smi_init(struct smi_info *smi);
239
/* Notifier chain invoked from start_next_msg() just before each
   transaction is started. */
240 static struct notifier_block *xaction_notifier_list;
/* Add a callback to the per-transaction notifier chain. */
241 static int register_xaction_notifier(struct notifier_block * nb)
242 {
243         return notifier_chain_register(&xaction_notifier_list, nb);
244 }
245
246 static void si_restart_short_timer(struct smi_info *smi_info);
247
/* Hand a completed response up to the message handler.  Called with
   si_lock held; the lock is dropped around the callback (the upper
   layer may call back into this driver) and re-taken before return. */
248 static void deliver_recv_msg(struct smi_info *smi_info,
249                              struct ipmi_smi_msg *msg)
250 {
251         /* Deliver the message to the upper layer with the lock
252            released. */
253         spin_unlock(&(smi_info->si_lock));
254         ipmi_smi_msg_received(smi_info->intf, msg);
255         spin_lock(&(smi_info->si_lock));
256 }
257
/* The current transaction failed; synthesize an error response for
   curr_msg and deliver it so the caller is not left hanging.
   Clears curr_msg; drops/re-takes si_lock via deliver_recv_msg(). */
258 static void return_hosed_msg(struct smi_info *smi_info)
259 {
260         struct ipmi_smi_msg *msg = smi_info->curr_msg;
261
262         /* Make it a response: set the response bit in the netfn byte
           (netfn lives in the top 6 bits, so the low response bit is
           0x04 here) and report an "unknown error" completion code. */
263         msg->rsp[0] = msg->data[0] | 4;
264         msg->rsp[1] = msg->data[1];
265         msg->rsp[2] = 0xFF; /* Unknown error. */
266         msg->rsp_size = 3;
267
268         smi_info->curr_msg = NULL;
269         deliver_recv_msg(smi_info, msg);
270 }
271
/* Dequeue the next outgoing message (high-priority queue first) and
   start its transaction in the state machine.  Returns SI_SM_IDLE if
   nothing was queued, SI_SM_CALL_WITHOUT_DELAY otherwise.  Must be
   called with si_lock held and interrupts off; takes msg_lock. */
272 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
273 {
274         int              rv;
275         struct list_head *entry = NULL;
276 #ifdef DEBUG_TIMING
277         struct timeval t;
278 #endif
279
280         /* No need to save flags, we already have interrupts off and we
281            already hold the SMI lock. */
282         spin_lock(&(smi_info->msg_lock));
283
284         /* Pick the high priority queue first. */
285         if (!list_empty(&(smi_info->hp_xmit_msgs))) {
286                 entry = smi_info->hp_xmit_msgs.next;
287         } else if (!list_empty(&(smi_info->xmit_msgs))) {
288                 entry = smi_info->xmit_msgs.next;
289         }
290
291         if (!entry) {
292                 smi_info->curr_msg = NULL;
293                 rv = SI_SM_IDLE;
294         } else {
295                 int err;
296
297                 list_del(entry);
298                 smi_info->curr_msg = list_entry(entry,
299                                                 struct ipmi_smi_msg,
300                                                 link);
301 #ifdef DEBUG_TIMING
302                 do_gettimeofday(&t);
303                 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
304 #endif
                /* Give registered notifiers a chance to veto/stall the
                   transaction (e.g. the watchdog). */
305                 err = notifier_call_chain(&xaction_notifier_list, 0, smi_info);
306                 if (err & NOTIFY_STOP_MASK) {
307                         rv = SI_SM_CALL_WITHOUT_DELAY;
308                         goto out;
309                 }
310                 err = smi_info->handlers->start_transaction(
311                         smi_info->si_sm,
312                         smi_info->curr_msg->data,
313                         smi_info->curr_msg->data_size);
314                 if (err) {
315                         return_hosed_msg(smi_info);
316                 }
317
318                 rv = SI_SM_CALL_WITHOUT_DELAY;
319         }
320         out:
321         spin_unlock(&(smi_info->msg_lock));
322
323         return rv;
324 }
325
326 static void start_enable_irq(struct smi_info *smi_info)
327 {
328         unsigned char msg[2];
329
330         /* If we are enabling interrupts, we have to tell the
331            BMC to use them. */
332         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
333         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
334
335         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
336         smi_info->si_state = SI_ENABLE_INTERRUPTS1;
337 }
338
339 static void start_clear_flags(struct smi_info *smi_info)
340 {
341         unsigned char msg[3];
342
343         /* Make sure the watchdog pre-timeout flag is not set at startup. */
344         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
345         msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
346         msg[2] = WDT_PRE_TIMEOUT_INT;
347
348         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
349         smi_info->si_state = SI_CLEARING_FLAGS;
350 }
351
352 /* When we have a situtaion where we run out of memory and cannot
353    allocate messages, we just leave them in the BMC and run the system
354    polled until we can allocate some memory.  Once we have some
355    memory, we will re-enable the interrupt. */
356 static inline void disable_si_irq(struct smi_info *smi_info)
357 {
358         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
359                 disable_irq_nosync(smi_info->irq);
360                 smi_info->interrupt_disabled = 1;
361         }
362 }
363
364 static inline void enable_si_irq(struct smi_info *smi_info)
365 {
366         if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
367                 enable_irq(smi_info->irq);
368                 smi_info->interrupt_disabled = 0;
369         }
370 }
371
/* Act on the message flags last fetched from the BMC, one condition
   per pass: watchdog pre-timeout first, then queued messages, then
   buffered events, then OEM data.  Called with si_lock held; the
   watchdog branch temporarily drops it around the upper-layer call. */
372 static void handle_flags(struct smi_info *smi_info)
373 {
374  retry:
375         if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
376                 /* Watchdog pre-timeout */
377                 spin_lock(&smi_info->count_lock);
378                 smi_info->watchdog_pretimeouts++;
379                 spin_unlock(&smi_info->count_lock);
380
381                 start_clear_flags(smi_info);
382                 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
383                 spin_unlock(&(smi_info->si_lock));
384                 ipmi_smi_watchdog_pretimeout(smi_info->intf);
385                 spin_lock(&(smi_info->si_lock));
386         } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
387                 /* Messages available. */
388                 smi_info->curr_msg = ipmi_alloc_smi_msg();
389                 if (!smi_info->curr_msg) {
                        /* No memory: fall back to polled mode until an
                           allocation succeeds (see disable_si_irq). */
390                         disable_si_irq(smi_info);
391                         smi_info->si_state = SI_NORMAL;
392                         return;
393                 }
394                 enable_si_irq(smi_info);
395
396                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
397                 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
398                 smi_info->curr_msg->data_size = 2;
399
400                 smi_info->handlers->start_transaction(
401                         smi_info->si_sm,
402                         smi_info->curr_msg->data,
403                         smi_info->curr_msg->data_size);
404                 smi_info->si_state = SI_GETTING_MESSAGES;
405         } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
406                 /* Events available. */
407                 smi_info->curr_msg = ipmi_alloc_smi_msg();
408                 if (!smi_info->curr_msg) {
409                         disable_si_irq(smi_info);
410                         smi_info->si_state = SI_NORMAL;
411                         return;
412                 }
413                 enable_si_irq(smi_info);
414
415                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
416                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
417                 smi_info->curr_msg->data_size = 2;
418
419                 smi_info->handlers->start_transaction(
420                         smi_info->si_sm,
421                         smi_info->curr_msg->data,
422                         smi_info->curr_msg->data_size);
423                 smi_info->si_state = SI_GETTING_EVENTS;
424         } else if (smi_info->msg_flags & OEM_DATA_AVAIL) {
                /* Per-OEM hook; returns nonzero when we must re-scan
                   the flags (see the field's comment in smi_info). */
425                 if (smi_info->oem_data_avail_handler)
426                         if (smi_info->oem_data_avail_handler(smi_info))
427                                 goto retry;
428         } else {
429                 smi_info->si_state = SI_NORMAL;
430         }
431 }
432
/* A transaction completed in the state machine; fetch its result and
   dispatch on si_state to decide what the response was for and what
   (if anything) to start next.  Called from smi_event_handler() with
   si_lock held; delivery paths drop/re-take the lock. */
433 static void handle_transaction_done(struct smi_info *smi_info)
434 {
435         struct ipmi_smi_msg *msg;
436 #ifdef DEBUG_TIMING
437         struct timeval t;
438
439         do_gettimeofday(&t);
440         printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
441 #endif
442         switch (smi_info->si_state) {
443         case SI_NORMAL:
444                 if (!smi_info->curr_msg)
445                         break;
446
447                 smi_info->curr_msg->rsp_size
448                         = smi_info->handlers->get_result(
449                                 smi_info->si_sm,
450                                 smi_info->curr_msg->rsp,
451                                 IPMI_MAX_MSG_LENGTH);
452
453                 /* Do this here because deliver_recv_msg() releases the
454                    lock, and a new message can be put in during the
455                    time the lock is released. */
456                 msg = smi_info->curr_msg;
457                 smi_info->curr_msg = NULL;
458                 deliver_recv_msg(smi_info, msg);
459                 break;
460
461         case SI_GETTING_FLAGS:
462         {
463                 unsigned char msg[4];
464                 unsigned int  len;
465
466                 /* We got the flags from the SMI, now handle them. */
467                 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                /* msg[2] is the IPMI completion code; nonzero = error. */
468                 if (msg[2] != 0) {
469                         /* Error fetching flags, just give up for
470                            now. */
471                         smi_info->si_state = SI_NORMAL;
472                 } else if (len < 4) {
473                         /* Hmm, no flags.  That's technically illegal, but
474                            don't use uninitialized data. */
475                         smi_info->si_state = SI_NORMAL;
476                 } else {
477                         smi_info->msg_flags = msg[3];
478                         handle_flags(smi_info);
479                 }
480                 break;
481         }
482
483         case SI_CLEARING_FLAGS:
484         case SI_CLEARING_FLAGS_THEN_SET_IRQ:
485         {
486                 unsigned char msg[3];
487
488                 /* We cleared the flags. */
489                 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
490                 if (msg[2] != 0) {
491                         /* Error clearing flags */
492                         printk(KERN_WARNING
493                                "ipmi_si: Error clearing flags: %2.2x\n",
494                                msg[2]);
495                 }
496                 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
497                         start_enable_irq(smi_info);
498                 else
499                         smi_info->si_state = SI_NORMAL;
500                 break;
501         }
502
503         case SI_GETTING_EVENTS:
504         {
505                 smi_info->curr_msg->rsp_size
506                         = smi_info->handlers->get_result(
507                                 smi_info->si_sm,
508                                 smi_info->curr_msg->rsp,
509                                 IPMI_MAX_MSG_LENGTH);
510
511                 /* Do this here because deliver_recv_msg() releases the
512                    lock, and a new message can be put in during the
513                    time the lock is released. */
514                 msg = smi_info->curr_msg;
515                 smi_info->curr_msg = NULL;
516                 if (msg->rsp[2] != 0) {
517                         /* Error getting event, probably done. */
518                         msg->done(msg);
519
520                         /* Take off the event flag. */
521                         smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
522                         handle_flags(smi_info);
523                 } else {
524                         spin_lock(&smi_info->count_lock);
525                         smi_info->events++;
526                         spin_unlock(&smi_info->count_lock);
527
528                         /* Do this before we deliver the message
529                            because delivering the message releases the
530                            lock and something else can mess with the
531                            state. */
532                         handle_flags(smi_info);
533
534                         deliver_recv_msg(smi_info, msg);
535                 }
536                 break;
537         }
538
539         case SI_GETTING_MESSAGES:
540         {
541                 smi_info->curr_msg->rsp_size
542                         = smi_info->handlers->get_result(
543                                 smi_info->si_sm,
544                                 smi_info->curr_msg->rsp,
545                                 IPMI_MAX_MSG_LENGTH);
546
547                 /* Do this here because deliver_recv_msg() releases the
548                    lock, and a new message can be put in during the
549                    time the lock is released. */
550                 msg = smi_info->curr_msg;
551                 smi_info->curr_msg = NULL;
552                 if (msg->rsp[2] != 0) {
553                         /* Error getting event, probably done. */
554                         msg->done(msg);
555
556                         /* Take off the msg flag. */
557                         smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
558                         handle_flags(smi_info);
559                 } else {
560                         spin_lock(&smi_info->count_lock);
561                         smi_info->incoming_messages++;
562                         spin_unlock(&smi_info->count_lock);
563
564                         /* Do this before we deliver the message
565                            because delivering the message releases the
566                            lock and something else can mess with the
567                            state. */
568                         handle_flags(smi_info);
569
570                         deliver_recv_msg(smi_info, msg);
571                 }
572                 break;
573         }
574
575         case SI_ENABLE_INTERRUPTS1:
576         {
577                 unsigned char msg[4];
578
579                 /* We got the flags from the SMI, now handle them. */
580                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
581                 if (msg[2] != 0) {
582                         printk(KERN_WARNING
583                                "ipmi_si: Could not enable interrupts"
584                                ", failed get, using polled mode.\n");
585                         smi_info->si_state = SI_NORMAL;
586                 } else {
                        /* msg[3] holds the enables byte returned by the
                           GET; write it back with the low bit added. */
587                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
588                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
589                         msg[2] = msg[3] | 1; /* enable msg queue int */
590                         smi_info->handlers->start_transaction(
591                                 smi_info->si_sm, msg, 3);
592                         smi_info->si_state = SI_ENABLE_INTERRUPTS2;
593                 }
594                 break;
595         }
596
597         case SI_ENABLE_INTERRUPTS2:
598         {
599                 unsigned char msg[4];
600
601                 /* We got the flags from the SMI, now handle them. */
602                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
603                 if (msg[2] != 0) {
604                         printk(KERN_WARNING
605                                "ipmi_si: Could not enable interrupts"
606                                ", failed set, using polled mode.\n");
607                 }
608                 smi_info->si_state = SI_NORMAL;
609                 break;
610         }
611         }
612 }
613
614 /* Called on timeouts and events.  Timeouts should pass the elapsed
615    time, interrupts should pass in zero. */
/* Core driver loop: run the state machine, handle completion/hosed
   results, react to ATTN, and start queued work when idle.  Returns
   the state machine's final result so callers can pick the next
   timeout.  Must be called with si_lock held. */
616 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
617                                            int time)
618 {
619         enum si_sm_result si_sm_result;
620
621  restart:
622         /* There used to be a loop here that waited a little while
623            (around 25us) before giving up.  That turned out to be
624            pointless, the minimum delays I was seeing were in the 300us
625            range, which is far too long to wait in an interrupt.  So
626            we just run until the state machine tells us something
627            happened or it needs a delay. */
628         si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
629         time = 0;
630         while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
631         {
632                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
633         }
634
635         if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
636         {
637                 spin_lock(&smi_info->count_lock);
638                 smi_info->complete_transactions++;
639                 spin_unlock(&smi_info->count_lock);
640
641                 handle_transaction_done(smi_info);
642                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
643         }
644         else if (si_sm_result == SI_SM_HOSED)
645         {
646                 spin_lock(&smi_info->count_lock);
647                 smi_info->hosed_count++;
648                 spin_unlock(&smi_info->count_lock);
649
650                 /* Do this before return_hosed_msg, because that
651                    releases the lock. */
652                 smi_info->si_state = SI_NORMAL;
653                 if (smi_info->curr_msg != NULL) {
654                         /* If we were handling a user message, format
655                            a response to send to the upper layer to
656                            tell it about the error. */
657                         return_hosed_msg(smi_info);
658                 }
659                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
660         }
661
662         /* We prefer handling attn over new messages. */
663         if (si_sm_result == SI_SM_ATTN)
664         {
665                 unsigned char msg[2];
666
667                 spin_lock(&smi_info->count_lock);
668                 smi_info->attentions++;
669                 spin_unlock(&smi_info->count_lock);
670
671                 /* Got an attn, send down a get message flags to see
672                    what's causing it.  It would be better to handle
673                    this in the upper layer, but due to the way
674                    interrupts work with the SMI, that's not really
675                    possible. */
676                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
677                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
678
679                 smi_info->handlers->start_transaction(
680                         smi_info->si_sm, msg, 2);
681                 smi_info->si_state = SI_GETTING_FLAGS;
682                 goto restart;
683         }
684
685         /* If we are currently idle, try to start the next message. */
686         if (si_sm_result == SI_SM_IDLE) {
687                 spin_lock(&smi_info->count_lock);
688                 smi_info->idles++;
689                 spin_unlock(&smi_info->count_lock);
690
691                 si_sm_result = start_next_msg(smi_info);
692                 if (si_sm_result != SI_SM_IDLE)
693                         goto restart;
694         }
695
696         if ((si_sm_result == SI_SM_IDLE)
697             && (atomic_read(&smi_info->req_events)))
698         {
699                 /* We are idle and the upper layer requested that I fetch
700                    events, so do so. */
701                 unsigned char msg[2];
702
703                 spin_lock(&smi_info->count_lock);
704                 smi_info->flag_fetches++;
705                 spin_unlock(&smi_info->count_lock);
706
707                 atomic_set(&smi_info->req_events, 0);
708                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
709                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
710
711                 smi_info->handlers->start_transaction(
712                         smi_info->si_sm, msg, 2);
713                 smi_info->si_state = SI_GETTING_FLAGS;
714                 goto restart;
715         }
716
717         return si_sm_result;
718 }
719
/* Upper-layer send entry point: queue msg (high-priority queue when
   priority > 0) and, if the interface is idle, start it immediately.
   In run-to-completion mode the whole transaction is driven to
   completion synchronously before returning.  Takes msg_lock then
   si_lock -- never both at once. */
720 static void sender(void                *send_info,
721                    struct ipmi_smi_msg *msg,
722                    int                 priority)
723 {
724         struct smi_info   *smi_info = send_info;
725         enum si_sm_result result;
726         unsigned long     flags;
727 #ifdef DEBUG_TIMING
728         struct timeval    t;
729 #endif
730
731         spin_lock_irqsave(&(smi_info->msg_lock), flags);
732 #ifdef DEBUG_TIMING
733         do_gettimeofday(&t);
734         printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
735 #endif
736
737         if (smi_info->run_to_completion) {
738                 /* If we are running to completion, then throw it in
739                    the list and run transactions until everything is
740                    clear.  Priority doesn't matter here. */
741                 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
742
743                 /* We have to release the msg lock and claim the smi
744                    lock in this case, because of race conditions. */
745                 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
746
747                 spin_lock_irqsave(&(smi_info->si_lock), flags);
748                 result = smi_event_handler(smi_info, 0);
749                 while (result != SI_SM_IDLE) {
750                         udelay(SI_SHORT_TIMEOUT_USEC);
751                         result = smi_event_handler(smi_info,
752                                                    SI_SHORT_TIMEOUT_USEC);
753                 }
754                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
755                 return;
756         } else {
757                 if (priority > 0) {
758                         list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
759                 } else {
760                         list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
761                 }
762         }
763         spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
764
765         spin_lock_irqsave(&(smi_info->si_lock), flags);
        /* If nothing is in flight, kick the new message off now and
           re-arm the short timer to poll it. */
766         if ((smi_info->si_state == SI_NORMAL)
767             && (smi_info->curr_msg == NULL))
768         {
769                 start_next_msg(smi_info);
770                 si_restart_short_timer(smi_info);
771         }
772         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
773 }
774
/* Toggle synchronous (polled, run-to-completion) operation -- used
   around panics so queued messages still go out.  When enabling,
   drive the state machine until it is idle before returning. */
775 static void set_run_to_completion(void *send_info, int i_run_to_completion)
776 {
777         struct smi_info   *smi_info = send_info;
778         enum si_sm_result result;
779         unsigned long     flags;
780
781         spin_lock_irqsave(&(smi_info->si_lock), flags);
782
783         smi_info->run_to_completion = i_run_to_completion;
784         if (i_run_to_completion) {
785                 result = smi_event_handler(smi_info, 0);
786                 while (result != SI_SM_IDLE) {
787                         udelay(SI_SHORT_TIMEOUT_USEC);
788                         result = smi_event_handler(smi_info,
789                                                    SI_SHORT_TIMEOUT_USEC);
790                 }
791         }
792
793         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
794 }
795
796 static int ipmi_thread(void *data)
797 {
798         struct smi_info *smi_info = data;
799         unsigned long flags;
800         enum si_sm_result smi_result;
801
802         set_user_nice(current, 19);
803         while (!kthread_should_stop()) {
804                 spin_lock_irqsave(&(smi_info->si_lock), flags);
805                 smi_result=smi_event_handler(smi_info, 0);
806                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
807                 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
808                         /* do nothing */
809                 }
810                 else if (smi_result == SI_SM_CALL_WITH_DELAY)
811                         udelay(1);
812                 else
813                         schedule_timeout_interruptible(1);
814         }
815         return 0;
816 }
817
818
/* Upper-layer poll hook: run the state machine once with no elapsed
   time accounted. */
static void poll(void *send_info)
{
        smi_event_handler((struct smi_info *) send_info, 0);
}
825
826 static void request_events(void *send_info)
827 {
828         struct smi_info *smi_info = send_info;
829
830         atomic_set(&smi_info->req_events, 1);
831 }
832
833 static int initialized = 0;
834
/* Must be called with interrupts off and with the si_lock held. */
/* Re-arm the SI timer to fire SI_SHORT_TIMEOUT_USEC from now.  With
   high-resolution timers unconfigured this is a no-op and the normal
   jiffy-granularity timeout remains in effect. */
static void si_restart_short_timer(struct smi_info *smi_info)
{
#if defined(CONFIG_HIGH_RES_TIMERS)
        unsigned long flags;
        unsigned long jiffies_now;
        unsigned long seq;

        if (del_timer(&(smi_info->si_timer))) {
                /* If we don't delete the timer, then it will go off
                   immediately, anyway.  So we only process if we
                   actually delete the timer. */

                /* xtime_lock guards the consistent jiffies /
                   arch-cycle snapshot pair. */
                do {
                        seq = read_seqbegin_irqsave(&xtime_lock, flags);
                        jiffies_now = jiffies;
                        smi_info->si_timer.expires = jiffies_now;
                        smi_info->si_timer.arch_cycle_expires
                                = get_arch_cycles(jiffies_now);
                } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

                add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);

                add_timer(&(smi_info->si_timer));
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->timeout_restarts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
        }
#endif
}
865
/* Timer callback that drives the state machine when running polled
   (and handles long timeouts when running with interrupts).  Elapsed
   time since the last run is converted to usecs and fed to
   smi_event_handler(); the timer is then re-armed with a short or
   long period depending on what the state machine needs. */
static void smi_timeout(unsigned long data)
{
        struct smi_info   *smi_info = (struct smi_info *) data;
        enum si_sm_result smi_result;
        unsigned long     flags;
        unsigned long     jiffies_now;
        long              time_diff;
#ifdef DEBUG_TIMING
        struct timeval    t;
#endif

        /* Interface is shutting down: do not run or re-arm. */
        if (atomic_read(&smi_info->stop_operation))
                return;

        spin_lock_irqsave(&(smi_info->si_lock), flags);
#ifdef DEBUG_TIMING
        do_gettimeofday(&t);
        printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
        jiffies_now = jiffies;
        /* Elapsed jiffies -> microseconds for the state machine. */
        time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
                     * SI_USEC_PER_JIFFY);
        smi_result = smi_event_handler(smi_info, time_diff);

        spin_unlock_irqrestore(&(smi_info->si_lock), flags);

        smi_info->last_timeout_jiffies = jiffies_now;

        if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
                /* Running with interrupts, only do long timeouts. */
                smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->long_timeouts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
                goto do_add_timer;
        }

        /* If the state machine asks for a short delay, then shorten
           the timer timeout. */
        if (smi_result == SI_SM_CALL_WITH_DELAY) {
#if defined(CONFIG_HIGH_RES_TIMERS)
                unsigned long seq;
#endif
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->short_timeouts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
#if defined(CONFIG_HIGH_RES_TIMERS)
                /* High-res path: expire SI_SHORT_TIMEOUT_USEC from now;
                   xtime_lock guards the jiffies/arch-cycle pair. */
                do {
                        seq = read_seqbegin_irqsave(&xtime_lock, flags);
                        smi_info->si_timer.expires = jiffies;
                        smi_info->si_timer.arch_cycle_expires
                                = get_arch_cycles(smi_info->si_timer.expires);
                } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
                add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
#else
                smi_info->si_timer.expires = jiffies + 1;
#endif
        } else {
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->long_timeouts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
                smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
#if defined(CONFIG_HIGH_RES_TIMERS)
                smi_info->si_timer.arch_cycle_expires = 0;
#endif
        }

 do_add_timer:
        add_timer(&(smi_info->si_timer));
}
936
937 static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs)
938 {
939         struct smi_info *smi_info = data;
940         unsigned long   flags;
941 #ifdef DEBUG_TIMING
942         struct timeval  t;
943 #endif
944
945         spin_lock_irqsave(&(smi_info->si_lock), flags);
946
947         spin_lock(&smi_info->count_lock);
948         smi_info->interrupts++;
949         spin_unlock(&smi_info->count_lock);
950
951         if (atomic_read(&smi_info->stop_operation))
952                 goto out;
953
954 #ifdef DEBUG_TIMING
955         do_gettimeofday(&t);
956         printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
957 #endif
958         smi_event_handler(smi_info, 0);
959  out:
960         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
961         return IRQ_HANDLED;
962 }
963
964 static irqreturn_t si_bt_irq_handler(int irq, void *data, struct pt_regs *regs)
965 {
966         struct smi_info *smi_info = data;
967         /* We need to clear the IRQ flag for the BT interface. */
968         smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
969                              IPMI_BT_INTMASK_CLEAR_IRQ_BIT
970                              | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
971         return si_irq_handler(irq, data, regs);
972 }
973
974
/* Callbacks this driver hands to the upper IPMI message layer. */
static struct ipmi_smi_handlers handlers =
{
        .owner                  = THIS_MODULE,
        .sender                 = sender,
        .request_events         = request_events,
        .set_run_to_completion  = set_run_to_completion,
        .poll                   = poll,
};
983
/* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
   a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS */

#define SI_MAX_PARMS 4
/* All discovered/registered interfaces, guarded by smi_infos_lock. */
static LIST_HEAD(smi_infos);
static DECLARE_MUTEX(smi_infos_lock);
static int smi_num; /* Used to sequence the SMIs */

#define DEFAULT_REGSPACING      1

/* Backing storage for the module parameters below; each array is
   indexed by interface number. */
static int           si_trydefaults = 1;
static char          *si_type[SI_MAX_PARMS];
#define MAX_SI_TYPE_STR 30
static char          si_type_str[MAX_SI_TYPE_STR];
static unsigned long addrs[SI_MAX_PARMS];
static int num_addrs;
static unsigned int  ports[SI_MAX_PARMS];
static int num_ports;
static int           irqs[SI_MAX_PARMS];
static int num_irqs;
static int           regspacings[SI_MAX_PARMS];
static int num_regspacings = 0;
static int           regsizes[SI_MAX_PARMS];
static int num_regsizes = 0;
static int           regshifts[SI_MAX_PARMS];
static int num_regshifts = 0;
static int slave_addrs[SI_MAX_PARMS];
static int num_slave_addrs = 0;
1012
1013
1014 module_param_named(trydefaults, si_trydefaults, bool, 0);
1015 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1016                  " default scan of the KCS and SMIC interface at the standard"
1017                  " address");
1018 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1019 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1020                  " interface separated by commas.  The types are 'kcs',"
1021                  " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
1022                  " the first interface to kcs and the second to bt");
1023 module_param_array(addrs, long, &num_addrs, 0);
1024 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1025                  " addresses separated by commas.  Only use if an interface"
1026                  " is in memory.  Otherwise, set it to zero or leave"
1027                  " it blank.");
1028 module_param_array(ports, int, &num_ports, 0);
1029 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1030                  " addresses separated by commas.  Only use if an interface"
1031                  " is a port.  Otherwise, set it to zero or leave"
1032                  " it blank.");
1033 module_param_array(irqs, int, &num_irqs, 0);
1034 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1035                  " addresses separated by commas.  Only use if an interface"
1036                  " has an interrupt.  Otherwise, set it to zero or leave"
1037                  " it blank.");
1038 module_param_array(regspacings, int, &num_regspacings, 0);
1039 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1040                  " and each successive register used by the interface.  For"
1041                  " instance, if the start address is 0xca2 and the spacing"
1042                  " is 2, then the second address is at 0xca4.  Defaults"
1043                  " to 1.");
1044 module_param_array(regsizes, int, &num_regsizes, 0);
1045 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1046                  " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1047                  " 16-bit, 32-bit, or 64-bit register.  Use this if you"
1048                  " the 8-bit IPMI register has to be read from a larger"
1049                  " register.");
1050 module_param_array(regshifts, int, &num_regshifts, 0);
1051 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the."
1052                  " IPMI register, in bits.  For instance, if the data"
1053                  " is read from a 32-bit word and the IPMI data is in"
1054                  " bit 8-15, then the shift would be 8");
1055 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1056 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1057                  " the controller.  Normally this is 0x20, but can be"
1058                  " overridden by this parm.  This is an array indexed"
1059                  " by interface number.");
1060
1061
/* Address-space discriminators; also index addr_space_to_str. */
#define IPMI_IO_ADDR_SPACE  0
#define IPMI_MEM_ADDR_SPACE 1
static char *addr_space_to_str[] = { "I/O", "memory" };
1065
1066 static void std_irq_cleanup(struct smi_info *info)
1067 {
1068         if (info->si_type == SI_BT)
1069                 /* Disable the interrupt in the BT interface. */
1070                 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1071         free_irq(info->irq, info);
1072 }
1073
1074 static int std_irq_setup(struct smi_info *info)
1075 {
1076         int rv;
1077
1078         if (!info->irq)
1079                 return 0;
1080
1081         if (info->si_type == SI_BT) {
1082                 rv = request_irq(info->irq,
1083                                  si_bt_irq_handler,
1084                                  SA_INTERRUPT,
1085                                  DEVICE_NAME,
1086                                  info);
1087                 if (!rv)
1088                         /* Enable the interrupt in the BT interface. */
1089                         info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1090                                          IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1091         } else
1092                 rv = request_irq(info->irq,
1093                                  si_irq_handler,
1094                                  SA_INTERRUPT,
1095                                  DEVICE_NAME,
1096                                  info);
1097         if (rv) {
1098                 printk(KERN_WARNING
1099                        "ipmi_si: %s unable to claim interrupt %d,"
1100                        " running polled\n",
1101                        DEVICE_NAME, info->irq);
1102                 info->irq = 0;
1103         } else {
1104                 info->irq_cleanup = std_irq_cleanup;
1105                 printk("  Using irq %d\n", info->irq);
1106         }
1107
1108         return rv;
1109 }
1110
1111 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1112 {
1113         unsigned int addr = io->addr_data;
1114
1115         return inb(addr + (offset * io->regspacing));
1116 }
1117
1118 static void port_outb(struct si_sm_io *io, unsigned int offset,
1119                       unsigned char b)
1120 {
1121         unsigned int addr = io->addr_data;
1122
1123         outb(b, addr + (offset * io->regspacing));
1124 }
1125
1126 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1127 {
1128         unsigned int addr = io->addr_data;
1129
1130         return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1131 }
1132
1133 static void port_outw(struct si_sm_io *io, unsigned int offset,
1134                       unsigned char b)
1135 {
1136         unsigned int addr = io->addr_data;
1137
1138         outw(b << io->regshift, addr + (offset * io->regspacing));
1139 }
1140
1141 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1142 {
1143         unsigned int addr = io->addr_data;
1144
1145         return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1146 }
1147
1148 static void port_outl(struct si_sm_io *io, unsigned int offset,
1149                       unsigned char b)
1150 {
1151         unsigned int addr = io->addr_data;
1152
1153         outl(b << io->regshift, addr+(offset * io->regspacing));
1154 }
1155
1156 static void port_cleanup(struct smi_info *info)
1157 {
1158         unsigned int addr = info->io.addr_data;
1159         int          mapsize;
1160
1161         if (addr) {
1162                 mapsize = ((info->io_size * info->io.regspacing)
1163                            - (info->io.regspacing - info->io.regsize));
1164
1165                 release_region (addr, mapsize);
1166         }
1167 }
1168
1169 static int port_setup(struct smi_info *info)
1170 {
1171         unsigned int addr = info->io.addr_data;
1172         int          mapsize;
1173
1174         if (!addr)
1175                 return -ENODEV;
1176
1177         info->io_cleanup = port_cleanup;
1178
1179         /* Figure out the actual inb/inw/inl/etc routine to use based
1180            upon the register size. */
1181         switch (info->io.regsize) {
1182         case 1:
1183                 info->io.inputb = port_inb;
1184                 info->io.outputb = port_outb;
1185                 break;
1186         case 2:
1187                 info->io.inputb = port_inw;
1188                 info->io.outputb = port_outw;
1189                 break;
1190         case 4:
1191                 info->io.inputb = port_inl;
1192                 info->io.outputb = port_outl;
1193                 break;
1194         default:
1195                 printk("ipmi_si: Invalid register size: %d\n",
1196                        info->io.regsize);
1197                 return -EINVAL;
1198         }
1199
1200         /* Calculate the total amount of memory to claim.  This is an
1201          * unusual looking calculation, but it avoids claiming any
1202          * more memory than it has to.  It will claim everything
1203          * between the first address to the end of the last full
1204          * register. */
1205         mapsize = ((info->io_size * info->io.regspacing)
1206                    - (info->io.regspacing - info->io.regsize));
1207
1208         if (request_region(addr, mapsize, DEVICE_NAME) == NULL)
1209                 return -EIO;
1210         return 0;
1211 }
1212
1213 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1214 {
1215         return readb((io->addr)+(offset * io->regspacing));
1216 }
1217
1218 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1219                      unsigned char b)
1220 {
1221         writeb(b, (io->addr)+(offset * io->regspacing));
1222 }
1223
1224 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1225 {
1226         return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1227                 && 0xff;
1228 }
1229
static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
                     unsigned char b)
{
        /* NOTE(review): writes with writeb although the matching read
           path (intf_mem_inw) uses readw — presumably only the low
           byte is significant on writes; confirm against the device
           behavior before "fixing" this to writew. */
        writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
}
1235
1236 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1237 {
1238         return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1239                 && 0xff;
1240 }
1241
1242 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1243                      unsigned char b)
1244 {
1245         writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1246 }
1247
#ifdef readq
/* 64-bit MMIO accessors, only on architectures providing readq/writeq. */
/* Fix: mem_inq used the logical operator "&& 0xff" (yields 0 or 1)
   instead of the intended bitwise mask "& 0xff". */
static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
{
        return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
                & 0xff;
}

static void mem_outq(struct si_sm_io *io, unsigned int offset,
                     unsigned char b)
{
        writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
}
#endif
1261
1262 static void mem_cleanup(struct smi_info *info)
1263 {
1264         unsigned long addr = info->io.addr_data;
1265         int           mapsize;
1266
1267         if (info->io.addr) {
1268                 iounmap(info->io.addr);
1269
1270                 mapsize = ((info->io_size * info->io.regspacing)
1271                            - (info->io.regspacing - info->io.regsize));
1272
1273                 release_mem_region(addr, mapsize);
1274         }
1275 }
1276
1277 static int mem_setup(struct smi_info *info)
1278 {
1279         unsigned long addr = info->io.addr_data;
1280         int           mapsize;
1281
1282         if (!addr)
1283                 return -ENODEV;
1284
1285         info->io_cleanup = mem_cleanup;
1286
1287         /* Figure out the actual readb/readw/readl/etc routine to use based
1288            upon the register size. */
1289         switch (info->io.regsize) {
1290         case 1:
1291                 info->io.inputb = intf_mem_inb;
1292                 info->io.outputb = intf_mem_outb;
1293                 break;
1294         case 2:
1295                 info->io.inputb = intf_mem_inw;
1296                 info->io.outputb = intf_mem_outw;
1297                 break;
1298         case 4:
1299                 info->io.inputb = intf_mem_inl;
1300                 info->io.outputb = intf_mem_outl;
1301                 break;
1302 #ifdef readq
1303         case 8:
1304                 info->io.inputb = mem_inq;
1305                 info->io.outputb = mem_outq;
1306                 break;
1307 #endif
1308         default:
1309                 printk("ipmi_si: Invalid register size: %d\n",
1310                        info->io.regsize);
1311                 return -EINVAL;
1312         }
1313
1314         /* Calculate the total amount of memory to claim.  This is an
1315          * unusual looking calculation, but it avoids claiming any
1316          * more memory than it has to.  It will claim everything
1317          * between the first address to the end of the last full
1318          * register. */
1319         mapsize = ((info->io_size * info->io.regspacing)
1320                    - (info->io.regspacing - info->io.regsize));
1321
1322         if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1323                 return -EIO;
1324
1325         info->io.addr = ioremap(addr, mapsize);
1326         if (info->io.addr == NULL) {
1327                 release_mem_region(addr, mapsize);
1328                 return -EIO;
1329         }
1330         return 0;
1331 }
1332
1333
/* Register interfaces described by the hardcoded module parameters
   (type=/ports=/addrs=/irqs=/...).  One smi_info is allocated and
   handed to try_smi_init() per configured slot. */
static __devinit void hardcode_find_bmc(void)
{
        int             i;
        struct smi_info *info;

        for (i = 0; i < SI_MAX_PARMS; i++) {
                /* A slot counts as configured if either a port or a
                   memory address was supplied for it. */
                if (!ports[i] && !addrs[i])
                        continue;

                info = kzalloc(sizeof(*info), GFP_KERNEL);
                if (!info)
                        return;

                info->addr_source = "hardcoded";

                /* Default to KCS when no type string was given. */
                if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
                        info->si_type = SI_KCS;
                } else if (strcmp(si_type[i], "smic") == 0) {
                        info->si_type = SI_SMIC;
                } else if (strcmp(si_type[i], "bt") == 0) {
                        info->si_type = SI_BT;
                } else {
                        printk(KERN_WARNING
                               "ipmi_si: Interface type specified "
                               "for interface %d, was invalid: %s\n",
                               i, si_type[i]);
                        kfree(info);
                        continue;
                }

                if (ports[i]) {
                        /* An I/O port */
                        info->io_setup = port_setup;
                        info->io.addr_data = ports[i];
                        info->io.addr_type = IPMI_IO_ADDR_SPACE;
                } else if (addrs[i]) {
                        /* A memory port */
                        info->io_setup = mem_setup;
                        info->io.addr_data = addrs[i];
                        info->io.addr_type = IPMI_MEM_ADDR_SPACE;
                } else {
                        printk(KERN_WARNING
                               "ipmi_si: Interface type specified "
                               "for interface %d, "
                               "but port and address were not set or "
                               "set to zero.\n", i);
                        kfree(info);
                        continue;
                }

                info->io.addr = NULL;
                info->io.regspacing = regspacings[i];
                if (!info->io.regspacing)
                        info->io.regspacing = DEFAULT_REGSPACING;
                info->io.regsize = regsizes[i];
                if (!info->io.regsize)
                        /* NOTE(review): reuses DEFAULT_REGSPACING (1) as
                           the default register size too — confirm a
                           separate DEFAULT_REGSIZE is not intended. */
                        info->io.regsize = DEFAULT_REGSPACING;
                info->io.regshift = regshifts[i];
                info->irq = irqs[i];
                if (info->irq)
                        info->irq_setup = std_irq_setup;

                try_smi_init(info);
        }
}
1399
1400 #ifdef CONFIG_ACPI
1401
1402 #include <linux/acpi.h>
1403
/* Once we get an ACPI failure, we don't try any more, because we go
   through the tables sequentially.  Once we don't find a table, there
   are no more.  NOTE(review): never set within this chunk —
   presumably set on a table-read failure elsewhere; confirm. */
static int acpi_failure = 0;
1408
1409 /* For GPE-type interrupts. */
1410 static u32 ipmi_acpi_gpe(void *context)
1411 {
1412         struct smi_info *smi_info = context;
1413         unsigned long   flags;
1414 #ifdef DEBUG_TIMING
1415         struct timeval t;
1416 #endif
1417
1418         spin_lock_irqsave(&(smi_info->si_lock), flags);
1419
1420         spin_lock(&smi_info->count_lock);
1421         smi_info->interrupts++;
1422         spin_unlock(&smi_info->count_lock);
1423
1424         if (atomic_read(&smi_info->stop_operation))
1425                 goto out;
1426
1427 #ifdef DEBUG_TIMING
1428         do_gettimeofday(&t);
1429         printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1430 #endif
1431         smi_event_handler(smi_info, 0);
1432  out:
1433         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1434
1435         return ACPI_INTERRUPT_HANDLED;
1436 }
1437
1438 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1439 {
1440         if (!info->irq)
1441                 return;
1442
1443         acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1444 }
1445
1446 static int acpi_gpe_irq_setup(struct smi_info *info)
1447 {
1448         acpi_status status;
1449
1450         if (!info->irq)
1451                 return 0;
1452
1453         /* FIXME - is level triggered right? */
1454         status = acpi_install_gpe_handler(NULL,
1455                                           info->irq,
1456                                           ACPI_GPE_LEVEL_TRIGGERED,
1457                                           &ipmi_acpi_gpe,
1458                                           info);
1459         if (status != AE_OK) {
1460                 printk(KERN_WARNING
1461                        "ipmi_si: %s unable to claim ACPI GPE %d,"
1462                        " running polled\n",
1463                        DEVICE_NAME, info->irq);
1464                 info->irq = 0;
1465                 return -EINVAL;
1466         } else {
1467                 info->irq_cleanup = acpi_gpe_irq_cleanup;
1468                 printk("  Using ACPI GPE %d\n", info->irq);
1469                 return 0;
1470         }
1471 }
1472
1473 /*
1474  * Defined at
1475  * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1476  */
1477 struct SPMITable {
1478         s8      Signature[4];
1479         u32     Length;
1480         u8      Revision;
1481         u8      Checksum;
1482         s8      OEMID[6];
1483         s8      OEMTableID[8];
1484         s8      OEMRevision[4];
1485         s8      CreatorID[4];
1486         s8      CreatorRevision[4];
1487         u8      InterfaceType;
1488         u8      IPMIlegacy;
1489         s16     SpecificationRevision;
1490
1491         /*
1492          * Bit 0 - SCI interrupt supported
1493          * Bit 1 - I/O APIC/SAPIC
1494          */
1495         u8      InterruptType;
1496
1497         /* If bit 0 of InterruptType is set, then this is the SCI
1498            interrupt in the GPEx_STS register. */
1499         u8      GPE;
1500
1501         s16     Reserved;
1502
1503         /* If bit 1 of InterruptType is set, then this is the I/O
1504            APIC/SAPIC interrupt. */
1505         u32     GlobalSystemInterrupt;
1506
1507         /* The actual register address. */
1508         struct acpi_generic_address addr;
1509
1510         u8      UID[4];
1511
1512         s8      spmi_id[1]; /* A '\0' terminated array starts here. */
1513 };
1514
1515 static __devinit int try_init_acpi(struct SPMITable *spmi)
1516 {
1517         struct smi_info  *info;
1518         char             *io_type;
1519         u8               addr_space;
1520
1521         if (spmi->IPMIlegacy != 1) {
1522             printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1523             return -ENODEV;
1524         }
1525
1526         if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1527                 addr_space = IPMI_MEM_ADDR_SPACE;
1528         else
1529                 addr_space = IPMI_IO_ADDR_SPACE;
1530
1531         info = kzalloc(sizeof(*info), GFP_KERNEL);
1532         if (!info) {
1533                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1534                 return -ENOMEM;
1535         }
1536
1537         info->addr_source = "ACPI";
1538
1539         /* Figure out the interface type. */
1540         switch (spmi->InterfaceType)
1541         {
1542         case 1: /* KCS */
1543                 info->si_type = SI_KCS;
1544                 break;
1545         case 2: /* SMIC */
1546                 info->si_type = SI_SMIC;
1547                 break;
1548         case 3: /* BT */
1549                 info->si_type = SI_BT;
1550                 break;
1551         default:
1552                 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1553                         spmi->InterfaceType);
1554                 kfree(info);
1555                 return -EIO;
1556         }
1557
1558         if (spmi->InterruptType & 1) {
1559                 /* We've got a GPE interrupt. */
1560                 info->irq = spmi->GPE;
1561                 info->irq_setup = acpi_gpe_irq_setup;
1562         } else if (spmi->InterruptType & 2) {
1563                 /* We've got an APIC/SAPIC interrupt. */
1564                 info->irq = spmi->GlobalSystemInterrupt;
1565                 info->irq_setup = std_irq_setup;
1566         } else {
1567                 /* Use the default interrupt setting. */
1568                 info->irq = 0;
1569                 info->irq_setup = NULL;
1570         }
1571
1572         if (spmi->addr.register_bit_width) {
1573                 /* A (hopefully) properly formed register bit width. */
1574                 info->io.regspacing = spmi->addr.register_bit_width / 8;
1575         } else {
1576                 info->io.regspacing = DEFAULT_REGSPACING;
1577         }
1578         info->io.regsize = info->io.regspacing;
1579         info->io.regshift = spmi->addr.register_bit_offset;
1580
1581         if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1582                 io_type = "memory";
1583                 info->io_setup = mem_setup;
1584                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1585         } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1586                 io_type = "I/O";
1587                 info->io_setup = port_setup;
1588                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1589         } else {
1590                 kfree(info);
1591                 printk("ipmi_si: Unknown ACPI I/O Address type\n");
1592                 return -EIO;
1593         }
1594         info->io.addr_data = spmi->addr.address;
1595
1596         try_smi_init(info);
1597
1598         return 0;
1599 }
1600
1601 static __devinit void acpi_find_bmc(void)
1602 {
1603         acpi_status      status;
1604         struct SPMITable *spmi;
1605         int              i;
1606
1607         if (acpi_disabled)
1608                 return;
1609
1610         if (acpi_failure)
1611                 return;
1612
1613         for (i = 0; ; i++) {
1614                 status = acpi_get_firmware_table("SPMI", i+1,
1615                                                  ACPI_LOGICAL_ADDRESSING,
1616                                                  (struct acpi_table_header **)
1617                                                  &spmi);
1618                 if (status != AE_OK)
1619                         return;
1620
1621                 try_init_acpi(spmi);
1622         }
1623 }
1624 #endif
1625
1626 #ifdef CONFIG_DMI
/* IPMI device information decoded from a DMI (SMBIOS type 38) record
   by decode_dmi(). */
struct dmi_ipmi_data
{
        u8              type;       /* interface type byte from the record */
        u8              addr_space; /* IPMI_IO_ADDR_SPACE or IPMI_MEM_ADDR_SPACE */
        unsigned long   base_addr;
        u8              irq;
        u8              offset;     /* register spacing in bytes */
        u8              slave_addr;
};
1636
/*
 * Decode an SMBIOS type-38 (IPMI Device Information) record into a
 * struct dmi_ipmi_data.  Returns 0 on success, -EIO if the record
 * uses a register spacing we do not handle.
 *
 * NOTE(review): the memcpy below reads sizeof(unsigned long) bytes of
 * the record's 8-byte base-address field, so on a 32-bit kernel only
 * the low 32 bits are seen -- presumably fine for I/O ports, but
 * worth confirming for memory-mapped BMCs above 4GB.
 */
static int __devinit decode_dmi(struct dmi_header *dm,
                                struct dmi_ipmi_data *dmi)
{
        u8              *data = (u8 *)dm;
        unsigned long   base_addr;
        u8              reg_spacing;
        u8              len = dm->length;	/* total record length */

        dmi->type = data[4];	/* byte 4 is the interface type */

        memcpy(&base_addr, data+8, sizeof(unsigned long));
        if (len >= 0x11) {
		/* Newer DMI spec: bit 0 of the base address selects
		   I/O vs. memory space. */
                if (base_addr & 1) {
                        /* I/O */
                        base_addr &= 0xFFFE;
                        dmi->addr_space = IPMI_IO_ADDR_SPACE;
                }
                else {
                        /* Memory */
                        dmi->addr_space = IPMI_MEM_ADDR_SPACE;
                }
                /* If bit 4 of byte 0x10 is set, then the lsb for the address
                   is odd. */
                dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);

                dmi->irq = data[0x11];

                /* The top two bits of byte 0x10 hold the register spacing. */
                reg_spacing = (data[0x10] & 0xC0) >> 6;
                switch(reg_spacing){
                case 0x00: /* Byte boundaries */
                    dmi->offset = 1;
                    break;
                case 0x01: /* 32-bit boundaries */
                    dmi->offset = 4;
                    break;
                case 0x02: /* 16-byte boundaries */
                    dmi->offset = 16;
                    break;
                default:
                    /* Some other interface, just ignore it. */
                    return -EIO;
                }
        } else {
                /* Old DMI spec. */
                /* Note that technically, the lower bit of the base
                 * address should be 1 if the address is I/O and 0 if
                 * the address is in memory.  So many systems get that
                 * wrong (and all that I have seen are I/O) so we just
                 * ignore that bit and assume I/O.  Systems that use
                 * memory should use the newer spec, anyway. */
                dmi->base_addr = base_addr & 0xfffe;
                dmi->addr_space = IPMI_IO_ADDR_SPACE;
                dmi->offset = 1;
        }

        dmi->slave_addr = data[6];	/* byte 6 is the BMC slave address */

        return 0;
}
1697
1698 static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
1699 {
1700         struct smi_info *info;
1701
1702         info = kzalloc(sizeof(*info), GFP_KERNEL);
1703         if (!info) {
1704                 printk(KERN_ERR
1705                        "ipmi_si: Could not allocate SI data\n");
1706                 return;
1707         }
1708
1709         info->addr_source = "SMBIOS";
1710
1711         switch (ipmi_data->type) {
1712         case 0x01: /* KCS */
1713                 info->si_type = SI_KCS;
1714                 break;
1715         case 0x02: /* SMIC */
1716                 info->si_type = SI_SMIC;
1717                 break;
1718         case 0x03: /* BT */
1719                 info->si_type = SI_BT;
1720                 break;
1721         default:
1722                 return;
1723         }
1724
1725         switch (ipmi_data->addr_space) {
1726         case IPMI_MEM_ADDR_SPACE:
1727                 info->io_setup = mem_setup;
1728                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1729                 break;
1730
1731         case IPMI_IO_ADDR_SPACE:
1732                 info->io_setup = port_setup;
1733                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1734                 break;
1735
1736         default:
1737                 kfree(info);
1738                 printk(KERN_WARNING
1739                        "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
1740                        ipmi_data->addr_space);
1741                 return;
1742         }
1743         info->io.addr_data = ipmi_data->base_addr;
1744
1745         info->io.regspacing = ipmi_data->offset;
1746         if (!info->io.regspacing)
1747                 info->io.regspacing = DEFAULT_REGSPACING;
1748         info->io.regsize = DEFAULT_REGSPACING;
1749         info->io.regshift = 0;
1750
1751         info->slave_addr = ipmi_data->slave_addr;
1752
1753         info->irq = ipmi_data->irq;
1754         if (info->irq)
1755                 info->irq_setup = std_irq_setup;
1756
1757         try_smi_init(info);
1758 }
1759
1760 static void __devinit dmi_find_bmc(void)
1761 {
1762         struct dmi_device    *dev = NULL;
1763         struct dmi_ipmi_data data;
1764         int                  rv;
1765
1766         while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
1767                 rv = decode_dmi((struct dmi_header *) dev->device_data, &data);
1768                 if (!rv)
1769                         try_init_dmi(&data);
1770         }
1771 }
1772 #endif /* CONFIG_DMI */
1773
1774 #ifdef CONFIG_PCI
1775
1776 #define PCI_ERMC_CLASSCODE              0x0C0700
1777 #define PCI_ERMC_CLASSCODE_MASK         0xffffff00
1778 #define PCI_ERMC_CLASSCODE_TYPE_MASK    0xff
1779 #define PCI_ERMC_CLASSCODE_TYPE_SMIC    0x00
1780 #define PCI_ERMC_CLASSCODE_TYPE_KCS     0x01
1781 #define PCI_ERMC_CLASSCODE_TYPE_BT      0x02
1782
1783 #define PCI_HP_VENDOR_ID    0x103C
1784 #define PCI_MMC_DEVICE_ID   0x121A
1785 #define PCI_MMC_ADDR_CW     0x10
1786
/* addr_source_cleanup hook for PCI-discovered interfaces: undo the
   pci_enable_device() done in ipmi_pci_probe(). */
static void ipmi_pci_cleanup(struct smi_info *info)
{
	struct pci_dev *pdev = info->addr_source_data;

	pci_disable_device(pdev);
}
1793
1794 static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
1795                                     const struct pci_device_id *ent)
1796 {
1797         int rv;
1798         int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
1799         struct smi_info *info;
1800         int first_reg_offset = 0;
1801
1802         info = kzalloc(sizeof(*info), GFP_KERNEL);
1803         if (!info)
1804                 return ENOMEM;
1805
1806         info->addr_source = "PCI";
1807
1808         switch (class_type) {
1809         case PCI_ERMC_CLASSCODE_TYPE_SMIC:
1810                 info->si_type = SI_SMIC;
1811                 break;
1812
1813         case PCI_ERMC_CLASSCODE_TYPE_KCS:
1814                 info->si_type = SI_KCS;
1815                 break;
1816
1817         case PCI_ERMC_CLASSCODE_TYPE_BT:
1818                 info->si_type = SI_BT;
1819                 break;
1820
1821         default:
1822                 kfree(info);
1823                 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
1824                        pci_name(pdev), class_type);
1825                 return ENOMEM;
1826         }
1827
1828         rv = pci_enable_device(pdev);
1829         if (rv) {
1830                 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
1831                        pci_name(pdev));
1832                 kfree(info);
1833                 return rv;
1834         }
1835
1836         info->addr_source_cleanup = ipmi_pci_cleanup;
1837         info->addr_source_data = pdev;
1838
1839         if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
1840                 first_reg_offset = 1;
1841
1842         if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
1843                 info->io_setup = port_setup;
1844                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1845         } else {
1846                 info->io_setup = mem_setup;
1847                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1848         }
1849         info->io.addr_data = pci_resource_start(pdev, 0);
1850
1851         info->io.regspacing = DEFAULT_REGSPACING;
1852         info->io.regsize = DEFAULT_REGSPACING;
1853         info->io.regshift = 0;
1854
1855         info->irq = pdev->irq;
1856         if (info->irq)
1857                 info->irq_setup = std_irq_setup;
1858
1859         info->dev = &pdev->dev;
1860
1861         return try_smi_init(info);
1862 }
1863
/* PCI remove hook.  Intentionally empty: interface teardown happens
   through the smi_info cleanup paths (addr_source_cleanup disables
   the device).  NOTE(review): nothing here unbinds the interface on
   hot-remove -- presumably handled at module unload; confirm. */
static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
{
}
1867
1868 #ifdef CONFIG_PM
/* No-op suspend handler; always reports success so suspend can
   proceed with the interface left as-is. */
static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	return 0;
}
1873
/* No-op resume handler; no state is restored here. */
static int ipmi_pci_resume(struct pci_dev *pdev)
{
	return 0;
}
1878 #endif
1879
1880 static struct pci_device_id ipmi_pci_devices[] = {
1881         { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
1882         { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE) }
1883 };
1884 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
1885
/* PCI driver glue; suspend/resume are currently no-ops. */
static struct pci_driver ipmi_pci_driver = {
        .name =         DEVICE_NAME,
        .id_table =     ipmi_pci_devices,
        .probe =        ipmi_pci_probe,
        .remove =       __devexit_p(ipmi_pci_remove),
#ifdef CONFIG_PM
        .suspend =      ipmi_pci_suspend,
        .resume =       ipmi_pci_resume,
#endif
};
1896 #endif /* CONFIG_PCI */
1897
1898
1899 static int try_get_dev_id(struct smi_info *smi_info)
1900 {
1901         unsigned char         msg[2];
1902         unsigned char         *resp;
1903         unsigned long         resp_len;
1904         enum si_sm_result     smi_result;
1905         int                   rv = 0;
1906
1907         resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1908         if (!resp)
1909                 return -ENOMEM;
1910
1911         /* Do a Get Device ID command, since it comes back with some
1912            useful info. */
1913         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1914         msg[1] = IPMI_GET_DEVICE_ID_CMD;
1915         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1916
1917         smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
1918         for (;;)
1919         {
1920                 if (smi_result == SI_SM_CALL_WITH_DELAY ||
1921                     smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
1922                         schedule_timeout_uninterruptible(1);
1923                         smi_result = smi_info->handlers->event(
1924                                 smi_info->si_sm, 100);
1925                 }
1926                 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
1927                 {
1928                         smi_result = smi_info->handlers->event(
1929                                 smi_info->si_sm, 0);
1930                 }
1931                 else
1932                         break;
1933         }
1934         if (smi_result == SI_SM_HOSED) {
1935                 /* We couldn't get the state machine to run, so whatever's at
1936                    the port is probably not an IPMI SMI interface. */
1937                 rv = -ENODEV;
1938                 goto out;
1939         }
1940
1941         /* Otherwise, we got some data. */
1942         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1943                                                   resp, IPMI_MAX_MSG_LENGTH);
1944         if (resp_len < 14) {
1945                 /* That's odd, it should be longer. */
1946                 rv = -EINVAL;
1947                 goto out;
1948         }
1949
1950         if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
1951                 /* That's odd, it shouldn't be able to fail. */
1952                 rv = -EINVAL;
1953                 goto out;
1954         }
1955
1956         /* Record info from the get device id, in case we need it. */
1957         ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id);
1958
1959  out:
1960         kfree(resp);
1961         return rv;
1962 }
1963
1964 static int type_file_read_proc(char *page, char **start, off_t off,
1965                                int count, int *eof, void *data)
1966 {
1967         char            *out = (char *) page;
1968         struct smi_info *smi = data;
1969
1970         switch (smi->si_type) {
1971             case SI_KCS:
1972                 return sprintf(out, "kcs\n");
1973             case SI_SMIC:
1974                 return sprintf(out, "smic\n");
1975             case SI_BT:
1976                 return sprintf(out, "bt\n");
1977             default:
1978                 return 0;
1979         }
1980 }
1981
1982 static int stat_file_read_proc(char *page, char **start, off_t off,
1983                                int count, int *eof, void *data)
1984 {
1985         char            *out = (char *) page;
1986         struct smi_info *smi = data;
1987
1988         out += sprintf(out, "interrupts_enabled:    %d\n",
1989                        smi->irq && !smi->interrupt_disabled);
1990         out += sprintf(out, "short_timeouts:        %ld\n",
1991                        smi->short_timeouts);
1992         out += sprintf(out, "long_timeouts:         %ld\n",
1993                        smi->long_timeouts);
1994         out += sprintf(out, "timeout_restarts:      %ld\n",
1995                        smi->timeout_restarts);
1996         out += sprintf(out, "idles:                 %ld\n",
1997                        smi->idles);
1998         out += sprintf(out, "interrupts:            %ld\n",
1999                        smi->interrupts);
2000         out += sprintf(out, "attentions:            %ld\n",
2001                        smi->attentions);
2002         out += sprintf(out, "flag_fetches:          %ld\n",
2003                        smi->flag_fetches);
2004         out += sprintf(out, "hosed_count:           %ld\n",
2005                        smi->hosed_count);
2006         out += sprintf(out, "complete_transactions: %ld\n",
2007                        smi->complete_transactions);
2008         out += sprintf(out, "events:                %ld\n",
2009                        smi->events);
2010         out += sprintf(out, "watchdog_pretimeouts:  %ld\n",
2011                        smi->watchdog_pretimeouts);
2012         out += sprintf(out, "incoming_messages:     %ld\n",
2013                        smi->incoming_messages);
2014
2015         return (out - ((char *) page));
2016 }
2017
2018 /*
2019  * oem_data_avail_to_receive_msg_avail
2020  * @info - smi_info structure with msg_flags set
2021  *
2022  * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2023  * Returns 1 indicating need to re-run handle_flags().
2024  */
2025 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2026 {
2027         smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2028                                 RECEIVE_MSG_AVAIL);
2029         return 1;
2030 }
2031
2032 /*
2033  * setup_dell_poweredge_oem_data_handler
2034  * @info - smi_info.device_id must be populated
2035  *
2036  * Systems that match, but have firmware version < 1.40 may assert
2037  * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2038  * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
2039  * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2040  * as RECEIVE_MSG_AVAIL instead.
2041  *
2042  * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2043  * assert the OEM[012] bits, and if it did, the driver would have to
2044  * change to handle that properly, we don't actually check for the
2045  * firmware version.
2046  * Device ID = 0x20                BMC on PowerEdge 8G servers
2047  * Device Revision = 0x80
2048  * Firmware Revision1 = 0x01       BMC version 1.40
2049  * Firmware Revision2 = 0x40       BCD encoded
2050  * IPMI Version = 0x51             IPMI 1.5
2051  * Manufacturer ID = A2 02 00      Dell IANA
2052  *
2053  * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2054  * OEM0_DATA_AVAIL and needs to be treated as RECEIVE_MSG_AVAIL.
2055  *
2056  */
2057 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
2058 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2059 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2060 #define DELL_IANA_MFR_ID 0x0002a2
2061 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2062 {
2063         struct ipmi_device_id *id = &smi_info->device_id;
2064         if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2065                 if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
2066                     id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2067                     id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2068                         smi_info->oem_data_avail_handler =
2069                                 oem_data_avail_to_receive_msg_avail;
2070                 }
2071                 else if (ipmi_version_major(id) < 1 ||
2072                          (ipmi_version_major(id) == 1 &&
2073                           ipmi_version_minor(id) < 5)) {
2074                         smi_info->oem_data_avail_handler =
2075                                 oem_data_avail_to_receive_msg_avail;
2076                 }
2077         }
2078 }
2079
2080 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
/* Fail the current message locally with completion code 0xCA
   ("cannot return number of requested data bytes"), so userspace
   retries with a different buffer size instead of timing out. */
static void return_hosed_msg_badsize(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg = smi_info->curr_msg;

	/* Make it a response: echo netfn (with the response bit) and cmd. */
	msg->rsp[0] = msg->data[0] | 4;
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
	msg->rsp_size = 3;
	smi_info->curr_msg = NULL;
	deliver_recv_msg(smi_info, msg);
}
2093
2094 /*
2095  * dell_poweredge_bt_xaction_handler
2096  * @info - smi_info.device_id must be populated
2097  *
2098  * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2099  * not respond to a Get SDR command if the length of the data
2100  * requested is exactly 0x3A, which leads to command timeouts and no
2101  * data returned.  This intercepts such commands, and causes userspace
2102  * callers to try again with a different-sized buffer, which succeeds.
2103  */
2104
2105 #define STORAGE_NETFN 0x0A
2106 #define STORAGE_CMD_GET_SDR 0x23
2107 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2108                                              unsigned long unused,
2109                                              void *in)
2110 {
2111         struct smi_info *smi_info = in;
2112         unsigned char *data = smi_info->curr_msg->data;
2113         unsigned int size   = smi_info->curr_msg->data_size;
2114         if (size >= 8 &&
2115             (data[0]>>2) == STORAGE_NETFN &&
2116             data[1] == STORAGE_CMD_GET_SDR &&
2117             data[7] == 0x3A) {
2118                 return_hosed_msg_badsize(smi_info);
2119                 return NOTIFY_STOP;
2120         }
2121         return NOTIFY_DONE;
2122 }
2123
/* Notifier block hooked onto the transaction-start chain for affected
   Dell BT systems; see dell_poweredge_bt_xaction_handler() above. */
static struct notifier_block dell_poweredge_bt_xaction_notifier = {
        .notifier_call  = dell_poweredge_bt_xaction_handler,
};
2127
2128 /*
2129  * setup_dell_poweredge_bt_xaction_handler
2130  * @info - smi_info.device_id must be filled in already
2131  *
2132  * Fills in smi_info.device_id.start_transaction_pre_hook
2133  * when we know what function to use there.
2134  */
2135 static void
2136 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2137 {
2138         struct ipmi_device_id *id = &smi_info->device_id;
2139         if (id->manufacturer_id == DELL_IANA_MFR_ID &&
2140             smi_info->si_type == SI_BT)
2141                 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2142 }
2143
2144 /*
2145  * setup_oem_data_handler
2146  * @info - smi_info.device_id must be filled in already
2147  *
2148  * Fills in smi_info.device_id.oem_data_available_handler
2149  * when we know what function to use there.
2150  */
2151
/* Central hook-up point for per-vendor OEM data-available quirks;
   currently only the Dell PowerEdge workaround exists. */
static void setup_oem_data_handler(struct smi_info *smi_info)
{
	setup_dell_poweredge_oem_data_handler(smi_info);
}
2156
/* Central hook-up point for per-vendor transaction-start quirks;
   currently only the Dell PowerEdge BT workaround exists. */
static void setup_xaction_handlers(struct smi_info *smi_info)
{
	setup_dell_poweredge_bt_xaction_handler(smi_info);
}
2161
2162 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2163 {
2164         if (smi_info->thread != NULL && smi_info->thread != ERR_PTR(-ENOMEM))
2165                 kthread_stop(smi_info->thread);
2166         del_timer_sync(&smi_info->si_timer);
2167 }
2168
/* Legacy "well known" ISA port addresses probed when nothing else
   reports a BMC.  The table is terminated by a zero port. */
static struct ipmi_default_vals
{
	int type;	/* SI_KCS, SI_SMIC, or SI_BT */
	int port;	/* I/O port base; 0 ends the table */
} __devinit ipmi_defaults[] =
{
	{ .type = SI_KCS, .port = 0xca2 },
	{ .type = SI_SMIC, .port = 0xca9 },
	{ .type = SI_BT, .port = 0xe4 },
	{ .port = 0 }
};
2180
2181 static __devinit void default_find_bmc(void)
2182 {
2183         struct smi_info *info;
2184         int             i;
2185
2186         for (i = 0; ; i++) {
2187                 if (!ipmi_defaults[i].port)
2188                         break;
2189
2190                 info = kzalloc(sizeof(*info), GFP_KERNEL);
2191                 if (!info)
2192                         return;
2193
2194                 info->addr_source = NULL;
2195
2196                 info->si_type = ipmi_defaults[i].type;
2197                 info->io_setup = port_setup;
2198                 info->io.addr_data = ipmi_defaults[i].port;
2199                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2200
2201                 info->io.addr = NULL;
2202                 info->io.regspacing = DEFAULT_REGSPACING;
2203                 info->io.regsize = DEFAULT_REGSPACING;
2204                 info->io.regshift = 0;
2205
2206                 if (try_smi_init(info) == 0) {
2207                         /* Found one... */
2208                         printk(KERN_INFO "ipmi_si: Found default %s state"
2209                                " machine at %s address 0x%lx\n",
2210                                si_to_str[info->si_type],
2211                                addr_space_to_str[info->io.addr_type],
2212                                info->io.addr_data);
2213                         return;
2214                 }
2215         }
2216 }
2217
2218 static int is_new_interface(struct smi_info *info)
2219 {
2220         struct smi_info *e;
2221
2222         list_for_each_entry(e, &smi_infos, link) {
2223                 if (e->io.addr_type != info->io.addr_type)
2224                         continue;
2225                 if (e->io.addr_data == info->io.addr_data)
2226                         return 0;
2227         }
2228
2229         return 1;
2230 }
2231
2232 static int try_smi_init(struct smi_info *new_smi)
2233 {
2234         int rv;
2235
2236         if (new_smi->addr_source) {
2237                 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2238                        " machine at %s address 0x%lx, slave address 0x%x,"
2239                        " irq %d\n",
2240                        new_smi->addr_source,
2241                        si_to_str[new_smi->si_type],
2242                        addr_space_to_str[new_smi->io.addr_type],
2243                        new_smi->io.addr_data,
2244                        new_smi->slave_addr, new_smi->irq);
2245         }
2246
2247         down(&smi_infos_lock);
2248         if (!is_new_interface(new_smi)) {
2249                 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2250                 rv = -EBUSY;
2251                 goto out_err;
2252         }
2253
2254         /* So we know not to free it unless we have allocated one. */
2255         new_smi->intf = NULL;
2256         new_smi->si_sm = NULL;
2257         new_smi->handlers = NULL;
2258
2259         switch (new_smi->si_type) {
2260         case SI_KCS:
2261                 new_smi->handlers = &kcs_smi_handlers;
2262                 break;
2263
2264         case SI_SMIC:
2265                 new_smi->handlers = &smic_smi_handlers;
2266                 break;
2267
2268         case SI_BT:
2269                 new_smi->handlers = &bt_smi_handlers;
2270                 break;
2271
2272         default:
2273                 /* No support for anything else yet. */
2274                 rv = -EIO;
2275                 goto out_err;
2276         }
2277
2278         /* Allocate the state machine's data and initialize it. */
2279         new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2280         if (!new_smi->si_sm) {
2281                 printk(" Could not allocate state machine memory\n");
2282                 rv = -ENOMEM;
2283                 goto out_err;
2284         }
2285         new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2286                                                         &new_smi->io);
2287
2288         /* Now that we know the I/O size, we can set up the I/O. */
2289         rv = new_smi->io_setup(new_smi);
2290         if (rv) {
2291                 printk(" Could not set up I/O space\n");
2292                 goto out_err;
2293         }
2294
2295         spin_lock_init(&(new_smi->si_lock));
2296         spin_lock_init(&(new_smi->msg_lock));
2297         spin_lock_init(&(new_smi->count_lock));
2298
2299         /* Do low-level detection first. */
2300         if (new_smi->handlers->detect(new_smi->si_sm)) {
2301                 if (new_smi->addr_source)
2302                         printk(KERN_INFO "ipmi_si: Interface detection"
2303                                " failed\n");
2304                 rv = -ENODEV;
2305                 goto out_err;
2306         }
2307
2308         /* Attempt a get device id command.  If it fails, we probably
2309            don't have a BMC here. */
2310         rv = try_get_dev_id(new_smi);
2311         if (rv) {
2312                 if (new_smi->addr_source)
2313                         printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2314                                " at this location\n");
2315                 goto out_err;
2316         }
2317
2318         setup_oem_data_handler(new_smi);
2319         setup_xaction_handlers(new_smi);
2320
2321         /* Try to claim any interrupts. */
2322         if (new_smi->irq_setup)
2323                 new_smi->irq_setup(new_smi);
2324
2325         INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2326         INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2327         new_smi->curr_msg = NULL;
2328         atomic_set(&new_smi->req_events, 0);
2329         new_smi->run_to_completion = 0;
2330
2331         new_smi->interrupt_disabled = 0;
2332         atomic_set(&new_smi->stop_operation, 0);
2333         new_smi->intf_num = smi_num;
2334         smi_num++;
2335
2336         /* Start clearing the flags before we enable interrupts or the
2337            timer to avoid racing with the timer. */
2338         start_clear_flags(new_smi);
2339         /* IRQ is defined to be set when non-zero. */
2340         if (new_smi->irq)
2341                 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2342
2343         /* The ipmi_register_smi() code does some operations to
2344            determine the channel information, so we must be ready to
2345            handle operations before it is called.  This means we have
2346            to stop the timer if we get an error after this point. */
2347         init_timer(&(new_smi->si_timer));
2348         new_smi->si_timer.data = (long) new_smi;
2349         new_smi->si_timer.function = smi_timeout;
2350         new_smi->last_timeout_jiffies = jiffies;
2351         new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
2352
2353         add_timer(&(new_smi->si_timer));
2354         if (new_smi->si_type != SI_BT)
2355                 new_smi->thread = kthread_run(ipmi_thread, new_smi,
2356                                               "kipmi%d", new_smi->intf_num);
2357
2358         if (!new_smi->dev) {
2359                 /* If we don't already have a device from something
2360                  * else (like PCI), then register a new one. */
2361                 new_smi->pdev = platform_device_alloc("ipmi_si",
2362                                                       new_smi->intf_num);
2363                 if (rv) {
2364                         printk(KERN_ERR
2365                                "ipmi_si_intf:"
2366                                " Unable to allocate platform device\n");
2367                         goto out_err_stop_timer;
2368                 }
2369                 new_smi->dev = &new_smi->pdev->dev;
2370                 new_smi->dev->driver = &ipmi_driver;
2371
2372                 rv = platform_device_register(new_smi->pdev);
2373                 if (rv) {
2374                         printk(KERN_ERR
2375                                "ipmi_si_intf:"
2376                                " Unable to register system interface device:"
2377                                " %d\n",
2378                                rv);
2379                         goto out_err_stop_timer;
2380                 }
2381                 new_smi->dev_registered = 1;
2382         }
2383
2384         rv = ipmi_register_smi(&handlers,
2385                                new_smi,
2386                                &new_smi->device_id,
2387                                new_smi->dev,
2388                                new_smi->slave_addr,
2389                                &(new_smi->intf));
2390         if (rv) {
2391                 printk(KERN_ERR
2392                        "ipmi_si: Unable to register device: error %d\n",
2393                        rv);
2394                 goto out_err_stop_timer;
2395         }
2396
2397         rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2398                                      type_file_read_proc, NULL,
2399                                      new_smi, THIS_MODULE);
2400         if (rv) {
2401                 printk(KERN_ERR
2402                        "ipmi_si: Unable to create proc entry: %d\n",
2403                        rv);
2404                 goto out_err_stop_timer;
2405         }
2406
2407         rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2408                                      stat_file_read_proc, NULL,
2409                                      new_smi, THIS_MODULE);
2410         if (rv) {
2411                 printk(KERN_ERR
2412                        "ipmi_si: Unable to create proc entry: %d\n",
2413                        rv);
2414                 goto out_err_stop_timer;
2415         }
2416
2417         list_add_tail(&new_smi->link, &smi_infos);
2418
2419         up(&smi_infos_lock);
2420
2421         printk(" IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
2422
2423         return 0;
2424
2425  out_err_stop_timer:
2426         atomic_inc(&new_smi->stop_operation);
2427         wait_for_timer_and_thread(new_smi);
2428
2429  out_err:
2430         if (new_smi->intf)
2431                 ipmi_unregister_smi(new_smi->intf);
2432
2433         if (new_smi->irq_cleanup)
2434                 new_smi->irq_cleanup(new_smi);
2435
2436         /* Wait until we know that we are out of any interrupt
2437            handlers might have been running before we freed the
2438            interrupt. */
2439         synchronize_sched();
2440
2441         if (new_smi->si_sm) {
2442                 if (new_smi->handlers)
2443                         new_smi->handlers->cleanup(new_smi->si_sm);
2444                 kfree(new_smi->si_sm);
2445         }
2446         if (new_smi->addr_source_cleanup)
2447                 new_smi->addr_source_cleanup(new_smi);
2448         if (new_smi->io_cleanup)
2449                 new_smi->io_cleanup(new_smi);
2450
2451         if (new_smi->dev_registered)
2452                 platform_device_unregister(new_smi->pdev);
2453
2454         kfree(new_smi);
2455
2456         up(&smi_infos_lock);
2457
2458         return rv;
2459 }
2460
2461 static __devinit int init_ipmi_si(void)
2462 {
2463         int  i;
2464         char *str;
2465         int  rv;
2466
2467         if (initialized)
2468                 return 0;
2469         initialized = 1;
2470
2471         /* Register the device drivers. */
2472         rv = driver_register(&ipmi_driver);
2473         if (rv) {
2474                 printk(KERN_ERR
2475                        "init_ipmi_si: Unable to register driver: %d\n",
2476                        rv);
2477                 return rv;
2478         }
2479
2480
2481         /* Parse out the si_type string into its components. */
2482         str = si_type_str;
2483         if (*str != '\0') {
2484                 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
2485                         si_type[i] = str;
2486                         str = strchr(str, ',');
2487                         if (str) {
2488                                 *str = '\0';
2489                                 str++;
2490                         } else {
2491                                 break;
2492                         }
2493                 }
2494         }
2495
2496         printk(KERN_INFO "IPMI System Interface driver.\n");
2497
2498         hardcode_find_bmc();
2499
2500 #ifdef CONFIG_DMI
2501         dmi_find_bmc();
2502 #endif
2503
2504 #ifdef CONFIG_ACPI
2505         if (si_trydefaults)
2506                 acpi_find_bmc();
2507 #endif
2508
2509 #ifdef CONFIG_PCI
2510         pci_module_init(&ipmi_pci_driver);
2511 #endif
2512
2513         if (si_trydefaults) {
2514                 down(&smi_infos_lock);
2515                 if (list_empty(&smi_infos)) {
2516                         /* No BMC was found, try defaults. */
2517                         up(&smi_infos_lock);
2518                         default_find_bmc();
2519                 } else {
2520                         up(&smi_infos_lock);
2521                 }
2522         }
2523
2524         down(&smi_infos_lock);
2525         if (list_empty(&smi_infos)) {
2526                 up(&smi_infos_lock);
2527 #ifdef CONFIG_PCI
2528                 pci_unregister_driver(&ipmi_pci_driver);
2529 #endif
2530                 printk("ipmi_si: Unable to find any System Interface(s)\n");
2531                 return -ENODEV;
2532         } else {
2533                 up(&smi_infos_lock);
2534                 return 0;
2535         }
2536 }
2537 module_init(init_ipmi_si);
2538
/*
 * Tear down a single system interface: remove it from the global
 * smi_infos list, stop its timer/thread/interrupt machinery, drain
 * any in-flight message, unregister it from the IPMI core, and free
 * all of its resources.
 *
 * Caller must hold smi_infos_lock (the list_del below is unlocked
 * here).  The teardown ordering is deliberate and must not change:
 * stop_operation is raised and the IRQ freed under both locks, then
 * we wait out RCU-sched and the timer/thread before touching state.
 */
static void __devexit cleanup_one_si(struct smi_info *to_clean)
{
	int           rv;
	unsigned long flags;

	if (!to_clean)
		return;

	list_del(&to_clean->link);

	/* Tell the timer and interrupt handlers that we are shutting
	   down. */
	spin_lock_irqsave(&(to_clean->si_lock), flags);
	spin_lock(&(to_clean->msg_lock));

	atomic_inc(&to_clean->stop_operation);

	/* Free the interrupt while both locks are held so no handler
	   can race with the teardown. */
	if (to_clean->irq_cleanup)
		to_clean->irq_cleanup(to_clean);

	spin_unlock(&(to_clean->msg_lock));
	spin_unlock_irqrestore(&(to_clean->si_lock), flags);

	/* Wait until we know that we are out of any interrupt
	   handlers might have been running before we freed the
	   interrupt. */
	synchronize_sched();

	wait_for_timer_and_thread(to_clean);

	/* Interrupts and timeouts are stopped, now make sure the
	   interface is in a clean state. */
	while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
		poll(to_clean);
		schedule_timeout_uninterruptible(1);
	}

	rv = ipmi_unregister_smi(to_clean->intf);
	if (rv) {
		printk(KERN_ERR
		       "ipmi_si: Unable to unregister device: errno=%d\n",
		       rv);
	}

	/* Release the state machine, then the I/O and bus-specific
	   resources, in the reverse order of their setup. */
	to_clean->handlers->cleanup(to_clean->si_sm);

	kfree(to_clean->si_sm);

	if (to_clean->addr_source_cleanup)
		to_clean->addr_source_cleanup(to_clean);
	if (to_clean->io_cleanup)
		to_clean->io_cleanup(to_clean);

	if (to_clean->dev_registered)
		platform_device_unregister(to_clean->pdev);

	kfree(to_clean);
}
2597
2598 static __exit void cleanup_ipmi_si(void)
2599 {
2600         struct smi_info *e, *tmp_e;
2601
2602         if (!initialized)
2603                 return;
2604
2605 #ifdef CONFIG_PCI
2606         pci_unregister_driver(&ipmi_pci_driver);
2607 #endif
2608
2609         down(&smi_infos_lock);
2610         list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
2611                 cleanup_one_si(e);
2612         up(&smi_infos_lock);
2613
2614         driver_unregister(&ipmi_driver);
2615 }
2616 module_exit(cleanup_ipmi_si);
2617
2618 MODULE_LICENSE("GPL");
2619 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
2620 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");