IRQ: Maintain regs pointer globally rather than passing to IRQ handlers
[linux-2.6.git] / kernel / irq / chip.c
1 /*
2  * linux/kernel/irq/chip.c
3  *
4  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5  * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
6  *
7  * This file contains the core interrupt handling code, for irq-chip
8  * based architectures.
9  *
10  * Detailed information is available in Documentation/DocBook/genericirq
11  */
12
13 #include <linux/irq.h>
14 #include <linux/module.h>
15 #include <linux/interrupt.h>
16 #include <linux/kernel_stat.h>
17
18 #include "internals.h"
19
20 /**
21  *      dynamic_irq_init - initialize a dynamically allocated irq
22  *      @irq:   irq number to initialize
23  */
24 void dynamic_irq_init(unsigned int irq)
25 {
26         struct irq_desc *desc;
27         unsigned long flags;
28
29         if (irq >= NR_IRQS) {
30                 printk(KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
31                 WARN_ON(1);
32                 return;
33         }
34
35         /* Ensure we don't have left over values from a previous use of this irq */
36         desc = irq_desc + irq;
37         spin_lock_irqsave(&desc->lock, flags);
38         desc->status = IRQ_DISABLED;
39         desc->chip = &no_irq_chip;
40         desc->handle_irq = handle_bad_irq;
41         desc->depth = 1;
42         desc->handler_data = NULL;
43         desc->chip_data = NULL;
44         desc->action = NULL;
45         desc->irq_count = 0;
46         desc->irqs_unhandled = 0;
47 #ifdef CONFIG_SMP
48         desc->affinity = CPU_MASK_ALL;
49 #endif
50         spin_unlock_irqrestore(&desc->lock, flags);
51 }
52
53 /**
54  *      dynamic_irq_cleanup - cleanup a dynamically allocated irq
55  *      @irq:   irq number to initialize
56  */
57 void dynamic_irq_cleanup(unsigned int irq)
58 {
59         struct irq_desc *desc;
60         unsigned long flags;
61
62         if (irq >= NR_IRQS) {
63                 printk(KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
64                 WARN_ON(1);
65                 return;
66         }
67
68         desc = irq_desc + irq;
69         spin_lock_irqsave(&desc->lock, flags);
70         if (desc->action) {
71                 spin_unlock_irqrestore(&desc->lock, flags);
72                 printk(KERN_ERR "Destroying IRQ%d without calling free_irq\n",
73                         irq);
74                 WARN_ON(1);
75                 return;
76         }
77         desc->handle_irq = handle_bad_irq;
78         desc->chip = &no_irq_chip;
79         spin_unlock_irqrestore(&desc->lock, flags);
80 }
81
82
83 /**
84  *      set_irq_chip - set the irq chip for an irq
85  *      @irq:   irq number
86  *      @chip:  pointer to irq chip description structure
87  */
88 int set_irq_chip(unsigned int irq, struct irq_chip *chip)
89 {
90         struct irq_desc *desc;
91         unsigned long flags;
92
93         if (irq >= NR_IRQS) {
94                 printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
95                 WARN_ON(1);
96                 return -EINVAL;
97         }
98
99         if (!chip)
100                 chip = &no_irq_chip;
101
102         desc = irq_desc + irq;
103         spin_lock_irqsave(&desc->lock, flags);
104         irq_chip_set_defaults(chip);
105         desc->chip = chip;
106         spin_unlock_irqrestore(&desc->lock, flags);
107
108         return 0;
109 }
110 EXPORT_SYMBOL(set_irq_chip);
111
112 /**
113  *      set_irq_type - set the irq type for an irq
114  *      @irq:   irq number
115  *      @type:  interrupt type - see include/linux/interrupt.h
116  */
117 int set_irq_type(unsigned int irq, unsigned int type)
118 {
119         struct irq_desc *desc;
120         unsigned long flags;
121         int ret = -ENXIO;
122
123         if (irq >= NR_IRQS) {
124                 printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
125                 return -ENODEV;
126         }
127
128         desc = irq_desc + irq;
129         if (desc->chip->set_type) {
130                 spin_lock_irqsave(&desc->lock, flags);
131                 ret = desc->chip->set_type(irq, type);
132                 spin_unlock_irqrestore(&desc->lock, flags);
133         }
134         return ret;
135 }
136 EXPORT_SYMBOL(set_irq_type);
137
138 /**
139  *      set_irq_data - set irq type data for an irq
140  *      @irq:   Interrupt number
141  *      @data:  Pointer to interrupt specific data
142  *
143  *      Set the hardware irq controller data for an irq
144  */
145 int set_irq_data(unsigned int irq, void *data)
146 {
147         struct irq_desc *desc;
148         unsigned long flags;
149
150         if (irq >= NR_IRQS) {
151                 printk(KERN_ERR
152                        "Trying to install controller data for IRQ%d\n", irq);
153                 return -EINVAL;
154         }
155
156         desc = irq_desc + irq;
157         spin_lock_irqsave(&desc->lock, flags);
158         desc->handler_data = data;
159         spin_unlock_irqrestore(&desc->lock, flags);
160         return 0;
161 }
162 EXPORT_SYMBOL(set_irq_data);
163
164 /**
165  *      set_irq_chip_data - set irq chip data for an irq
166  *      @irq:   Interrupt number
167  *      @data:  Pointer to chip specific data
168  *
169  *      Set the hardware irq chip data for an irq
170  */
171 int set_irq_chip_data(unsigned int irq, void *data)
172 {
173         struct irq_desc *desc = irq_desc + irq;
174         unsigned long flags;
175
176         if (irq >= NR_IRQS || !desc->chip) {
177                 printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
178                 return -EINVAL;
179         }
180
181         spin_lock_irqsave(&desc->lock, flags);
182         desc->chip_data = data;
183         spin_unlock_irqrestore(&desc->lock, flags);
184
185         return 0;
186 }
187 EXPORT_SYMBOL(set_irq_chip_data);
188
189 /*
190  * default enable function
191  */
192 static void default_enable(unsigned int irq)
193 {
194         struct irq_desc *desc = irq_desc + irq;
195
196         desc->chip->unmask(irq);
197         desc->status &= ~IRQ_MASKED;
198 }
199
200 /*
201  * default disable function
202  */
203 static void default_disable(unsigned int irq)
204 {
205         struct irq_desc *desc = irq_desc + irq;
206
207         if (!(desc->status & IRQ_DELAYED_DISABLE))
208                 desc->chip->mask(irq);
209 }
210
211 /*
212  * default startup function
213  */
214 static unsigned int default_startup(unsigned int irq)
215 {
216         irq_desc[irq].chip->enable(irq);
217
218         return 0;
219 }
220
/*
 * Fixup enable/disable function pointers
 *
 * Fills in default implementations for any flow-control methods the
 * chip did not provide, so callers can invoke them unconditionally.
 */
void irq_chip_set_defaults(struct irq_chip *chip)
{
	if (!chip->enable)
		chip->enable = default_enable;
	if (!chip->disable)
		chip->disable = default_disable;
	if (!chip->startup)
		chip->startup = default_startup;
	/*
	 * Must come after the ->disable fixup above, so a chip without
	 * ->shutdown inherits the (possibly defaulted) disable method.
	 */
	if (!chip->shutdown)
		chip->shutdown = chip->disable;
	/* Compatibility: fall back to the legacy typename field */
	if (!chip->name)
		chip->name = chip->typename;
}
237
238 static inline void mask_ack_irq(struct irq_desc *desc, int irq)
239 {
240         if (desc->chip->mask_ack)
241                 desc->chip->mask_ack(irq);
242         else {
243                 desc->chip->mask(irq);
244                 desc->chip->ack(irq);
245         }
246 }
247
/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void fastcall
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irqaction *action;
	irqreturn_t action_ret;
	const unsigned int cpu = smp_processor_id();

	spin_lock(&desc->lock);

	/* Already being handled (e.g. on another CPU): nothing to do */
	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out_unlock;
	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	kstat_cpu(cpu).irqs[irq]++;

	/* No handler installed or the irq is disabled: drop it */
	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
		goto out_unlock;

	/* Mark in progress, then drop the lock while the handlers run */
	desc->status |= IRQ_INPROGRESS;
	spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
out_unlock:
	spin_unlock(&desc->lock);
}
290
/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
void fastcall
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	unsigned int cpu = smp_processor_id();
	struct irqaction *action;
	irqreturn_t action_ret;

	spin_lock(&desc->lock);
	/* Mask + ack up front: the line stays active until serviced */
	mask_ack_irq(desc, irq);

	/* Already being handled elsewhere: leave it masked and go out */
	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out_unlock;
	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	kstat_cpu(cpu).irqs[irq]++;

	/*
	 * If its disabled or no action available
	 * keep it masked and get out of here
	 */
	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
		desc->status |= IRQ_PENDING;
		goto out_unlock;
	}

	/* Mark in progress and drop the lock while the handlers run */
	desc->status |= IRQ_INPROGRESS;
	desc->status &= ~IRQ_PENDING;
	spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
	/* Unmask again, unless the irq was disabled while we ran */
	if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
		desc->chip->unmask(irq);
out_unlock:
	spin_unlock(&desc->lock);
}
341
/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void fastcall
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	unsigned int cpu = smp_processor_id();
	struct irqaction *action;
	irqreturn_t action_ret;

	spin_lock(&desc->lock);

	/* Already being handled elsewhere: still send the eoi below */
	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out;

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	kstat_cpu(cpu).irqs[irq]++;

	/*
	 * If its disabled or no action available
	 * keep it masked and get out of here
	 */
	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
		desc->status |= IRQ_PENDING;
		goto out;
	}

	/* Mark in progress and drop the lock while the handlers run */
	desc->status |= IRQ_INPROGRESS;
	desc->status &= ~IRQ_PENDING;
	spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
out:
	/* The eoi is issued on every path, handled or not */
	desc->chip->eoi(irq);

	spin_unlock(&desc->lock);
}
392
/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupt occurs on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires to reenable the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void fastcall
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	const unsigned int cpu = smp_processor_id();

	spin_lock(&desc->lock);

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
		    !desc->action)) {
		desc->status |= (IRQ_PENDING | IRQ_MASKED);
		mask_ack_irq(desc, irq);
		goto out_unlock;
	}

	kstat_cpu(cpu).irqs[irq]++;

	/* Start handling the irq */
	desc->chip->ack(irq);

	/* Mark the IRQ currently in progress.*/
	desc->status |= IRQ_INPROGRESS;

	do {
		struct irqaction *action = desc->action;
		irqreturn_t action_ret;

		/* Action removed while we serviced a previous edge */
		if (unlikely(!action)) {
			desc->chip->mask(irq);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Renable it, if it was not disabled in meantime.
		 */
		if (unlikely((desc->status &
			       (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
			      (IRQ_PENDING | IRQ_MASKED))) {
			desc->chip->unmask(irq);
			desc->status &= ~IRQ_MASKED;
		}

		desc->status &= ~IRQ_PENDING;
		/* Run the action handlers with the lock dropped */
		spin_unlock(&desc->lock);
		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
		spin_lock(&desc->lock);

		/* Loop again if a new edge set IRQ_PENDING meanwhile */
	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

	desc->status &= ~IRQ_INPROGRESS;
out_unlock:
	spin_unlock(&desc->lock);
}
472
#ifdef CONFIG_SMP
/**
 *	handle_percpu_IRQ - Per CPU local irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements.
 *	The irq is acked (when the chip has an ack), the actions run, and
 *	an eoi is sent (when the chip has an eoi) - no desc->lock is taken.
 */
void fastcall
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	irqreturn_t ret;

	kstat_this_cpu.irqs[irq]++;

	if (desc->chip->ack)
		desc->chip->ack(irq);

	ret = handle_IRQ_event(irq, desc->action);
	if (!noirqdebug)
		note_interrupt(irq, desc, ret);

	if (desc->chip->eoi)
		desc->chip->eoi(irq);
}

#endif /* CONFIG_SMP */
500
/**
 *	__set_irq_handler - set the flow handler for an irq
 *	@irq:		irq number
 *	@handle:	flow handler (NULL, or handle_bad_irq, uninstalls)
 *	@is_chained:	the handler is chained off a demux interrupt
 */
void
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR
		       "Trying to install type control for IRQ%d\n", irq);
		return;
	}

	desc = irq_desc + irq;

	/* A NULL handler means "uninstall" */
	if (!handle)
		handle = handle_bad_irq;

	if (desc->chip == &no_irq_chip) {
		printk(KERN_WARNING "Trying to install %sinterrupt handler "
		       "for IRQ%d\n", is_chained ? "chained " : " ", irq);
		/*
		 * Some ARM implementations install a handler for really dumb
		 * interrupt hardware without setting an irq_chip. This worked
		 * with the ARM no_irq_chip but the check in setup_irq would
		 * prevent us to setup the interrupt at all. Switch it to
		 * dummy_irq_chip for easy transition.
		 */
		/*
		 * NOTE(review): the chip swap happens before desc->lock is
		 * taken below - confirm no concurrent caller can race here.
		 */
		desc->chip = &dummy_irq_chip;
	}

	spin_lock_irqsave(&desc->lock, flags);

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		/* Silence the line so a dangling irq cannot fire */
		if (desc->chip != &no_irq_chip) {
			desc->chip->mask(irq);
			desc->chip->ack(irq);
		}
		desc->status |= IRQ_DISABLED;
		desc->depth = 1;
	}
	desc->handle_irq = handle;

	if (handle != handle_bad_irq && is_chained) {
		/*
		 * Chained handlers are never requested or probed by
		 * drivers; enable the line immediately.
		 */
		desc->status &= ~IRQ_DISABLED;
		desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
		desc->depth = 0;
		desc->chip->unmask(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
552
/**
 *	set_irq_chip_and_handler - set the chip and flow handler for an irq
 *	@irq:		irq number
 *	@chip:		pointer to irq chip description structure
 *	@handle:	flow handler to install (installed non-chained)
 */
void
set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
			 irq_flow_handler_t handle)
{
	set_irq_chip(irq, chip);
	__set_irq_handler(irq, handle, 0);
}
560
561 /*
562  * Get a descriptive string for the highlevel handler, for
563  * /proc/interrupts output:
564  */
565 const char *
566 handle_irq_name(irq_flow_handler_t handle)
567 {
568         if (handle == handle_level_irq)
569                 return "level  ";
570         if (handle == handle_fasteoi_irq)
571                 return "fasteoi";
572         if (handle == handle_edge_irq)
573                 return "edge   ";
574         if (handle == handle_simple_irq)
575                 return "simple ";
576 #ifdef CONFIG_SMP
577         if (handle == handle_percpu_irq)
578                 return "percpu ";
579 #endif
580         if (handle == handle_bad_irq)
581                 return "bad    ";
582
583         return NULL;
584 }