/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>
#include <linux/bootmem.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:	the interrupt number
 * @desc:	description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
	print_irq_desc(irq, desc);
	kstat_incr_irqs_this_cpu(irq, desc);
	ack_bad_irq(irq);
}

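/*
 * Note: handle_bad_irq() is also what a descriptor's ->handle_irq
 * points at before a real flow handler is installed (see the
 * irq_desc_init template and the static irq_desc[] array below), so a
 * stray vector on an unclaimed line ends up here.
 */
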
/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

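/*
 * Illustrative sketch (not part of this file): because of the layering
 * described above, a driver registers a handler without knowing which
 * controller sits behind its line.  The names my_handler, my_dev,
 * my_dev_irq_pending and "mydrv" below are all hypothetical:
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		if (!my_dev_irq_pending(dev))
 *			return IRQ_NONE;
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(irq, my_handler, IRQF_SHARED, "mydrv", dev);
 */
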
#ifdef CONFIG_SPARSE_IRQ
static struct irq_desc irq_desc_init = {
	.irq	    = -1,
	.status	    = IRQ_DISABLED,
	.chip	    = &no_irq_chip,
	.handle_irq = handle_bad_irq,
	.depth	    = 1,
	.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};

void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
{
	unsigned long bytes;
	char *ptr;
	int node;

	/* Compute how many bytes we need per irq and allocate them */
	bytes = nr * sizeof(unsigned int);

	node = cpu_to_node(cpu);
	ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
	printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n", cpu, node);

	if (ptr)
		desc->kstat_irqs = (unsigned int *)ptr;
}

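/*
 * Layout sketch: desc->kstat_irqs ends up as an array of nr counters
 * (nr_cpu_ids in practice), one slot per possible CPU.  Assuming the
 * usual definition of kstat_incr_irqs_this_cpu(), each interrupt on
 * this line effectively does
 *
 *	desc->kstat_irqs[smp_processor_id()]++;
 *
 * and kstat_irqs_cpu() at the bottom of this file reads a slot back.
 */
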
static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
{
	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

	spin_lock_init(&desc->lock);
	desc->irq = irq;
#ifdef CONFIG_SMP
	desc->cpu = cpu;
#endif
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_kstat_irqs(desc, cpu, nr_cpu_ids);
	if (!desc->kstat_irqs) {
		printk(KERN_ERR "can not alloc kstat_irqs\n");
		BUG_ON(1);
	}
	if (!init_alloc_desc_masks(desc, cpu, false)) {
		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
		BUG_ON(1);
	}
	arch_init_chip_data(desc, cpu);
}

/*
 * Protect the sparse_irqs:
 */
DEFINE_SPINLOCK(sparse_irq_lock);

struct irq_desc **irq_desc_ptrs __read_mostly;

static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS_LEGACY-1] = {
		.irq	    = -1,
		.status	    = IRQ_DISABLED,
		.chip	    = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth	    = 1,
		.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
	}
};

static unsigned int *kstat_irqs_legacy;

int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int legacy_count;
	int i;

	/* initialize nr_irqs based on nr_cpu_ids */
	nr_irqs = max_nr_irqs(nr_cpu_ids);

	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

	desc = irq_desc_legacy;
	legacy_count = ARRAY_SIZE(irq_desc_legacy);

	/* allocate irq_desc_ptrs array based on nr_irqs */
	irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));

	/* allocate based on nr_cpu_ids */
	/* FIXME: invert kstat_irqs, and it'd be a per_cpu_alloc'd thing */
	kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
					  sizeof(int));

	for (i = 0; i < legacy_count; i++) {
		desc[i].irq = i;
		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		init_alloc_desc_masks(&desc[i], 0, true);
		irq_desc_ptrs[i] = desc + i;
	}

	for (i = legacy_count; i < nr_irqs; i++)
		irq_desc_ptrs[i] = NULL;

	return arch_early_irq_init();
}

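/*
 * Resulting layout after early_irq_init(), as a sketch:
 *
 *	irq_desc_ptrs[0 .. NR_IRQS_LEGACY-1]	   -> irq_desc_legacy[]
 *	irq_desc_ptrs[NR_IRQS_LEGACY .. nr_irqs-1] -> NULL, allocated on
 *						      demand by
 *						      irq_to_desc_alloc_cpu()
 */
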
struct irq_desc *irq_to_desc(unsigned int irq)
{
	if (irq_desc_ptrs && irq < nr_irqs)
		return irq_desc_ptrs[irq];

	return NULL;
}

struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc;
	unsigned long flags;
	int node;

	if (irq >= nr_irqs) {
		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
			irq, nr_irqs);
		return NULL;
	}

	desc = irq_desc_ptrs[irq];
	if (desc)
		return desc;

	spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it to avoid races with another CPU */
	desc = irq_desc_ptrs[irq];
	if (desc)
		goto out_unlock;

	node = cpu_to_node(cpu);
	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
	printk(KERN_DEBUG "  alloc irq_desc for %d on cpu %d node %d\n",
		irq, cpu, node);
	if (!desc) {
		printk(KERN_ERR "can not alloc irq_desc\n");
		BUG_ON(1);
	}
	init_one_irq_desc(irq, desc, cpu);

	irq_desc_ptrs[irq] = desc;

out_unlock:
	spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}

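/*
 * Note the double check of irq_desc_ptrs[irq] above: once locklessly
 * for the common fast path, and again under sparse_irq_lock so that
 * two CPUs racing to allocate the same descriptor cannot both install
 * one.  A typical caller (a sketch, names hypothetical) only has to
 * handle the out-of-range case, since allocation failure BUGs:
 *
 *	desc = irq_to_desc_alloc_cpu(irq, cpu);
 *	if (!desc)
 *		return -EINVAL;		(irq >= nr_irqs)
 */
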
#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status	    = IRQ_DISABLED,
		.chip	    = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth	    = 1,
		.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int count;
	int i;

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq = i;
		init_alloc_desc_masks(&desc[i], 0, true);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
	return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this for itself.
 */
static void ack_bad(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	print_irq_desc(irq, desc);
	ack_bad_irq(irq);
}

/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
	return 0;
}

/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
	.name		= "none",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= ack_bad,
	.end		= noop,
};

/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
	.name		= "dummy",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= noop,
	.mask		= noop,
	.unmask		= noop,
	.end		= noop,
};

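/*
 * Usage sketch (assuming chip-setup code along these lines): a
 * demultiplexed or otherwise "dumb" source that needs no hardware
 * ack/mask can be wired up as
 *
 *	set_irq_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
 *
 * so the flow handler runs with every chip callback a harmless noop.
 */
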
/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}

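/*
 * no_action() suits lines that must be claimed but never do real work,
 * e.g. a cascade interrupt.  A sketch in the style of arch setup code
 * (the action and irq number below are hypothetical):
 *
 *	static struct irqaction cascade_action = {
 *		.handler = no_action,
 *		.name	 = "cascade",
 *	};
 *
 *	setup_irq(CASCADE_IRQ, &cascade_action);
 */
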
/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

	if (!(action->flags & IRQF_DISABLED))
		local_irq_enable_in_hardirq();

	do {
		ret = action->handler(irq, action->dev_id);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}

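/*
 * Worked example of the aggregation above, with two shared handlers on
 * one line: if handler A returns IRQ_NONE and handler B returns
 * IRQ_HANDLED, retval becomes IRQ_HANDLED and only B's flags are merged
 * into status, so IRQF_SAMPLE_RANDOM entropy is credited only when the
 * claiming handler asked for it.  If every handler returns IRQ_NONE,
 * the caller's note_interrupt() counts the event as unhandled and can
 * eventually shut down a screaming line.
 */
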
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
/**
 * __do_IRQ - original all-in-one high-level IRQ handler
 * @irq:	the interrupt number
 *
 * __do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned int status;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (CHECK_IRQ_PER_CPU(desc->status)) {
		irqreturn_t action_ret;

		/*
		 * No locking required for CPU-local interrupts:
		 */
		if (desc->chip->ack) {
			desc->chip->ack(irq);
			/* get new one */
			desc = irq_remap_to_desc(irq, desc);
		}
		if (likely(!(desc->status & IRQ_DISABLED))) {
			action_ret = handle_IRQ_event(irq, desc->action);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
		}
		desc->chip->end(irq);
		return 1;
	}

	spin_lock(&desc->lock);
	if (desc->chip->ack) {
		desc->chip->ack(irq);
		desc = irq_remap_to_desc(irq, desc);
	}
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);

		spin_lock(&desc->lock);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
	spin_unlock(&desc->lock);

	return 1;
}
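
/*
 * Worked example of the IRQ_PENDING replay above (two CPUs, one edge
 * triggered line): CPU0 is inside the handler loop with IRQ_INPROGRESS
 * set when a second edge arrives and enters __do_IRQ on CPU1.  CPU1
 * only sets IRQ_PENDING and leaves via "out:".  When CPU0 finishes a
 * pass and retakes desc->lock, it sees IRQ_PENDING, clears it and runs
 * the handlers once more, so the second edge is not lost.
 */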
#endif

void early_init_irq_lock_class(void)
{
	struct irq_desc *desc;
	int i;

	for_each_irq_desc(i, desc) {
		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	}
}

#ifdef CONFIG_SPARSE_IRQ
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc ? desc->kstat_irqs[cpu] : 0;
}
#endif
EXPORT_SYMBOL(kstat_irqs_cpu);

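/*
 * A sketch of summing one line's count over all CPUs with the accessor
 * above (roughly what /proc/interrupts style reporting does):
 *
 *	unsigned int sum = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		sum += kstat_irqs_cpu(irq, cpu);
 */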