genirq: Add irq disabled flag to irq_data state
kernel/irq/irqdesc.c
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
        alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
        cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
        if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
                return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
        if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
                free_cpumask_var(desc->irq_data.affinity);
                return -ENOMEM;
        }
#endif
        return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
        desc->irq_data.node = node;
        cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
        return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
        int cpu;

        desc->irq_data.irq = irq;
        desc->irq_data.chip = &no_irq_chip;
        desc->irq_data.chip_data = NULL;
        desc->irq_data.handler_data = NULL;
        desc->irq_data.msi_desc = NULL;
        irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
        irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
        desc->istate = IRQS_DISABLED;
        desc->handle_irq = handle_bad_irq;
        desc->depth = 1;
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
        desc->name = NULL;
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
        desc_smp_init(desc, node);
}
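
/*
 * Note: the irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED) call above is
 * what this commit adds: the disabled state now lives in irq_data,
 * alongside the older IRQS_DISABLED bit in desc->istate.  A minimal
 * sketch of how chip or core code might test the new flag, assuming
 * the irqd_irq_disabled() accessor from <linux/irq.h>; demo_unmask()
 * is hypothetical and not part of this file:
 *
 *	static void demo_unmask(struct irq_data *data)
 *	{
 *		if (irqd_irq_disabled(data))
 *			return;
 *		...
 *	}
 */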

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
        radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return radix_tree_lookup(&irq_desc_tree, irq);
}

static void delete_irq_desc(unsigned int irq)
{
        radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
        free_cpumask_var(desc->pending_mask);
#endif
        free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

static struct irq_desc *alloc_desc(int irq, int node)
{
        struct irq_desc *desc;
        gfp_t gfp = GFP_KERNEL;

        desc = kzalloc_node(sizeof(*desc), gfp, node);
        if (!desc)
                return NULL;
        /* allocate based on nr_cpu_ids */
        desc->kstat_irqs = alloc_percpu(unsigned int);
        if (!desc->kstat_irqs)
                goto err_desc;

        if (alloc_masks(desc, gfp, node))
                goto err_kstat;

        raw_spin_lock_init(&desc->lock);
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);

        desc_set_defaults(irq, desc, node);

        return desc;

err_kstat:
        free_percpu(desc->kstat_irqs);
err_desc:
        kfree(desc);
        return NULL;
}

static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        unregister_irq_proc(irq, desc);

        mutex_lock(&sparse_irq_lock);
        delete_irq_desc(irq);
        mutex_unlock(&sparse_irq_lock);

        free_masks(desc);
        free_percpu(desc->kstat_irqs);
        kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
        struct irq_desc *desc;
        int i;

        for (i = 0; i < cnt; i++) {
                desc = alloc_desc(start + i, node);
                if (!desc)
                        goto err;
                mutex_lock(&sparse_irq_lock);
                irq_insert_desc(start + i, desc);
                mutex_unlock(&sparse_irq_lock);
        }
        return start;

err:
        for (i--; i >= 0; i--)
                free_desc(start + i);

        mutex_lock(&sparse_irq_lock);
        bitmap_clear(allocated_irqs, start, cnt);
        mutex_unlock(&sparse_irq_lock);
        return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
        if (nr > IRQ_BITMAP_BITS)
                return -ENOMEM;
        nr_irqs = nr;
        return 0;
}

int __init early_irq_init(void)
{
        int i, initcnt, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        /* Let arch update nr_irqs and return the nr of preallocated irqs */
        initcnt = arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

        if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
                nr_irqs = IRQ_BITMAP_BITS;

        if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
                initcnt = IRQ_BITMAP_BITS;

        if (initcnt > nr_irqs)
                nr_irqs = initcnt;

        for (i = 0; i < initcnt; i++) {
                desc = alloc_desc(i, node);
                set_bit(i, allocated_irqs);
                irq_insert_desc(i, desc);
        }
        return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .istate         = IRQS_DISABLED,
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
        }
};

int __init early_irq_init(void)
{
        int count, i, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].irq_data.irq = i;
                desc[i].irq_data.chip = &no_irq_chip;
                desc[i].kstat_irqs = alloc_percpu(unsigned int);
                irq_settings_clr_and_set(desc + i, ~0, _IRQ_DEFAULT_INIT_FLAGS);
                alloc_masks(desc + i, GFP_KERNEL, node);
                desc_smp_init(desc + i, node);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
        }
        return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

static void free_desc(unsigned int irq)
{
        dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
        return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
        return -ENOMEM;
}

#endif /* !CONFIG_SPARSE_IRQ */

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
        int i;

        if (from >= nr_irqs || (from + cnt) > nr_irqs)
                return;

        for (i = 0; i < cnt; i++)
                free_desc(from + i);

        mutex_lock(&sparse_irq_lock);
        bitmap_clear(allocated_irqs, from, cnt);
        mutex_unlock(&sparse_irq_lock);
}

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
        int start, ret;

        if (!cnt)
                return -EINVAL;

        mutex_lock(&sparse_irq_lock);

        start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
                                           from, cnt, 0);
        ret = -EEXIST;
        if (irq >= 0 && start != irq)
                goto err;

        if (start + cnt > nr_irqs) {
                ret = irq_expand_nr_irqs(start + cnt);
                if (ret)
                        goto err;
        }

        bitmap_set(allocated_irqs, start, cnt);
        mutex_unlock(&sparse_irq_lock);
        return alloc_descs(start, cnt, node);

err:
        mutex_unlock(&sparse_irq_lock);
        return ret;
}
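
/*
 * Example usage (a hypothetical driver sketch, not part of this file):
 * allocate four consecutive descriptors at any free position at or
 * above irq 64, then release them again:
 *
 *	int base = irq_alloc_descs(-1, 64, 4, numa_node_id());
 *
 *	if (base < 0)
 *		return base;
 *	...
 *	irq_free_descs(base, 4);
 *
 * Passing irq >= 0 instead requests that exact number; the call fails
 * with -EEXIST when the bitmap search cannot place the range there.
 */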

/**
 * irq_reserve_irqs - mark irqs allocated
 * @from:	mark from irq number
 * @cnt:	number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
        unsigned int start;
        int ret = 0;

        if (!cnt || (from + cnt) > nr_irqs)
                return -EINVAL;

        mutex_lock(&sparse_irq_lock);
        start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
        if (start == from)
                bitmap_set(allocated_irqs, start, cnt);
        else
                ret = -EEXIST;
        mutex_unlock(&sparse_irq_lock);
        return ret;
}
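
/*
 * Example usage (a sketch of what architecture setup code can do to
 * keep preassigned legacy interrupts out of dynamic allocation):
 *
 *	if (irq_reserve_irqs(0, 16))
 *		pr_err("legacy irqs 0-15 already marked allocated\n");
 */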

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
        return find_next_bit(allocated_irqs, nr_irqs, offset);
}
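
/*
 * The usual iteration pattern over all allocated interrupts (a sketch;
 * demo_visit() is hypothetical):
 *
 *	unsigned int irq;
 *
 *	for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *	     irq = irq_get_next_irq(irq + 1))
 *		demo_visit(irq);
 */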

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc) {
                if (bus)
                        chip_bus_lock(desc);
                raw_spin_lock_irqsave(&desc->lock, *flags);
        }
        return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        if (bus)
                chip_bus_sync_unlock(desc);
}
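
/*
 * These two helpers must be used as a strict pair.  A sketch of the
 * typical caller in the irq core (demo_set_something() and its body
 * are hypothetical):
 *
 *	static int demo_set_something(unsigned int irq)
 *	{
 *		unsigned long flags;
 *		struct irq_desc *desc = __irq_get_desc_lock(irq, &flags, true);
 *
 *		if (!desc)
 *			return -EINVAL;
 *		...
 *		__irq_put_desc_unlock(desc, flags, true);
 *		return 0;
 *	}
 *
 * Passing bus == true additionally takes the chip bus lock around the
 * raw spinlock, which matters for irq chips sitting behind slow buses.
 */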

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:	irq number to clean up
 */
void dynamic_irq_cleanup(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc_set_defaults(irq, desc, desc_node(desc));
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc && desc->kstat_irqs ?
                        *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

#ifdef CONFIG_GENERIC_HARDIRQS
unsigned int kstat_irqs(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int cpu;
        int sum = 0;

        if (!desc || !desc->kstat_irqs)
                return 0;
        for_each_possible_cpu(cpu)
                sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
        return sum;
}
#endif /* CONFIG_GENERIC_HARDIRQS */
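
/*
 * Example consumer (a sketch; demo_total() is hypothetical): summing
 * the per-cpu counters by hand over the online cpus, much as
 * kstat_irqs() above does over all possible cpus:
 *
 *	static unsigned int demo_total(unsigned int irq)
 *	{
 *		unsigned int sum = 0;
 *		int cpu;
 *
 *		for_each_online_cpu(cpu)
 *			sum += kstat_irqs_cpu(irq, cpu);
 *		return sum;
 *	}
 */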