/*
 *  Derived from arch/i386/kernel/irq.c
 *    Copyright (C) 1992 Linus Torvalds
 *  Adapted from arch/i386 by Gary Thomas
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *    Copyright (C) 1996-2001 Cort Dougan
 *  Adapted for Power Macintosh by Paul Mackerras
 *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */
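
/*
 * Illustrative sketch (not part of this file, register names hypothetical):
 * under the SIU convention above, IRQ0 lives in the most-significant bit of
 * the 32-bit mask and a set bit means "enabled", so a cached mask has to be
 * maintained MSB-first before being written out to SMASK:
 *
 *	cached_irq_mask &= ~(0x80000000 >> irq);	// mask (disable) irq
 *	out_be32(&siu->sc_simask, cached_irq_mask);
 *
 *	cached_irq_mask |= 0x80000000 >> irq;		// unmask (enable) irq
 *	out_be32(&siu->sc_simask, cached_irq_mask);
 */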

#undef DEBUG

#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

#ifndef CONFIG_SPARSE_IRQ
EXPORT_SYMBOL(irq_desc);
#endif

int distribute_irqs = 1;

static inline notrace unsigned long get_hard_enabled(void)
{
        unsigned long enabled;

        __asm__ __volatile__("lbz %0,%1(13)"
        : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));

        return enabled;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
        __asm__ __volatile__("stb %0,%1(13)"
        : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

notrace void raw_local_irq_restore(unsigned long en)
{
        /*
         * get_paca()->soft_enabled = en;
         * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
         * That was allowed before, and in such a case we do need to take care
         * that gcc will set soft_enabled directly via r13, not choose to use
         * an intermediate register, lest we're preempted to a different cpu.
         */
        set_soft_enabled(en);
        if (!en)
                return;

#ifdef CONFIG_PPC_STD_MMU_64
        if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                /*
                 * Do we need to disable preemption here?  Not really: in the
                 * unlikely event that we're preempted to a different cpu in
                 * between getting r13, loading its lppaca_ptr, and loading
                 * its any_int, we might call iseries_handle_interrupts without
                 * an interrupt pending on the new cpu, but that's no disaster,
                 * is it?  And the business of preempting us off the old cpu
                 * would itself involve a local_irq_restore which handles the
                 * interrupt to that cpu.
                 *
                 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
                 * to avoid any preemption checking added into get_paca().
                 */
                if (local_paca->lppaca_ptr->int_dword.any_int)
                        iseries_handle_interrupts();
        }
#endif /* CONFIG_PPC_STD_MMU_64 */

        /*
         * if (get_paca()->hard_enabled) return;
         * But again we need to take care that gcc gets hard_enabled directly
         * via r13, not choose to use an intermediate register, lest we're
         * preempted to a different cpu in between the two instructions.
         */
        if (get_hard_enabled())
                return;

        /*
         * Need to hard-enable interrupts here.  Since currently disabled,
         * no need to take further asm precautions against preemption; but
         * use local_paca instead of get_paca() to avoid preemption checking.
         */
        local_paca->hard_enabled = en;
        if ((int)mfspr(SPRN_DEC) < 0)
                mtspr(SPRN_DEC, 1);

        /*
         * Force the delivery of pending soft-disabled interrupts on PS3.
         * Any HV call will have this side effect.
         */
        if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
                u64 tmp;
                lv1_get_version_info(&tmp);
        }

        __hard_irq_enable();
}
EXPORT_SYMBOL(raw_local_irq_restore);
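
/*
 * Illustrative sketch (not part of this file): raw_local_irq_restore() is
 * the slow path behind the usual local_irq_save()/local_irq_restore() pair.
 * On ppc64, local_irq_disable() only clears paca->soft_enabled; the hard EE
 * bit stays on, and an interrupt arriving in the window is noted (by
 * clearing hard_enabled) and replayed here once the soft mask is lifted:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// soft-disable only
 *	...critical section...
 *	local_irq_restore(flags);	// lands here; replays anything pending
 */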
#endif /* CONFIG_PPC64 */

static int show_other_interrupts(struct seq_file *p, int prec)
{
        int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
        if (tau_initialized) {
                seq_printf(p, "%*s: ", prec, "TAU");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", tau_interrupts(j));
                seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
        }
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
        seq_printf(p, "  Local timer interrupts\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
        seq_printf(p, "  Spurious interrupts\n");

        seq_printf(p, "%*s: ", prec, "CNT");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
        seq_printf(p, "  Performance monitoring interrupts\n");

        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
        seq_printf(p, "  Machine check exceptions\n");

        return 0;
}
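
/*
 * Illustrative sketch (not part of this file): on a two-cpu machine the
 * rows above render at the bottom of /proc/interrupts roughly as:
 *
 *	LOC:     12345      23456   Local timer interrupts
 *	SPU:         0          1   Spurious interrupts
 *	CNT:         0          0   Performance monitoring interrupts
 *	MCE:         0          0   Machine check exceptions
 */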

int show_interrupts(struct seq_file *p, void *v)
{
        unsigned long flags, any_count = 0;
        int i = *(loff_t *) v, j, prec;
        struct irqaction *action;
        struct irq_desc *desc;

        if (i > nr_irqs)
                return 0;

        /* Compute the field width needed for the largest irq number */
        for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
                j *= 10;

        if (i == nr_irqs)
                return show_other_interrupts(p, prec);

        /* print header */
        if (i == 0) {
                seq_printf(p, "%*s", prec + 8, "");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%-8d", j);
                seq_putc(p, '\n');
        }

        desc = irq_to_desc(i);
        if (!desc)
                return 0;

        raw_spin_lock_irqsave(&desc->lock, flags);
        for_each_online_cpu(j)
                any_count |= kstat_irqs_cpu(i, j);
        action = desc->action;
        if (!action && !any_count)
                goto out;

        seq_printf(p, "%*d: ", prec, i);
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));

        if (desc->chip)
                seq_printf(p, "  %-16s", desc->chip->name);
        else
                seq_printf(p, "  %-16s", "None");
        seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge");

        if (action) {
                seq_printf(p, "     %s", action->name);
                while ((action = action->next) != NULL)
                        seq_printf(p, ", %s", action->name);
        }

        seq_putc(p, '\n');
out:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = per_cpu(irq_stat, cpu).timer_irqs;

        sum += per_cpu(irq_stat, cpu).pmu_irqs;
        sum += per_cpu(irq_stat, cpu).mce_exceptions;
        sum += per_cpu(irq_stat, cpu).spurious_irqs;

        return sum;
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(const struct cpumask *map)
{
        struct irq_desc *desc;
        unsigned int irq;
        static int warned;
        cpumask_var_t mask;

        alloc_cpumask_var(&mask, GFP_KERNEL);

        for_each_irq(irq) {
                desc = irq_to_desc(irq);
                if (!desc)
                        continue;

                if (desc->status & IRQ_PER_CPU)
                        continue;

                cpumask_and(mask, desc->affinity, map);
                if (cpumask_any(mask) >= nr_cpu_ids) {
                        printk("Breaking affinity for irq %i\n", irq);
                        cpumask_copy(mask, map);
                }
                if (desc->chip->set_affinity)
                        desc->chip->set_affinity(irq, mask);
                else if (desc->action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }

        free_cpumask_var(mask);

        local_irq_enable();
        mdelay(1);
        local_irq_disable();
}
#endif
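
/*
 * Illustrative sketch (not part of this file): fixup_irqs() is meant for the
 * CPU offline path.  A typical (hypothetical) caller in platform hotplug
 * code re-targets every interrupt away from the dying cpu by passing the
 * mask of cpus that remain online:
 *
 *	set_cpu_online(cpu, false);		// cpu is going away
 *	fixup_irqs(cpu_online_mask);		// migrate its interrupts
 *
 * The final enable/mdelay/disable dance above gives any interrupt already
 * in flight towards the dying cpu a chance to be delivered before it goes.
 */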

static inline void handle_one_irq(unsigned int irq)
{
        struct thread_info *curtp, *irqtp;
        unsigned long saved_sp_limit;
        struct irq_desc *desc;

        /* Switch to the irq stack to handle this */
        curtp = current_thread_info();
        irqtp = hardirq_ctx[smp_processor_id()];

        if (curtp == irqtp) {
                /* We're already on the irq stack, just handle it */
                generic_handle_irq(irq);
                return;
        }

        desc = irq_to_desc(irq);
        saved_sp_limit = current->thread.ksp_limit;

        irqtp->task = curtp->task;
        irqtp->flags = 0;

        /* Copy the softirq bits in preempt_count so that the
         * softirq checks work in the hardirq context. */
        irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
                               (curtp->preempt_count & SOFTIRQ_MASK);

        current->thread.ksp_limit = (unsigned long)irqtp +
                _ALIGN_UP(sizeof(struct thread_info), 16);

        call_handle_irq(irq, desc, irqtp, desc->handle_irq);
        current->thread.ksp_limit = saved_sp_limit;
        irqtp->task = NULL;

        /* Set any flag that may have been set on the
         * alternate stack
         */
        if (irqtp->flags)
                set_bits(irqtp->flags, &curtp->flags);
}
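
/*
 * Illustrative sketch (not part of this file): each cpu's irq stack is a
 * THREAD_SIZE block whose bottom holds the thread_info pointed to by
 * hardirq_ctx[cpu].  The ksp_limit fiddling above tells the stack-overflow
 * checking where the new stack bottoms out while we run on it:
 *
 *	+--------------------+  <- hardirq_ctx[cpu] + THREAD_SIZE (initial sp)
 *	| irq handler frames |
 *	| (grow downward)    |
 *	+--------------------+  <- new ksp_limit (just above thread_info)
 *	| thread_info        |
 *	+--------------------+  <- hardirq_ctx[cpu]
 */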

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
        long sp;

        sp = __get_SP() & (THREAD_SIZE-1);

        /* check for stack overflow: is there less than 2KB free? */
        if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
                printk("do_IRQ: stack overflow: %ld\n",
                        sp - sizeof(struct thread_info));
                dump_stack();
        }
#endif
}

void do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned int irq;

        trace_irq_entry(regs);

        irq_enter();

        check_stack_overflow();

        irq = ppc_md.get_irq();

        /* NO_IRQ_IGNORE is returned by PICs for interrupts that should be
         * silently ignored; a plain NO_IRQ counts as spurious. */
        if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
                handle_one_irq(irq);
        else if (irq != NO_IRQ_IGNORE)
                __get_cpu_var(irq_stat).spurious_irqs++;

        irq_exit();
        set_irq_regs(old_regs);

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES) &&
                        get_lppaca()->int_dword.fields.decr_int) {
                get_lppaca()->int_dword.fields.decr_int = 0;
                /* Signal a fake decrementer interrupt */
                timer_interrupt(regs);
        }
#endif

        trace_irq_exit(regs);
}

void __init init_IRQ(void)
{
        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();

        exc_lvl_ctx_init();

        irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info   *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info    *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
        struct thread_info *tp;
        int i;

        for_each_possible_cpu(i) {
                memset((void *)critirq_ctx[i], 0, THREAD_SIZE);
                tp = critirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
                memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE);
                tp = dbgirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = 0;

                memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE);
                tp = mcheckirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = HARDIRQ_OFFSET;
#endif
        }
}
#endif

struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
        struct thread_info *tp;
        int i;

        for_each_possible_cpu(i) {
                memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                tp = softirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = 0;

                memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                tp = hardirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = HARDIRQ_OFFSET;
        }
}

static inline void do_softirq_onstack(void)
{
        struct thread_info *curtp, *irqtp;
        unsigned long saved_sp_limit = current->thread.ksp_limit;

        curtp = current_thread_info();
        irqtp = softirq_ctx[smp_processor_id()];
        irqtp->task = curtp->task;
        current->thread.ksp_limit = (unsigned long)irqtp +
                                    _ALIGN_UP(sizeof(struct thread_info), 16);
        call_do_softirq(irqtp);
        current->thread.ksp_limit = saved_sp_limit;
        irqtp->task = NULL;
}

void do_softirq(void)
{
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending())
                do_softirq_onstack();

        local_irq_restore(flags);
}


/*
 * IRQ controller and virtual interrupts
 */

static LIST_HEAD(irq_hosts);
static DEFINE_RAW_SPINLOCK(irq_big_lock);
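/*
 * revmap_trees_allocated is a small state machine guarding the radix trees
 * used for reverse mapping (see irq_late_init() below):
 *   0 - early boot, trees not initialized; lookups and inserts must
 *       fall back or bail out
 *   1 - trees initialized; new mappings may be inserted
 *   2 - mappings that existed before initialization have been re-inserted,
 *       so lookups may trust the trees
 * Writers bump it behind smp_wmb(); readers pair that with smp_rmb().
 */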
static unsigned int revmap_trees_allocated;
static DEFINE_MUTEX(revmap_trees_mutex);
struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;

irq_hw_number_t virq_to_hw(unsigned int virq)
{
        return irq_map[virq].hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
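
/*
 * Illustrative sketch (not part of this file): Linux interrupt numbers
 * ("virqs") are indices into irq_map[]; each entry remembers which irq_host
 * (interrupt controller domain) owns it and what the controller-local
 * ("hard") number was.  A device behind a cascaded PIC might end up with:
 *
 *	virq 19  ->  { .host = mypic_host, .hwirq = 0x3c }
 *
 * so virq_to_hw(19) hands back 0x3c.  Drivers only ever see the virq.
 */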

static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
        return h->of_node != NULL && h->of_node == np;
}

struct irq_host *irq_alloc_host(struct device_node *of_node,
                                unsigned int revmap_type,
                                unsigned int revmap_arg,
                                struct irq_host_ops *ops,
                                irq_hw_number_t inval_irq)
{
        struct irq_host *host;
        unsigned int size = sizeof(struct irq_host);
        unsigned int i;
        unsigned int *rmap;
        unsigned long flags;

        /* Allocate structure and revmap table if using linear mapping */
        if (revmap_type == IRQ_HOST_MAP_LINEAR)
                size += revmap_arg * sizeof(unsigned int);
        host = zalloc_maybe_bootmem(size, GFP_KERNEL);
        if (host == NULL)
                return NULL;

        /* Fill structure */
        host->revmap_type = revmap_type;
        host->inval_irq = inval_irq;
        host->ops = ops;
        host->of_node = of_node_get(of_node);

        if (host->ops->match == NULL)
                host->ops->match = default_irq_host_match;

        raw_spin_lock_irqsave(&irq_big_lock, flags);

        /* If it's a legacy controller, check for duplicates and
         * mark it as allocated (we use the irq 0 host pointer for that)
         */
        if (revmap_type == IRQ_HOST_MAP_LEGACY) {
                if (irq_map[0].host != NULL) {
                        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
                        /* If we are early boot, we can't free the structure,
                         * too bad...
                         * this will be fixed once slab is made available early
                         * instead of the current cruft
                         */
                        if (mem_init_done)
                                kfree(host);
                        return NULL;
                }
                irq_map[0].host = host;
        }

        list_add(&host->link, &irq_hosts);
        raw_spin_unlock_irqrestore(&irq_big_lock, flags);

        /* Additional setup per revmap type */
        switch(revmap_type) {
        case IRQ_HOST_MAP_LEGACY:
                /* 0 is always the invalid number for legacy */
                host->inval_irq = 0;
                /* set us up as the host for all legacy interrupts */
                for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
                        irq_map[i].hwirq = i;
                        smp_wmb();
                        irq_map[i].host = host;
                        smp_wmb();

                        /* Clear norequest flags */
                        irq_to_desc(i)->status &= ~IRQ_NOREQUEST;

                        /* Legacy flags are left to default at this point,
                         * one can then use irq_create_mapping() to
                         * explicitly change them
                         */
                        ops->map(host, i, i);
                }
                break;
        case IRQ_HOST_MAP_LINEAR:
                rmap = (unsigned int *)(host + 1);
                for (i = 0; i < revmap_arg; i++)
                        rmap[i] = NO_IRQ;
                host->revmap_data.linear.size = revmap_arg;
                smp_wmb();
                host->revmap_data.linear.revmap = rmap;
                break;
        default:
                break;
        }

        pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

        return host;
}
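
/*
 * Illustrative sketch (not part of this file, all names hypothetical): a
 * platform PIC driver with 64 controller-local sources would typically
 * allocate a linear host at init time and hang its chip setup off ->map():
 *
 *	static int mypic_map(struct irq_host *h, unsigned int virq,
 *			     irq_hw_number_t hw)
 *	{
 *		set_irq_chip_and_handler(virq, &mypic_chip, handle_level_irq);
 *		return 0;
 *	}
 *
 *	static struct irq_host_ops mypic_ops = {
 *		.map = mypic_map,
 *	};
 *
 *	mypic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 64,
 *				    &mypic_ops, NO_IRQ);
 */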

struct irq_host *irq_find_host(struct device_node *node)
{
        struct irq_host *h, *found = NULL;
        unsigned long flags;

        /* We might want to match the legacy controller last since
         * it might potentially be set to match all interrupts in
         * the absence of a device node. This isn't a problem yet
         * though...
         */
        raw_spin_lock_irqsave(&irq_big_lock, flags);
        list_for_each_entry(h, &irq_hosts, link)
                if (h->ops->match(h, node)) {
                        found = h;
                        break;
                }
        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
        return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

void irq_set_default_host(struct irq_host *host)
{
        pr_debug("irq: Default host set to @0x%p\n", host);

        irq_default_host = host;
}

void irq_set_virq_count(unsigned int count)
{
        pr_debug("irq: Trying to set virq count to %d\n", count);

        BUG_ON(count < NUM_ISA_INTERRUPTS);
        if (count < NR_IRQS)
                irq_virq_count = count;
}

static int irq_setup_virq(struct irq_host *host, unsigned int virq,
                            irq_hw_number_t hwirq)
{
        struct irq_desc *desc;

        desc = irq_to_desc_alloc_node(virq, 0);
        if (!desc) {
                pr_debug("irq: -> allocating desc failed\n");
                goto error;
        }

        /* Clear IRQ_NOREQUEST flag */
        desc->status &= ~IRQ_NOREQUEST;

        /* map it */
        smp_wmb();
        irq_map[virq].hwirq = hwirq;
        smp_mb();

        if (host->ops->map(host, virq, hwirq)) {
                pr_debug("irq: -> mapping failed, freeing\n");
                goto error;
        }

        return 0;

error:
        irq_free_virt(virq, 1);
        return -1;
}

unsigned int irq_create_direct_mapping(struct irq_host *host)
{
        unsigned int virq;

        if (host == NULL)
                host = irq_default_host;

        BUG_ON(host == NULL);
        WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);

        virq = irq_alloc_virt(host, 1, 0);
        if (virq == NO_IRQ) {
                pr_debug("irq: create_direct virq allocation failed\n");
                return NO_IRQ;
        }

        pr_debug("irq: create_direct obtained virq %d\n", virq);

        if (irq_setup_virq(host, virq, virq))
                return NO_IRQ;

        return virq;
}

unsigned int irq_create_mapping(struct irq_host *host,
                                irq_hw_number_t hwirq)
{
        unsigned int virq, hint;

        pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);

        /* Look for default host if necessary */
        if (host == NULL)
                host = irq_default_host;
        if (host == NULL) {
                printk(KERN_WARNING "irq_create_mapping called for"
                       " NULL host, hwirq=%lx\n", hwirq);
                WARN_ON(1);
                return NO_IRQ;
        }
        pr_debug("irq: -> using host @%p\n", host);

        /* Check if mapping already exists; if it does, call
         * host->ops->remap() to update the flags
         */
        virq = irq_find_mapping(host, hwirq);
        if (virq != NO_IRQ) {
                if (host->ops->remap)
                        host->ops->remap(host, virq, hwirq);
                pr_debug("irq: -> existing mapping on virq %d\n", virq);
                return virq;
        }

        /* Get a virtual interrupt number */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
                /* Handle legacy */
                virq = (unsigned int)hwirq;
                if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
                        return NO_IRQ;
                return virq;
        } else {
                /* Allocate a virtual interrupt number */
                hint = hwirq % irq_virq_count;
                virq = irq_alloc_virt(host, 1, hint);
                if (virq == NO_IRQ) {
                        pr_debug("irq: -> virq allocation failed\n");
                        return NO_IRQ;
                }
        }

        if (irq_setup_virq(host, virq, hwirq))
                return NO_IRQ;

        printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n",
                hwirq, host->of_node ? host->of_node->full_name : "null", virq);

        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
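
/*
 * Illustrative sketch (not part of this file, names hypothetical): given the
 * host allocated above, a driver that already knows the controller-local
 * interrupt number can ask for a virq and then request it as usual:
 *
 *	unsigned int virq = irq_create_mapping(mypic_host, 0x3c);
 *	if (virq != NO_IRQ)
 *		request_irq(virq, my_handler, 0, "mydev", dev);
 */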

unsigned int irq_create_of_mapping(struct device_node *controller,
                                   const u32 *intspec, unsigned int intsize)
{
        struct irq_host *host;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
        unsigned int virq;

        if (controller == NULL)
                host = irq_default_host;
        else
                host = irq_find_host(controller);
        if (host == NULL) {
                printk(KERN_WARNING "irq: no irq host found for %s !\n",
                       controller ? controller->full_name : "NULL");
                return NO_IRQ;
        }

        /* If host has no translation, then we assume interrupt line */
        if (host->ops->xlate == NULL)
                hwirq = intspec[0];
        else {
                if (host->ops->xlate(host, controller, intspec, intsize,
                                     &hwirq, &type))
                        return NO_IRQ;
        }

        /* Create mapping */
        virq = irq_create_mapping(host, hwirq);
        if (virq == NO_IRQ)
                return virq;

        /* Set type if specified and different from the current one */
        if (type != IRQ_TYPE_NONE &&
            type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
                set_irq_type(virq, type);
        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
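
/*
 * Illustrative sketch (not part of this file): drivers rarely call
 * irq_create_of_mapping() directly; they let the device-tree layer pull the
 * interrupt specifier out of their node and funnel it here, e.g.:
 *
 *	unsigned int virq = irq_of_parse_and_map(np, 0);
 *
 * which walks the "interrupts"/"interrupt-parent" properties, finds the
 * controller node, and lands in irq_create_of_mapping() with that
 * controller's intspec.
 */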

void irq_dispose_mapping(unsigned int virq)
{
        struct irq_host *host;
        irq_hw_number_t hwirq;

        if (virq == NO_IRQ)
                return;

        host = irq_map[virq].host;
        WARN_ON(host == NULL);
        if (host == NULL)
                return;

        /* Never unmap legacy interrupts */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
                return;

        /* remove chip and handler */
        set_irq_chip_and_handler(virq, NULL, NULL);

        /* Make sure it's completed */
        synchronize_irq(virq);

        /* Tell the PIC about it */
        if (host->ops->unmap)
                host->ops->unmap(host, virq);
        smp_mb();

        /* Clear reverse map */
        hwirq = irq_map[virq].hwirq;
        switch(host->revmap_type) {
        case IRQ_HOST_MAP_LINEAR:
                if (hwirq < host->revmap_data.linear.size)
                        host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
                break;
        case IRQ_HOST_MAP_TREE:
                /*
                 * Check if radix tree allocated yet, if not then nothing to
                 * remove.
                 */
                smp_rmb();
                if (revmap_trees_allocated < 1)
                        break;
                mutex_lock(&revmap_trees_mutex);
                radix_tree_delete(&host->revmap_data.tree, hwirq);
                mutex_unlock(&revmap_trees_mutex);
                break;
        }

        /* Destroy map */
        smp_mb();
        irq_map[virq].hwirq = host->inval_irq;

        /* Set some flags */
        irq_to_desc(virq)->status |= IRQ_NOREQUEST;

        /* Free it */
        irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

unsigned int irq_find_mapping(struct irq_host *host,
                              irq_hw_number_t hwirq)
{
        unsigned int i;
        unsigned int hint = hwirq % irq_virq_count;

        /* Look for default host if necessary */
        if (host == NULL)
                host = irq_default_host;
        if (host == NULL)
                return NO_IRQ;

        /* legacy -> bail early */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
                return hwirq;

        /* Slow path does a linear search of the map */
        if (hint < NUM_ISA_INTERRUPTS)
                hint = NUM_ISA_INTERRUPTS;
        i = hint;
        do {
                if (irq_map[i].host == host &&
                    irq_map[i].hwirq == hwirq)
                        return i;
                i++;
                if (i >= irq_virq_count)
                        i = NUM_ISA_INTERRUPTS;
        } while (i != hint);
        return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

unsigned int irq_radix_revmap_lookup(struct irq_host *host,
                                     irq_hw_number_t hwirq)
{
        struct irq_map_entry *ptr;
        unsigned int virq;

        WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

        /*
         * Check if the radix tree exists and has been initialized.
         * If not, we fall back to slow mode
         */
        if (revmap_trees_allocated < 2)
                return irq_find_mapping(host, hwirq);

        /* Now try to resolve */
        /*
         * No rcu_read_lock(ing) needed, the ptr returned can't go under us
         * as it's referencing an entry in the static irq_map table.
         */
        ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);

        /*
         * If found in radix tree, then fine.
         * Else fall back to linear lookup - this should not happen in
         * practice as it means that we failed to insert the node in the
         * radix tree.
         */
        if (ptr)
                virq = ptr - irq_map;
        else
                virq = irq_find_mapping(host, hwirq);

        return virq;
}
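
/*
 * Illustrative sketch (not part of this file): the radix tree exists so that
 * a PIC with a large, sparse hw number space can do its hot-path reverse
 * lookup without the linear scan in irq_find_mapping().  A (hypothetical)
 * cascaded controller's get_irq() might look like:
 *
 *	hw = in_be32(&regs->pending);			// hw source number
 *	return irq_radix_revmap_lookup(mypic_host, hw);	// hw -> virq
 */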

void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
                             irq_hw_number_t hwirq)
{
        WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

        /*
         * Check if the radix tree exists yet.
         * If not, then the irq will be inserted into the tree when it gets
         * initialized.
         */
        smp_rmb();
        if (revmap_trees_allocated < 1)
                return;

        if (virq != NO_IRQ) {
                mutex_lock(&revmap_trees_mutex);
                radix_tree_insert(&host->revmap_data.tree, hwirq,
                                  &irq_map[virq]);
                mutex_unlock(&revmap_trees_mutex);
        }
}

unsigned int irq_linear_revmap(struct irq_host *host,
                               irq_hw_number_t hwirq)
{
        unsigned int *revmap;

        WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);

        /* Check revmap bounds */
        if (unlikely(hwirq >= host->revmap_data.linear.size))
                return irq_find_mapping(host, hwirq);

        /* Check if revmap was allocated */
        revmap = host->revmap_data.linear.revmap;
        if (unlikely(revmap == NULL))
                return irq_find_mapping(host, hwirq);

        /* Fill up revmap with slow path if no mapping found */
        if (unlikely(revmap[hwirq] == NO_IRQ))
                revmap[hwirq] = irq_find_mapping(host, hwirq);

        return revmap[hwirq];
}
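
/*
 * Illustrative sketch (not part of this file): linear hosts get the same
 * O(1) hot path via the revmap array sized in irq_alloc_host().  A
 * (hypothetical) PIC with a dense source space would use it like:
 *
 *	hw = in_be32(&regs->pending) & 0x3f;		// 0..63, dense space
 *	return irq_linear_revmap(mypic_host, hw);	// array lookup, O(1)
 */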

unsigned int irq_alloc_virt(struct irq_host *host,
                            unsigned int count,
                            unsigned int hint)
{
        unsigned long flags;
        unsigned int i, j, found = NO_IRQ;

        if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
                return NO_IRQ;

        raw_spin_lock_irqsave(&irq_big_lock, flags);

        /* Use hint for 1 interrupt if any */
        if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
            hint < irq_virq_count && irq_map[hint].host == NULL) {
                found = hint;
                goto hint_found;
        }

        /* Look for count consecutive numbers in the allocatable
         * (non-legacy) space
         */
        for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
                if (irq_map[i].host != NULL)
                        j = 0;
                else
                        j++;

                if (j == count) {
                        found = i - count + 1;
                        break;
                }
        }
        if (found == NO_IRQ) {
                raw_spin_unlock_irqrestore(&irq_big_lock, flags);
                return NO_IRQ;
        }
 hint_found:
        for (i = found; i < (found + count); i++) {
                irq_map[i].hwirq = host->inval_irq;
                smp_wmb();
                irq_map[i].host = host;
        }
        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
        return found;
}

void irq_free_virt(unsigned int virq, unsigned int count)
{
        unsigned long flags;
        unsigned int i;

        WARN_ON(virq < NUM_ISA_INTERRUPTS);
        WARN_ON(count == 0 || (virq + count) > irq_virq_count);

        raw_spin_lock_irqsave(&irq_big_lock, flags);
        for (i = virq; i < (virq + count); i++) {
                struct irq_host *host;

                if (i < NUM_ISA_INTERRUPTS ||
                    (virq + count) > irq_virq_count)
                        continue;

                host = irq_map[i].host;
                irq_map[i].hwirq = host->inval_irq;
                smp_wmb();
                irq_map[i].host = NULL;
        }
        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
}

int arch_early_irq_init(void)
{
        struct irq_desc *desc;
        int i;

        for (i = 0; i < NR_IRQS; i++) {
                desc = irq_to_desc(i);
                if (desc)
                        desc->status |= IRQ_NOREQUEST;
        }

        return 0;
}

int arch_init_chip_data(struct irq_desc *desc, int node)
{
        desc->status |= IRQ_NOREQUEST;
        return 0;
}

/* We need to create the radix trees late */
static int irq_late_init(void)
{
        struct irq_host *h;
        unsigned int i;

        /*
         * No mutual exclusion with respect to accessors of the tree is needed
         * here as the synchronization is done via the state variable
         * revmap_trees_allocated.
         */
        list_for_each_entry(h, &irq_hosts, link) {
                if (h->revmap_type == IRQ_HOST_MAP_TREE)
                        INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
        }

        /*
         * Make sure the radix tree initializations are visible before
         * setting the flag
         */
        smp_wmb();
        revmap_trees_allocated = 1;

        /*
         * Insert the reverse mapping for those interrupts already present
         * in irq_map[].
         */
        mutex_lock(&revmap_trees_mutex);
        for (i = 0; i < irq_virq_count; i++) {
                if (irq_map[i].host &&
                    (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
                        radix_tree_insert(&irq_map[i].host->revmap_data.tree,
                                          irq_map[i].hwirq, &irq_map[i]);
        }
        mutex_unlock(&revmap_trees_mutex);

        /*
         * Make sure the radix tree insertions are visible before setting
         * the flag
         */
        smp_wmb();
        revmap_trees_allocated = 2;

        return 0;
}
arch_initcall(irq_late_init);

#ifdef CONFIG_VIRQ_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
        unsigned long flags;
        struct irq_desc *desc;
        const char *p;
        char none[] = "none";
        int i;

        seq_printf(m, "%-5s  %-7s  %-15s  %s\n", "virq", "hwirq",
                      "chip name", "host name");

        for (i = 1; i < nr_irqs; i++) {
                desc = irq_to_desc(i);
                if (!desc)
                        continue;

                raw_spin_lock_irqsave(&desc->lock, flags);

                if (desc->action && desc->action->handler) {
                        seq_printf(m, "%5d  ", i);
                        seq_printf(m, "0x%05lx  ", virq_to_hw(i));

                        if (desc->chip && desc->chip->name)
                                p = desc->chip->name;
                        else
                                p = none;
                        seq_printf(m, "%-15s  ", p);

                        if (irq_map[i].host && irq_map[i].host->of_node)
                                p = irq_map[i].host->of_node->full_name;
                        else
                                p = none;
                        seq_printf(m, "%s\n", p);
                }

                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }

        return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
        .open = virq_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init irq_debugfs_init(void)
{
        if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
                                 NULL, &virq_debug_fops) == NULL)
                return -ENOMEM;

        return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_VIRQ_DEBUG */

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
        distribute_irqs = 0;
        return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */