MIPS: Octeon: Rewrite interrupt handling code.
arch/mips/cavium-octeon/octeon-irq.c (linux-2.6.git)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008, 2009, 2010, 2011 Cavium Networks
 */

#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>

static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);

static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);

static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];

union octeon_ciu_chip_data {
        void *p;
        unsigned long l;
        struct {
                unsigned int line:6;
                unsigned int bit:6;
        } s;
};
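
/*
 * An illustrative sketch (not code used by this driver): the CIU
 * (line, bit) pair is packed directly into the irq chip_data pointer
 * via the union above, so no per-irq allocation is needed.  A
 * hypothetical round trip:
 *
 *      union octeon_ciu_chip_data cd;
 *
 *      cd.l = 0;
 *      cd.s.line = 1;                        (a source summed in SUM1)
 *      cd.s.bit = 17;                        (bit 17 of that line)
 *      irq_set_chip_data(irq, cd.p);         (stored as a pointer)
 *
 *      cd.p = irq_data_get_irq_chip_data(data);      (unpacked later)
 */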

struct octeon_core_chip_data {
        struct mutex core_irq_mutex;
        bool current_en;
        bool desired_en;
        u8 bit;
};

#define MIPS_CORE_IRQ_LINES 8

static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];

static void __init octeon_irq_set_ciu_mapping(int irq, int line, int bit,
                                              struct irq_chip *chip,
                                              irq_flow_handler_t handler)
{
        union octeon_ciu_chip_data cd;

        irq_set_chip_and_handler(irq, chip, handler);

        cd.l = 0;
        cd.s.line = line;
        cd.s.bit = bit;

        irq_set_chip_data(irq, cd.p);
        octeon_irq_ciu_to_irq[line][bit] = irq;
}
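
/*
 * octeon_irq_ciu_to_irq is the reverse of the mapping installed above:
 * the IP2/IP3 dispatch handlers below take fls64() of the masked SUM
 * register and look up octeon_irq_ciu_to_irq[line][bit] to recover the
 * Linux irq number to hand to do_IRQ().
 */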

static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
        return cpu_logical_map(cpu);
#else
        return cvmx_get_core_num();
#endif
}

static int octeon_cpu_for_coreid(int coreid)
{
#ifdef CONFIG_SMP
        return cpu_number_map(coreid);
#else
        return smp_processor_id();
#endif
}

static void octeon_irq_core_ack(struct irq_data *data)
{
        struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
        unsigned int bit = cd->bit;

        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << bit);
        /* The two user interrupts must be cleared manually. */
        if (bit < 2)
                clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(struct irq_data *data)
{
        struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        set_c0_status(0x100 << cd->bit);
}

static void octeon_irq_core_set_enable_local(void *arg)
{
        struct irq_data *data = arg;
        struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
        unsigned int mask = 0x100 << cd->bit;

        /*
         * Interrupts are already disabled, so these are atomic.
         */
        if (cd->desired_en)
                set_c0_status(mask);
        else
                clear_c0_status(mask);
}

static void octeon_irq_core_disable(struct irq_data *data)
{
        struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
        cd->desired_en = false;
}

static void octeon_irq_core_enable(struct irq_data *data)
{
        struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
        cd->desired_en = true;
}

static void octeon_irq_core_bus_lock(struct irq_data *data)
{
        struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

        mutex_lock(&cd->core_irq_mutex);
}

static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
{
        struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

        if (cd->desired_en != cd->current_en) {
                on_each_cpu(octeon_irq_core_set_enable_local, data, 1);

                cd->current_en = cd->desired_en;
        }

        mutex_unlock(&cd->core_irq_mutex);
}
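
/*
 * Design note: octeon_irq_core_enable()/octeon_irq_core_disable()
 * above only record the desired state.  The on_each_cpu() broadcast is
 * deferred to the irq_bus_sync_unlock() callback, which the genirq
 * core invokes (under core_irq_mutex, taken in irq_bus_lock()) in a
 * context where calling on_each_cpu() is safe.
 */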

static void octeon_irq_core_cpu_online(struct irq_data *data)
{
        if (irqd_irq_disabled(data))
                octeon_irq_core_eoi(data);
}

static void octeon_irq_core_cpu_offline(struct irq_data *data)
{
        if (irqd_irq_disabled(data))
                octeon_irq_core_ack(data);
}

static struct irq_chip octeon_irq_chip_core = {
        .name = "Core",
        .irq_enable = octeon_irq_core_enable,
        .irq_disable = octeon_irq_core_disable,
        .irq_ack = octeon_irq_core_ack,
        .irq_eoi = octeon_irq_core_eoi,
        .irq_bus_lock = octeon_irq_core_bus_lock,
        .irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,

        .irq_cpu_online = octeon_irq_core_cpu_online,
        .irq_cpu_offline = octeon_irq_core_cpu_offline,
};

static void __init octeon_irq_init_core(void)
{
        int i;
        int irq;
        struct octeon_core_chip_data *cd;

        for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
                cd = &octeon_irq_core_chip_data[i];
                cd->current_en = false;
                cd->desired_en = false;
                cd->bit = i;
                mutex_init(&cd->core_irq_mutex);

                irq = OCTEON_IRQ_SW0 + i;
                switch (irq) {
                case OCTEON_IRQ_TIMER:
                case OCTEON_IRQ_SW0:
                case OCTEON_IRQ_SW1:
                case OCTEON_IRQ_5:
                case OCTEON_IRQ_PERF:
                        irq_set_chip_data(irq, cd);
                        irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
                                                 handle_percpu_irq);
                        break;
                default:
                        break;
                }
        }
}

static int next_cpu_for_irq(struct irq_data *data)
{
#ifdef CONFIG_SMP
        int cpu;
        int weight = cpumask_weight(data->affinity);

        if (weight > 1) {
                cpu = smp_processor_id();
                for (;;) {
                        cpu = cpumask_next(cpu, data->affinity);
                        if (cpu >= nr_cpu_ids) {
                                cpu = -1;
                                continue;
                        } else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
                                break;
                        }
                }
        } else if (weight == 1) {
                cpu = cpumask_first(data->affinity);
        } else {
                cpu = smp_processor_id();
        }
        return cpu;
#else
        return smp_processor_id();
#endif
}
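
/*
 * Worked example with illustrative values: with affinity mask {0,2,3}
 * and the interrupt last handled on CPU 2, the next enable targets
 * CPU 3; from CPU 3 the search runs past nr_cpu_ids, resets cpu to -1
 * and wraps around to CPU 0.  Offline CPUs are skipped by the
 * cpu_online_mask test.
 */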

static void octeon_irq_ciu_enable(struct irq_data *data)
{
        int cpu = next_cpu_for_irq(data);
        int coreid = octeon_coreid_for_cpu(cpu);
        unsigned long *pen;
        unsigned long flags;
        union octeon_ciu_chip_data cd;

        cd.p = irq_data_get_irq_chip_data(data);

        if (cd.s.line == 0) {
                raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
                pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
                set_bit(cd.s.bit, pen);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
                raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
        } else {
                raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
                pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
                set_bit(cd.s.bit, pen);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
                raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
        }
}

static void octeon_irq_ciu_enable_local(struct irq_data *data)
{
        unsigned long *pen;
        unsigned long flags;
        union octeon_ciu_chip_data cd;

        cd.p = irq_data_get_irq_chip_data(data);

        if (cd.s.line == 0) {
                raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
                pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
                set_bit(cd.s.bit, pen);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
                raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
        } else {
                raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
                pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
                set_bit(cd.s.bit, pen);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
                raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
        }
}

static void octeon_irq_ciu_disable_local(struct irq_data *data)
{
        unsigned long *pen;
        unsigned long flags;
        union octeon_ciu_chip_data cd;

        cd.p = irq_data_get_irq_chip_data(data);

        if (cd.s.line == 0) {
                raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
                pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
                clear_bit(cd.s.bit, pen);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
                raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
        } else {
                raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
                pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
                clear_bit(cd.s.bit, pen);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
                raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
        }
}

static void octeon_irq_ciu_disable_all(struct irq_data *data)
{
        unsigned long flags;
        unsigned long *pen;
        int cpu;
        union octeon_ciu_chip_data cd;

        wmb(); /* Make sure flag changes arrive before register updates. */

        cd.p = irq_data_get_irq_chip_data(data);

        if (cd.s.line == 0) {
                raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
                for_each_online_cpu(cpu) {
                        int coreid = octeon_coreid_for_cpu(cpu);
                        pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
                        clear_bit(cd.s.bit, pen);
                        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
                }
                raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
        } else {
                raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
                for_each_online_cpu(cpu) {
                        int coreid = octeon_coreid_for_cpu(cpu);
                        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
                        clear_bit(cd.s.bit, pen);
                        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
                }
                raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
        }
}

static void octeon_irq_ciu_enable_all(struct irq_data *data)
{
        unsigned long flags;
        unsigned long *pen;
        int cpu;
        union octeon_ciu_chip_data cd;

        cd.p = irq_data_get_irq_chip_data(data);

        if (cd.s.line == 0) {
                raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
                for_each_online_cpu(cpu) {
                        int coreid = octeon_coreid_for_cpu(cpu);
                        pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
                        set_bit(cd.s.bit, pen);
                        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
                }
                raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
        } else {
                raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
                for_each_online_cpu(cpu) {
                        int coreid = octeon_coreid_for_cpu(cpu);
                        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
                        set_bit(cd.s.bit, pen);
                        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
                }
                raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
        }
}

/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
{
        u64 mask;
        int cpu = next_cpu_for_irq(data);
        union octeon_ciu_chip_data cd;

        cd.p = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd.s.bit);

        /*
         * Called under the desc lock, so these should never get out
         * of sync.
         */
        if (cd.s.line == 0) {
                int index = octeon_coreid_for_cpu(cpu) * 2;
                set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
        } else {
                int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
        }
}
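
/*
 * Why the _v2 variants can skip the spinlocks: the W1S/W1C forms of
 * the enable registers set or clear exactly the bits written as 1 and
 * leave all others untouched, so a single store such as
 *
 *      cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), 1ull << bit);
 *
 * updates one interrupt atomically.  The v1 path must instead
 * read-modify-write the whole 64-bit enable mask, which is what
 * octeon_irq_ciu0_lock and octeon_irq_ciu1_lock protect.
 */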

/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
{
        u64 mask;
        union octeon_ciu_chip_data cd;

        cd.p = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd.s.bit);

        if (cd.s.line == 0) {
                int index = cvmx_get_core_num() * 2;
                set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror));
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
        } else {
                int index = cvmx_get_core_num() * 2 + 1;
                set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror));
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
        }
}

static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
{
        u64 mask;
        union octeon_ciu_chip_data cd;

        cd.p = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd.s.bit);

        if (cd.s.line == 0) {
                int index = cvmx_get_core_num() * 2;
                clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror));
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        } else {
                int index = cvmx_get_core_num() * 2 + 1;
                clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror));
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
}

/*
 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
 */
static void octeon_irq_ciu_ack(struct irq_data *data)
{
        u64 mask;
        union octeon_ciu_chip_data cd;

        cd.p = data->chip_data;
        mask = 1ull << (cd.s.bit);

        if (cd.s.line == 0) {
                int index = cvmx_get_core_num() * 2;
                cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
        } else {
                cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
        }
}
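
/*
 * Note the asymmetry above: the line-0 sum is per core (SUM0 indexed
 * by coreid * 2), while line-1 sources are acknowledged through the
 * single shared CVMX_CIU_INT_SUM1 register.
 */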

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
{
        int cpu;
        u64 mask;
        union octeon_ciu_chip_data cd;

        wmb(); /* Make sure flag changes arrive before register updates. */

        cd.p = data->chip_data;
        mask = 1ull << (cd.s.bit);

        if (cd.s.line == 0) {
                for_each_online_cpu(cpu) {
                        int index = octeon_coreid_for_cpu(cpu) * 2;
                        clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
                }
        } else {
                for_each_online_cpu(cpu) {
                        int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                        clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
                }
        }
}

/*
 * Enable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
{
        int cpu;
        u64 mask;
        union octeon_ciu_chip_data cd;

        cd.p = data->chip_data;
        mask = 1ull << (cd.s.bit);

        if (cd.s.line == 0) {
                for_each_online_cpu(cpu) {
                        int index = octeon_coreid_for_cpu(cpu) * 2;
                        set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
                }
        } else {
                for_each_online_cpu(cpu) {
                        int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                        set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
                }
        }
}

static void octeon_irq_cpu_online_mbox(struct irq_data *data)
{
        if (irqd_irq_disabled(data))
                octeon_irq_ciu_enable_local(data);
}

static void octeon_irq_cpu_online_mbox_v2(struct irq_data *data)
{
        if (irqd_irq_disabled(data))
                octeon_irq_ciu_enable_local_v2(data);
}

static void octeon_irq_cpu_offline_mbox(struct irq_data *data)
{
        if (irqd_irq_disabled(data))
                octeon_irq_ciu_disable_local(data);
}

static void octeon_irq_cpu_offline_mbox_v2(struct irq_data *data)
{
        if (irqd_irq_disabled(data))
                octeon_irq_ciu_disable_local_v2(data);
}

#ifdef CONFIG_SMP

static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
{
        int cpu = smp_processor_id();
        cpumask_t new_affinity;

        if (!cpumask_test_cpu(cpu, data->affinity))
                return;

        if (cpumask_weight(data->affinity) > 1) {
                /*
                 * The irq has affinity to more than one CPU; just
                 * remove this CPU from the affinity set.
                 */
                cpumask_copy(&new_affinity, data->affinity);
                cpumask_clear_cpu(cpu, &new_affinity);
        } else {
                /* Otherwise, put it on the lowest numbered online CPU. */
                cpumask_clear(&new_affinity);
                cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
        }
        __irq_set_affinity_locked(data, &new_affinity);
}

static int octeon_irq_ciu_set_affinity(struct irq_data *data,
                                       const struct cpumask *dest, bool force)
{
        int cpu;
        struct irq_desc *desc = irq_to_desc(data->irq);
        int enable_one = (desc->status & IRQ_DISABLED) == 0;
        unsigned long flags;
        union octeon_ciu_chip_data cd;

        cd.p = data->chip_data;

        /*
         * For non-v2 CIU, we will allow only single CPU affinity.
         * This removes the need to do locking in the .ack/.eoi
         * functions.
         */
        if (cpumask_weight(dest) != 1)
                return -EINVAL;

        if (desc->status & IRQ_DISABLED)
                return 0;

        if (cd.s.line == 0) {
                raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
                for_each_online_cpu(cpu) {
                        int coreid = octeon_coreid_for_cpu(cpu);
                        unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);

                        if (cpumask_test_cpu(cpu, dest) && enable_one) {
                                enable_one = 0;
                                set_bit(cd.s.bit, pen);
                        } else {
                                clear_bit(cd.s.bit, pen);
                        }
                        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
                }
                raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
        } else {
                raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
                for_each_online_cpu(cpu) {
                        int coreid = octeon_coreid_for_cpu(cpu);
                        unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

                        if (cpumask_test_cpu(cpu, dest) && enable_one) {
                                enable_one = 0;
                                set_bit(cd.s.bit, pen);
                        } else {
                                clear_bit(cd.s.bit, pen);
                        }
                        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
                }
                raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
        }
        return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
                                          const struct cpumask *dest,
                                          bool force)
{
        int cpu;
        struct irq_desc *desc = irq_to_desc(data->irq);
        int enable_one = (desc->status & IRQ_DISABLED) == 0;
        u64 mask;
        union octeon_ciu_chip_data cd;

        if (desc->status & IRQ_DISABLED)
                return 0;

        cd.p = data->chip_data;
        mask = 1ull << cd.s.bit;

        if (cd.s.line == 0) {
                for_each_online_cpu(cpu) {
                        unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
                        int index = octeon_coreid_for_cpu(cpu) * 2;

                        if (cpumask_test_cpu(cpu, dest) && enable_one) {
                                enable_one = 0;
                                set_bit(cd.s.bit, pen);
                                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
                        } else {
                                clear_bit(cd.s.bit, pen);
                                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
                        }
                }
        } else {
                for_each_online_cpu(cpu) {
                        unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
                        int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

                        if (cpumask_test_cpu(cpu, dest) && enable_one) {
                                enable_one = 0;
                                set_bit(cd.s.bit, pen);
                                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
                        } else {
                                clear_bit(cd.s.bit, pen);
                                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
                        }
                }
        }
        return 0;
}
#endif
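
/*
 * A note on the two set_affinity handlers above: when the irq is not
 * disabled, enable_one guarantees that the source ends up enabled on
 * exactly one CPU of the destination set; every other online CPU gets
 * its enable bit cleared.  The v1 variant additionally restricts the
 * set to a single CPU so that .ack/.eoi never need the enable locks.
 */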

/*
 * The v1 CIU code already masks things, so supply a dummy version to
 * the core chip code.
 */
static void octeon_irq_dummy_mask(struct irq_data *data)
{
}

/*
 * Newer Octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_v2 = {
        .name = "CIU",
        .irq_enable = octeon_irq_ciu_enable_v2,
        .irq_disable = octeon_irq_ciu_disable_all_v2,
        .irq_mask = octeon_irq_ciu_disable_local_v2,
        .irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
        .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
        .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_edge_v2 = {
        .name = "CIU-E",
        .irq_enable = octeon_irq_ciu_enable_v2,
        .irq_disable = octeon_irq_ciu_disable_all_v2,
        .irq_ack = octeon_irq_ciu_ack,
        .irq_mask = octeon_irq_ciu_disable_local_v2,
        .irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
        .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
        .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu = {
        .name = "CIU",
        .irq_enable = octeon_irq_ciu_enable,
        .irq_disable = octeon_irq_ciu_disable_all,
        .irq_mask = octeon_irq_dummy_mask,
#ifdef CONFIG_SMP
        .irq_set_affinity = octeon_irq_ciu_set_affinity,
        .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_edge = {
        .name = "CIU-E",
        .irq_enable = octeon_irq_ciu_enable,
        .irq_disable = octeon_irq_ciu_disable_all,
        .irq_mask = octeon_irq_dummy_mask,
        .irq_ack = octeon_irq_ciu_ack,
#ifdef CONFIG_SMP
        .irq_set_affinity = octeon_irq_ciu_set_affinity,
        .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
        .name = "CIU-M",
        .irq_enable = octeon_irq_ciu_enable_all_v2,
        .irq_disable = octeon_irq_ciu_disable_all_v2,
        .irq_ack = octeon_irq_ciu_disable_local_v2,
        .irq_eoi = octeon_irq_ciu_enable_local_v2,

        .irq_cpu_online = octeon_irq_cpu_online_mbox_v2,
        .irq_cpu_offline = octeon_irq_cpu_offline_mbox_v2,
};

static struct irq_chip octeon_irq_chip_ciu_mbox = {
        .name = "CIU-M",
        .irq_enable = octeon_irq_ciu_enable_all,
        .irq_disable = octeon_irq_ciu_disable_all,

        .irq_cpu_online = octeon_irq_cpu_online_mbox,
        .irq_cpu_offline = octeon_irq_cpu_offline_mbox,
};
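
/*
 * With handle_percpu_irq, .irq_ack runs before the handler and
 * .irq_eoi after it, so the v2 mailbox chip simply keeps the mailbox
 * masked on the local core for the duration of the handler; other
 * cores are never touched.
 */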

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu_wd_enable(struct irq_data *data)
{
        unsigned long flags;
        unsigned long *pen;
        int coreid = data->irq - OCTEON_IRQ_WDOG0;      /* Bit 0-63 of EN1 */
        int cpu = octeon_cpu_for_coreid(coreid);

        raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
        set_bit(coreid, pen);
        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
        raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
{
        int coreid = data->irq - OCTEON_IRQ_WDOG0;
        int cpu = octeon_cpu_for_coreid(coreid);

        set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
}

static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
        .name = "CIU-W",
        .irq_enable = octeon_irq_ciu1_wd_enable_v2,
        .irq_disable = octeon_irq_ciu_disable_all_v2,
        .irq_mask = octeon_irq_ciu_disable_local_v2,
        .irq_unmask = octeon_irq_ciu_enable_local_v2,
};

static struct irq_chip octeon_irq_chip_ciu_wd = {
        .name = "CIU-W",
        .irq_enable = octeon_irq_ciu_wd_enable,
        .irq_disable = octeon_irq_ciu_disable_all,
        .irq_mask = octeon_irq_dummy_mask,
};

static void octeon_irq_ip2_v1(void)
{
        const unsigned long core_id = cvmx_get_core_num();
        u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));

        ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror);
        clear_c0_status(STATUSF_IP2);
        if (likely(ciu_sum)) {
                int bit = fls64(ciu_sum) - 1;
                int irq = octeon_irq_ciu_to_irq[0][bit];
                if (likely(irq))
                        do_IRQ(irq);
                else
                        spurious_interrupt();
        } else {
                spurious_interrupt();
        }
        set_c0_status(STATUSF_IP2);
}

static void octeon_irq_ip2_v2(void)
{
        const unsigned long core_id = cvmx_get_core_num();
        u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));

        ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror);
        if (likely(ciu_sum)) {
                int bit = fls64(ciu_sum) - 1;
                int irq = octeon_irq_ciu_to_irq[0][bit];
                if (likely(irq))
                        do_IRQ(irq);
                else
                        spurious_interrupt();
        } else {
                spurious_interrupt();
        }
}

static void octeon_irq_ip3_v1(void)
{
        u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

        ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror);
        clear_c0_status(STATUSF_IP3);
        if (likely(ciu_sum)) {
                int bit = fls64(ciu_sum) - 1;
                int irq = octeon_irq_ciu_to_irq[1][bit];
                if (likely(irq))
                        do_IRQ(irq);
                else
                        spurious_interrupt();
        } else {
                spurious_interrupt();
        }
        set_c0_status(STATUSF_IP3);
}

static void octeon_irq_ip3_v2(void)
{
        u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

        ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror);
        if (likely(ciu_sum)) {
                int bit = fls64(ciu_sum) - 1;
                int irq = octeon_irq_ciu_to_irq[1][bit];
                if (likely(irq))
                        do_IRQ(irq);
                else
                        spurious_interrupt();
        } else {
                spurious_interrupt();
        }
}

static void octeon_irq_ip4_mask(void)
{
        clear_c0_status(STATUSF_IP4);
        spurious_interrupt();
}

static void (*octeon_irq_ip2)(void);
static void (*octeon_irq_ip3)(void);
static void (*octeon_irq_ip4)(void);

void __cpuinitdata (*octeon_irq_setup_secondary)(void);

static void __cpuinit octeon_irq_percpu_enable(void)
{
        irq_cpu_online();
}

static void __cpuinit octeon_irq_init_ciu_percpu(void)
{
        int coreid = cvmx_get_core_num();

        /*
         * Disable all CIU interrupts.  The ones we need will be
         * enabled later.  Read the SUM register so we know the write
         * completed.
         */
        cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
        cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
        cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
        cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
        cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
}

static void __cpuinit octeon_irq_setup_secondary_ciu(void)
{
        __get_cpu_var(octeon_irq_ciu0_en_mirror) = 0;
        __get_cpu_var(octeon_irq_ciu1_en_mirror) = 0;

        octeon_irq_init_ciu_percpu();
        octeon_irq_percpu_enable();

        /* Enable the CIU lines */
        set_c0_status(STATUSF_IP3 | STATUSF_IP2);
        clear_c0_status(STATUSF_IP4);
}

static void __init octeon_irq_init_ciu(void)
{
        unsigned int i;
        struct irq_chip *chip;
        struct irq_chip *chip_edge;
        struct irq_chip *chip_mbox;
        struct irq_chip *chip_wd;

        octeon_irq_init_ciu_percpu();
        octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;

        if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
                octeon_irq_ip2 = octeon_irq_ip2_v2;
                octeon_irq_ip3 = octeon_irq_ip3_v2;
                chip = &octeon_irq_chip_ciu_v2;
                chip_edge = &octeon_irq_chip_ciu_edge_v2;
                chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
                chip_wd = &octeon_irq_chip_ciu_wd_v2;
        } else {
                octeon_irq_ip2 = octeon_irq_ip2_v1;
                octeon_irq_ip3 = octeon_irq_ip3_v1;
                chip = &octeon_irq_chip_ciu;
                chip_edge = &octeon_irq_chip_ciu_edge;
                chip_mbox = &octeon_irq_chip_ciu_mbox;
                chip_wd = &octeon_irq_chip_ciu_wd;
        }
        octeon_irq_ip4 = octeon_irq_ip4_mask;

        /* MIPS internal */
        octeon_irq_init_core();

        /* CIU_0 */
        for (i = 0; i < 16; i++)
                octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WORKQ0, 0, i + 0, chip, handle_level_irq);
        for (i = 0; i < 16; i++)
                octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_GPIO0, 0, i + 16, chip, handle_level_irq);

        octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq);

        octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART0, 0, 34, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART1, 0, 35, chip, handle_level_irq);

        for (i = 0; i < 4; i++)
                octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_INT0, 0, i + 36, chip, handle_level_irq);
        for (i = 0; i < 4; i++)
                octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_MSI0, 0, i + 40, chip, handle_level_irq);

        octeon_irq_set_ciu_mapping(OCTEON_IRQ_TWSI, 0, 45, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_RML, 0, 46, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_TRACE0, 0, 47, chip, handle_level_irq);

        for (i = 0; i < 2; i++)
                octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_GMX_DRP0, 0, i + 48, chip_edge, handle_edge_irq);

        octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPD_DRP, 0, 50, chip_edge, handle_edge_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_KEY_ZERO, 0, 51, chip_edge, handle_edge_irq);

        for (i = 0; i < 4; i++)
                octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_TIMER0, 0, i + 52, chip_edge, handle_edge_irq);

        octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB0, 0, 56, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_PCM, 0, 57, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_MPI, 0, 58, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_TWSI2, 0, 59, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_POWIQ, 0, 60, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPDPPTHR, 0, 61, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_MII0, 0, 62, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_BOOTDMA, 0, 63, chip, handle_level_irq);

        /* CIU_1 */
        for (i = 0; i < 16; i++)
                octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq);

        octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART2, 1, 16, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB1, 1, 17, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_MII1, 1, 18, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_NAND, 1, 19, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_MIO, 1, 20, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_IOB, 1, 21, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_FPA, 1, 22, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_POW, 1, 23, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_L2C, 1, 24, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPD, 1, 25, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_PIP, 1, 26, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_PKO, 1, 27, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_ZIP, 1, 28, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_TIM, 1, 29, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_RAD, 1, 30, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_KEY, 1, 31, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_DFA, 1, 32, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_USBCTL, 1, 33, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_SLI, 1, 34, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_DPI, 1, 35, chip, handle_level_irq);

        octeon_irq_set_ciu_mapping(OCTEON_IRQ_AGX0, 1, 36, chip, handle_level_irq);

        octeon_irq_set_ciu_mapping(OCTEON_IRQ_AGL, 1, 46, chip, handle_level_irq);

        octeon_irq_set_ciu_mapping(OCTEON_IRQ_PTP, 1, 47, chip_edge, handle_edge_irq);

        octeon_irq_set_ciu_mapping(OCTEON_IRQ_PEM0, 1, 48, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_PEM1, 1, 49, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_SRIO0, 1, 50, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_SRIO1, 1, 51, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_LMC0, 1, 52, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_DFM, 1, 56, chip, handle_level_irq);
        octeon_irq_set_ciu_mapping(OCTEON_IRQ_RST, 1, 63, chip, handle_level_irq);

        /* Enable the CIU lines */
        set_c0_status(STATUSF_IP3 | STATUSF_IP2);
        clear_c0_status(STATUSF_IP4);
}

void __init arch_init_irq(void)
{
#ifdef CONFIG_SMP
        /* Set the default affinity to the boot cpu. */
        cpumask_clear(irq_default_affinity);
        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif
        octeon_irq_init_ciu();
}

asmlinkage void plat_irq_dispatch(void)
{
        unsigned long cop0_cause;
        unsigned long cop0_status;

        while (1) {
                cop0_cause = read_c0_cause();
                cop0_status = read_c0_status();
                cop0_cause &= cop0_status;
                cop0_cause &= ST0_IM;

                if (unlikely(cop0_cause & STATUSF_IP2))
                        octeon_irq_ip2();
                else if (unlikely(cop0_cause & STATUSF_IP3))
                        octeon_irq_ip3();
                else if (unlikely(cop0_cause & STATUSF_IP4))
                        octeon_irq_ip4();
                else if (likely(cop0_cause))
                        do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
                else
                        break;
        }
}
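
/*
 * Worked example of the do_IRQ() index math above: the IM bits occupy
 * bits 8..15 of CP0_Cause/CP0_Status, so a pending IP5 (bit 13) gives
 * fls() == 14, and 14 - 9 + MIPS_CPU_IRQ_BASE selects the Linux irq
 * for core interrupt line 5.
 */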

#ifdef CONFIG_HOTPLUG_CPU

void fixup_irqs(void)
{
        irq_cpu_offline();
}

#endif /* CONFIG_HOTPLUG_CPU */