35397a569b249fce9b54a947fbe84abbb34a14ad
[linux-2.6.git] / virt / kvm / irq_comm.c
1 /*
2  * irq_comm.c: Common API for in kernel interrupt controller
3  * Copyright (c) 2007, Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16  * Place - Suite 330, Boston, MA 02111-1307 USA.
17  * Authors:
18  *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
19  *
20  */
21
22 #include <linux/kvm_host.h>
23
24 #include <asm/msidef.h>
25
26 #include "irq.h"
27
28 #include "ioapic.h"
29
static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
                           struct kvm *kvm, int level)
{
#ifdef CONFIG_X86
        /* Hand the pin straight to the in-kernel PIC model. */
        struct kvm_pic *pic = pic_irqchip(kvm);

        return kvm_pic_set_irq(pic, e->irqchip.pin, level);
#else
        /* No PIC model exists on non-x86 architectures. */
        return -1;
#endif
}
39
40 static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
41                               struct kvm *kvm, int level)
42 {
43         return kvm_ioapic_set_irq(kvm->arch.vioapic, e->irqchip.pin, level);
44 }
45
/*
 * kvm_get_intr_delivery_bitmask - resolve a redirection entry to a vcpu set.
 * @kvm:             the VM being targeted
 * @entry:           IOAPIC-style redirection entry (MSI is converted to
 *                   this form as well, see kvm_set_msi())
 * @deliver_bitmask: output bitmap of KVM_MAX_VCPUS bits; bit i is set when
 *                   the vcpu in slot i should receive the interrupt
 *
 * The candidate set is first built from dest_id/dest_mode, then narrowed
 * by delivery_mode (lowest-priority selects exactly one vcpu).
 */
void kvm_get_intr_delivery_bitmask(struct kvm *kvm,
                                   union kvm_ioapic_redirect_entry *entry,
                                   unsigned long *deliver_bitmask)
{
        int i;
        struct kvm_vcpu *vcpu;

        bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);

        if (entry->fields.dest_mode == 0) {     /* Physical mode. */
                if (entry->fields.dest_id == 0xFF) {    /* Broadcast. */
                        /* Target every vcpu that has a local APIC. */
                        for (i = 0; i < KVM_MAX_VCPUS; ++i)
                                if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
                                        __set_bit(i, deliver_bitmask);
                        /* Lowest priority shouldn't combine with broadcast */
                        if (entry->fields.delivery_mode ==
                            IOAPIC_LOWEST_PRIORITY && printk_ratelimit())
                                printk(KERN_INFO "kvm: apic: phys broadcast "
                                                  "and lowest prio\n");
                        return;
                }
                /* Unicast: stop at the first vcpu whose APIC id matches. */
                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                        vcpu = kvm->vcpus[i];
                        if (!vcpu)
                                continue;
                        if (kvm_apic_match_physical_addr(vcpu->arch.apic,
                                        entry->fields.dest_id)) {
                                if (vcpu->arch.apic)
                                        __set_bit(i, deliver_bitmask);
                                break;
                        }
                }
        } else if (entry->fields.dest_id != 0) /* Logical mode, MDA non-zero. */
                /* Logical mode may address several vcpus at once. */
                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                        vcpu = kvm->vcpus[i];
                        if (!vcpu)
                                continue;
                        if (vcpu->arch.apic &&
                            kvm_apic_match_logical_addr(vcpu->arch.apic,
                                        entry->fields.dest_id))
                                __set_bit(i, deliver_bitmask);
                }

        switch (entry->fields.delivery_mode) {
        case IOAPIC_LOWEST_PRIORITY:
                /* Select one in deliver_bitmask */
                vcpu = kvm_get_lowest_prio_vcpu(kvm,
                                entry->fields.vector, deliver_bitmask);
                /* Collapse the set to the single chosen vcpu (or none). */
                bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
                if (!vcpu)
                        return;
                __set_bit(vcpu->vcpu_id, deliver_bitmask);
                break;
        case IOAPIC_FIXED:
        case IOAPIC_NMI:
                /* Deliver to every vcpu collected above. */
                break;
        default:
                if (printk_ratelimit())
                        printk(KERN_INFO "kvm: unsupported delivery mode %d\n",
                                entry->fields.delivery_mode);
                /* Unknown mode: deliver to nobody. */
                bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
        }
}
109
110 static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
111                        struct kvm *kvm, int level)
112 {
113         union kvm_ioapic_redirect_entry entry;
114
115         entry.bits = 0;
116         entry.fields.dest_id = (e->msi.address_lo &
117                         MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
118         entry.fields.vector = (e->msi.data &
119                         MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
120         entry.fields.dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
121                         (unsigned long *)&e->msi.address_lo);
122         entry.fields.trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
123                         (unsigned long *)&e->msi.data);
124         entry.fields.delivery_mode = test_bit(
125                         MSI_DATA_DELIVERY_MODE_SHIFT,
126                         (unsigned long *)&e->msi.data);
127
128         /* TODO Deal with RH bit of MSI message address */
129         return ioapic_deliver_entry(kvm, &entry);
130 }
131
132 /* This should be called with the kvm->lock mutex held
133  * Return value:
134  *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
135  *  = 0   Interrupt was coalesced (previous irq is still pending)
136  *  > 0   Number of CPUs interrupt was delivered to
137  */
138 int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
139 {
140         struct kvm_kernel_irq_routing_entry *e;
141         unsigned long *irq_state, sig_level;
142         int ret = -1;
143
144         if (irq < KVM_IOAPIC_NUM_PINS) {
145                 irq_state = (unsigned long *)&kvm->arch.irq_states[irq];
146
147                 /* Logical OR for level trig interrupt */
148                 if (level)
149                         set_bit(irq_source_id, irq_state);
150                 else
151                         clear_bit(irq_source_id, irq_state);
152                 sig_level = !!(*irq_state);
153         } else /* Deal with MSI/MSI-X */
154                 sig_level = 1;
155
156         /* Not possible to detect if the guest uses the PIC or the
157          * IOAPIC.  So set the bit in both. The guest will ignore
158          * writes to the unused one.
159          */
160         list_for_each_entry(e, &kvm->irq_routing, link)
161                 if (e->gsi == irq) {
162                         int r = e->set(e, kvm, sig_level);
163                         if (r < 0)
164                                 continue;
165
166                         ret = r + ((ret < 0) ? 0 : ret);
167                 }
168         return ret;
169 }
170
171 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
172 {
173         struct kvm_kernel_irq_routing_entry *e;
174         struct kvm_irq_ack_notifier *kian;
175         struct hlist_node *n;
176         unsigned gsi = pin;
177
178         list_for_each_entry(e, &kvm->irq_routing, link)
179                 if (e->irqchip.irqchip == irqchip &&
180                     e->irqchip.pin == pin) {
181                         gsi = e->gsi;
182                         break;
183                 }
184
185         hlist_for_each_entry(kian, n, &kvm->arch.irq_ack_notifier_list, link)
186                 if (kian->gsi == gsi)
187                         kian->irq_acked(kian);
188 }
189
/*
 * Register @kian so that kian->irq_acked() runs whenever the guest
 * acknowledges the GSI stored in kian->gsi (see kvm_notify_acked_irq()).
 * The caller must have set kian->gsi before registering.
 */
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian)
{
        hlist_add_head(&kian->link, &kvm->arch.irq_ack_notifier_list);
}
195
/*
 * Remove a previously registered ack notifier.  hlist_del_init() leaves
 * kian->link reusable, so the same notifier may be registered again.
 */
void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian)
{
        hlist_del_init(&kian->link);
}
200
201 /* The caller must hold kvm->lock mutex */
202 int kvm_request_irq_source_id(struct kvm *kvm)
203 {
204         unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
205         int irq_source_id = find_first_zero_bit(bitmap,
206                                 sizeof(kvm->arch.irq_sources_bitmap));
207
208         if (irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
209                 printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n");
210                 return -EFAULT;
211         }
212
213         ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
214         set_bit(irq_source_id, bitmap);
215
216         return irq_source_id;
217 }
218
219 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
220 {
221         int i;
222
223         ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
224
225         if (irq_source_id < 0 ||
226             irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
227                 printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
228                 return;
229         }
230         for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
231                 clear_bit(irq_source_id, &kvm->arch.irq_states[i]);
232         clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
233 }
234
/*
 * Register @kimn so that kimn->func() is invoked whenever @irq is masked
 * or unmasked (see kvm_fire_mask_notifiers()).
 */
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn)
{
        kimn->irq = irq;
        hlist_add_head(&kimn->link, &kvm->mask_notifier_list);
}
241
/*
 * Remove a previously registered mask notifier.  @irq is unused here and
 * kept only for symmetry with kvm_register_irq_mask_notifier().
 */
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn)
{
        hlist_del(&kimn->link);
}
247
/* Invoke every mask notifier registered for @irq with the new @mask state. */
void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
{
        struct kvm_irq_mask_notifier *kimn;
        struct hlist_node *n;

        hlist_for_each_entry(kimn, n, &kvm->mask_notifier_list, link)
                if (kimn->irq == irq)
                        kimn->func(kimn, mask);
}
257
258 static void __kvm_free_irq_routing(struct list_head *irq_routing)
259 {
260         struct kvm_kernel_irq_routing_entry *e, *n;
261
262         list_for_each_entry_safe(e, n, irq_routing, link)
263                 kfree(e);
264 }
265
/*
 * Drop the whole GSI routing table for @kvm.
 * NOTE(review): no locking here — presumably only called when no other
 * users of kvm->irq_routing remain (VM teardown); confirm at call sites.
 */
void kvm_free_irq_routing(struct kvm *kvm)
{
        __kvm_free_irq_routing(&kvm->irq_routing);
}
270
271 static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e,
272                                const struct kvm_irq_routing_entry *ue)
273 {
274         int r = -EINVAL;
275         int delta;
276
277         e->gsi = ue->gsi;
278         switch (ue->type) {
279         case KVM_IRQ_ROUTING_IRQCHIP:
280                 delta = 0;
281                 switch (ue->u.irqchip.irqchip) {
282                 case KVM_IRQCHIP_PIC_MASTER:
283                         e->set = kvm_set_pic_irq;
284                         break;
285                 case KVM_IRQCHIP_PIC_SLAVE:
286                         e->set = kvm_set_pic_irq;
287                         delta = 8;
288                         break;
289                 case KVM_IRQCHIP_IOAPIC:
290                                 e->set = kvm_set_ioapic_irq;
291                         break;
292                 default:
293                         goto out;
294                 }
295                 e->irqchip.irqchip = ue->u.irqchip.irqchip;
296                 e->irqchip.pin = ue->u.irqchip.pin + delta;
297                 break;
298         case KVM_IRQ_ROUTING_MSI:
299                 e->set = kvm_set_msi;
300                 e->msi.address_lo = ue->u.msi.address_lo;
301                 e->msi.address_hi = ue->u.msi.address_hi;
302                 e->msi.data = ue->u.msi.data;
303                 break;
304         default:
305                 goto out;
306         }
307         r = 0;
308 out:
309         return r;
310 }
311
312
/*
 * kvm_set_irq_routing - replace the VM's GSI routing table.
 * @kvm:   the VM
 * @ue:    array of @nr userspace routing entries to install
 * @nr:    number of entries in @ue
 * @flags: per-entry flags must be zero; @flags itself is currently unused
 *
 * The new kernel-side list is fully built before taking kvm->lock, then
 * swapped in so the table is replaced atomically; the displaced entries
 * are freed outside the lock.  Returns 0 on success, negative errno on
 * failure (in which case the old table is left in place).
 */
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *ue,
                        unsigned nr,
                        unsigned flags)
{
        struct list_head irq_list = LIST_HEAD_INIT(irq_list);
        struct list_head tmp = LIST_HEAD_INIT(tmp);
        struct kvm_kernel_irq_routing_entry *e = NULL;
        unsigned i;
        int r;

        for (i = 0; i < nr; ++i) {
                r = -EINVAL;
                if (ue->gsi >= KVM_MAX_IRQ_ROUTES)
                        goto out;
                if (ue->flags)
                        goto out;
                r = -ENOMEM;
                e = kzalloc(sizeof(*e), GFP_KERNEL);
                if (!e)
                        goto out;
                r = setup_routing_entry(e, ue);
                if (r)
                        goto out;
                ++ue;
                list_add(&e->link, &irq_list);
                /* Ownership moved to irq_list; don't double-free at out:. */
                e = NULL;
        }

        mutex_lock(&kvm->lock);
        /* Three-way splice: new list into kvm->irq_routing, old entries
         * end up in irq_list (via tmp) so the cleanup below frees them. */
        list_splice(&kvm->irq_routing, &tmp);
        INIT_LIST_HEAD(&kvm->irq_routing);
        list_splice(&irq_list, &kvm->irq_routing);
        INIT_LIST_HEAD(&irq_list);
        list_splice(&tmp, &irq_list);
        mutex_unlock(&kvm->lock);

        r = 0;

out:
        /* On failure: frees the half-built entry and partial list.
         * On success: frees the old table captured above. */
        kfree(e);
        __kvm_free_irq_routing(&irq_list);
        return r;
}
357
/* Route GSI @irq to the IOAPIC pin of the same number. */
#define IOAPIC_ROUTING_ENTRY(irq) \
        { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,  \
          .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) }
/* GSIs above the PIC range get an IOAPIC route only. */
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)

#ifdef CONFIG_X86
/* On x86 the first 16 GSIs are also wired to a PIC pin (8 per chip). */
#  define PIC_ROUTING_ENTRY(irq) \
        { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,  \
          .u.irqchip.irqchip = SELECT_PIC(irq), .u.irqchip.pin = (irq) % 8 }
#  define ROUTING_ENTRY2(irq) \
        IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
#else
#  define ROUTING_ENTRY2(irq) \
        IOAPIC_ROUTING_ENTRY(irq)
#endif
373
/*
 * Default 1:1 GSI routing: IRQs 0-15 via PIC+IOAPIC (x86), 16-23 via
 * IOAPIC only, and 24-47 additionally on ia64.
 */
static const struct kvm_irq_routing_entry default_routing[] = {
        ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
        ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
        ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
        ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
        ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
        ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
        ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
        ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
        ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
        ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
        ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
        ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
#ifdef CONFIG_IA64
        ROUTING_ENTRY1(24), ROUTING_ENTRY1(25),
        ROUTING_ENTRY1(26), ROUTING_ENTRY1(27),
        ROUTING_ENTRY1(28), ROUTING_ENTRY1(29),
        ROUTING_ENTRY1(30), ROUTING_ENTRY1(31),
        ROUTING_ENTRY1(32), ROUTING_ENTRY1(33),
        ROUTING_ENTRY1(34), ROUTING_ENTRY1(35),
        ROUTING_ENTRY1(36), ROUTING_ENTRY1(37),
        ROUTING_ENTRY1(38), ROUTING_ENTRY1(39),
        ROUTING_ENTRY1(40), ROUTING_ENTRY1(41),
        ROUTING_ENTRY1(42), ROUTING_ENTRY1(43),
        ROUTING_ENTRY1(44), ROUTING_ENTRY1(45),
        ROUTING_ENTRY1(46), ROUTING_ENTRY1(47),
#endif
};
402
/* Install the static default GSI routing table defined above. */
int kvm_setup_default_irq_routing(struct kvm *kvm)
{
        return kvm_set_irq_routing(kvm, default_routing,
                                   ARRAY_SIZE(default_routing), 0);
}