virt/kvm/assigned-dev.c [linux-3.10.git]
/*
 * Kernel-based Virtual Machine - device assignment support
 *
 * Copyright (C) 2006-9 Red Hat, Inc
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include "irq.h"

static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
						      int assigned_dev_id)
{
	struct list_head *ptr;
	struct kvm_assigned_dev_kernel *match;

	list_for_each(ptr, head) {
		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
		if (match->assigned_dev_id == assigned_dev_id)
			return match;
	}
	return NULL;
}

static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
				    *assigned_dev, int irq)
{
	int i, index;
	struct msix_entry *host_msix_entries;

	host_msix_entries = assigned_dev->host_msix_entries;

	index = -1;
	for (i = 0; i < assigned_dev->entries_nr; i++)
		if (irq == host_msix_entries[i].vector) {
			index = i;
			break;
		}
	if (index < 0) {
		printk(KERN_WARNING "Failed to find correlated MSI-X entry!\n");
		return 0;
	}

	return index;
}

static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
{
	struct kvm_assigned_dev_kernel *assigned_dev;
	struct kvm *kvm;
	int i;

	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
				    interrupt_work);
	kvm = assigned_dev->kvm;

	spin_lock_irq(&assigned_dev->assigned_dev_lock);
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		struct kvm_guest_msix_entry *guest_entries =
			assigned_dev->guest_msix_entries;
		for (i = 0; i < assigned_dev->entries_nr; i++) {
			if (!(guest_entries[i].flags &
					KVM_ASSIGNED_MSIX_PENDING))
				continue;
			guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
			kvm_set_irq(assigned_dev->kvm,
				    assigned_dev->irq_source_id,
				    guest_entries[i].vector, 1);
		}
	} else
		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
			    assigned_dev->guest_irq, 1);

	spin_unlock_irq(&assigned_dev->assigned_dev_lock);
}

static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
{
	unsigned long flags;
	struct kvm_assigned_dev_kernel *assigned_dev =
		(struct kvm_assigned_dev_kernel *) dev_id;

	spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int index = find_index_from_host_irq(assigned_dev, irq);
		if (index < 0)
			goto out;
		assigned_dev->guest_msix_entries[index].flags |=
			KVM_ASSIGNED_MSIX_PENDING;
	}

	schedule_work(&assigned_dev->interrupt_work);

	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
		disable_irq_nosync(irq);
		assigned_dev->host_irq_disabled = true;
	}

out:
	spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
	return IRQ_HANDLED;
}

/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_assigned_dev_kernel *dev;
	unsigned long flags;

	if (kian->gsi == -1)
		return;

	dev = container_of(kian, struct kvm_assigned_dev_kernel,
			   ack_notifier);

	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);

	/* The guest irq may be shared, so this ack may be
	 * from another device.
	 */
	spin_lock_irqsave(&dev->assigned_dev_lock, flags);
	if (dev->host_irq_disabled) {
		enable_irq(dev->host_irq);
		dev->host_irq_disabled = false;
	}
	spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
}

static void deassign_guest_irq(struct kvm *kvm,
			       struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
	assigned_dev->ack_notifier.gsi = -1;

	if (assigned_dev->irq_source_id != -1)
		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
	assigned_dev->irq_source_id = -1;
	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
}

/* This function implicitly holds the kvm->lock mutex due to cancel_work_sync() */
static void deassign_host_irq(struct kvm *kvm,
			      struct kvm_assigned_dev_kernel *assigned_dev)
{
	/*
	 * In kvm_free_device_irq, cancel_work_sync returns true if:
	 * 1. the work was scheduled and has been cancelled, or
	 * 2. the work callback has already executed.
	 *
	 * The first case ensures that the irq is disabled and no more events
	 * will arrive.  In the second case, however, the irq may still be
	 * enabled (e.g. for MSI), so we disable it here to prevent further
	 * events.
	 *
	 * Note that this may result in a nested disable if the interrupt type
	 * is INTx, but that is fine since we are about to free it.
	 *
	 * If this function is called as part of VM destruction, make sure the
	 * kvm state is still valid at this point, since we may also have to
	 * wait for interrupt_work to complete.
	 */
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int i;
		for (i = 0; i < assigned_dev->entries_nr; i++)
			disable_irq_nosync(assigned_dev->
					   host_msix_entries[i].vector);

		cancel_work_sync(&assigned_dev->interrupt_work);

		for (i = 0; i < assigned_dev->entries_nr; i++)
			free_irq(assigned_dev->host_msix_entries[i].vector,
				 (void *)assigned_dev);

		assigned_dev->entries_nr = 0;
		kfree(assigned_dev->host_msix_entries);
		kfree(assigned_dev->guest_msix_entries);
		pci_disable_msix(assigned_dev->dev);
	} else {
		/* Deal with MSI and INTx */
		disable_irq_nosync(assigned_dev->host_irq);
		cancel_work_sync(&assigned_dev->interrupt_work);

		free_irq(assigned_dev->host_irq, (void *)assigned_dev);

		if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
			pci_disable_msi(assigned_dev->dev);
	}

	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
}

static int kvm_deassign_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *assigned_dev,
			    unsigned long irq_requested_type)
{
	unsigned long guest_irq_type, host_irq_type;

	if (!irqchip_in_kernel(kvm))
		return -EINVAL;
	/* no irq assignment to deassign */
	if (!assigned_dev->irq_requested_type)
		return -ENXIO;

	host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
	guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;

	if (host_irq_type)
		deassign_host_irq(kvm, assigned_dev);
	if (guest_irq_type)
		deassign_guest_irq(kvm, assigned_dev);

	return 0;
}

static void kvm_free_assigned_irq(struct kvm *kvm,
				  struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
}

static void kvm_free_assigned_device(struct kvm *kvm,
				     struct kvm_assigned_dev_kernel
				     *assigned_dev)
{
	kvm_free_assigned_irq(kvm, assigned_dev);

	pci_reset_function(assigned_dev->dev);

	pci_release_regions(assigned_dev->dev);
	pci_disable_device(assigned_dev->dev);
	pci_dev_put(assigned_dev->dev);

	list_del(&assigned_dev->list);
	kfree(assigned_dev);
}

void kvm_free_all_assigned_devices(struct kvm *kvm)
{
	struct list_head *ptr, *ptr2;
	struct kvm_assigned_dev_kernel *assigned_dev;

	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
		assigned_dev = list_entry(ptr,
					  struct kvm_assigned_dev_kernel,
					  list);

		kvm_free_assigned_device(kvm, assigned_dev);
	}
}

static int assigned_device_enable_host_intx(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	dev->host_irq = dev->dev->irq;
	/* Even though this is PCI, we don't want to use shared
	 * interrupts. Sharing host devices with guest-assigned devices
	 * on the same interrupt line is not a happy situation: there
	 * are going to be long delays in accepting, acking, etc.
	 */
	if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
			0, "kvm_assigned_intx_device", (void *)dev))
		return -EIO;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_host_msi(struct kvm *kvm,
					   struct kvm_assigned_dev_kernel *dev)
{
	int r;

	if (!dev->dev->msi_enabled) {
		r = pci_enable_msi(dev->dev);
		if (r)
			return r;
	}

	dev->host_irq = dev->dev->irq;
	if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
			"kvm_assigned_msi_device", (void *)dev)) {
		pci_disable_msi(dev->dev);
		return -EIO;
	}

	return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_host_msix(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	int i, r = -EINVAL;

	/* host_msix_entries and guest_msix_entries should have been
	 * initialized */
	if (dev->entries_nr == 0)
		return r;

	r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
	if (r)
		return r;

	for (i = 0; i < dev->entries_nr; i++) {
		r = request_irq(dev->host_msix_entries[i].vector,
				kvm_assigned_dev_intr, 0,
				"kvm_assigned_msix_device",
				(void *)dev);
		/* FIXME: free requested_irq's on failure */
		if (r)
			return r;
	}

	return 0;
}

#endif

static int assigned_device_enable_guest_intx(struct kvm *kvm,
				struct kvm_assigned_dev_kernel *dev,
				struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = irq->guest_irq;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_guest_msi(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_guest_msix(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif

static int assign_host_irq(struct kvm *kvm,
			   struct kvm_assigned_dev_kernel *dev,
			   __u32 host_irq_type)
{
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
		return r;

	switch (host_irq_type) {
	case KVM_DEV_IRQ_HOST_INTX:
		r = assigned_device_enable_host_intx(kvm, dev);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_HOST_MSI:
		r = assigned_device_enable_host_msi(kvm, dev);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_HOST_MSIX:
		r = assigned_device_enable_host_msix(kvm, dev);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r)
		dev->irq_requested_type |= host_irq_type;

	return r;
}

static int assign_guest_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *dev,
			    struct kvm_assigned_irq *irq,
			    unsigned long guest_irq_type)
{
	int id;
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
		return r;

	id = kvm_request_irq_source_id(kvm);
	if (id < 0)
		return id;

	dev->irq_source_id = id;

	switch (guest_irq_type) {
	case KVM_DEV_IRQ_GUEST_INTX:
		r = assigned_device_enable_guest_intx(kvm, dev, irq);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_GUEST_MSI:
		r = assigned_device_enable_guest_msi(kvm, dev, irq);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_GUEST_MSIX:
		r = assigned_device_enable_guest_msix(kvm, dev, irq);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r) {
		dev->irq_requested_type |= guest_irq_type;
		kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
	} else
		kvm_free_irq_source_id(kvm, dev->irq_source_id);

	return r;
}

/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
				   struct kvm_assigned_irq *assigned_irq)
{
	int r = -EINVAL;
	struct kvm_assigned_dev_kernel *match;
	unsigned long host_irq_type, guest_irq_type;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (!irqchip_in_kernel(kvm))
		return r;

	mutex_lock(&kvm->lock);
	r = -ENODEV;
	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
	guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);

	r = -EINVAL;
	/* can only assign one type at a time */
	if (hweight_long(host_irq_type) > 1)
		goto out;
	if (hweight_long(guest_irq_type) > 1)
		goto out;
	if (host_irq_type == 0 && guest_irq_type == 0)
		goto out;

	r = 0;
	if (host_irq_type)
		r = assign_host_irq(kvm, match, host_irq_type);
	if (r)
		goto out;

	if (guest_irq_type)
		r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
					 struct kvm_assigned_irq
					 *assigned_irq)
{
	int r = -ENODEV;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
				      struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;
	struct pci_dev *dev;

	mutex_lock(&kvm->lock);
	down_read(&kvm->slots_lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (match) {
		/* device already assigned */
		r = -EEXIST;
		goto out;
	}

	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
	if (match == NULL) {
		printk(KERN_INFO "%s: Couldn't allocate memory\n",
		       __func__);
		r = -ENOMEM;
		goto out;
	}
	dev = pci_get_bus_and_slot(assigned_dev->busnr,
				   assigned_dev->devfn);
	if (!dev) {
		printk(KERN_INFO "%s: host device not found\n", __func__);
		r = -EINVAL;
		goto out_free;
	}
	if (pci_enable_device(dev)) {
		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
		r = -EBUSY;
		goto out_put;
	}
	r = pci_request_regions(dev, "kvm_assigned_device");
	if (r) {
		printk(KERN_INFO "%s: Could not get access to device regions\n",
		       __func__);
		goto out_disable;
	}

	pci_reset_function(dev);

	match->assigned_dev_id = assigned_dev->assigned_dev_id;
	match->host_busnr = assigned_dev->busnr;
	match->host_devfn = assigned_dev->devfn;
	match->flags = assigned_dev->flags;
	match->dev = dev;
	spin_lock_init(&match->assigned_dev_lock);
	match->irq_source_id = -1;
	match->kvm = kvm;
	match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
	INIT_WORK(&match->interrupt_work,
		  kvm_assigned_dev_interrupt_work_handler);

	list_add(&match->list, &kvm->arch.assigned_dev_head);

	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
		if (!kvm->arch.iommu_domain) {
			r = kvm_iommu_map_guest(kvm);
			if (r)
				goto out_list_del;
		}
		r = kvm_assign_device(kvm, match);
		if (r)
			goto out_list_del;
	}

out:
	up_read(&kvm->slots_lock);
	mutex_unlock(&kvm->lock);
	return r;
out_list_del:
	list_del(&match->list);
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
out_put:
	pci_dev_put(dev);
out_free:
	kfree(match);
	up_read(&kvm->slots_lock);
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
		struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (!match) {
		printk(KERN_INFO "%s: device hasn't been assigned before, "
		  "so cannot be deassigned\n", __func__);
		r = -EINVAL;
		goto out;
	}

	if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
		kvm_deassign_device(kvm, match);

	kvm_free_assigned_device(kvm, match);

out:
	mutex_unlock(&kvm->lock);
	return r;
}


#ifdef __KVM_HAVE_MSIX
static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
				    struct kvm_assigned_msix_nr *entry_nr)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      entry_nr->assigned_dev_id);
	if (!adev) {
		r = -EINVAL;
		goto msix_nr_out;
	}

	if (adev->entries_nr == 0) {
		adev->entries_nr = entry_nr->entry_nr;
		if (adev->entries_nr == 0 ||
		    adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) {
			r = -EINVAL;
			goto msix_nr_out;
		}

		adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
						entry_nr->entry_nr,
						GFP_KERNEL);
		if (!adev->host_msix_entries) {
			r = -ENOMEM;
			goto msix_nr_out;
		}
		adev->guest_msix_entries = kzalloc(
				sizeof(struct kvm_guest_msix_entry) *
				entry_nr->entry_nr, GFP_KERNEL);
		if (!adev->guest_msix_entries) {
			kfree(adev->host_msix_entries);
			r = -ENOMEM;
			goto msix_nr_out;
		}
	} else /* Not allowed to set the MSI-X entry count twice */
		r = -EINVAL;
msix_nr_out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
				       struct kvm_assigned_msix_entry *entry)
{
	int r = 0, i;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      entry->assigned_dev_id);

	if (!adev) {
		r = -EINVAL;
		goto msix_entry_out;
	}

	for (i = 0; i < adev->entries_nr; i++)
		if (adev->guest_msix_entries[i].vector == 0 ||
		    adev->guest_msix_entries[i].entry == entry->entry) {
			adev->guest_msix_entries[i].entry = entry->entry;
			adev->guest_msix_entries[i].vector = entry->gsi;
			adev->host_msix_entries[i].entry = entry->entry;
			break;
		}
	if (i == adev->entries_nr) {
		r = -ENOSPC;
		goto msix_entry_out;
	}

msix_entry_out:
	mutex_unlock(&kvm->lock);

	return r;
}
#endif

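For illustration only, and not part of this file: a minimal userspace sketch of how the two MSI-X ioctls above are typically driven. It first tells KVM how many MSI-X entries the assigned device uses, then maps each MSI-X table entry to a guest GSI. The helper name, vm_fd, dev_id, nvec and guest_gsi[] are assumptions made for the example, not kernel API.

/*
 * Hypothetical userspace helper; assumes <sys/ioctl.h> and <linux/kvm.h>
 * are included and that dev_id was registered via KVM_ASSIGN_PCI_DEVICE.
 */
static int example_setup_msix(int vm_fd, __u32 dev_id,
			      const __u32 *guest_gsi, __u16 nvec)
{
	struct kvm_assigned_msix_nr nr = {
		.assigned_dev_id = dev_id,
		.entry_nr = nvec,
	};
	__u16 i;

	/* Tell KVM how many MSI-X entries the device will use. */
	if (ioctl(vm_fd, KVM_ASSIGN_SET_MSIX_NR, &nr) < 0)
		return -1;

	/* Map each MSI-X table entry to a guest GSI. */
	for (i = 0; i < nvec; i++) {
		struct kvm_assigned_msix_entry e = {
			.assigned_dev_id = dev_id,
			.gsi = guest_gsi[i],
			.entry = i,
		};
		if (ioctl(vm_fd, KVM_ASSIGN_SET_MSIX_ENTRY, &e) < 0)
			return -1;
	}
	return 0;
}
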
long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int r = -ENOTTY;

	switch (ioctl) {
	case KVM_ASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_IRQ: {
		r = -EOPNOTSUPP;
		break;
	}
#ifdef KVM_CAP_ASSIGN_DEV_IRQ
	case KVM_ASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
	case KVM_DEASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
#endif
#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
	case KVM_DEASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
#endif
#ifdef KVM_CAP_IRQ_ROUTING
	case KVM_SET_GSI_ROUTING: {
		struct kvm_irq_routing routing;
		struct kvm_irq_routing __user *urouting;
		struct kvm_irq_routing_entry *entries;

		r = -EFAULT;
		if (copy_from_user(&routing, argp, sizeof(routing)))
			goto out;
		r = -EINVAL;
		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
			goto out;
		if (routing.flags)
			goto out;
		r = -ENOMEM;
		entries = vmalloc(routing.nr * sizeof(*entries));
		if (!entries)
			goto out;
		r = -EFAULT;
		urouting = argp;
		if (copy_from_user(entries, urouting->entries,
				   routing.nr * sizeof(*entries)))
			goto out_free_irq_routing;
		r = kvm_set_irq_routing(kvm, entries, routing.nr,
					routing.flags);
	out_free_irq_routing:
		vfree(entries);
		break;
	}
#endif /* KVM_CAP_IRQ_ROUTING */
#ifdef __KVM_HAVE_MSIX
	case KVM_ASSIGN_SET_MSIX_NR: {
		struct kvm_assigned_msix_nr entry_nr;
		r = -EFAULT;
		if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
			goto out;
		r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_SET_MSIX_ENTRY: {
		struct kvm_assigned_msix_entry entry;
		r = -EFAULT;
		if (copy_from_user(&entry, argp, sizeof entry))
			goto out;
		r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
		if (r)
			goto out;
		break;
	}
#endif
	}
out:
	return r;
}

818