/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Allen M. Kay <allen.m.kay@intel.com>
 * Author: Weidong Han <weidong.han@intel.com>
 * Author: Ben-Ami Yassour <benami@il.ibm.com>
 */
#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
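
/*
 * Runtime-writable via sysfs (S_IWUSR); it can also be set at module load
 * time, e.g. "modprobe kvm allow_unsafe_assigned_interrupts=1" (assuming
 * this file is built into the kvm module, as in the mainline tree).
 */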
static bool allow_unsafe_assigned_interrupts;
module_param_named(allow_unsafe_assigned_interrupts,
                   allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_assigned_interrupts,
 "Enable device assignment on platforms without interrupt remapping support.");

static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages);
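
/*
 * Pin all host pages backing the guest frame range
 * [gfn, gfn + size/PAGE_SIZE) and return the pfn of the first page, or an
 * error pfn if that first page cannot be resolved.  Pages are pinned one
 * by one because the unmap path releases them in 4k steps (see
 * kvm_unpin_pages()).
 */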
static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
                           gfn_t gfn, unsigned long size)
{
        gfn_t end_gfn;
        pfn_t pfn;

        pfn     = gfn_to_pfn_memslot(kvm, slot, gfn);
        end_gfn = gfn + (size >> PAGE_SHIFT);
        gfn    += 1;

        if (is_error_pfn(pfn))
                return pfn;

        while (gfn < end_gfn)
                gfn_to_pfn_memslot(kvm, slot, gfn++);

        return pfn;
}
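
/*
 * Map a whole memory slot into the IOMMU domain, preferring the largest
 * page size the backing host memory and the slot alignment allow.
 * Already-mapped GPAs are skipped, so the function is safe to call again
 * when slots change.  On failure, everything mapped so far is torn down.
 */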
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        gfn_t gfn, end_gfn;
        pfn_t pfn;
        int r = 0;
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        int flags;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        gfn     = slot->base_gfn;
        end_gfn = gfn + slot->npages;

        flags = IOMMU_READ | IOMMU_WRITE;
        if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
                flags |= IOMMU_CACHE;

        while (gfn < end_gfn) {
                unsigned long page_size;

                /* Check if already mapped */
                if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
                        gfn += 1;
                        continue;
                }

                /* Get the page size we could use to map */
                page_size = kvm_host_page_size(kvm, gfn);

                /* Make sure the page_size does not exceed the memslot */
                while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
                        page_size >>= 1;

                /* Make sure gfn is aligned to the page size we want to map */
                while ((gfn << PAGE_SHIFT) & (page_size - 1))
                        page_size >>= 1;

                /*
                 * Pin all pages we are about to map in memory. This is
                 * important because we unmap and unpin in 4kb steps later.
                 */
                pfn = kvm_pin_pages(kvm, slot, gfn, page_size);
                if (is_error_pfn(pfn)) {
                        gfn += 1;
                        continue;
                }

                /* Map into IO address space */
                r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
                              get_order(page_size), flags);
                if (r) {
                        printk(KERN_ERR "kvm_iommu_map_address: "
                               "iommu failed to map pfn=%llx\n", pfn);
                        goto unmap_pages;
                }

                gfn += page_size >> PAGE_SHIFT;
        }

        return 0;

unmap_pages:
        kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
        return r;
}
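
/*
 * Map every memory slot of the VM into the IOMMU domain.  The memslot
 * array is accessed under SRCU so it cannot be freed underneath us.
 */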
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
        int i, idx, r = 0;
        struct kvm_memslots *slots;

        idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);

        for (i = 0; i < slots->nmemslots; i++) {
                r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
                if (r)
                        break;
        }
        srcu_read_unlock(&kvm->srcu, idx);

        return r;
}
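
/*
 * Attach an assigned PCI device to the VM's IOMMU domain.  If attaching
 * the device makes the domain cache coherent, all guest memory is
 * remapped so that the mappings pick up the IOMMU_CACHE flag.
 */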
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev)
{
        struct pci_dev *pdev = NULL;
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        int r, last_flags;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        pdev = assigned_dev->dev;
        if (pdev == NULL)
                return -ENODEV;

        r = iommu_attach_device(domain, &pdev->dev);
        if (r) {
                printk(KERN_ERR "assign device %x:%x:%x.%x failed\n",
                        pci_domain_nr(pdev->bus),
                        pdev->bus->number,
                        PCI_SLOT(pdev->devfn),
                        PCI_FUNC(pdev->devfn));
                return r;
        }

        last_flags = kvm->arch.iommu_flags;
        if (iommu_domain_has_cap(kvm->arch.iommu_domain,
                                 IOMMU_CAP_CACHE_COHERENCY))
                kvm->arch.iommu_flags |= KVM_IOMMU_CACHE_COHERENCY;

        /* Check if need to update IOMMU page table for guest memory */
        if ((last_flags ^ kvm->arch.iommu_flags) ==
                        KVM_IOMMU_CACHE_COHERENCY) {
                kvm_iommu_unmap_memslots(kvm);
                r = kvm_iommu_map_memslots(kvm);
                if (r)
                        goto out_unmap;
        }

        pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;

        printk(KERN_DEBUG "assign device %x:%x:%x.%x\n",
                assigned_dev->host_segnr,
                assigned_dev->host_busnr,
                PCI_SLOT(assigned_dev->host_devfn),
                PCI_FUNC(assigned_dev->host_devfn));

        return 0;

out_unmap:
        kvm_iommu_unmap_memslots(kvm);
        return r;
}
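
/*
 * Detach an assigned PCI device from the VM's IOMMU domain and clear its
 * "assigned" flag so the host can drive it again.
 */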
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        struct pci_dev *pdev = NULL;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        pdev = assigned_dev->dev;
        if (pdev == NULL)
                return -ENODEV;

        iommu_detach_device(domain, &pdev->dev);

        pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;

        printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n",
                assigned_dev->host_segnr,
                assigned_dev->host_busnr,
                PCI_SLOT(assigned_dev->host_devfn),
                PCI_FUNC(assigned_dev->host_devfn));

        return 0;
}
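
/*
 * Allocate the per-VM IOMMU domain and map all guest memory into it.
 * Assignment is refused when the IOMMU cannot remap interrupts, because
 * a guest could then trigger arbitrary host interrupts via its device;
 * the allow_unsafe_assigned_interrupts parameter overrides this check.
 */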
int kvm_iommu_map_guest(struct kvm *kvm)
{
        int r;

        if (!iommu_present(&pci_bus_type)) {
                printk(KERN_ERR "%s: iommu not found\n", __func__);
                return -ENODEV;
        }

        kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
        if (!kvm->arch.iommu_domain)
                return -ENOMEM;

        if (!allow_unsafe_assigned_interrupts &&
            !iommu_domain_has_cap(kvm->arch.iommu_domain,
                                  IOMMU_CAP_INTR_REMAP)) {
                printk(KERN_WARNING "%s: No interrupt remapping support,"
                       " disallowing device assignment."
                       " Re-enable with \"allow_unsafe_assigned_interrupts=1\""
                       " module option.\n", __func__);
                iommu_domain_free(kvm->arch.iommu_domain);
                kvm->arch.iommu_domain = NULL;
                return -EPERM;
        }

        r = kvm_iommu_map_memslots(kvm);
        if (r)
                goto out_unmap;

        return 0;

out_unmap:
        kvm_iommu_unmap_memslots(kvm);
        return r;
}
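
/* Drop the references taken by kvm_pin_pages() on npages pfns. */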
static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
{
        unsigned long i;

        for (i = 0; i < npages; ++i)
                kvm_release_pfn_clean(pfn + i);
}
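
/*
 * Unmap the IOMMU mappings covering [base_gfn, base_gfn + npages) in
 * whatever page sizes the IOMMU actually used, unpinning the backing
 * host pages as we go.  This is the inverse of kvm_iommu_map_pages().
 */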
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages)
{
        struct iommu_domain *domain;
        gfn_t end_gfn, gfn;
        pfn_t pfn;
        u64 phys;

        domain  = kvm->arch.iommu_domain;
        end_gfn = base_gfn + npages;
        gfn     = base_gfn;

        /* check if iommu exists and in use */
        if (!domain)
                return;

        while (gfn < end_gfn) {
                unsigned long unmap_pages;
                int order;

                /* Get physical address */
                phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
                pfn  = phys >> PAGE_SHIFT;

                /* Unmap address from IO address space */
                order       = iommu_unmap(domain, gfn_to_gpa(gfn), 0);
                unmap_pages = 1ULL << order;

                /* Unpin all pages we just unmapped to not leak any memory */
                kvm_unpin_pages(kvm, pfn, unmap_pages);

                gfn += unmap_pages;
        }
}
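
/* Unmap and unpin every memory slot, again under SRCU protection. */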
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
        int i, idx;
        struct kvm_memslots *slots;

        idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);

        for (i = 0; i < slots->nmemslots; i++) {
                kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
                                    slots->memslots[i].npages);
        }
        srcu_read_unlock(&kvm->srcu, idx);

        return 0;
}
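
/*
 * Tear down the VM's IOMMU state: unmap all guest memory and free the
 * domain allocated by kvm_iommu_map_guest().
 */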
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        kvm_iommu_unmap_memslots(kvm);
        iommu_domain_free(domain);
        return 0;
}