intel-iommu: dump mappings but don't die on pte already set
[linux-2.6.git] / drivers / pci / intel-iommu.c
1 /*
2  * Copyright (c) 2006, Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15  * Place - Suite 330, Boston, MA 02111-1307 USA.
16  *
17  * Copyright (C) 2006-2008 Intel Corporation
18  * Author: Ashok Raj <ashok.raj@intel.com>
19  * Author: Shaohua Li <shaohua.li@intel.com>
20  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21  * Author: Fenghua Yu <fenghua.yu@intel.com>
22  */
23
24 #include <linux/init.h>
25 #include <linux/bitmap.h>
26 #include <linux/debugfs.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/timer.h>
36 #include <linux/iova.h>
37 #include <linux/iommu.h>
38 #include <linux/intel-iommu.h>
39 #include <linux/sysdev.h>
40 #include <asm/cacheflush.h>
41 #include <asm/iommu.h>
42 #include "pci.h"
43
44 #define ROOT_SIZE               VTD_PAGE_SIZE
45 #define CONTEXT_SIZE            VTD_PAGE_SIZE
46
47 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
48 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
49
50 #define IOAPIC_RANGE_START      (0xfee00000)
51 #define IOAPIC_RANGE_END        (0xfeefffff)
52 #define IOVA_START_ADDR         (0x1000)
53
54 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
55
56 #define MAX_AGAW_WIDTH 64
57
58 #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
59 #define DOMAIN_MAX_PFN(gaw)  ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
60
61 #define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
62 #define DMA_32BIT_PFN           IOVA_PFN(DMA_BIT_MASK(32))
63 #define DMA_64BIT_PFN           IOVA_PFN(DMA_BIT_MASK(64))
64
65
66 /* VT-d pages must never be _larger_ than MM pages. Otherwise things
67    are never going to work. */
68 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
69 {
70         return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
71 }
72
73 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
74 {
75         return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
76 }
77 static inline unsigned long page_to_dma_pfn(struct page *pg)
78 {
79         return mm_to_dma_pfn(page_to_pfn(pg));
80 }
81 static inline unsigned long virt_to_dma_pfn(void *p)
82 {
83         return page_to_dma_pfn(virt_to_page(p));
84 }
85
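/*
 * Editor's illustration (not part of the driver): the helpers above convert
 * between MM page frame numbers (PAGE_SHIFT-sized) and VT-d page frame
 * numbers (VTD_PAGE_SHIFT-sized, always 4KiB).  A minimal sketch of the
 * arithmetic, assuming a hypothetical PAGE_SHIFT of 16 (64KiB MM pages)
 * purely to make the shift visible; on x86 both shifts are 12 and the
 * helpers collapse to the identity.
 */
#if 0	/* illustrative sketch only */
#define EX_PAGE_SHIFT		16	/* hypothetical 64KiB MM pages */
#define EX_VTD_PAGE_SHIFT	12	/* VT-d always uses 4KiB pages */

static unsigned long ex_mm_to_dma_pfn(unsigned long mm_pfn)
{
	/* one 64KiB MM page spans 16 VT-d pages: mm pfn 3 -> dma pfn 48 */
	return mm_pfn << (EX_PAGE_SHIFT - EX_VTD_PAGE_SHIFT);
}

static unsigned long ex_dma_to_mm_pfn(unsigned long dma_pfn)
{
	/* dma pfns 48..63 all fall inside mm pfn 3 */
	return dma_pfn >> (EX_PAGE_SHIFT - EX_VTD_PAGE_SHIFT);
}
#endif
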
86 /* global iommu list, set NULL for ignored DMAR units */
87 static struct intel_iommu **g_iommus;
88
89 static int rwbf_quirk;
90
91 /*
92  * 0: Present
93  * 1-11: Reserved
94  * 12-63: Context Ptr (12 - (haw-1))
95  * 64-127: Reserved
96  */
97 struct root_entry {
98         u64     val;
99         u64     rsvd1;
100 };
101 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
102 static inline bool root_present(struct root_entry *root)
103 {
104         return (root->val & 1);
105 }
106 static inline void set_root_present(struct root_entry *root)
107 {
108         root->val |= 1;
109 }
110 static inline void set_root_value(struct root_entry *root, unsigned long value)
111 {
112         root->val |= value & VTD_PAGE_MASK;
113 }
114
115 static inline struct context_entry *
116 get_context_addr_from_root(struct root_entry *root)
117 {
118         return (struct context_entry *)
119                 (root_present(root)?phys_to_virt(
120                 root->val & VTD_PAGE_MASK) :
121                 NULL);
122 }
123
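/*
 * Editor's illustration (not part of the driver): a root entry is a single
 * u64 whose bit 0 is the present flag and whose bits 12-63 hold the
 * 4KiB-aligned physical address of that bus's context table.  A hedged
 * sketch of the packing done by set_root_value()/set_root_present() above,
 * with a made-up address:
 */
#if 0	/* illustrative sketch only */
static u64 ex_pack_root_entry(u64 context_table_phys)
{
	u64 val = 0;

	val |= context_table_phys & VTD_PAGE_MASK;	/* bits 12-63 */
	val |= 1;					/* bit 0: present */
	return val;	/* e.g. 0x12345000 -> 0x12345001 */
}
#endif
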
124 /*
125  * low 64 bits:
126  * 0: present
127  * 1: fault processing disable
128  * 2-3: translation type
129  * 12-63: address space root
130  * high 64 bits:
131  * 0-2: address width
132  * 3-6: aval
133  * 8-23: domain id
134  */
135 struct context_entry {
136         u64 lo;
137         u64 hi;
138 };
139
140 static inline bool context_present(struct context_entry *context)
141 {
142         return (context->lo & 1);
143 }
144 static inline void context_set_present(struct context_entry *context)
145 {
146         context->lo |= 1;
147 }
148
149 static inline void context_set_fault_enable(struct context_entry *context)
150 {
151         context->lo &= (((u64)-1) << 2) | 1;
152 }
153
154 static inline void context_set_translation_type(struct context_entry *context,
155                                                 unsigned long value)
156 {
157         context->lo &= (((u64)-1) << 4) | 3;
158         context->lo |= (value & 3) << 2;
159 }
160
161 static inline void context_set_address_root(struct context_entry *context,
162                                             unsigned long value)
163 {
164         context->lo |= value & VTD_PAGE_MASK;
165 }
166
167 static inline void context_set_address_width(struct context_entry *context,
168                                              unsigned long value)
169 {
170         context->hi |= value & 7;
171 }
172
173 static inline void context_set_domain_id(struct context_entry *context,
174                                          unsigned long value)
175 {
176         context->hi |= (value & ((1 << 16) - 1)) << 8;
177 }
178
179 static inline void context_clear_entry(struct context_entry *context)
180 {
181         context->lo = 0;
182         context->hi = 0;
183 }
184
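/*
 * Editor's illustration (not part of the driver): combining the setters
 * above, the low word of a context entry ends up carrying present (bit 0),
 * the translation type (bits 2-3) and the address-space root (bits 12-63),
 * while the high word carries the address width (bits 0-2) and the domain
 * id (bits 8-23).  Hedged sketch with invented example values:
 */
#if 0	/* illustrative sketch only */
static void ex_pack_context_entry(struct context_entry *ce, u64 pgd_phys,
				  unsigned long agaw, unsigned long domain_id)
{
	ce->lo = 0;
	ce->hi = 0;
	ce->lo |= pgd_phys & VTD_PAGE_MASK;		/* address space root */
	ce->lo |= (CONTEXT_TT_MULTI_LEVEL & 3) << 2;	/* translation type */
	ce->lo |= 1;					/* present */
	ce->hi |= agaw & 7;				/* address width */
	ce->hi |= (domain_id & 0xffff) << 8;		/* domain id */
}
#endif
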
185 /*
186  * 0: readable
187  * 1: writable
188  * 2-6: reserved
189  * 7: super page
190  * 8-10: available
191  * 11: snoop behavior
192  * 12-63: Host physical address
193  */
194 struct dma_pte {
195         u64 val;
196 };
197
198 static inline void dma_clear_pte(struct dma_pte *pte)
199 {
200         pte->val = 0;
201 }
202
203 static inline void dma_set_pte_readable(struct dma_pte *pte)
204 {
205         pte->val |= DMA_PTE_READ;
206 }
207
208 static inline void dma_set_pte_writable(struct dma_pte *pte)
209 {
210         pte->val |= DMA_PTE_WRITE;
211 }
212
213 static inline void dma_set_pte_snp(struct dma_pte *pte)
214 {
215         pte->val |= DMA_PTE_SNP;
216 }
217
218 static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
219 {
220         pte->val = (pte->val & ~3) | (prot & 3);
221 }
222
223 static inline u64 dma_pte_addr(struct dma_pte *pte)
224 {
225         return (pte->val & VTD_PAGE_MASK);
226 }
227
228 static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
229 {
230         pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
231 }
232
233 static inline bool dma_pte_present(struct dma_pte *pte)
234 {
235         return (pte->val & 3) != 0;
236 }
237
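/*
 * Editor's illustration (not part of the driver): a leaf dma_pte packs the
 * host physical page frame into bits 12-63 and the read/write permission
 * bits into bits 0-1 (plus snoop in bit 11 where supported).  Hedged sketch
 * combining the helpers above for a read/write mapping:
 */
#if 0	/* illustrative sketch only */
static u64 ex_make_rw_pte(unsigned long host_pfn)
{
	struct dma_pte pte = { .val = 0 };

	dma_set_pte_pfn(&pte, host_pfn);	/* bits 12-63 */
	dma_set_pte_readable(&pte);		/* bit 0 */
	dma_set_pte_writable(&pte);		/* bit 1 */
	return pte.val;		/* host_pfn 0x1000 -> val 0x1000003 */
}
#endif
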
238 /*
239  * This domain is a statically identity mapping domain.
240  *      1. This domain creates a static 1:1 mapping to all usable memory.
241  *      2. It maps to each iommu if successful.
242  *      3. Each iommu maps to this domain if successful.
243  */
244 struct dmar_domain *si_domain;
245
246 /* devices under the same p2p bridge are owned in one domain */
247 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
248
249 /* domain represents a virtual machine; more than one device
250  * across iommus may be owned by one domain, e.g. a kvm guest.
251  */
252 #define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 1)
253
254 /* si_domain contains multiple devices */
255 #define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 2)
256
257 struct dmar_domain {
258         int     id;                     /* domain id */
259         unsigned long iommu_bmp;        /* bitmap of iommus this domain uses*/
260
261         struct list_head devices;       /* all devices' list */
262         struct iova_domain iovad;       /* iova's that belong to this domain */
263
264         struct dma_pte  *pgd;           /* virtual address */
265         spinlock_t      mapping_lock;   /* page table lock */
266         int             gaw;            /* max guest address width */
267
268         /* adjusted guest address width, 0 is level 2 30-bit */
269         int             agaw;
270
271         int             flags;          /* flags to find out type of domain */
272
273         int             iommu_coherency;/* indicate coherency of iommu access */
274         int             iommu_snooping; /* indicate snooping control feature*/
275         int             iommu_count;    /* reference count of iommu */
276         spinlock_t      iommu_lock;     /* protect iommu set in domain */
277         u64             max_addr;       /* maximum mapped address */
278 };
279
280 /* PCI domain-device relationship */
281 struct device_domain_info {
282         struct list_head link;  /* link to domain siblings */
283         struct list_head global; /* link to global list */
284         int segment;            /* PCI domain */
285         u8 bus;                 /* PCI bus number */
286         u8 devfn;               /* PCI devfn number */
287         struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
288         struct intel_iommu *iommu; /* IOMMU used by this device */
289         struct dmar_domain *domain; /* pointer to domain */
290 };
291
292 static void flush_unmaps_timeout(unsigned long data);
293
294 DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
295
296 #define HIGH_WATER_MARK 250
297 struct deferred_flush_tables {
298         int next;
299         struct iova *iova[HIGH_WATER_MARK];
300         struct dmar_domain *domain[HIGH_WATER_MARK];
301 };
302
303 static struct deferred_flush_tables *deferred_flush;
304
305 /* number of iommus; used to index g_iommus and size the iommu bitmaps */
306 static int g_num_of_iommus;
307
308 static DEFINE_SPINLOCK(async_umap_flush_lock);
309 static LIST_HEAD(unmaps_to_do);
310
311 static int timer_on;
312 static long list_size;
313
314 static void domain_remove_dev_info(struct dmar_domain *domain);
315
316 #ifdef CONFIG_DMAR_DEFAULT_ON
317 int dmar_disabled = 0;
318 #else
319 int dmar_disabled = 1;
320 #endif /*CONFIG_DMAR_DEFAULT_ON*/
321
322 static int __initdata dmar_map_gfx = 1;
323 static int dmar_forcedac;
324 static int intel_iommu_strict;
325
326 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
327 static DEFINE_SPINLOCK(device_domain_lock);
328 static LIST_HEAD(device_domain_list);
329
330 static struct iommu_ops intel_iommu_ops;
331
332 static int __init intel_iommu_setup(char *str)
333 {
334         if (!str)
335                 return -EINVAL;
336         while (*str) {
337                 if (!strncmp(str, "on", 2)) {
338                         dmar_disabled = 0;
339                         printk(KERN_INFO "Intel-IOMMU: enabled\n");
340                 } else if (!strncmp(str, "off", 3)) {
341                         dmar_disabled = 1;
342                         printk(KERN_INFO "Intel-IOMMU: disabled\n");
343                 } else if (!strncmp(str, "igfx_off", 8)) {
344                         dmar_map_gfx = 0;
345                         printk(KERN_INFO
346                                 "Intel-IOMMU: disable GFX device mapping\n");
347                 } else if (!strncmp(str, "forcedac", 8)) {
348                         printk(KERN_INFO
349                                 "Intel-IOMMU: Forcing DAC for PCI devices\n");
350                         dmar_forcedac = 1;
351                 } else if (!strncmp(str, "strict", 6)) {
352                         printk(KERN_INFO
353                                 "Intel-IOMMU: disable batched IOTLB flush\n");
354                         intel_iommu_strict = 1;
355                 }
356
357                 str += strcspn(str, ",");
358                 while (*str == ',')
359                         str++;
360         }
361         return 0;
362 }
363 __setup("intel_iommu=", intel_iommu_setup);
364
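/*
 * Editor's note (usage example, not driver code): the options parsed above
 * are comma-separated values of the intel_iommu= boot parameter, e.g.
 *
 *	intel_iommu=on			enable DMA remapping
 *	intel_iommu=on,igfx_off		enable it, but leave graphics unmapped
 *	intel_iommu=on,strict		flush the IOTLB on every unmap
 *	intel_iommu=off			disable DMA remapping entirely
 */
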
365 static struct kmem_cache *iommu_domain_cache;
366 static struct kmem_cache *iommu_devinfo_cache;
367 static struct kmem_cache *iommu_iova_cache;
368
369 static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
370 {
371         unsigned int flags;
372         void *vaddr;
373
374         /* trying to avoid low memory issues */
375         flags = current->flags & PF_MEMALLOC;
376         current->flags |= PF_MEMALLOC;
377         vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
378         current->flags &= (~PF_MEMALLOC | flags);
379         return vaddr;
380 }
381
382
383 static inline void *alloc_pgtable_page(void)
384 {
385         unsigned int flags;
386         void *vaddr;
387
388         /* trying to avoid low memory issues */
389         flags = current->flags & PF_MEMALLOC;
390         current->flags |= PF_MEMALLOC;
391         vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
392         current->flags &= (~PF_MEMALLOC | flags);
393         return vaddr;
394 }
395
396 static inline void free_pgtable_page(void *vaddr)
397 {
398         free_page((unsigned long)vaddr);
399 }
400
401 static inline void *alloc_domain_mem(void)
402 {
403         return iommu_kmem_cache_alloc(iommu_domain_cache);
404 }
405
406 static void free_domain_mem(void *vaddr)
407 {
408         kmem_cache_free(iommu_domain_cache, vaddr);
409 }
410
411 static inline void * alloc_devinfo_mem(void)
412 {
413         return iommu_kmem_cache_alloc(iommu_devinfo_cache);
414 }
415
416 static inline void free_devinfo_mem(void *vaddr)
417 {
418         kmem_cache_free(iommu_devinfo_cache, vaddr);
419 }
420
421 struct iova *alloc_iova_mem(void)
422 {
423         return iommu_kmem_cache_alloc(iommu_iova_cache);
424 }
425
426 void free_iova_mem(struct iova *iova)
427 {
428         kmem_cache_free(iommu_iova_cache, iova);
429 }
430
431
432 static inline int width_to_agaw(int width);
433
434 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
435 {
436         unsigned long sagaw;
437         int agaw = -1;
438
439         sagaw = cap_sagaw(iommu->cap);
440         for (agaw = width_to_agaw(max_gaw);
441              agaw >= 0; agaw--) {
442                 if (test_bit(agaw, &sagaw))
443                         break;
444         }
445
446         return agaw;
447 }
448
449 /*
450  * Calculate max SAGAW for each iommu.
451  */
452 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
453 {
454         return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
455 }
456
457 /*
458  * calculate agaw for each iommu.
459  * "SAGAW" may be different across iommus, use a default agaw, and
460  * get a supported less agaw for iommus that don't support the default agaw.
461  */
462 int iommu_calculate_agaw(struct intel_iommu *iommu)
463 {
464         return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
465 }
466
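/*
 * Editor's illustration (not part of the driver): the AGAW value counts
 * 9-bit page-table levels above the 2-level/30-bit baseline, so for the
 * default 48-bit domain width:
 *
 *	width_to_agaw(48) = (48 - 30) / 9 = 2
 *	agaw_to_level(2)  = 2 + 2         = 4 page-table levels
 *	agaw_to_width(2)  = 30 + 2 * 9    = 48 bits
 *
 * __iommu_calculate_agaw() then walks downward from the requested width
 * until it hits an AGAW that the hardware advertises in its SAGAW bits.
 */
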
467 /* This function only returns a single iommu in a domain */
468 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
469 {
470         int iommu_id;
471
472         /* si_domain and vm domain should not get here. */
473         BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
474         BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
475
476         iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
477         if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
478                 return NULL;
479
480         return g_iommus[iommu_id];
481 }
482
483 static void domain_update_iommu_coherency(struct dmar_domain *domain)
484 {
485         int i;
486
487         domain->iommu_coherency = 1;
488
489         i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
490         for (; i < g_num_of_iommus; ) {
491                 if (!ecap_coherent(g_iommus[i]->ecap)) {
492                         domain->iommu_coherency = 0;
493                         break;
494                 }
495                 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
496         }
497 }
498
499 static void domain_update_iommu_snooping(struct dmar_domain *domain)
500 {
501         int i;
502
503         domain->iommu_snooping = 1;
504
505         i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
506         for (; i < g_num_of_iommus; ) {
507                 if (!ecap_sc_support(g_iommus[i]->ecap)) {
508                         domain->iommu_snooping = 0;
509                         break;
510                 }
511                 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
512         }
513 }
514
515 /* Some capabilities may be different across iommus */
516 static void domain_update_iommu_cap(struct dmar_domain *domain)
517 {
518         domain_update_iommu_coherency(domain);
519         domain_update_iommu_snooping(domain);
520 }
521
522 static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
523 {
524         struct dmar_drhd_unit *drhd = NULL;
525         int i;
526
527         for_each_drhd_unit(drhd) {
528                 if (drhd->ignored)
529                         continue;
530                 if (segment != drhd->segment)
531                         continue;
532
533                 for (i = 0; i < drhd->devices_cnt; i++) {
534                         if (drhd->devices[i] &&
535                             drhd->devices[i]->bus->number == bus &&
536                             drhd->devices[i]->devfn == devfn)
537                                 return drhd->iommu;
538                         if (drhd->devices[i] &&
539                             drhd->devices[i]->subordinate &&
540                             drhd->devices[i]->subordinate->number <= bus &&
541                             drhd->devices[i]->subordinate->subordinate >= bus)
542                                 return drhd->iommu;
543                 }
544
545                 if (drhd->include_all)
546                         return drhd->iommu;
547         }
548
549         return NULL;
550 }
551
552 static void domain_flush_cache(struct dmar_domain *domain,
553                                void *addr, int size)
554 {
555         if (!domain->iommu_coherency)
556                 clflush_cache_range(addr, size);
557 }
558
559 /* Gets context entry for a given bus and devfn */
560 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
561                 u8 bus, u8 devfn)
562 {
563         struct root_entry *root;
564         struct context_entry *context;
565         unsigned long phy_addr;
566         unsigned long flags;
567
568         spin_lock_irqsave(&iommu->lock, flags);
569         root = &iommu->root_entry[bus];
570         context = get_context_addr_from_root(root);
571         if (!context) {
572                 context = (struct context_entry *)alloc_pgtable_page();
573                 if (!context) {
574                         spin_unlock_irqrestore(&iommu->lock, flags);
575                         return NULL;
576                 }
577                 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
578                 phy_addr = virt_to_phys((void *)context);
579                 set_root_value(root, phy_addr);
580                 set_root_present(root);
581                 __iommu_flush_cache(iommu, root, sizeof(*root));
582         }
583         spin_unlock_irqrestore(&iommu->lock, flags);
584         return &context[devfn];
585 }
586
587 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
588 {
589         struct root_entry *root;
590         struct context_entry *context;
591         int ret;
592         unsigned long flags;
593
594         spin_lock_irqsave(&iommu->lock, flags);
595         root = &iommu->root_entry[bus];
596         context = get_context_addr_from_root(root);
597         if (!context) {
598                 ret = 0;
599                 goto out;
600         }
601         ret = context_present(&context[devfn]);
602 out:
603         spin_unlock_irqrestore(&iommu->lock, flags);
604         return ret;
605 }
606
607 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
608 {
609         struct root_entry *root;
610         struct context_entry *context;
611         unsigned long flags;
612
613         spin_lock_irqsave(&iommu->lock, flags);
614         root = &iommu->root_entry[bus];
615         context = get_context_addr_from_root(root);
616         if (context) {
617                 context_clear_entry(&context[devfn]);
618                 __iommu_flush_cache(iommu, &context[devfn],
619                         sizeof(*context));
620         }
621         spin_unlock_irqrestore(&iommu->lock, flags);
622 }
623
624 static void free_context_table(struct intel_iommu *iommu)
625 {
626         struct root_entry *root;
627         int i;
628         unsigned long flags;
629         struct context_entry *context;
630
631         spin_lock_irqsave(&iommu->lock, flags);
632         if (!iommu->root_entry) {
633                 goto out;
634         }
635         for (i = 0; i < ROOT_ENTRY_NR; i++) {
636                 root = &iommu->root_entry[i];
637                 context = get_context_addr_from_root(root);
638                 if (context)
639                         free_pgtable_page(context);
640         }
641         free_pgtable_page(iommu->root_entry);
642         iommu->root_entry = NULL;
643 out:
644         spin_unlock_irqrestore(&iommu->lock, flags);
645 }
646
647 /* page table handling */
648 #define LEVEL_STRIDE            (9)
649 #define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)
650
651 static inline int agaw_to_level(int agaw)
652 {
653         return agaw + 2;
654 }
655
656 static inline int agaw_to_width(int agaw)
657 {
658         return 30 + agaw * LEVEL_STRIDE;
659
660 }
661
662 static inline int width_to_agaw(int width)
663 {
664         return (width - 30) / LEVEL_STRIDE;
665 }
666
667 static inline unsigned int level_to_offset_bits(int level)
668 {
669         return (level - 1) * LEVEL_STRIDE;
670 }
671
672 static inline int pfn_level_offset(unsigned long pfn, int level)
673 {
674         return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
675 }
676
677 static inline unsigned long level_mask(int level)
678 {
679         return -1UL << level_to_offset_bits(level);
680 }
681
682 static inline unsigned long level_size(int level)
683 {
684         return 1UL << level_to_offset_bits(level);
685 }
686
687 static inline unsigned long align_to_level(unsigned long pfn, int level)
688 {
689         return (pfn + level_size(level) - 1) & level_mask(level);
690 }
691
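/*
 * Editor's illustration (not part of the driver): with a 4-level table
 * (agaw 2), pfn_level_offset() splits a DMA pfn into four 9-bit indices.
 * Hedged worked example for the arbitrary pfn 0x12345678:
 *
 *	level 4: (0x12345678 >> 27) & 0x1ff = 0x002
 *	level 3: (0x12345678 >> 18) & 0x1ff = 0x08d
 *	level 2: (0x12345678 >>  9) & 0x1ff = 0x02b
 *	level 1: (0x12345678 >>  0) & 0x1ff = 0x078
 *
 * level_size(2) is 512 pfns (2MiB of IOVA), so align_to_level(pfn, 2)
 * rounds a pfn up to the next 2MiB boundary.
 */
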
692 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
693                                       unsigned long pfn)
694 {
695         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
696         struct dma_pte *parent, *pte = NULL;
697         int level = agaw_to_level(domain->agaw);
698         int offset;
699         unsigned long flags;
700
701         BUG_ON(!domain->pgd);
702         BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
703         parent = domain->pgd;
704
705         spin_lock_irqsave(&domain->mapping_lock, flags);
706         while (level > 0) {
707                 void *tmp_page;
708
709                 offset = pfn_level_offset(pfn, level);
710                 pte = &parent[offset];
711                 if (level == 1)
712                         break;
713
714                 if (!dma_pte_present(pte)) {
715                         tmp_page = alloc_pgtable_page();
716
717                         if (!tmp_page) {
718                                 spin_unlock_irqrestore(&domain->mapping_lock,
719                                         flags);
720                                 return NULL;
721                         }
722                         domain_flush_cache(domain, tmp_page, PAGE_SIZE);
723                         dma_set_pte_pfn(pte, virt_to_dma_pfn(tmp_page));
724                         /*
725                          * high level table always sets r/w, last level page
726                          * table control read/write
727                          */
728                         dma_set_pte_readable(pte);
729                         dma_set_pte_writable(pte);
730                         domain_flush_cache(domain, pte, sizeof(*pte));
731                 }
732                 parent = phys_to_virt(dma_pte_addr(pte));
733                 level--;
734         }
735
736         spin_unlock_irqrestore(&domain->mapping_lock, flags);
737         return pte;
738 }
739
740 /* return address's pte at specific level */
741 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
742                                          unsigned long pfn,
743                                          int level)
744 {
745         struct dma_pte *parent, *pte = NULL;
746         int total = agaw_to_level(domain->agaw);
747         int offset;
748
749         parent = domain->pgd;
750         while (level <= total) {
751                 offset = pfn_level_offset(pfn, total);
752                 pte = &parent[offset];
753                 if (level == total)
754                         return pte;
755
756                 if (!dma_pte_present(pte))
757                         break;
758                 parent = phys_to_virt(dma_pte_addr(pte));
759                 total--;
760         }
761         return NULL;
762 }
763
764 /* clear last level pte, a tlb flush should be followed */
765 static void dma_pte_clear_range(struct dmar_domain *domain,
766                                 unsigned long start_pfn,
767                                 unsigned long last_pfn)
768 {
769         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
770         struct dma_pte *first_pte, *pte;
771
772         BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
773         BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
774
775         /* we don't need lock here; nobody else touches the iova range */
776         while (start_pfn <= last_pfn) {
777                 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
778                 if (!pte) {
779                         start_pfn = align_to_level(start_pfn + 1, 2);
780                         continue;
781                 }
782                 while (start_pfn <= last_pfn &&
783                        (unsigned long)pte >> VTD_PAGE_SHIFT ==
784                        (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
785                         dma_clear_pte(pte);
786                         start_pfn++;
787                         pte++;
788                 }
789                 domain_flush_cache(domain, first_pte,
790                                    (void *)pte - (void *)first_pte);
791         }
792 }
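/*
 * Editor's note (illustration, not driver code): each 4KiB page-table page
 * holds 4096 / 8 = 512 ptes, and the inner loop above only advances while
 * pte still lies in the same table page as first_pte (both pointers shifted
 * right by VTD_PAGE_SHIFT compare equal), so every domain_flush_cache()
 * call covers one contiguous run of at most 512 cleared entries.
 */
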
793
794 /* free page table pages. last level pte should already be cleared */
795 static void dma_pte_free_pagetable(struct dmar_domain *domain,
796                                    unsigned long start_pfn,
797                                    unsigned long last_pfn)
798 {
799         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
800         struct dma_pte *pte;
801         int total = agaw_to_level(domain->agaw);
802         int level;
803         unsigned long tmp;
804
805         BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
806         BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
807
808         /* we don't need lock here, nobody else touches the iova range */
809         level = 2;
810         while (level <= total) {
811                 tmp = align_to_level(start_pfn, level);
812
813                 /* Only clear this pte/pmd if we're asked to clear its
814                    _whole_ range */
815                 if (tmp + level_size(level) - 1 > last_pfn)
816                         return;
817
818                 while (tmp <= last_pfn) {
819                         pte = dma_pfn_level_pte(domain, tmp, level);
820                         if (pte) {
821                                 free_pgtable_page(
822                                         phys_to_virt(dma_pte_addr(pte)));
823                                 dma_clear_pte(pte);
824                                 domain_flush_cache(domain, pte, sizeof(*pte));
825                         }
826                         tmp += level_size(level);
827                 }
828                 level++;
829         }
830         /* free pgd */
831         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
832                 free_pgtable_page(domain->pgd);
833                 domain->pgd = NULL;
834         }
835 }
836
837 /* iommu handling */
838 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
839 {
840         struct root_entry *root;
841         unsigned long flags;
842
843         root = (struct root_entry *)alloc_pgtable_page();
844         if (!root)
845                 return -ENOMEM;
846
847         __iommu_flush_cache(iommu, root, ROOT_SIZE);
848
849         spin_lock_irqsave(&iommu->lock, flags);
850         iommu->root_entry = root;
851         spin_unlock_irqrestore(&iommu->lock, flags);
852
853         return 0;
854 }
855
856 static void iommu_set_root_entry(struct intel_iommu *iommu)
857 {
858         void *addr;
859         u32 sts;
860         unsigned long flag;
861
862         addr = iommu->root_entry;
863
864         spin_lock_irqsave(&iommu->register_lock, flag);
865         dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
866
867         writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
868
869         /* Make sure hardware completes it */
870         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
871                       readl, (sts & DMA_GSTS_RTPS), sts);
872
873         spin_unlock_irqrestore(&iommu->register_lock, flag);
874 }
875
876 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
877 {
878         u32 val;
879         unsigned long flag;
880
881         if (!rwbf_quirk && !cap_rwbf(iommu->cap))
882                 return;
883
884         spin_lock_irqsave(&iommu->register_lock, flag);
885         writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
886
887         /* Make sure hardware completes it */
888         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
889                       readl, (!(val & DMA_GSTS_WBFS)), val);
890
891         spin_unlock_irqrestore(&iommu->register_lock, flag);
892 }
893
894 /* return value determines whether we need a write buffer flush */
895 static void __iommu_flush_context(struct intel_iommu *iommu,
896                                   u16 did, u16 source_id, u8 function_mask,
897                                   u64 type)
898 {
899         u64 val = 0;
900         unsigned long flag;
901
902         switch (type) {
903         case DMA_CCMD_GLOBAL_INVL:
904                 val = DMA_CCMD_GLOBAL_INVL;
905                 break;
906         case DMA_CCMD_DOMAIN_INVL:
907                 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
908                 break;
909         case DMA_CCMD_DEVICE_INVL:
910                 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
911                         | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
912                 break;
913         default:
914                 BUG();
915         }
916         val |= DMA_CCMD_ICC;
917
918         spin_lock_irqsave(&iommu->register_lock, flag);
919         dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
920
921         /* Make sure hardware completes it */
922         IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
923                 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
924
925         spin_unlock_irqrestore(&iommu->register_lock, flag);
926 }
927
928 /* return value determines whether we need a write buffer flush */
929 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
930                                 u64 addr, unsigned int size_order, u64 type)
931 {
932         int tlb_offset = ecap_iotlb_offset(iommu->ecap);
933         u64 val = 0, val_iva = 0;
934         unsigned long flag;
935
936         switch (type) {
937         case DMA_TLB_GLOBAL_FLUSH:
938                 /* global flush doesn't need to set IVA_REG */
939                 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
940                 break;
941         case DMA_TLB_DSI_FLUSH:
942                 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
943                 break;
944         case DMA_TLB_PSI_FLUSH:
945                 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
946                 /* Note: always flush non-leaf currently */
947                 val_iva = size_order | addr;
948                 break;
949         default:
950                 BUG();
951         }
952         /* Note: set drain read/write */
953 #if 0
954         /*
955          * This is probably just being extra safe; it looks like we can
956          * ignore it without any impact.
957          */
958         if (cap_read_drain(iommu->cap))
959                 val |= DMA_TLB_READ_DRAIN;
960 #endif
961         if (cap_write_drain(iommu->cap))
962                 val |= DMA_TLB_WRITE_DRAIN;
963
964         spin_lock_irqsave(&iommu->register_lock, flag);
965         /* Note: Only uses first TLB reg currently */
966         if (val_iva)
967                 dmar_writeq(iommu->reg + tlb_offset, val_iva);
968         dmar_writeq(iommu->reg + tlb_offset + 8, val);
969
970         /* Make sure hardware completes it */
971         IOMMU_WAIT_OP(iommu, tlb_offset + 8,
972                 dmar_readq, (!(val & DMA_TLB_IVT)), val);
973
974         spin_unlock_irqrestore(&iommu->register_lock, flag);
975
976         /* check IOTLB invalidation granularity */
977         if (DMA_TLB_IAIG(val) == 0)
978                 printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
979         if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
980                 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
981                         (unsigned long long)DMA_TLB_IIRG(type),
982                         (unsigned long long)DMA_TLB_IAIG(val));
983 }
984
985 static struct device_domain_info *iommu_support_dev_iotlb(
986         struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
987 {
988         int found = 0;
989         unsigned long flags;
990         struct device_domain_info *info;
991         struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
992
993         if (!ecap_dev_iotlb_support(iommu->ecap))
994                 return NULL;
995
996         if (!iommu->qi)
997                 return NULL;
998
999         spin_lock_irqsave(&device_domain_lock, flags);
1000         list_for_each_entry(info, &domain->devices, link)
1001                 if (info->bus == bus && info->devfn == devfn) {
1002                         found = 1;
1003                         break;
1004                 }
1005         spin_unlock_irqrestore(&device_domain_lock, flags);
1006
1007         if (!found || !info->dev)
1008                 return NULL;
1009
1010         if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
1011                 return NULL;
1012
1013         if (!dmar_find_matched_atsr_unit(info->dev))
1014                 return NULL;
1015
1016         info->iommu = iommu;
1017
1018         return info;
1019 }
1020
1021 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1022 {
1023         if (!info)
1024                 return;
1025
1026         pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
1027 }
1028
1029 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1030 {
1031         if (!info->dev || !pci_ats_enabled(info->dev))
1032                 return;
1033
1034         pci_disable_ats(info->dev);
1035 }
1036
1037 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1038                                   u64 addr, unsigned mask)
1039 {
1040         u16 sid, qdep;
1041         unsigned long flags;
1042         struct device_domain_info *info;
1043
1044         spin_lock_irqsave(&device_domain_lock, flags);
1045         list_for_each_entry(info, &domain->devices, link) {
1046                 if (!info->dev || !pci_ats_enabled(info->dev))
1047                         continue;
1048
1049                 sid = info->bus << 8 | info->devfn;
1050                 qdep = pci_ats_queue_depth(info->dev);
1051                 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1052         }
1053         spin_unlock_irqrestore(&device_domain_lock, flags);
1054 }
1055
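/*
 * Editor's illustration (not part of the driver): the ATS source id built
 * above is just the bus number in the high byte and devfn in the low byte,
 * e.g. an invented device 0000:03:00.1 gives sid = 0x03 << 8 | 0x01 = 0x0301.
 */
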
1056 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1057                                   unsigned long pfn, unsigned int pages)
1058 {
1059         unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1060         uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1061
1062         BUG_ON(pages == 0);
1063
1064         /*
1065          * Fallback to domain selective flush if no PSI support or the size is
1066          * too big.
1067          * PSI requires page size to be 2 ^ x, and the base address is naturally
1068          * aligned to the size
1069          */
1070         if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1071                 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1072                                                 DMA_TLB_DSI_FLUSH);
1073         else
1074                 iommu->flush.flush_iotlb(iommu, did, addr, mask,
1075                                                 DMA_TLB_PSI_FLUSH);
1076
1077         /*
1078          * In caching mode, domain ID 0 is reserved for non-present to present
1079          * mapping flush. Device IOTLB doesn't need to be flushed in this case.
1080          */
1081         if (!cap_caching_mode(iommu->cap) || did)
1082                 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
1083 }
1084
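/*
 * Editor's illustration (not part of the driver): page-selective
 * invalidation can only describe a naturally aligned block of 2^mask pages,
 * so the mask computed above rounds the request up to a power of two.
 * Hedged worked example:
 *
 *	pages = 9  ->  __roundup_pow_of_two(9) = 16  ->  mask = ilog2(16) = 4
 *
 * i.e. a 9-page unmap is flushed as a 16-page (64KiB) PSI invalidation;
 * if mask exceeded cap_max_amask_val() the code falls back to a
 * domain-selective flush instead.
 */
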
1085 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1086 {
1087         u32 pmen;
1088         unsigned long flags;
1089
1090         spin_lock_irqsave(&iommu->register_lock, flags);
1091         pmen = readl(iommu->reg + DMAR_PMEN_REG);
1092         pmen &= ~DMA_PMEN_EPM;
1093         writel(pmen, iommu->reg + DMAR_PMEN_REG);
1094
1095         /* wait for the protected region status bit to clear */
1096         IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1097                 readl, !(pmen & DMA_PMEN_PRS), pmen);
1098
1099         spin_unlock_irqrestore(&iommu->register_lock, flags);
1100 }
1101
1102 static int iommu_enable_translation(struct intel_iommu *iommu)
1103 {
1104         u32 sts;
1105         unsigned long flags;
1106
1107         spin_lock_irqsave(&iommu->register_lock, flags);
1108         iommu->gcmd |= DMA_GCMD_TE;
1109         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1110
1111         /* Make sure hardware completes it */
1112         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1113                       readl, (sts & DMA_GSTS_TES), sts);
1114
1115         spin_unlock_irqrestore(&iommu->register_lock, flags);
1116         return 0;
1117 }
1118
1119 static int iommu_disable_translation(struct intel_iommu *iommu)
1120 {
1121         u32 sts;
1122         unsigned long flag;
1123
1124         spin_lock_irqsave(&iommu->register_lock, flag);
1125         iommu->gcmd &= ~DMA_GCMD_TE;
1126         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1127
1128         /* Make sure hardware completes it */
1129         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1130                       readl, (!(sts & DMA_GSTS_TES)), sts);
1131
1132         spin_unlock_irqrestore(&iommu->register_lock, flag);
1133         return 0;
1134 }
1135
1136
1137 static int iommu_init_domains(struct intel_iommu *iommu)
1138 {
1139         unsigned long ndomains;
1140         unsigned long nlongs;
1141
1142         ndomains = cap_ndoms(iommu->cap);
1143         pr_debug("Number of Domains supported <%ld>\n", ndomains);
1144         nlongs = BITS_TO_LONGS(ndomains);
1145
1146         /* TBD: there might be 64K domains,
1147          * consider other allocation for future chip
1148          */
1149         iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1150         if (!iommu->domain_ids) {
1151                 printk(KERN_ERR "Allocating domain id array failed\n");
1152                 return -ENOMEM;
1153         }
1154         iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1155                         GFP_KERNEL);
1156         if (!iommu->domains) {
1157                 printk(KERN_ERR "Allocating domain array failed\n");
1158                 kfree(iommu->domain_ids);
1159                 return -ENOMEM;
1160         }
1161
1162         spin_lock_init(&iommu->lock);
1163
1164         /*
1165          * if Caching mode is set, then invalid translations are tagged
1166          * with domainid 0. Hence we need to pre-allocate it.
1167          */
1168         if (cap_caching_mode(iommu->cap))
1169                 set_bit(0, iommu->domain_ids);
1170         return 0;
1171 }
1172
1173
1174 static void domain_exit(struct dmar_domain *domain);
1175 static void vm_domain_exit(struct dmar_domain *domain);
1176
1177 void free_dmar_iommu(struct intel_iommu *iommu)
1178 {
1179         struct dmar_domain *domain;
1180         int i;
1181         unsigned long flags;
1182
1183         i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
1184         for (; i < cap_ndoms(iommu->cap); ) {
1185                 domain = iommu->domains[i];
1186                 clear_bit(i, iommu->domain_ids);
1187
1188                 spin_lock_irqsave(&domain->iommu_lock, flags);
1189                 if (--domain->iommu_count == 0) {
1190                         if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1191                                 vm_domain_exit(domain);
1192                         else
1193                                 domain_exit(domain);
1194                 }
1195                 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1196
1197                 i = find_next_bit(iommu->domain_ids,
1198                         cap_ndoms(iommu->cap), i+1);
1199         }
1200
1201         if (iommu->gcmd & DMA_GCMD_TE)
1202                 iommu_disable_translation(iommu);
1203
1204         if (iommu->irq) {
1205                 set_irq_data(iommu->irq, NULL);
1206                 /* This will mask the irq */
1207                 free_irq(iommu->irq, iommu);
1208                 destroy_irq(iommu->irq);
1209         }
1210
1211         kfree(iommu->domains);
1212         kfree(iommu->domain_ids);
1213
1214         g_iommus[iommu->seq_id] = NULL;
1215
1216         /* if all iommus are freed, free g_iommus */
1217         for (i = 0; i < g_num_of_iommus; i++) {
1218                 if (g_iommus[i])
1219                         break;
1220         }
1221
1222         if (i == g_num_of_iommus)
1223                 kfree(g_iommus);
1224
1225         /* free context mapping */
1226         free_context_table(iommu);
1227 }
1228
1229 static struct dmar_domain *alloc_domain(void)
1230 {
1231         struct dmar_domain *domain;
1232
1233         domain = alloc_domain_mem();
1234         if (!domain)
1235                 return NULL;
1236
1237         memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
1238         domain->flags = 0;
1239
1240         return domain;
1241 }
1242
1243 static int iommu_attach_domain(struct dmar_domain *domain,
1244                                struct intel_iommu *iommu)
1245 {
1246         int num;
1247         unsigned long ndomains;
1248         unsigned long flags;
1249
1250         ndomains = cap_ndoms(iommu->cap);
1251
1252         spin_lock_irqsave(&iommu->lock, flags);
1253
1254         num = find_first_zero_bit(iommu->domain_ids, ndomains);
1255         if (num >= ndomains) {
1256                 spin_unlock_irqrestore(&iommu->lock, flags);
1257                 printk(KERN_ERR "IOMMU: no free domain ids\n");
1258                 return -ENOMEM;
1259         }
1260
1261         domain->id = num;
1262         set_bit(num, iommu->domain_ids);
1263         set_bit(iommu->seq_id, &domain->iommu_bmp);
1264         iommu->domains[num] = domain;
1265         spin_unlock_irqrestore(&iommu->lock, flags);
1266
1267         return 0;
1268 }
1269
1270 static void iommu_detach_domain(struct dmar_domain *domain,
1271                                 struct intel_iommu *iommu)
1272 {
1273         unsigned long flags;
1274         int num, ndomains;
1275         int found = 0;
1276
1277         spin_lock_irqsave(&iommu->lock, flags);
1278         ndomains = cap_ndoms(iommu->cap);
1279         num = find_first_bit(iommu->domain_ids, ndomains);
1280         for (; num < ndomains; ) {
1281                 if (iommu->domains[num] == domain) {
1282                         found = 1;
1283                         break;
1284                 }
1285                 num = find_next_bit(iommu->domain_ids,
1286                                     cap_ndoms(iommu->cap), num+1);
1287         }
1288
1289         if (found) {
1290                 clear_bit(num, iommu->domain_ids);
1291                 clear_bit(iommu->seq_id, &domain->iommu_bmp);
1292                 iommu->domains[num] = NULL;
1293         }
1294         spin_unlock_irqrestore(&iommu->lock, flags);
1295 }
1296
1297 static struct iova_domain reserved_iova_list;
1298 static struct lock_class_key reserved_alloc_key;
1299 static struct lock_class_key reserved_rbtree_key;
1300
1301 static void dmar_init_reserved_ranges(void)
1302 {
1303         struct pci_dev *pdev = NULL;
1304         struct iova *iova;
1305         int i;
1306
1307         init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1308
1309         lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
1310                 &reserved_alloc_key);
1311         lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1312                 &reserved_rbtree_key);
1313
1314         /* IOAPIC ranges shouldn't be accessed by DMA */
1315         iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1316                 IOVA_PFN(IOAPIC_RANGE_END));
1317         if (!iova)
1318                 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1319
1320         /* Reserve all PCI MMIO to avoid peer-to-peer access */
1321         for_each_pci_dev(pdev) {
1322                 struct resource *r;
1323
1324                 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1325                         r = &pdev->resource[i];
1326                         if (!r->flags || !(r->flags & IORESOURCE_MEM))
1327                                 continue;
1328                         iova = reserve_iova(&reserved_iova_list,
1329                                             IOVA_PFN(r->start),
1330                                             IOVA_PFN(r->end));
1331                         if (!iova)
1332                                 printk(KERN_ERR "Reserve iova failed\n");
1333                 }
1334         }
1335
1336 }
1337
1338 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1339 {
1340         copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1341 }
1342
1343 static inline int guestwidth_to_adjustwidth(int gaw)
1344 {
1345         int agaw;
1346         int r = (gaw - 12) % 9;
1347
1348         if (r == 0)
1349                 agaw = gaw;
1350         else
1351                 agaw = gaw + 9 - r;
1352         if (agaw > 64)
1353                 agaw = 64;
1354         return agaw;
1355 }
1356
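/*
 * Editor's illustration (not part of the driver): the adjusted width must
 * leave a whole number of 9-bit levels above the 12-bit page offset, so the
 * guest width is rounded up to the next value of the form 12 + 9 * n.
 * Hedged worked examples:
 *
 *	gaw = 48: (48 - 12) % 9 = 0  ->  agaw stays 48
 *	gaw = 36: (36 - 12) % 9 = 6  ->  agaw = 36 + 9 - 6 = 39
 */
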
1357 static int domain_init(struct dmar_domain *domain, int guest_width)
1358 {
1359         struct intel_iommu *iommu;
1360         int adjust_width, agaw;
1361         unsigned long sagaw;
1362
1363         init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1364         spin_lock_init(&domain->mapping_lock);
1365         spin_lock_init(&domain->iommu_lock);
1366
1367         domain_reserve_special_ranges(domain);
1368
1369         /* calculate AGAW */
1370         iommu = domain_get_iommu(domain);
1371         if (guest_width > cap_mgaw(iommu->cap))
1372                 guest_width = cap_mgaw(iommu->cap);
1373         domain->gaw = guest_width;
1374         adjust_width = guestwidth_to_adjustwidth(guest_width);
1375         agaw = width_to_agaw(adjust_width);
1376         sagaw = cap_sagaw(iommu->cap);
1377         if (!test_bit(agaw, &sagaw)) {
1378                 /* hardware doesn't support it, choose a bigger one */
1379                 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1380                 agaw = find_next_bit(&sagaw, 5, agaw);
1381                 if (agaw >= 5)
1382                         return -ENODEV;
1383         }
1384         domain->agaw = agaw;
1385         INIT_LIST_HEAD(&domain->devices);
1386
1387         if (ecap_coherent(iommu->ecap))
1388                 domain->iommu_coherency = 1;
1389         else
1390                 domain->iommu_coherency = 0;
1391
1392         if (ecap_sc_support(iommu->ecap))
1393                 domain->iommu_snooping = 1;
1394         else
1395                 domain->iommu_snooping = 0;
1396
1397         domain->iommu_count = 1;
1398
1399         /* always allocate the top pgd */
1400         domain->pgd = (struct dma_pte *)alloc_pgtable_page();
1401         if (!domain->pgd)
1402                 return -ENOMEM;
1403         __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1404         return 0;
1405 }
1406
1407 static void domain_exit(struct dmar_domain *domain)
1408 {
1409         struct dmar_drhd_unit *drhd;
1410         struct intel_iommu *iommu;
1411
1412         /* Domain 0 is reserved, so don't process it */
1413         if (!domain)
1414                 return;
1415
1416         domain_remove_dev_info(domain);
1417         /* destroy iovas */
1418         put_iova_domain(&domain->iovad);
1419
1420         /* clear ptes */
1421         dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1422
1423         /* free page tables */
1424         dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1425
1426         for_each_active_iommu(iommu, drhd)
1427                 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
1428                         iommu_detach_domain(domain, iommu);
1429
1430         free_domain_mem(domain);
1431 }
1432
1433 static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1434                                  u8 bus, u8 devfn, int translation)
1435 {
1436         struct context_entry *context;
1437         unsigned long flags;
1438         struct intel_iommu *iommu;
1439         struct dma_pte *pgd;
1440         unsigned long num;
1441         unsigned long ndomains;
1442         int id;
1443         int agaw;
1444         struct device_domain_info *info = NULL;
1445
1446         pr_debug("Set context mapping for %02x:%02x.%d\n",
1447                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1448
1449         BUG_ON(!domain->pgd);
1450         BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1451                translation != CONTEXT_TT_MULTI_LEVEL);
1452
1453         iommu = device_to_iommu(segment, bus, devfn);
1454         if (!iommu)
1455                 return -ENODEV;
1456
1457         context = device_to_context_entry(iommu, bus, devfn);
1458         if (!context)
1459                 return -ENOMEM;
1460         spin_lock_irqsave(&iommu->lock, flags);
1461         if (context_present(context)) {
1462                 spin_unlock_irqrestore(&iommu->lock, flags);
1463                 return 0;
1464         }
1465
1466         id = domain->id;
1467         pgd = domain->pgd;
1468
1469         if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1470             domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
1471                 int found = 0;
1472
1473                 /* find an available domain id for this device in iommu */
1474                 ndomains = cap_ndoms(iommu->cap);
1475                 num = find_first_bit(iommu->domain_ids, ndomains);
1476                 for (; num < ndomains; ) {
1477                         if (iommu->domains[num] == domain) {
1478                                 id = num;
1479                                 found = 1;
1480                                 break;
1481                         }
1482                         num = find_next_bit(iommu->domain_ids,
1483                                             cap_ndoms(iommu->cap), num+1);
1484                 }
1485
1486                 if (found == 0) {
1487                         num = find_first_zero_bit(iommu->domain_ids, ndomains);
1488                         if (num >= ndomains) {
1489                                 spin_unlock_irqrestore(&iommu->lock, flags);
1490                                 printk(KERN_ERR "IOMMU: no free domain ids\n");
1491                                 return -EFAULT;
1492                         }
1493
1494                         set_bit(num, iommu->domain_ids);
1495                         set_bit(iommu->seq_id, &domain->iommu_bmp);
1496                         iommu->domains[num] = domain;
1497                         id = num;
1498                 }
1499
1500                 /* Skip top levels of page tables for an iommu
1501                  * which has a smaller agaw than the default.
1502                  */
1503                 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1504                         pgd = phys_to_virt(dma_pte_addr(pgd));
1505                         if (!dma_pte_present(pgd)) {
1506                                 spin_unlock_irqrestore(&iommu->lock, flags);
1507                                 return -ENOMEM;
1508                         }
1509                 }
1510         }
1511
1512         context_set_domain_id(context, id);
1513
1514         if (translation != CONTEXT_TT_PASS_THROUGH) {
1515                 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1516                 translation = info ? CONTEXT_TT_DEV_IOTLB :
1517                                      CONTEXT_TT_MULTI_LEVEL;
1518         }
1519         /*
1520          * In pass through mode, AW must be programmed to indicate the largest
1521          * AGAW value supported by hardware. And ASR is ignored by hardware.
1522          */
1523         if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1524                 context_set_address_width(context, iommu->msagaw);
1525         else {
1526                 context_set_address_root(context, virt_to_phys(pgd));
1527                 context_set_address_width(context, iommu->agaw);
1528         }
1529
1530         context_set_translation_type(context, translation);
1531         context_set_fault_enable(context);
1532         context_set_present(context);
1533         domain_flush_cache(domain, context, sizeof(*context));
1534
1535         /*
1536          * It's a non-present to present mapping. If hardware doesn't cache
1537          * non-present entries we only need to flush the write-buffer. If it
1538          * _does_ cache non-present entries, then it does so in the special
1539          * domain #0, which we have to flush:
1540          */
1541         if (cap_caching_mode(iommu->cap)) {
1542                 iommu->flush.flush_context(iommu, 0,
1543                                            (((u16)bus) << 8) | devfn,
1544                                            DMA_CCMD_MASK_NOBIT,
1545                                            DMA_CCMD_DEVICE_INVL);
1546                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
1547         } else {
1548                 iommu_flush_write_buffer(iommu);
1549         }
1550         iommu_enable_dev_iotlb(info);
1551         spin_unlock_irqrestore(&iommu->lock, flags);
1552
1553         spin_lock_irqsave(&domain->iommu_lock, flags);
1554         if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1555                 domain->iommu_count++;
1556                 domain_update_iommu_cap(domain);
1557         }
1558         spin_unlock_irqrestore(&domain->iommu_lock, flags);
1559         return 0;
1560 }
1561
1562 static int
1563 domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1564                         int translation)
1565 {
1566         int ret;
1567         struct pci_dev *tmp, *parent;
1568
1569         ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
1570                                          pdev->bus->number, pdev->devfn,
1571                                          translation);
1572         if (ret)
1573                 return ret;
1574
1575         /* dependent device mapping */
1576         tmp = pci_find_upstream_pcie_bridge(pdev);
1577         if (!tmp)
1578                 return 0;
1579         /* Secondary interface's bus number and devfn 0 */
1580         parent = pdev->bus->self;
1581         while (parent != tmp) {
1582                 ret = domain_context_mapping_one(domain,
1583                                                  pci_domain_nr(parent->bus),
1584                                                  parent->bus->number,
1585                                                  parent->devfn, translation);
1586                 if (ret)
1587                         return ret;
1588                 parent = parent->bus->self;
1589         }
1590         if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1591                 return domain_context_mapping_one(domain,
1592                                         pci_domain_nr(tmp->subordinate),
1593                                         tmp->subordinate->number, 0,
1594                                         translation);
1595         else /* this is a legacy PCI bridge */
1596                 return domain_context_mapping_one(domain,
1597                                                   pci_domain_nr(tmp->bus),
1598                                                   tmp->bus->number,
1599                                                   tmp->devfn,
1600                                                   translation);
1601 }
1602
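/*
 * Check whether a present context entry already exists for the device and for
 * every bridge between it and its IOMMU; this mirrors the walk done in
 * domain_context_mapping() above.
 */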
1603 static int domain_context_mapped(struct pci_dev *pdev)
1604 {
1605         int ret;
1606         struct pci_dev *tmp, *parent;
1607         struct intel_iommu *iommu;
1608
1609         iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1610                                 pdev->devfn);
1611         if (!iommu)
1612                 return -ENODEV;
1613
1614         ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
1615         if (!ret)
1616                 return ret;
1617         /* dependent device mapping */
1618         tmp = pci_find_upstream_pcie_bridge(pdev);
1619         if (!tmp)
1620                 return ret;
1621         /* Secondary interface's bus number and devfn 0 */
1622         parent = pdev->bus->self;
1623         while (parent != tmp) {
1624                 ret = device_context_mapped(iommu, parent->bus->number,
1625                                             parent->devfn);
1626                 if (!ret)
1627                         return ret;
1628                 parent = parent->bus->self;
1629         }
1630         if (tmp->is_pcie)
1631                 return device_context_mapped(iommu, tmp->subordinate->number,
1632                                              0);
1633         else
1634                 return device_context_mapped(iommu, tmp->bus->number,
1635                                              tmp->devfn);
1636 }
1637
1638 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1639                             struct scatterlist *sg, unsigned long phys_pfn,
1640                             unsigned long nr_pages, int prot)
1641 {
1642         struct dma_pte *first_pte = NULL, *pte = NULL;
1643         phys_addr_t uninitialized_var(pteval);
1644         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1645         unsigned long sg_res;
1646
1647         BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1648
1649         if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1650                 return -EINVAL;
1651
1652         prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1653
1654         if (sg)
1655                 sg_res = 0;
1656         else {
1657                 sg_res = nr_pages + 1;
1658                 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1659         }
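        /*
         * sg_res counts the VT-d pages remaining in the current scatterlist
         * entry.  For the physically contiguous (non-sg) case it is primed to
         * nr_pages + 1 so it never reaches zero and sg is never dereferenced.
         */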
1660
1661         while (nr_pages--) {
1662                 if (!sg_res) {
1663                         sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
1664                         sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1665                         sg->dma_length = sg->length;
1666                         pteval = page_to_phys(sg_page(sg)) | prot;
1667                 }
1668                 if (!pte) {
1669                         first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
1670                         if (!pte)
1671                                 return -ENOMEM;
1672                 }
1673                 /* We don't need a lock here; nobody else
1674                  * touches the iova range
1675                  */
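                /*
                 * A PTE for this vPFN is already present, so something mapped
                 * this range twice.  Dump the DMA mappings a few times and
                 * warn, but overwrite the entry and keep going rather than
                 * dying.
                 */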
1676                 if (unlikely(dma_pte_addr(pte))) {
1677                         static int dumps = 5;
1678                         printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx)\n",
1679                                iov_pfn, pte->val);
1680                         if (dumps) {
1681                                 dumps--;
1682                                 debug_dma_dump_mappings(NULL);
1683                         }
1684                         WARN_ON(1);
1685                 }
1686                 pte->val = pteval;
1687                 pte++;
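                /*
                 * Flush the CPU cache for the PTEs just written whenever we
                 * cross into a new page-table page, or once the last PTE of
                 * the range has been set.
                 */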
1688                 if (!nr_pages ||
1689                     (unsigned long)pte >> VTD_PAGE_SHIFT !=
1690                     (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
1691                         domain_flush_cache(domain, first_pte,
1692                                            (void *)pte - (void *)first_pte);
1693                         pte = NULL;
1694                 }
1695                 iov_pfn++;
1696                 pteval += VTD_PAGE_SIZE;
1697                 sg_res--;
1698                 if (!sg_res)
1699                         sg = sg_next(sg);
1700         }
1701         return 0;
1702 }
1703
1704 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1705                                     struct scatterlist *sg, unsigned long nr_pages,
1706                                     int prot)
1707 {
1708         return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1709 }
1710
1711 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1712                                      unsigned long phys_pfn, unsigned long nr_pages,
1713                                      int prot)
1714 {
1715         return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
1716 }
1717
1718 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
1719 {
1720         if (!iommu)
1721                 return;
1722
1723         clear_context_table(iommu, bus, devfn);
1724         iommu->flush.flush_context(iommu, 0, 0, 0,
1725                                            DMA_CCMD_GLOBAL_INVL);
1726         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1727 }
1728
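/*
 * Detach every device from the domain: unlink its device_domain_info, clear
 * its context entry and flush the IOMMU, then free the info structure.  The
 * device_domain_lock is dropped around the per-device hardware teardown.
 */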
1729 static void domain_remove_dev_info(struct dmar_domain *domain)
1730 {
1731         struct device_domain_info *info;
1732         unsigned long flags;
1733         struct intel_iommu *iommu;
1734
1735         spin_lock_irqsave(&device_domain_lock, flags);
1736         while (!list_empty(&domain->devices)) {
1737                 info = list_entry(domain->devices.next,
1738                         struct device_domain_info, link);
1739                 list_del(&info->link);
1740                 list_del(&info->global);
1741                 if (info->dev)
1742                         info->dev->dev.archdata.iommu = NULL;
1743                 spin_unlock_irqrestore(&device_domain_lock, flags);
1744
1745                 iommu_disable_dev_iotlb(info);
1746                 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
1747                 iommu_detach_dev(iommu, info->bus, info->devfn);
1748                 free_devinfo_mem(info);
1749
1750                 spin_lock_irqsave(&device_domain_lock, flags);
1751         }
1752         spin_unlock_irqrestore(&device_domain_lock, flags);
1753 }
1754
1755 /*
1756  * find_domain
1757  * Note: we use struct pci_dev->dev.archdata.iommu to store the info
1758  */
1759 static struct dmar_domain *
1760 find_domain(struct pci_dev *pdev)
1761 {
1762         struct device_domain_info *info;
1763
1764         /* No lock here; we assume no domain exits in the normal case */
1765         info = pdev->dev.archdata.iommu;
1766         if (info)
1767                 return info->domain;
1768         return NULL;
1769 }
1770
1771 /* Find or allocate a domain for the device; the returned domain is initialized. */
1772 static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1773 {
1774         struct dmar_domain *domain, *found = NULL;
1775         struct intel_iommu *iommu;
1776         struct dmar_drhd_unit *drhd;
1777         struct device_domain_info *info, *tmp;
1778         struct pci_dev *dev_tmp;
1779         unsigned long flags;
1780         int bus = 0, devfn = 0;
1781         int segment;
1782         int ret;
1783
1784         domain = find_domain(pdev);
1785         if (domain)
1786                 return domain;
1787
1788         segment = pci_domain_nr(pdev->bus);
1789
1790         dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1791         if (dev_tmp) {
1792                 if (dev_tmp->is_pcie) {
1793                         bus = dev_tmp->subordinate->number;
1794                         devfn = 0;
1795                 } else {
1796                         bus = dev_tmp->bus->number;
1797                         devfn = dev_tmp->devfn;
1798                 }
1799                 spin_lock_irqsave(&device_domain_lock, flags);
1800                 list_for_each_entry(info, &device_domain_list, global) {
1801                         if (info->segment == segment &&
1802                             info->bus == bus && info->devfn == devfn) {
1803                                 found = info->domain;
1804                                 break;
1805                         }
1806                 }
1807                 spin_unlock_irqrestore(&device_domain_lock, flags);
1808                 /* pcie-pci bridge already has a domain, use it */
1809                 if (found) {
1810                         domain = found;
1811                         goto found_domain;
1812                 }
1813         }
1814
1815         domain = alloc_domain();
1816         if (!domain)
1817                 goto error;
1818
1819         /* Allocate new domain for the device */
1820         drhd = dmar_find_matched_drhd_unit(pdev);
1821         if (!drhd) {
1822                 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1823                         pci_name(pdev));
1824                 return NULL;
1825         }
1826         iommu = drhd->iommu;
1827
1828         ret = iommu_attach_domain(domain, iommu);
1829         if (ret) {
1830                 domain_exit(domain);
1831                 goto error;
1832         }
1833
1834         if (domain_init(domain, gaw)) {
1835                 domain_exit(domain);
1836                 goto error;
1837         }
1838
1839         /* register pcie-to-pci device */
1840         if (dev_tmp) {
1841                 info = alloc_devinfo_mem();
1842                 if (!info) {
1843                         domain_exit(domain);
1844                         goto error;
1845                 }
1846                 info->segment = segment;
1847                 info->bus = bus;
1848                 info->devfn = devfn;
1849                 info->dev = NULL;
1850                 info->domain = domain;
1851                 /* This domain is shared by devices under p2p bridge */
1852                 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
1853
1854                 /* pcie-to-pci bridge already has a domain, use it */
1855                 found = NULL;
1856                 spin_lock_irqsave(&device_domain_lock, flags);
1857                 list_for_each_entry(tmp, &device_domain_list, global) {
1858                         if (tmp->segment == segment &&
1859                             tmp->bus == bus && tmp->devfn == devfn) {
1860                                 found = tmp->domain;
1861                                 break;
1862                         }
1863                 }
1864                 if (found) {
1865                         free_devinfo_mem(info);
1866                         domain_exit(domain);
1867                         domain = found;
1868                 } else {
1869                         list_add(&info->link, &domain->devices);
1870                         list_add(&info->global, &device_domain_list);
1871                 }
1872                 spin_unlock_irqrestore(&device_domain_lock, flags);
1873         }
1874
1875 found_domain:
1876         info = alloc_devinfo_mem();
1877         if (!info)
1878                 goto error;
1879         info->segment = segment;
1880         info->bus = pdev->bus->number;
1881         info->devfn = pdev->devfn;
1882         info->dev = pdev;
1883         info->domain = domain;
1884         spin_lock_irqsave(&device_domain_lock, flags);
1885         /* somebody else beat us to it */
1886         found = find_domain(pdev);
1887         if (found != NULL) {
1888                 spin_unlock_irqrestore(&device_domain_lock, flags);
1889                 if (found != domain) {
1890                         domain_exit(domain);
1891                         domain = found;
1892                 }
1893                 free_devinfo_mem(info);
1894                 return domain;
1895         }
1896         list_add(&info->link, &domain->devices);
1897         list_add(&info->global, &device_domain_list);
1898         pdev->dev.archdata.iommu = info;
1899         spin_unlock_irqrestore(&device_domain_lock, flags);
1900         return domain;
1901 error:
1902         /* recheck it here, maybe somebody else already set it up */
1903         return find_domain(pdev);
1904 }
1905
1906 static int iommu_identity_mapping;
1907
1908 static int iommu_domain_identity_map(struct dmar_domain *domain,
1909                                      unsigned long long start,
1910                                      unsigned long long end)
1911 {
1912         unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
1913         unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
1914
1915         if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
1916                           dma_to_mm_pfn(last_vpfn))) {
1917                 printk(KERN_ERR "IOMMU: reserve iova failed\n");
1918                 return -ENOMEM;
1919         }
1920
1921         pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
1922                  start, end, domain->id);
1923         /*
1924          * The RMRR range might overlap with the physical memory range,
1925          * clear it first
1926          */
1927         dma_pte_clear_range(domain, first_vpfn, last_vpfn);
1928
1929         return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
1930                                   last_vpfn - first_vpfn + 1,
1931                                   DMA_PTE_READ|DMA_PTE_WRITE);
1932 }
1933
1934 static int iommu_prepare_identity_map(struct pci_dev *pdev,
1935                                       unsigned long long start,
1936                                       unsigned long long end)
1937 {
1938         struct dmar_domain *domain;
1939         int ret;
1940
1941         printk(KERN_INFO
1942                "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1943                pci_name(pdev), start, end);
1944
1945         domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1946         if (!domain)
1947                 return -ENOMEM;
1948
1949         ret = iommu_domain_identity_map(domain, start, end);
1950         if (ret)
1951                 goto error;
1952
1953         /* context entry init */
1954         ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
1955         if (ret)
1956                 goto error;
1957
1958         return 0;
1959
1960  error:
1961         domain_exit(domain);
1962         return ret;
1963 }
1964
1965 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1966         struct pci_dev *pdev)
1967 {
1968         if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
1969                 return 0;
1970         return iommu_prepare_identity_map(pdev, rmrr->base_address,
1971                 rmrr->end_address + 1);
1972 }
1973
1974 #ifdef CONFIG_DMAR_FLOPPY_WA
1975 static inline void iommu_prepare_isa(void)
1976 {
1977         struct pci_dev *pdev;
1978         int ret;
1979
1980         pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
1981         if (!pdev)
1982                 return;
1983
1984         printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
1985         ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1986
1987         if (ret)
1988                 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
1989                        "floppy might not work\n");
1990
1991 }
1992 #else
1993 static inline void iommu_prepare_isa(void)
1994 {
1995         return;
1996 }
1997 #endif /* !CONFIG_DMAR_FLOPPY_WA */
1998
1999 /* Initialize each context entry as pass through. */
2000 static int __init init_context_pass_through(void)
2001 {
2002         struct pci_dev *pdev = NULL;
2003         struct dmar_domain *domain;
2004         int ret;
2005
2006         for_each_pci_dev(pdev) {
2007                 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2008                 ret = domain_context_mapping(domain, pdev,
2009                                              CONTEXT_TT_PASS_THROUGH);
2010                 if (ret)
2011                         return ret;
2012         }
2013         return 0;
2014 }
2015
2016 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2017
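/*
 * Callback for work_with_active_regions(): identity-map one range of usable
 * physical memory into the static identity (si) domain.
 */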
2018 static int __init si_domain_work_fn(unsigned long start_pfn,
2019                                     unsigned long end_pfn, void *datax)
2020 {
2021         int *ret = datax;
2022
2023         *ret = iommu_domain_identity_map(si_domain,
2024                                          (uint64_t)start_pfn << PAGE_SHIFT,
2025                                          (uint64_t)end_pfn << PAGE_SHIFT);
2026         return *ret;
2027
2028 }
2029
2030 static int si_domain_init(void)
2031 {
2032         struct dmar_drhd_unit *drhd;
2033         struct intel_iommu *iommu;
2034         int nid, ret = 0;
2035
2036         si_domain = alloc_domain();
2037         if (!si_domain)
2038                 return -EFAULT;
2039
2040         pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
2041
2042         for_each_active_iommu(iommu, drhd) {
2043                 ret = iommu_attach_domain(si_domain, iommu);
2044                 if (ret) {
2045                         domain_exit(si_domain);
2046                         return -EFAULT;
2047                 }
2048         }
2049
2050         if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2051                 domain_exit(si_domain);
2052                 return -EFAULT;
2053         }
2054
2055         si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2056
2057         for_each_online_node(nid) {
2058                 work_with_active_regions(nid, si_domain_work_fn, &ret);
2059                 if (ret)
2060                         return ret;
2061         }
2062
2063         return 0;
2064 }
2065
2066 static void domain_remove_one_dev_info(struct dmar_domain *domain,
2067                                           struct pci_dev *pdev);
2068 static int identity_mapping(struct pci_dev *pdev)
2069 {
2070         struct device_domain_info *info;
2071
2072         if (likely(!iommu_identity_mapping))
2073                 return 0;
2074
2075
2076         list_for_each_entry(info, &si_domain->devices, link)
2077                 if (info->dev == pdev)
2078                         return 1;
2079         return 0;
2080 }
2081
2082 static int domain_add_dev_info(struct dmar_domain *domain,
2083                                   struct pci_dev *pdev)
2084 {
2085         struct device_domain_info *info;
2086         unsigned long flags;
2087
2088         info = alloc_devinfo_mem();
2089         if (!info)
2090                 return -ENOMEM;
2091
2092         info->segment = pci_domain_nr(pdev->bus);
2093         info->bus = pdev->bus->number;
2094         info->devfn = pdev->devfn;
2095         info->dev = pdev;
2096         info->domain = domain;
2097
2098         spin_lock_irqsave(&device_domain_lock, flags);
2099         list_add(&info->link, &domain->devices);
2100         list_add(&info->global, &device_domain_list);
2101         pdev->dev.archdata.iommu = info;
2102         spin_unlock_irqrestore(&device_domain_lock, flags);
2103
2104         return 0;
2105 }
2106
2107 static int iommu_prepare_static_identity_mapping(void)
2108 {
2109         struct pci_dev *pdev = NULL;
2110         int ret;
2111
2112         ret = si_domain_init();
2113         if (ret)
2114                 return -EFAULT;
2115
2116         for_each_pci_dev(pdev) {
2117                 printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
2118                        pci_name(pdev));
2119
2120                 ret = domain_context_mapping(si_domain, pdev,
2121                                              CONTEXT_TT_MULTI_LEVEL);
2122                 if (ret)
2123                         return ret;
2124                 ret = domain_add_dev_info(si_domain, pdev);
2125                 if (ret)
2126                         return ret;
2127         }
2128
2129         return 0;
2130 }
2131
2132 int __init init_dmars(void)
2133 {
2134         struct dmar_drhd_unit *drhd;
2135         struct dmar_rmrr_unit *rmrr;
2136         struct pci_dev *pdev;
2137         struct intel_iommu *iommu;
2138         int i, ret;
2139         int pass_through = 1;
2140
2141         /*
2142          * In case pass through cannot be enabled, the iommu tries to use identity
2143          * mapping.
2144          */
2145         if (iommu_pass_through)
2146                 iommu_identity_mapping = 1;
2147
2148         /*
2149          * for each drhd
2150          *    allocate root
2151          *    initialize and program root entry to not present
2152          * endfor
2153          */
2154         for_each_drhd_unit(drhd) {
2155                 g_num_of_iommus++;
2156                 /*
2157                  * lock not needed as this is only incremented in the
2158                  * single-threaded kernel __init code path; all other
2159                  * accesses are read-only
2160                  */
2161         }
2162
2163         g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2164                         GFP_KERNEL);
2165         if (!g_iommus) {
2166                 printk(KERN_ERR "Allocating global iommu array failed\n");
2167                 ret = -ENOMEM;
2168                 goto error;
2169         }
2170
2171         deferred_flush = kzalloc(g_num_of_iommus *
2172                 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2173         if (!deferred_flush) {
2174                 kfree(g_iommus);
2175                 ret = -ENOMEM;
2176                 goto error;
2177         }
2178
2179         for_each_drhd_unit(drhd) {
2180                 if (drhd->ignored)
2181                         continue;
2182
2183                 iommu = drhd->iommu;
2184                 g_iommus[iommu->seq_id] = iommu;
2185
2186                 ret = iommu_init_domains(iommu);
2187                 if (ret)
2188                         goto error;
2189
2190                 /*
2191                  * TBD:
2192                  * we could share the same root & context tables
2193                  * among all IOMMUs. Need to split it later.
2194                  */
2195                 ret = iommu_alloc_root_entry(iommu);
2196                 if (ret) {
2197                         printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2198                         goto error;
2199                 }
2200                 if (!ecap_pass_through(iommu->ecap))
2201                         pass_through = 0;
2202         }
2203         if (iommu_pass_through)
2204                 if (!pass_through) {
2205                         printk(KERN_INFO
2206                                "Pass Through is not supported by hardware.\n");
2207                         iommu_pass_through = 0;
2208                 }
2209
2210         /*
2211          * Start from a sane iommu hardware state.
2212          */
2213         for_each_drhd_unit(drhd) {
2214                 if (drhd->ignored)
2215                         continue;
2216
2217                 iommu = drhd->iommu;
2218
2219                 /*
2220                  * If queued invalidation was already initialized by us
2221                  * (for example, while enabling interrupt-remapping), then
2222                  * things are already rolling from a sane state.
2223                  */
2224                 if (iommu->qi)
2225                         continue;
2226
2227                 /*
2228                  * Clear any previous faults.
2229                  */
2230                 dmar_fault(-1, iommu);
2231                 /*
2232                  * Disable queued invalidation if supported and already enabled
2233                  * before OS handover.
2234                  */
2235                 dmar_disable_qi(iommu);
2236         }
2237
2238         for_each_drhd_unit(drhd) {
2239                 if (drhd->ignored)
2240                         continue;
2241
2242                 iommu = drhd->iommu;
2243
2244                 if (dmar_enable_qi(iommu)) {
2245                         /*
2246                          * Queued Invalidate not enabled, use Register Based
2247                          * Invalidate
2248                          */
2249                         iommu->flush.flush_context = __iommu_flush_context;
2250                         iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2251                         printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
2252                                "invalidation\n",
2253                                (unsigned long long)drhd->reg_base_addr);
2254                 } else {
2255                         iommu->flush.flush_context = qi_flush_context;
2256                         iommu->flush.flush_iotlb = qi_flush_iotlb;
2257                         printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
2258                                "invalidation\n",
2259                                (unsigned long long)drhd->reg_base_addr);
2260                 }
2261         }
2262
2263         /*
2264          * If pass through is set and enabled, the context entries of all PCI
2265          * devices are initialized with the pass through translation type.
2266          */
2267         if (iommu_pass_through) {
2268                 ret = init_context_pass_through();
2269                 if (ret) {
2270                         printk(KERN_ERR "IOMMU: Pass through init failed.\n");
2271                         iommu_pass_through = 0;
2272                 }
2273         }
2274
2275         /*
2276          * If pass through is not set or could not be enabled, set up context
2277          * entries with identity mappings for rmrr, gfx and isa, and fall back
2278          * to a static identity mapping if iommu_identity_mapping is set.
2279          */
2280         if (!iommu_pass_through) {
2281                 if (iommu_identity_mapping)
2282                         iommu_prepare_static_identity_mapping();
2283                 /*
2284                  * For each rmrr
2285                  *   for each dev attached to rmrr
2286                  *   do
2287                  *     locate drhd for dev, alloc domain for dev
2288                  *     allocate free domain
2289                  *     allocate page table entries for rmrr
2290                  *     if context not allocated for bus
2291                  *           allocate and init context
2292                  *           set present in root table for this bus
2293                  *     init context with domain, translation etc
2294                  *    endfor
2295                  * endfor
2296                  */
2297                 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2298                 for_each_rmrr_units(rmrr) {
2299                         for (i = 0; i < rmrr->devices_cnt; i++) {
2300                                 pdev = rmrr->devices[i];
2301                                 /*
2302                                  * some BIOSes list non-existent devices in the DMAR
2303                                  * table.
2304                                  */
2305                                 if (!pdev)
2306                                         continue;
2307                                 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2308                                 if (ret)
2309                                         printk(KERN_ERR
2310                                  "IOMMU: mapping reserved region failed\n");
2311                         }
2312                 }
2313
2314                 iommu_prepare_isa();
2315         }
2316
2317         /*
2318          * for each drhd
2319          *   enable fault log
2320          *   global invalidate context cache
2321          *   global invalidate iotlb
2322          *   enable translation
2323          */
2324         for_each_drhd_unit(drhd) {
2325                 if (drhd->ignored)
2326                         continue;
2327                 iommu = drhd->iommu;
2328
2329                 iommu_flush_write_buffer(iommu);
2330
2331                 ret = dmar_set_interrupt(iommu);
2332                 if (ret)
2333                         goto error;
2334
2335                 iommu_set_root_entry(iommu);
2336
2337                 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2338                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2339                 iommu_disable_protect_mem_regions(iommu);
2340
2341                 ret = iommu_enable_translation(iommu);
2342                 if (ret)
2343                         goto error;
2344         }
2345
2346         return 0;
2347 error:
2348         for_each_drhd_unit(drhd) {
2349                 if (drhd->ignored)
2350                         continue;
2351                 iommu = drhd->iommu;
2352                 free_iommu(iommu);
2353         }
2354         kfree(g_iommus);
2355         return ret;
2356 }
2357
2358 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2359                                             size_t size)
2360 {
2361         host_addr &= ~PAGE_MASK;
2362         host_addr += size + PAGE_SIZE - 1;
2363
2364         return host_addr >> VTD_PAGE_SHIFT;
2365 }
2366
2367 static struct iova *intel_alloc_iova(struct device *dev,
2368                                      struct dmar_domain *domain,
2369                                      unsigned long nrpages, uint64_t dma_mask)
2370 {
2371         struct pci_dev *pdev = to_pci_dev(dev);
2372         struct iova *iova = NULL;
2373
2374         /* Restrict dma_mask to the width that the iommu can handle */
2375         dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2376
2377         if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2378                 /*
2379                  * First try to allocate an io virtual address in
2380                  * DMA_BIT_MASK(32) and if that fails then try allocating
2381                  * from higher range
2382                  */
2383                 iova = alloc_iova(&domain->iovad, nrpages,
2384                                   IOVA_PFN(DMA_BIT_MASK(32)), 1);
2385                 if (iova)
2386                         return iova;
2387         }
2388         iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2389         if (unlikely(!iova)) {
2390                 printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
2391                        nrpages, pci_name(pdev));
2392                 return NULL;
2393         }
2394
2395         return iova;
2396 }
2397
2398 static struct dmar_domain *
2399 get_valid_domain_for_dev(struct pci_dev *pdev)
2400 {
2401         struct dmar_domain *domain;
2402         int ret;
2403
2404         domain = get_domain_for_dev(pdev,
2405                         DEFAULT_DOMAIN_ADDRESS_WIDTH);
2406         if (!domain) {
2407                 printk(KERN_ERR
2408                         "Allocating domain for %s failed\n", pci_name(pdev));
2409                 return NULL;
2410         }
2411
2412         /* make sure context mapping is ok */
2413         if (unlikely(!domain_context_mapped(pdev))) {
2414                 ret = domain_context_mapping(domain, pdev,
2415                                              CONTEXT_TT_MULTI_LEVEL);
2416                 if (ret) {
2417                         printk(KERN_ERR
2418                                 "Domain context map for %s failed\n",
2419                                 pci_name(pdev));
2420                         return NULL;
2421                 }
2422         }
2423
2424         return domain;
2425 }
2426
2427 static int iommu_dummy(struct pci_dev *pdev)
2428 {
2429         return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2430 }
2431
2432 /* Check if the pdev needs to go through the non-identity map/unmap process. */
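/*
 * Returns non-zero when the device should bypass DMA translation entirely:
 * either it sits on an ignored DMAR unit, or it is identity-mapped in
 * si_domain.  Devices limited to 32-bit DMA are dropped out of si_domain
 * here, while 64-bit capable devices may be pulled back into it.
 */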
2433 static int iommu_no_mapping(struct pci_dev *pdev)
2434 {
2435         int found;
2436
2437         if (!iommu_identity_mapping)
2438                 return iommu_dummy(pdev);
2439
2440         found = identity_mapping(pdev);
2441         if (found) {
2442                 if (pdev->dma_mask > DMA_BIT_MASK(32))
2443                         return 1;
2444                 else {
2445                         /*
2446                          * The 32-bit DMA device is removed from si_domain and
2447                          * falls back to non-identity mapping.
2448                          */
2449                         domain_remove_one_dev_info(si_domain, pdev);
2450                         printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2451                                pci_name(pdev));
2452                         return 0;
2453                 }
2454         } else {
2455                 /*
2456                  * A 64-bit DMA device detached from a VM is put back
2457                  * into si_domain for identity mapping.
2458                  */
2459                 if (pdev->dma_mask > DMA_BIT_MASK(32)) {
2460                         int ret;
2461                         ret = domain_add_dev_info(si_domain, pdev);
2462                         if (!ret) {
2463                                 printk(KERN_INFO "64bit %s uses identity mapping\n",
2464                                        pci_name(pdev));
2465                                 return 1;
2466                         }
2467                 }
2468         }
2469
2470         return iommu_dummy(pdev);
2471 }
2472
2473 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2474                                      size_t size, int dir, u64 dma_mask)
2475 {
2476         struct pci_dev *pdev = to_pci_dev(hwdev);
2477         struct dmar_domain *domain;
2478         phys_addr_t start_paddr;
2479         struct iova *iova;
2480         int prot = 0;
2481         int ret;
2482         struct intel_iommu *iommu;
2483
2484         BUG_ON(dir == DMA_NONE);
2485
2486         if (iommu_no_mapping(pdev))
2487                 return paddr;
2488
2489         domain = get_valid_domain_for_dev(pdev);
2490         if (!domain)
2491                 return 0;
2492
2493         iommu = domain_get_iommu(domain);
2494         size = aligned_nrpages(paddr, size);
2495
2496         iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
2497         if (!iova)
2498                 goto error;
2499
2500         /*
2501          * Check if DMAR supports zero-length reads on write-only
2502          * mappings.
2503          */
2504         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2505                         !cap_zlr(iommu->cap))
2506                 prot |= DMA_PTE_READ;
2507         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2508                 prot |= DMA_PTE_WRITE;
2509         /*
2510          * paddr .. (paddr + size) might cover only part of a page; we should
2511          * map the whole page.  Note: if two parts of one page are mapped
2512          * separately, we might have two guest_addr mappings to the same host
2513          * paddr, but this is not a big problem
2514          */
2515         ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
2516                                  paddr >> VTD_PAGE_SHIFT, size, prot);
2517         if (ret)
2518                 goto error;
2519
2520         /* it's a non-present to present mapping. Only flush if caching mode */
2521         if (cap_caching_mode(iommu->cap))
2522                 iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
2523         else
2524                 iommu_flush_write_buffer(iommu);
2525
2526         start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2527         start_paddr += paddr & ~PAGE_MASK;
2528         return start_paddr;
2529
2530 error:
2531         if (iova)
2532                 __free_iova(&domain->iovad, iova);
2533         printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
2534                 pci_name(pdev), size, (unsigned long long)paddr, dir);
2535         return 0;
2536 }
2537
2538 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2539                                  unsigned long offset, size_t size,
2540                                  enum dma_data_direction dir,
2541                                  struct dma_attrs *attrs)
2542 {
2543         return __intel_map_single(dev, page_to_phys(page) + offset, size,
2544                                   dir, to_pci_dev(dev)->dma_mask);
2545 }
2546
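/*
 * Deferred-unmap machinery: when intel_iommu_strict is off, intel_unmap_page()
 * queues freed IOVAs per IOMMU in deferred_flush[] instead of flushing the
 * IOTLB synchronously.  flush_unmaps() drains the queues with one global IOTLB
 * flush per IOMMU, triggered either when HIGH_WATER_MARK entries have
 * accumulated or by the 10ms unmap_timer armed in add_unmap().
 */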
2547 static void flush_unmaps(void)
2548 {
2549         int i, j;
2550
2551         timer_on = 0;
2552
2553         /* just flush them all */
2554         for (i = 0; i < g_num_of_iommus; i++) {
2555                 struct intel_iommu *iommu = g_iommus[i];
2556                 if (!iommu)
2557                         continue;
2558
2559                 if (!deferred_flush[i].next)
2560                         continue;
2561
2562                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2563                                          DMA_TLB_GLOBAL_FLUSH);
2564                 for (j = 0; j < deferred_flush[i].next; j++) {
2565                         unsigned long mask;
2566                         struct iova *iova = deferred_flush[i].iova[j];
2567
2568                         mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
2569                         mask = ilog2(mask >> VTD_PAGE_SHIFT);
2570                         iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2571                                         iova->pfn_lo << PAGE_SHIFT, mask);
2572                         __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
2573                 }
2574                 deferred_flush[i].next = 0;
2575         }
2576
2577         list_size = 0;
2578 }
2579
2580 static void flush_unmaps_timeout(unsigned long data)
2581 {
2582         unsigned long flags;
2583
2584         spin_lock_irqsave(&async_umap_flush_lock, flags);
2585         flush_unmaps();
2586         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2587 }
2588
2589 static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2590 {
2591         unsigned long flags;
2592         int next, iommu_id;
2593         struct intel_iommu *iommu;
2594
2595         spin_lock_irqsave(&async_umap_flush_lock, flags);
2596         if (list_size == HIGH_WATER_MARK)
2597                 flush_unmaps();
2598
2599         iommu = domain_get_iommu(dom);
2600         iommu_id = iommu->seq_id;
2601
2602         next = deferred_flush[iommu_id].next;
2603         deferred_flush[iommu_id].domain[next] = dom;
2604         deferred_flush[iommu_id].iova[next] = iova;
2605         deferred_flush[iommu_id].next++;
2606
2607         if (!timer_on) {
2608                 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2609                 timer_on = 1;
2610         }
2611         list_size++;
2612         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2613 }
2614
2615 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2616                              size_t size, enum dma_data_direction dir,
2617                              struct dma_attrs *attrs)
2618 {
2619         struct pci_dev *pdev = to_pci_dev(dev);
2620         struct dmar_domain *domain;
2621         unsigned long start_pfn, last_pfn;
2622         struct iova *iova;
2623         struct intel_iommu *iommu;
2624
2625         if (iommu_no_mapping(pdev))
2626                 return;
2627
2628         domain = find_domain(pdev);
2629         BUG_ON(!domain);
2630
2631         iommu = domain_get_iommu(domain);
2632
2633         iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2634         if (!iova)
2635                 return;
2636
2637         start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2638         last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2639
2640         pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2641                  pci_name(pdev), start_pfn, last_pfn);
2642
2643         /* clear the page-table entries for the whole range */
2644         dma_pte_clear_range(domain, start_pfn, last_pfn);
2645
2646         /* free page tables */
2647         dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2648
2649         if (intel_iommu_strict) {
2650                 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2651                                       last_pfn - start_pfn + 1);
2652                 /* free iova */
2653                 __free_iova(&domain->iovad, iova);
2654         } else {
2655                 add_unmap(domain, iova);
2656                 /*
2657                  * queue up the release of the unmap to save the ~1/6th of the
2658                  * cpu time otherwise used up by the iotlb flush operation...
2659                  */
2660         }
2661 }
2662
2663 static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2664                                int dir)
2665 {
2666         intel_unmap_page(dev, dev_addr, size, dir, NULL);
2667 }
2668
2669 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2670                                   dma_addr_t *dma_handle, gfp_t flags)
2671 {
2672         void *vaddr;
2673         int order;
2674
2675         size = PAGE_ALIGN(size);
2676         order = get_order(size);
2677         flags &= ~(GFP_DMA | GFP_DMA32);
2678
2679         vaddr = (void *)__get_free_pages(flags, order);
2680         if (!vaddr)
2681                 return NULL;
2682         memset(vaddr, 0, size);
2683
2684         *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2685                                          DMA_BIDIRECTIONAL,
2686                                          hwdev->coherent_dma_mask);
2687         if (*dma_handle)
2688                 return vaddr;
2689         free_pages((unsigned long)vaddr, order);
2690         return NULL;
2691 }
2692
2693 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2694                                 dma_addr_t dma_handle)
2695 {
2696         int order;
2697
2698         size = PAGE_ALIGN(size);
2699         order = get_order(size);
2700
2701         intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
2702         free_pages((unsigned long)vaddr, order);
2703 }
2704
2705 static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2706                            int nelems, enum dma_data_direction dir,
2707                            struct dma_attrs *attrs)
2708 {
2709         struct pci_dev *pdev = to_pci_dev(hwdev);
2710         struct dmar_domain *domain;
2711         unsigned long start_pfn, last_pfn;
2712         struct iova *iova;
2713         struct intel_iommu *iommu;
2714
2715         if (iommu_no_mapping(pdev))
2716                 return;
2717
2718         domain = find_domain(pdev);
2719         BUG_ON(!domain);
2720
2721         iommu = domain_get_iommu(domain);
2722
2723         iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
2724         if (!iova)
2725                 return;
2726
2727         start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2728         last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2729
2730         /*  clear the whole page */
2731         /* clear the page-table entries for the whole range */
2732
2733         /* free page tables */
2734         dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2735
2736         iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2737                               (last_pfn - start_pfn + 1));
2738
2739         /* free iova */
2740         __free_iova(&domain->iovad, iova);
2741 }
2742
2743 static int intel_nontranslate_map_sg(struct device *hddev,
2744         struct scatterlist *sglist, int nelems, int dir)
2745 {
2746         int i;
2747         struct scatterlist *sg;
2748
2749         for_each_sg(sglist, sg, nelems, i) {
2750                 BUG_ON(!sg_page(sg));
2751                 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
2752                 sg->dma_length = sg->length;
2753         }
2754         return nelems;
2755 }
2756
2757 static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2758                         enum dma_data_direction dir, struct dma_attrs *attrs)
2759 {
2760         int i;
2761         struct pci_dev *pdev = to_pci_dev(hwdev);
2762         struct dmar_domain *domain;
2763         size_t size = 0;
2764         int prot = 0;
2765         size_t offset_pfn = 0;
2766         struct iova *iova = NULL;
2767         int ret;
2768         struct scatterlist *sg;
2769         unsigned long start_vpfn;
2770         struct intel_iommu *iommu;
2771
2772         BUG_ON(dir == DMA_NONE);
2773         if (iommu_no_mapping(pdev))
2774                 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
2775
2776         domain = get_valid_domain_for_dev(pdev);
2777         if (!domain)
2778                 return 0;
2779
2780         iommu = domain_get_iommu(domain);
2781
2782         for_each_sg(sglist, sg, nelems, i)
2783                 size += aligned_nrpages(sg->offset, sg->length);
2784
2785         iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
2786         if (!iova) {
2787                 sglist->dma_length = 0;
2788                 return 0;
2789         }
2790
2791         /*
2792          * Check if DMAR supports zero-length reads on write-only
2793          * mappings.
2794          */
2795         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2796                         !cap_zlr(iommu->cap))
2797                 prot |= DMA_PTE_READ;
2798         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2799                 prot |= DMA_PTE_WRITE;
2800
2801         start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
2802
2803         ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
2804         if (unlikely(ret)) {
2805                 /* clear the page-table entries for the range */
2806                 dma_pte_clear_range(domain, start_vpfn,
2807                                     start_vpfn + size - 1);
2808                 /* free page tables */
2809                 dma_pte_free_pagetable(domain, start_vpfn,
2810                                        start_vpfn + size - 1);
2811                 /* free iova */
2812                 __free_iova(&domain->iovad, iova);
2813                 return 0;
2814         }
2815
2816         /* it's a non-present to present mapping. Only flush if caching mode */
2817         if (cap_caching_mode(iommu->cap))
2818                 iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn);
2819         else
2820                 iommu_flush_write_buffer(iommu);
2821
2822         return nelems;
2823 }
2824
2825 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2826 {
2827         return !dma_addr;
2828 }
2829
2830 struct dma_map_ops intel_dma_ops = {
2831         .alloc_coherent = intel_alloc_coherent,
2832         .free_coherent = intel_free_coherent,
2833         .map_sg = intel_map_sg,
2834         .unmap_sg = intel_unmap_sg,
2835         .map_page = intel_map_page,
2836         .unmap_page = intel_unmap_page,
2837         .mapping_error = intel_mapping_error,
2838 };
2839
2840 static inline int iommu_domain_cache_init(void)
2841 {
2842         int ret = 0;
2843
2844         iommu_domain_cache = kmem_cache_create("iommu_domain",
2845                                          sizeof(struct dmar_domain),
2846                                          0,
2847                                          SLAB_HWCACHE_ALIGN,
2849                                          NULL);
2850         if (!iommu_domain_cache) {
2851                 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2852                 ret = -ENOMEM;
2853         }
2854
2855         return ret;
2856 }
2857
2858 static inline int iommu_devinfo_cache_init(void)
2859 {
2860         int ret = 0;
2861
2862         iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2863                                          sizeof(struct device_domain_info),
2864                                          0,
2865                                          SLAB_HWCACHE_ALIGN,
2866                                          NULL);
2867         if (!iommu_devinfo_cache) {
2868                 printk(KERN_ERR "Couldn't create devinfo cache\n");
2869                 ret = -ENOMEM;
2870         }
2871
2872         return ret;
2873 }
2874
2875 static inline int iommu_iova_cache_init(void)
2876 {
2877         int ret = 0;
2878
2879         iommu_iova_cache = kmem_cache_create("iommu_iova",
2880                                          sizeof(struct iova),
2881                                          0,
2882                                          SLAB_HWCACHE_ALIGN,
2883                                          NULL);
2884         if (!iommu_iova_cache) {
2885                 printk(KERN_ERR "Couldn't create iova cache\n");
2886                 ret = -ENOMEM;
2887         }
2888
2889         return ret;
2890 }
2891
2892 static int __init iommu_init_mempool(void)
2893 {
2894         int ret;
2895         ret = iommu_iova_cache_init();
2896         if (ret)
2897                 return ret;
2898
2899         ret = iommu_domain_cache_init();
2900         if (ret)
2901                 goto domain_error;
2902
2903         ret = iommu_devinfo_cache_init();
2904         if (!ret)
2905                 return ret;
2906
2907         kmem_cache_destroy(iommu_domain_cache);
2908 domain_error:
2909         kmem_cache_destroy(iommu_iova_cache);
2910
2911         return -ENOMEM;
2912 }
2913
2914 static void __init iommu_exit_mempool(void)
2915 {
2916         kmem_cache_destroy(iommu_devinfo_cache);
2917         kmem_cache_destroy(iommu_domain_cache);
2918         kmem_cache_destroy(iommu_iova_cache);
2919
2920 }
2921
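/*
 * Mark DRHD units as ignored when they cover no PCI devices at all, or (when
 * dmar_map_gfx is off) only graphics devices; in the graphics-only case the
 * devices are tagged with DUMMY_DEVICE_DOMAIN_INFO so DMA mapping bypasses
 * the IOMMU for them.
 */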
2922 static void __init init_no_remapping_devices(void)
2923 {
2924         struct dmar_drhd_unit *drhd;
2925
2926         for_each_drhd_unit(drhd) {
2927                 if (!drhd->include_all) {
2928                         int i;
2929                         for (i = 0; i < drhd->devices_cnt; i++)
2930                                 if (drhd->devices[i] != NULL)
2931                                         break;
2932                         /* ignore DMAR unit if no pci devices exist */
2933                         if (i == drhd->devices_cnt)
2934                                 drhd->ignored = 1;
2935                 }
2936         }
2937
2938         if (dmar_map_gfx)
2939                 return;
2940
2941         for_each_drhd_unit(drhd) {
2942                 int i;
2943                 if (drhd->ignored || drhd->include_all)
2944                         continue;
2945
2946                 for (i = 0; i < drhd->devices_cnt; i++)
2947                         if (drhd->devices[i] &&
2948                                 !IS_GFX_DEVICE(drhd->devices[i]))
2949                                 break;
2950
2951                 if (i < drhd->devices_cnt)
2952                         continue;
2953
2954                 /* bypass IOMMU if it is just for gfx devices */
2955                 drhd->ignored = 1;
2956                 for (i = 0; i < drhd->devices_cnt; i++) {
2957                         if (!drhd->devices[i])
2958                                 continue;
2959                         drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
2960                 }
2961         }
2962 }
2963
2964 #ifdef CONFIG_SUSPEND
2965 static int init_iommu_hw(void)
2966 {
2967         struct dmar_drhd_unit *drhd;
2968         struct intel_iommu *iommu = NULL;
2969
2970         for_each_active_iommu(iommu, drhd)
2971                 if (iommu->qi)
2972                         dmar_reenable_qi(iommu);
2973
2974         for_each_active_iommu(iommu, drhd) {
2975                 iommu_flush_write_buffer(iommu);
2976
2977                 iommu_set_root_entry(iommu);
2978
2979                 iommu->flush.flush_context(iommu, 0, 0, 0,
2980                                            DMA_CCMD_GLOBAL_INVL);
2981                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2982                                          DMA_TLB_GLOBAL_FLUSH);
2983                 iommu_disable_protect_mem_regions(iommu);
2984                 iommu_enable_translation(iommu);
2985         }
2986
2987         return 0;
2988 }
2989
2990 static void iommu_flush_all(void)
2991 {
2992         struct dmar_drhd_unit *drhd;
2993         struct intel_iommu *iommu;
2994
2995         for_each_active_iommu(iommu, drhd) {
2996                 iommu->flush.flush_context(iommu, 0, 0, 0,
2997                                            DMA_CCMD_GLOBAL_INVL);
2998                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2999                                          DMA_TLB_GLOBAL_FLUSH);
3000         }
3001 }
3002
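/*
 * Across suspend/resume only the fault-event registers (FECTL, FEDATA,
 * FEADDR, FEUADDR) of each IOMMU are saved and restored; the root table,
 * context entries and translation itself are re-established by
 * init_iommu_hw() on resume.
 */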
3003 static int iommu_suspend(struct sys_device *dev, pm_message_t state)
3004 {
3005         struct dmar_drhd_unit *drhd;
3006         struct intel_iommu *iommu = NULL;
3007         unsigned long flag;
3008
3009         for_each_active_iommu(iommu, drhd) {
3010                 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3011                                                  GFP_ATOMIC);
3012                 if (!iommu->iommu_state)
3013                         goto nomem;
3014         }
3015
3016         iommu_flush_all();
3017
3018         for_each_active_iommu(iommu, drhd) {
3019                 iommu_disable_translation(iommu);
3020
3021                 spin_lock_irqsave(&iommu->register_lock, flag);
3022
3023                 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3024                         readl(iommu->reg + DMAR_FECTL_REG);
3025                 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3026                         readl(iommu->reg + DMAR_FEDATA_REG);
3027                 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3028                         readl(iommu->reg + DMAR_FEADDR_REG);
3029                 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3030                         readl(iommu->reg + DMAR_FEUADDR_REG);
3031
3032                 spin_unlock_irqrestore(&iommu->register_lock, flag);
3033         }
3034         return 0;
3035
3036 nomem:
3037         for_each_active_iommu(iommu, drhd)
3038                 kfree(iommu->iommu_state);
3039
3040         return -ENOMEM;
3041 }
3042
3043 static int iommu_resume(struct sys_device *dev)
3044 {
3045         struct dmar_drhd_unit *drhd;
3046         struct intel_iommu *iommu = NULL;
3047         unsigned long flag;
3048
3049         if (init_iommu_hw()) {
3050                 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3051                 return -EIO;
3052         }
3053
3054         for_each_active_iommu(iommu, drhd) {
3055
3056                 spin_lock_irqsave(&iommu->register_lock, flag);
3057
3058                 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3059                         iommu->reg + DMAR_FECTL_REG);
3060                 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3061                         iommu->reg + DMAR_FEDATA_REG);
3062                 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3063                         iommu->reg + DMAR_FEADDR_REG);
3064                 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3065                         iommu->reg + DMAR_FEUADDR_REG);
3066
3067                 spin_unlock_irqrestore(&iommu->register_lock, flag);
3068         }
3069
3070         for_each_active_iommu(iommu, drhd)
3071                 kfree(iommu->iommu_state);
3072
3073         return 0;
3074 }
3075
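/*
 * sysdev glue: hooks iommu_suspend()/iommu_resume() into the system
 * device power-management path (CONFIG_PM only).
 */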
3076 static struct sysdev_class iommu_sysclass = {
3077         .name           = "iommu",
3078         .resume         = iommu_resume,
3079         .suspend        = iommu_suspend,
3080 };
3081
3082 static struct sys_device device_iommu = {
3083         .cls    = &iommu_sysclass,
3084 };
3085
3086 static int __init init_iommu_sysfs(void)
3087 {
3088         int error;
3089
3090         error = sysdev_class_register(&iommu_sysclass);
3091         if (error)
3092                 return error;
3093
3094         error = sysdev_register(&device_iommu);
3095         if (error)
3096                 sysdev_class_unregister(&iommu_sysclass);
3097
3098         return error;
3099 }
3100
3101 #else
3102 static int __init init_iommu_sysfs(void)
3103 {
3104         return 0;
3105 }
3106 #endif  /* CONFIG_PM */
3107
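/*
 * Main entry point for DMA-remapping setup: parse the DMAR table and device
 * scopes, bail out when remapping is disabled or not needed, set up the
 * mempools and reserved IOVA ranges, program the DMAR units via init_dmars(),
 * install intel_dma_ops (unless pass-through is in use), and register with
 * the generic IOMMU API.
 */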
3108 int __init intel_iommu_init(void)
3109 {
3110         int ret = 0;
3111
3112         if (dmar_table_init())
3113                 return -ENODEV;
3114
3115         if (dmar_dev_scope_init())
3116                 return -ENODEV;
3117
3118         /*
3119          * Check whether DMA-remapping initialization is needed now; the
3120          * setup above is also used by interrupt remapping.
3121          */
3122         if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
3123                 return -ENODEV;
3124
3125         iommu_init_mempool();
3126         dmar_init_reserved_ranges();
3127
3128         init_no_remapping_devices();
3129
3130         ret = init_dmars();
3131         if (ret) {
3132                 printk(KERN_ERR "IOMMU: dmar init failed\n");
3133                 put_iova_domain(&reserved_iova_list);
3134                 iommu_exit_mempool();
3135                 return ret;
3136         }
3137         printk(KERN_INFO
3138         "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3139
3140         init_timer(&unmap_timer);
3141         force_iommu = 1;
3142
3143         if (!iommu_pass_through) {
3144                 printk(KERN_INFO
3145                        "DMAR: Using multi-level page-table translation.\n");
3146                 dma_ops = &intel_dma_ops;
3147         } else
3148                 printk(KERN_INFO
3149                        "DMAR: Using pass-through translation.\n");
3150
3151         init_iommu_sysfs();
3152
3153         register_iommu(&intel_iommu_ops);
3154
3155         return 0;
3156 }
3157
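/*
 * If the device sits behind a PCIe-to-PCI bridge, its DMA carries the
 * bridge's source-id, so the context entries set up for every bridge on
 * the path must be torn down along with the device's own entry.
 */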
3158 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3159                                            struct pci_dev *pdev)
3160 {
3161         struct pci_dev *tmp, *parent;
3162
3163         if (!iommu || !pdev)
3164                 return;
3165
3166         /* dependent device detach */
3167         tmp = pci_find_upstream_pcie_bridge(pdev);
3168         /* Secondary interface's bus number and devfn 0 */
3169         if (tmp) {
3170                 parent = pdev->bus->self;
3171                 while (parent != tmp) {
3172                         iommu_detach_dev(iommu, parent->bus->number,
3173                                          parent->devfn);
3174                         parent = parent->bus->self;
3175                 }
3176                 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
3177                         iommu_detach_dev(iommu,
3178                                 tmp->subordinate->number, 0);
3179                 else /* this is a legacy PCI bridge */
3180                         iommu_detach_dev(iommu, tmp->bus->number,
3181                                          tmp->devfn);
3182         }
3183 }
3184
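/*
 * Detach a single device from @domain: drop its device_domain_info,
 * disable its device-IOTLB, and clear its context entry (and those of
 * dependent bridges).  If it was the last device on its IOMMU, clear that
 * IOMMU from the domain's bitmap and recompute the count and capabilities.
 */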
3185 static void domain_remove_one_dev_info(struct dmar_domain *domain,
3186                                           struct pci_dev *pdev)
3187 {
3188         struct device_domain_info *info;
3189         struct intel_iommu *iommu;
3190         unsigned long flags;
3191         int found = 0;
3192         struct list_head *entry, *tmp;
3193
3194         iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3195                                 pdev->devfn);
3196         if (!iommu)
3197                 return;
3198
3199         spin_lock_irqsave(&device_domain_lock, flags);
3200         list_for_each_safe(entry, tmp, &domain->devices) {
3201                 info = list_entry(entry, struct device_domain_info, link);
3202                 /* No need to compare PCI domain; it has to be the same */
3203                 if (info->bus == pdev->bus->number &&
3204                     info->devfn == pdev->devfn) {
3205                         list_del(&info->link);
3206                         list_del(&info->global);
3207                         if (info->dev)
3208                                 info->dev->dev.archdata.iommu = NULL;
3209                         spin_unlock_irqrestore(&device_domain_lock, flags);
3210
3211                         iommu_disable_dev_iotlb(info);
3212                         iommu_detach_dev(iommu, info->bus, info->devfn);
3213                         iommu_detach_dependent_devices(iommu, pdev);
3214                         free_devinfo_mem(info);
3215
3216                         spin_lock_irqsave(&device_domain_lock, flags);
3217
3218                         if (found)
3219                                 break;
3220                         else
3221                                 continue;
3222                 }
3223
3224                 /* If there are no other devices under the same iommu
3225                  * owned by this domain, clear this iommu from iommu_bmp
3226                  * and update the iommu count and coherency.
3227                  */
3228                 if (iommu == device_to_iommu(info->segment, info->bus,
3229                                             info->devfn))
3230                         found = 1;
3231         }
3232
3233         if (found == 0) {
3234                 unsigned long tmp_flags;
3235                 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3236                 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3237                 domain->iommu_count--;
3238                 domain_update_iommu_cap(domain);
3239                 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3240         }
3241
3242         spin_unlock_irqrestore(&device_domain_lock, flags);
3243 }
3244
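/*
 * Detach every device from a virtual-machine domain, dropping each IOMMU
 * from the domain's bitmap as it runs out of devices.
 */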
3245 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3246 {
3247         struct device_domain_info *info;
3248         struct intel_iommu *iommu;
3249         unsigned long flags1, flags2;
3250
3251         spin_lock_irqsave(&device_domain_lock, flags1);
3252         while (!list_empty(&domain->devices)) {
3253                 info = list_entry(domain->devices.next,
3254                         struct device_domain_info, link);
3255                 list_del(&info->link);
3256                 list_del(&info->global);
3257                 if (info->dev)
3258                         info->dev->dev.archdata.iommu = NULL;
3259
3260                 spin_unlock_irqrestore(&device_domain_lock, flags1);
3261
3262                 iommu_disable_dev_iotlb(info);
3263                 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
3264                 iommu_detach_dev(iommu, info->bus, info->devfn);
3265                 iommu_detach_dependent_devices(iommu, info->dev);
3266
3267                 /* clear this iommu in iommu_bmp, update iommu count
3268                  * and capabilities
3269                  */
3270                 spin_lock_irqsave(&domain->iommu_lock, flags2);
3271                 if (test_and_clear_bit(iommu->seq_id,
3272                                        &domain->iommu_bmp)) {
3273                         domain->iommu_count--;
3274                         domain_update_iommu_cap(domain);
3275                 }
3276                 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3277
3278                 free_devinfo_mem(info);
3279                 spin_lock_irqsave(&device_domain_lock, flags1);
3280         }
3281         spin_unlock_irqrestore(&device_domain_lock, flags1);
3282 }
3283
3284 /* domain id for a virtual machine; it is never set in a context entry */
3285 static unsigned long vm_domid;
3286
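/*
 * Return the smallest adjusted guest address width among the domain itself
 * and all IOMMUs it spans; this bounds the highest address that can be
 * mapped on every unit.
 */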
3287 static int vm_domain_min_agaw(struct dmar_domain *domain)
3288 {
3289         int i;
3290         int min_agaw = domain->agaw;
3291
3292         i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
3293         for (; i < g_num_of_iommus; ) {
3294                 if (min_agaw > g_iommus[i]->agaw)
3295                         min_agaw = g_iommus[i]->agaw;
3296
3297                 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
3298         }
3299
3300         return min_agaw;
3301 }
3302
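/*
 * Allocate a domain for a virtual machine.  It gets a private id from
 * vm_domid and is not bound to any IOMMU until devices are attached.
 */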
3303 static struct dmar_domain *iommu_alloc_vm_domain(void)
3304 {
3305         struct dmar_domain *domain;
3306
3307         domain = alloc_domain_mem();
3308         if (!domain)
3309                 return NULL;
3310
3311         domain->id = vm_domid++;
3312         memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3313         domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3314
3315         return domain;
3316 }
3317
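/*
 * Initialize a VM domain: IOVA allocator, locks, reserved ranges, address
 * widths derived from @guest_width, and the top-level page directory.
 */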
3318 static int md_domain_init(struct dmar_domain *domain, int guest_width)
3319 {
3320         int adjust_width;
3321
3322         init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3323         spin_lock_init(&domain->mapping_lock);
3324         spin_lock_init(&domain->iommu_lock);
3325
3326         domain_reserve_special_ranges(domain);
3327
3328         /* calculate AGAW */
3329         domain->gaw = guest_width;
3330         adjust_width = guestwidth_to_adjustwidth(guest_width);
3331         domain->agaw = width_to_agaw(adjust_width);
3332
3333         INIT_LIST_HEAD(&domain->devices);
3334
3335         domain->iommu_count = 0;
3336         domain->iommu_coherency = 0;
3337         domain->max_addr = 0;
3338
3339         /* always allocate the top pgd */
3340         domain->pgd = (struct dma_pte *)alloc_pgtable_page();
3341         if (!domain->pgd)
3342                 return -ENOMEM;
3343         domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3344         return 0;
3345 }
3346
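/*
 * Release the domain-id slots this VM domain occupies on each hardware
 * IOMMU.
 */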
3347 static void iommu_free_vm_domain(struct dmar_domain *domain)
3348 {
3349         unsigned long flags;
3350         struct dmar_drhd_unit *drhd;
3351         struct intel_iommu *iommu;
3352         unsigned long i;
3353         unsigned long ndomains;
3354
3355         for_each_drhd_unit(drhd) {
3356                 if (drhd->ignored)
3357                         continue;
3358                 iommu = drhd->iommu;
3359
3360                 ndomains = cap_ndoms(iommu->cap);
3361                 i = find_first_bit(iommu->domain_ids, ndomains);
3362                 for (; i < ndomains; ) {
3363                         if (iommu->domains[i] == domain) {
3364                                 spin_lock_irqsave(&iommu->lock, flags);
3365                                 clear_bit(i, iommu->domain_ids);
3366                                 iommu->domains[i] = NULL;
3367                                 spin_unlock_irqrestore(&iommu->lock, flags);
3368                                 break;
3369                         }
3370                         i = find_next_bit(iommu->domain_ids, ndomains, i+1);
3371                 }
3372         }
3373 }
3374
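/*
 * Tear down a VM domain: detach all devices, destroy its IOVAs, clear and
 * free its page tables, release its domain ids and free the descriptor.
 */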
3375 static void vm_domain_exit(struct dmar_domain *domain)
3376 {
3377         /* Domain 0 is reserved, so don't process it */
3378         if (!domain)
3379                 return;
3380
3381         vm_domain_remove_all_dev_info(domain);
3382         /* destroy iovas */
3383         put_iova_domain(&domain->iovad);
3384
3385         /* clear ptes */
3386         dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3387
3388         /* free page tables */
3389         dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3390
3391         iommu_free_vm_domain(domain);
3392         free_domain_mem(domain);
3393 }
3394
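/*
 * The functions below implement struct iommu_ops, the generic IOMMU API
 * registered via register_iommu() in intel_iommu_init().
 */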
3395 static int intel_iommu_domain_init(struct iommu_domain *domain)
3396 {
3397         struct dmar_domain *dmar_domain;
3398
3399         dmar_domain = iommu_alloc_vm_domain();
3400         if (!dmar_domain) {
3401                 printk(KERN_ERR
3402                         "intel_iommu_domain_init: failed to allocate dmar_domain\n");
3403                 return -ENOMEM;
3404         }
3405         if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
3406                 printk(KERN_ERR
3407                         "intel_iommu_domain_init: md_domain_init() failed\n");
3408                 vm_domain_exit(dmar_domain);
3409                 return -ENOMEM;
3410         }
3411         domain->priv = dmar_domain;
3412
3413         return 0;
3414 }
3415
3416 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
3417 {
3418         struct dmar_domain *dmar_domain = domain->priv;
3419
3420         domain->priv = NULL;
3421         vm_domain_exit(dmar_domain);
3422 }
3423
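/*
 * Attach @dev to an API-managed domain.  If the device already has a
 * context mapping it is first removed from its old domain, and the IOMMU's
 * address width is checked against the domain's highest mapped address
 * before the new context entry is set up.
 */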
3424 static int intel_iommu_attach_device(struct iommu_domain *domain,
3425                                      struct device *dev)
3426 {
3427         struct dmar_domain *dmar_domain = domain->priv;
3428         struct pci_dev *pdev = to_pci_dev(dev);
3429         struct intel_iommu *iommu;
3430         int addr_width;
3431         u64 end;
3432         int ret;
3433
3434         /* normally pdev is not mapped */
3435         if (unlikely(domain_context_mapped(pdev))) {
3436                 struct dmar_domain *old_domain;
3437
3438                 old_domain = find_domain(pdev);
3439                 if (old_domain) {
3440                         if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3441                             dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3442                                 domain_remove_one_dev_info(old_domain, pdev);
3443                         else
3444                                 domain_remove_dev_info(old_domain);
3445                 }
3446         }
3447
3448         iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3449                                 pdev->devfn);
3450         if (!iommu)
3451                 return -ENODEV;
3452
3453         /* check if this iommu agaw is sufficient for max mapped address */
3454         addr_width = agaw_to_width(iommu->agaw);
3455         end = DOMAIN_MAX_ADDR(addr_width);
3456         end = end & VTD_PAGE_MASK;
3457         if (end < dmar_domain->max_addr) {
3458                 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3459                        "sufficient for the mapped address (%llx)\n",
3460                        __func__, iommu->agaw, dmar_domain->max_addr);
3461                 return -EFAULT;
3462         }
3463
3464         ret = domain_add_dev_info(dmar_domain, pdev);
3465         if (ret)
3466                 return ret;
3467
3468         ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
3469         return ret;
3470 }
3471
3472 static void intel_iommu_detach_device(struct iommu_domain *domain,
3473                                       struct device *dev)
3474 {
3475         struct dmar_domain *dmar_domain = domain->priv;
3476         struct pci_dev *pdev = to_pci_dev(dev);
3477
3478         domain_remove_one_dev_info(dmar_domain, pdev);
3479 }
3480
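/*
 * Map [iova, iova + size) to @hpa with the requested permissions.  If the
 * mapping raises the domain's max_addr, first check that the weakest IOMMU
 * (smallest agaw) spanning the domain can still address it.
 */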
3481 static int intel_iommu_map_range(struct iommu_domain *domain,
3482                                  unsigned long iova, phys_addr_t hpa,
3483                                  size_t size, int iommu_prot)
3484 {
3485         struct dmar_domain *dmar_domain = domain->priv;
3486         u64 max_addr;
3487         int addr_width;
3488         int prot = 0;
3489         int ret;
3490
3491         if (iommu_prot & IOMMU_READ)
3492                 prot |= DMA_PTE_READ;
3493         if (iommu_prot & IOMMU_WRITE)
3494                 prot |= DMA_PTE_WRITE;
3495         if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3496                 prot |= DMA_PTE_SNP;
3497
3498         max_addr = iova + size;
3499         if (dmar_domain->max_addr < max_addr) {
3500                 int min_agaw;
3501                 u64 end;
3502
3503                 /* check if minimum agaw is sufficient for mapped address */
3504                 min_agaw = vm_domain_min_agaw(dmar_domain);
3505                 addr_width = agaw_to_width(min_agaw);
3506                 end = DOMAIN_MAX_ADDR(addr_width);
3507                 end = end & VTD_PAGE_MASK;
3508                 if (end < max_addr) {
3509                         printk(KERN_ERR "%s: iommu agaw (%d) is not "
3510                                "sufficient for the mapped address (%llx)\n",
3511                                __func__, min_agaw, max_addr);
3512                         return -EFAULT;
3513                 }
3514                 dmar_domain->max_addr = max_addr;
3515         }
3516         /* Round up size to next multiple of PAGE_SIZE, if it and
3517            the low bits of hpa would take us onto the next page */
3518         size = aligned_nrpages(hpa, size);
3519         ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
3520                                  hpa >> VTD_PAGE_SHIFT, size, prot);
3521         return ret;
3522 }
3523
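/*
 * Clear the PTEs covering [iova, iova + size); the page-table pages
 * themselves are not freed here.
 */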
3524 static void intel_iommu_unmap_range(struct iommu_domain *domain,
3525                                     unsigned long iova, size_t size)
3526 {
3527         struct dmar_domain *dmar_domain = domain->priv;
3528
3529         dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3530                             (iova + size - 1) >> VTD_PAGE_SHIFT);
3531
3532         if (dmar_domain->max_addr == iova + size)
3533                 dmar_domain->max_addr = iova;
3534 }
3535
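/*
 * Look up the PTE for @iova and return the (page-aligned) physical address
 * it points to, or 0 if nothing is mapped there.
 */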
3536 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3537                                             unsigned long iova)
3538 {
3539         struct dmar_domain *dmar_domain = domain->priv;
3540         struct dma_pte *pte;
3541         u64 phys = 0;
3542
3543         pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
3544         if (pte)
3545                 phys = dma_pte_addr(pte);
3546
3547         return phys;
3548 }
3549
3550 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3551                                       unsigned long cap)
3552 {
3553         struct dmar_domain *dmar_domain = domain->priv;
3554
3555         if (cap == IOMMU_CAP_CACHE_COHERENCY)
3556                 return dmar_domain->iommu_snooping;
3557
3558         return 0;
3559 }
3560
3561 static struct iommu_ops intel_iommu_ops = {
3562         .domain_init    = intel_iommu_domain_init,
3563         .domain_destroy = intel_iommu_domain_destroy,
3564         .attach_dev     = intel_iommu_attach_device,
3565         .detach_dev     = intel_iommu_detach_device,
3566         .map            = intel_iommu_map_range,
3567         .unmap          = intel_iommu_unmap_range,
3568         .iova_to_phys   = intel_iommu_iova_to_phys,
3569         .domain_has_cap = intel_iommu_domain_has_cap,
3570 };
3571
3572 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3573 {
3574         /*
3575          * Mobile 4 Series Chipset neglects to set RWBF capability,
3576          * but needs it:
3577          */
3578         printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
3579         rwbf_quirk = 1;
3580 }
3581
3582 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);