Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
authorDavid Woodhouse <David.Woodhouse@intel.com>
Tue, 8 Dec 2009 09:58:33 +0000 (09:58 +0000)
committerDavid Woodhouse <David.Woodhouse@intel.com>
Tue, 8 Dec 2009 09:59:24 +0000 (09:59 +0000)
Merge the BIOS workarounds from 2.6.32, and the swiotlb fallback on failure.

drivers/pci/dmar.c
drivers/pci/intel-iommu.c
drivers/pci/intr_remapping.c
include/linux/intel-iommu.h

index 416f6ac65b761080a2e81a8ace5c5d442a8ea2d1..525a32487abdd24a2457b445ff094355beb03160 100644 (file)
@@ -339,6 +339,35 @@ found:
 }
 #endif
 
+#ifdef CONFIG_ACPI_NUMA
+static int __init
+dmar_parse_one_rhsa(struct acpi_dmar_header *header)
+{
+       struct acpi_dmar_rhsa *rhsa;
+       struct dmar_drhd_unit *drhd;
+
+       rhsa = (struct acpi_dmar_rhsa *)header;
+       for_each_drhd_unit(drhd) {
+               if (drhd->reg_base_addr == rhsa->base_address) {
+                       int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
+
+                       if (!node_online(node))
+                               node = -1;
+                       drhd->iommu->node = node;
+                       return 0;
+               }
+       }
+       /* No match: 'drhd' is past the list end here — must not be dereferenced */
+       WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
+            "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+            rhsa->base_address,
+            dmi_get_system_info(DMI_BIOS_VENDOR),
+            dmi_get_system_info(DMI_BIOS_VERSION),
+            dmi_get_system_info(DMI_PRODUCT_VERSION));
+
+       return 0;
+}
+#endif
+
 static void __init
 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
 {
@@ -458,7 +487,9 @@ parse_dmar_table(void)
 #endif
                        break;
                case ACPI_DMAR_HARDWARE_AFFINITY:
-                       /* We don't do anything with RHSA (yet?) */
+#ifdef CONFIG_ACPI_NUMA
+                       ret = dmar_parse_one_rhsa(entry_header);
+#endif
                        break;
                default:
                        printk(KERN_WARNING PREFIX
@@ -712,6 +743,8 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
        iommu->agaw = agaw;
        iommu->msagaw = msagaw;
 
+       iommu->node = -1;
+
        /* the registers might be more than one page */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                cap_max_fault_reg_offset(iommu->cap));
@@ -1053,6 +1086,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
 int dmar_enable_qi(struct intel_iommu *iommu)
 {
        struct q_inval *qi;
+       struct page *desc_page;
 
        if (!ecap_qis(iommu->ecap))
                return -ENOENT;
@@ -1069,13 +1103,16 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 
        qi = iommu->qi;
 
-       qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
-       if (!qi->desc) {
+
+       desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
+       if (!desc_page) {
                kfree(qi);
                iommu->qi = 0;
                return -ENOMEM;
        }
 
+       qi->desc = page_address(desc_page);
+
        qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);
index 9261327b49f308941c07af292a31bc379b7cb6b9..cb5cae3e0205c5479e37585aa2a6cfd12e20f735 100644 (file)
@@ -277,6 +277,7 @@ static int hw_pass_through = 1;
 
 struct dmar_domain {
        int     id;                     /* domain id */
+       int     nid;                    /* node id */
        unsigned long iommu_bmp;        /* bitmap of iommus this domain uses*/
 
        struct list_head devices;       /* all devices' list */
@@ -400,15 +401,18 @@ static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
 }
 
 
-static inline void *alloc_pgtable_page(void)
+static inline void *alloc_pgtable_page(int node)
 {
        unsigned int flags;
-       void *vaddr;
+       struct page *page;
+       void *vaddr = NULL;
 
        /* trying to avoid low memory issues */
        flags = current->flags & PF_MEMALLOC;
        current->flags |= PF_MEMALLOC;
-       vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
+       page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
+       if (page)
+               vaddr = page_address(page);
        current->flags &= (~PF_MEMALLOC | flags);
        return vaddr;
 }
@@ -589,7 +593,8 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (!context) {
-               context = (struct context_entry *)alloc_pgtable_page();
+               context = (struct context_entry *)
+                               alloc_pgtable_page(iommu->node);
                if (!context) {
                        spin_unlock_irqrestore(&iommu->lock, flags);
                        return NULL;
@@ -732,7 +737,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                if (!dma_pte_present(pte)) {
                        uint64_t pteval;
 
-                       tmp_page = alloc_pgtable_page();
+                       tmp_page = alloc_pgtable_page(domain->nid);
 
                        if (!tmp_page)
                                return NULL;
@@ -868,7 +873,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
        struct root_entry *root;
        unsigned long flags;
 
-       root = (struct root_entry *)alloc_pgtable_page();
+       root = (struct root_entry *)alloc_pgtable_page(iommu->node);
        if (!root)
                return -ENOMEM;
 
@@ -1263,6 +1268,7 @@ static struct dmar_domain *alloc_domain(void)
        if (!domain)
                return NULL;
 
+       domain->nid = -1;
        memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
        domain->flags = 0;
 
@@ -1420,9 +1426,10 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
                domain->iommu_snooping = 0;
 
        domain->iommu_count = 1;
+       domain->nid = iommu->node;
 
        /* always allocate the top pgd */
-       domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+       domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
        if (!domain->pgd)
                return -ENOMEM;
        __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
@@ -1577,6 +1584,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
        spin_lock_irqsave(&domain->iommu_lock, flags);
        if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
                domain->iommu_count++;
+               if (domain->iommu_count == 1)
+                       domain->nid = iommu->node;
                domain_update_iommu_cap(domain);
        }
        spin_unlock_irqrestore(&domain->iommu_lock, flags);
@@ -3455,6 +3464,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void)
                return NULL;
 
        domain->id = vm_domid++;
+       domain->nid = -1;
        memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
        domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
 
@@ -3481,9 +3491,10 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
        domain->iommu_coherency = 0;
        domain->iommu_snooping = 0;
        domain->max_addr = 0;
+       domain->nid = -1;
 
        /* always allocate the top pgd */
-       domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+       domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
        if (!domain->pgd)
                return -ENOMEM;
        domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
index 0ed78a764ded2e10ede6f3f109ab481bca73be42..fccf0e2fcba330f8959ace4b3b1c9e99ff71c0c1 100644 (file)
@@ -548,7 +548,8 @@ static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
        if (!iommu->ir_table)
                return -ENOMEM;
 
-       pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
+       pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
+                                INTR_REMAP_PAGE_ORDER);
 
        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
index 4f0a72a9740cfa86220e3a22816445e36a986705..9310c699a37d451d2f2a24e961b7fab5545753ac 100644 (file)
@@ -332,6 +332,7 @@ struct intel_iommu {
 #ifdef CONFIG_INTR_REMAP
        struct ir_table *ir_table;      /* Interrupt remapping info */
 #endif
+       int             node;
 };
 
 static inline void __iommu_flush_cache(