Merge git://git.infradead.org/iommu-2.6
authorLinus Torvalds <torvalds@linux-foundation.org>
Wed, 16 Dec 2009 18:11:38 +0000 (10:11 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 16 Dec 2009 18:11:38 +0000 (10:11 -0800)
* git://git.infradead.org/iommu-2.6:
  implement early_io{re,un}map for ia64
  Revert "Intel IOMMU: Avoid memory allocation failures in dma map api calls"
  intel-iommu: ignore page table validation in pass through mode
  intel-iommu: Fix oops with intel_iommu=igfx_off
  intel-iommu: Check for an RMRR which ends before it starts.
  intel-iommu: Apply BIOS sanity checks for interrupt remapping too.
  intel-iommu: Detect DMAR in hyperspace at probe time.
  dmar: Fix build failure without NUMA, warn on bogus RHSA tables and don't abort
  iommu: Allocate dma-remapping structures using numa locality info
  intr_remap: Allocate intr-remapping table using numa locality info
  dmar: Allocate queued invalidation structure using numa locality info
  dmar: support for parsing Remapping Hardware Static Affinity structure

1  2 
drivers/pci/dmar.c
drivers/pci/intel-iommu.c
drivers/pci/intr_remapping.c

diff --combined drivers/pci/dmar.c
index 6cdc931f7c1773e6fb59b2c8bbac85df950a9f86,beeaef84e151b9fe68c4f48de062a3faa24f82f8..83aae47475940b383fe89c5fd3cf30c7c7cb3ec4
@@@ -320,7 -320,7 +320,7 @@@ found
        for (bus = dev->bus; bus; bus = bus->parent) {
                struct pci_dev *bridge = bus->self;
  
 -              if (!bridge || !bridge->is_pcie ||
 +              if (!bridge || !pci_is_pcie(bridge) ||
                    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
                        return 0;
  
  }
  #endif
  
+ #ifdef CONFIG_ACPI_NUMA
+ static int __init
+ dmar_parse_one_rhsa(struct acpi_dmar_header *header)
+ {
+       struct acpi_dmar_rhsa *rhsa;
+       struct dmar_drhd_unit *drhd;
+       rhsa = (struct acpi_dmar_rhsa *)header;
+       for_each_drhd_unit(drhd) {
+               if (drhd->reg_base_addr == rhsa->base_address) {
+                       int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
+                       if (!node_online(node))
+                               node = -1;
+                       drhd->iommu->node = node;
+                       return 0;
+               }
+       }
+       WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
+            "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+            rhsa->base_address,
+            dmi_get_system_info(DMI_BIOS_VENDOR),
+            dmi_get_system_info(DMI_BIOS_VERSION),
+            dmi_get_system_info(DMI_PRODUCT_VERSION));
+       return 0;
+ }
+ #endif
  static void __init
  dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
  {
@@@ -458,7 -487,9 +487,9 @@@ parse_dmar_table(void
  #endif
                        break;
                case ACPI_DMAR_HARDWARE_AFFINITY:
-                       /* We don't do anything with RHSA (yet?) */
+ #ifdef CONFIG_ACPI_NUMA
+                       ret = dmar_parse_one_rhsa(entry_header);
+ #endif
                        break;
                default:
                        printk(KERN_WARNING PREFIX
@@@ -582,6 -613,8 +613,8 @@@ int __init dmar_table_init(void
        return 0;
  }
  
+ static int bios_warned;
  int __init check_zero_address(void)
  {
        struct acpi_table_dmar *dmar;
                }
  
                if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
+                       void __iomem *addr;
+                       u64 cap, ecap;
                        drhd = (void *)entry_header;
                        if (!drhd->address) {
                                /* Promote an attitude of violence to a BIOS engineer today */
                                     dmi_get_system_info(DMI_BIOS_VENDOR),
                                     dmi_get_system_info(DMI_BIOS_VERSION),
                                     dmi_get_system_info(DMI_PRODUCT_VERSION));
- #ifdef CONFIG_DMAR
-                               dmar_disabled = 1;
- #endif
-                               return 0;
+                               bios_warned = 1;
+                               goto failed;
+                       }
+                       addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
+                       if (!addr) {
+                               printk("IOMMU: can't validate: %llx\n", drhd->address);
+                               goto failed;
+                       }
+                       cap = dmar_readq(addr + DMAR_CAP_REG);
+                       ecap = dmar_readq(addr + DMAR_ECAP_REG);
+                       early_iounmap(addr, VTD_PAGE_SIZE);
+                       if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
+                               /* Promote an attitude of violence to a BIOS engineer today */
+                               WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
+                                    "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+                                     drhd->address,
+                                     dmi_get_system_info(DMI_BIOS_VENDOR),
+                                     dmi_get_system_info(DMI_BIOS_VERSION),
+                                     dmi_get_system_info(DMI_PRODUCT_VERSION));
+                               bios_warned = 1;
+                               goto failed;
                        }
-                       break;
                }
  
                entry_header = ((void *)entry_header + entry_header->length);
        }
        return 1;
+ failed:
+ #ifdef CONFIG_DMAR
+       dmar_disabled = 1;
+ #endif
+       return 0;
  }
  
  void __init detect_intel_iommu(void)
                               "x2apic and Intr-remapping.\n");
  #endif
  #ifdef CONFIG_DMAR
 -              if (ret && !no_iommu && !iommu_detected && !dmar_disabled)
 +              if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
                        iommu_detected = 1;
 +                      /* Make sure ACS will be enabled */
 +                      pci_request_acs();
 +              }
  #endif
  #ifdef CONFIG_X86
                if (ret)
@@@ -670,6 -726,18 +729,18 @@@ int alloc_iommu(struct dmar_drhd_unit *
        int agaw = 0;
        int msagaw = 0;
  
+       if (!drhd->reg_base_addr) {
+               if (!bios_warned) {
+                       WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
+                            "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+                            dmi_get_system_info(DMI_BIOS_VENDOR),
+                            dmi_get_system_info(DMI_BIOS_VERSION),
+                            dmi_get_system_info(DMI_PRODUCT_VERSION));
+                       bios_warned = 1;
+               }
+               return -EINVAL;
+       }
        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
  
        if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
-               /* Promote an attitude of violence to a BIOS engineer today */
-               WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
-                    "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
-                    drhd->reg_base_addr,
-                    dmi_get_system_info(DMI_BIOS_VENDOR),
-                    dmi_get_system_info(DMI_BIOS_VERSION),
-                    dmi_get_system_info(DMI_PRODUCT_VERSION));
+               if (!bios_warned) {
+                       /* Promote an attitude of violence to a BIOS engineer today */
+                       WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
+                            "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+                            drhd->reg_base_addr,
+                            dmi_get_system_info(DMI_BIOS_VENDOR),
+                            dmi_get_system_info(DMI_BIOS_VERSION),
+                            dmi_get_system_info(DMI_PRODUCT_VERSION));
+                       bios_warned = 1;
+               }
                goto err_unmap;
        }
  
        iommu->agaw = agaw;
        iommu->msagaw = msagaw;
  
+       iommu->node = -1;
        /* the registers might be more than one page */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                cap_max_fault_reg_offset(iommu->cap));
@@@ -1056,6 -1129,7 +1132,7 @@@ static void __dmar_enable_qi(struct int
  int dmar_enable_qi(struct intel_iommu *iommu)
  {
        struct q_inval *qi;
+       struct page *desc_page;
  
        if (!ecap_qis(iommu->ecap))
                return -ENOENT;
  
        qi = iommu->qi;
  
-       qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
-       if (!qi->desc) {
+       desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
+       if (!desc_page) {
                kfree(qi);
                iommu->qi = 0;
                return -ENOMEM;
        }
  
+       qi->desc = page_address(desc_page);
        qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);
index 8d6159426311bda6a5bcab491b282e5427d6eff2,4e1dd40f18e3b2289a79631ef156cac51ce4f671..e56f9bed6f2b29d74c1e8e0a910644bc31d5a688
@@@ -277,6 -277,7 +277,7 @@@ static int hw_pass_through = 1
  
  struct dmar_domain {
        int     id;                     /* domain id */
+       int     nid;                    /* node id */
        unsigned long iommu_bmp;        /* bitmap of iommus this domain uses*/
  
        struct list_head devices;       /* all devices' list */
@@@ -386,30 -387,14 +387,14 @@@ static struct kmem_cache *iommu_domain_
  static struct kmem_cache *iommu_devinfo_cache;
  static struct kmem_cache *iommu_iova_cache;
  
- static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
+ static inline void *alloc_pgtable_page(int node)
  {
-       unsigned int flags;
-       void *vaddr;
-       /* trying to avoid low memory issues */
-       flags = current->flags & PF_MEMALLOC;
-       current->flags |= PF_MEMALLOC;
-       vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
-       current->flags &= (~PF_MEMALLOC | flags);
-       return vaddr;
- }
+       struct page *page;
+       void *vaddr = NULL;
  
- static inline void *alloc_pgtable_page(void)
- {
-       unsigned int flags;
-       void *vaddr;
-       /* trying to avoid low memory issues */
-       flags = current->flags & PF_MEMALLOC;
-       current->flags |= PF_MEMALLOC;
-       vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
-       current->flags &= (~PF_MEMALLOC | flags);
+       page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
+       if (page)
+               vaddr = page_address(page);
        return vaddr;
  }
  
@@@ -420,7 -405,7 +405,7 @@@ static inline void free_pgtable_page(vo
  
  static inline void *alloc_domain_mem(void)
  {
-       return iommu_kmem_cache_alloc(iommu_domain_cache);
+       return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
  }
  
  static void free_domain_mem(void *vaddr)
  
  static inline void * alloc_devinfo_mem(void)
  {
-       return iommu_kmem_cache_alloc(iommu_devinfo_cache);
+       return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
  }
  
  static inline void free_devinfo_mem(void *vaddr)
  
  struct iova *alloc_iova_mem(void)
  {
-       return iommu_kmem_cache_alloc(iommu_iova_cache);
+       return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
  }
  
  void free_iova_mem(struct iova *iova)
@@@ -589,7 -574,8 +574,8 @@@ static struct context_entry * device_to
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (!context) {
-               context = (struct context_entry *)alloc_pgtable_page();
+               context = (struct context_entry *)
+                               alloc_pgtable_page(iommu->node);
                if (!context) {
                        spin_unlock_irqrestore(&iommu->lock, flags);
                        return NULL;
@@@ -732,7 -718,7 +718,7 @@@ static struct dma_pte *pfn_to_dma_pte(s
                if (!dma_pte_present(pte)) {
                        uint64_t pteval;
  
-                       tmp_page = alloc_pgtable_page();
+                       tmp_page = alloc_pgtable_page(domain->nid);
  
                        if (!tmp_page)
                                return NULL;
@@@ -868,7 -854,7 +854,7 @@@ static int iommu_alloc_root_entry(struc
        struct root_entry *root;
        unsigned long flags;
  
-       root = (struct root_entry *)alloc_pgtable_page();
+       root = (struct root_entry *)alloc_pgtable_page(iommu->node);
        if (!root)
                return -ENOMEM;
  
@@@ -1263,6 -1249,7 +1249,7 @@@ static struct dmar_domain *alloc_domain
        if (!domain)
                return NULL;
  
+       domain->nid = -1;
        memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
        domain->flags = 0;
  
@@@ -1420,9 -1407,10 +1407,10 @@@ static int domain_init(struct dmar_doma
                domain->iommu_snooping = 0;
  
        domain->iommu_count = 1;
+       domain->nid = iommu->node;
  
        /* always allocate the top pgd */
-       domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+       domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
        if (!domain->pgd)
                return -ENOMEM;
        __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
@@@ -1523,12 -1511,15 +1511,15 @@@ static int domain_context_mapping_one(s
  
                /* Skip top levels of page tables for
                 * iommu which has less agaw than default.
+                * Unnecessary for PT mode.
                 */
-               for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
-                       pgd = phys_to_virt(dma_pte_addr(pgd));
-                       if (!dma_pte_present(pgd)) {
-                               spin_unlock_irqrestore(&iommu->lock, flags);
-                               return -ENOMEM;
+               if (translation != CONTEXT_TT_PASS_THROUGH) {
+                       for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+                               pgd = phys_to_virt(dma_pte_addr(pgd));
+                               if (!dma_pte_present(pgd)) {
+                                       spin_unlock_irqrestore(&iommu->lock, flags);
+                                       return -ENOMEM;
+                               }
                        }
                }
        }
        spin_lock_irqsave(&domain->iommu_lock, flags);
        if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
                domain->iommu_count++;
+               if (domain->iommu_count == 1)
+                       domain->nid = iommu->node;
                domain_update_iommu_cap(domain);
        }
        spin_unlock_irqrestore(&domain->iommu_lock, flags);
@@@ -1611,7 -1604,7 +1604,7 @@@ domain_context_mapping(struct dmar_doma
                        return ret;
                parent = parent->bus->self;
        }
 -      if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
 +      if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */
                return domain_context_mapping_one(domain,
                                        pci_domain_nr(tmp->subordinate),
                                        tmp->subordinate->number, 0,
@@@ -1651,7 -1644,7 +1644,7 @@@ static int domain_context_mapped(struc
                        return ret;
                parent = parent->bus->self;
        }
 -      if (tmp->is_pcie)
 +      if (pci_is_pcie(tmp))
                return device_context_mapped(iommu, tmp->subordinate->number,
                                             0);
        else
@@@ -1821,7 -1814,7 +1814,7 @@@ static struct dmar_domain *get_domain_f
  
        dev_tmp = pci_find_upstream_pcie_bridge(pdev);
        if (dev_tmp) {
 -              if (dev_tmp->is_pcie) {
 +              if (pci_is_pcie(dev_tmp)) {
                        bus = dev_tmp->subordinate->number;
                        devfn = 0;
                } else {
@@@ -1991,6 -1984,16 +1984,16 @@@ static int iommu_prepare_identity_map(s
               "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
               pci_name(pdev), start, end);
        
+       if (end < start) {
+               WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
+                       "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+                       dmi_get_system_info(DMI_BIOS_VENDOR),
+                       dmi_get_system_info(DMI_BIOS_VERSION),
+                       dmi_get_system_info(DMI_PRODUCT_VERSION));
+               ret = -EIO;
+               goto error;
+       }
        if (end >> agaw_to_width(domain->agaw)) {
                WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
@@@ -2182,7 -2185,7 +2185,7 @@@ static int iommu_should_identity_map(st
         * the 1:1 domain, just in _case_ one of their siblings turns out
         * not to be able to map all of memory.
         */
 -      if (!pdev->is_pcie) {
 +      if (!pci_is_pcie(pdev)) {
                if (!pci_is_root_bus(pdev->bus))
                        return 0;
                if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
@@@ -3228,6 -3231,9 +3231,9 @@@ static int device_notifier(struct notif
        struct pci_dev *pdev = to_pci_dev(dev);
        struct dmar_domain *domain;
  
+       if (iommu_no_mapping(dev))
+               return 0;
        domain = find_domain(pdev);
        if (!domain)
                return 0;
@@@ -3319,7 -3325,7 +3325,7 @@@ static void iommu_detach_dependent_devi
                                         parent->devfn);
                        parent = parent->bus->self;
                }
 -              if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
 +              if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */
                        iommu_detach_dev(iommu,
                                tmp->subordinate->number, 0);
                else /* this is a legacy PCI bridge */
@@@ -3455,6 -3461,7 +3461,7 @@@ static struct dmar_domain *iommu_alloc_
                return NULL;
  
        domain->id = vm_domid++;
+       domain->nid = -1;
        memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
        domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
  
@@@ -3481,9 -3488,10 +3488,10 @@@ static int md_domain_init(struct dmar_d
        domain->iommu_coherency = 0;
        domain->iommu_snooping = 0;
        domain->max_addr = 0;
+       domain->nid = -1;
  
        /* always allocate the top pgd */
-       domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+       domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
        if (!domain->pgd)
                return -ENOMEM;
        domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
index 1487bf2be863115a9f858b9ed0886945d012dae1,fccf0e2fcba330f8959ace4b3b1c9e99ff71c0c1..8b65a489581b07aa88349a1f37285f03efec6b2b
@@@ -2,7 -2,6 +2,7 @@@
  #include <linux/dmar.h>
  #include <linux/spinlock.h>
  #include <linux/jiffies.h>
 +#include <linux/hpet.h>
  #include <linux/pci.h>
  #include <linux/irq.h>
  #include <asm/io_apic.h>
@@@ -15,8 -14,7 +15,8 @@@
  #include "pci.h"
  
  static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
 -static int ir_ioapic_num;
 +static struct hpet_scope ir_hpet[MAX_HPET_TBS];
 +static int ir_ioapic_num, ir_hpet_num;
  int intr_remapping_enabled;
  
  static int disable_intremap;
@@@ -345,16 -343,6 +345,16 @@@ int flush_irte(int irq
        return rc;
  }
  
 +struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
 +{
 +      int i;
 +
 +      for (i = 0; i < MAX_HPET_TBS; i++)
 +              if (ir_hpet[i].id == hpet_id)
 +                      return ir_hpet[i].iommu;
 +      return NULL;
 +}
 +
  struct intel_iommu *map_ioapic_to_ir(int apic)
  {
        int i;
@@@ -482,36 -470,6 +482,36 @@@ int set_ioapic_sid(struct irte *irte, i
        return 0;
  }
  
 +int set_hpet_sid(struct irte *irte, u8 id)
 +{
 +      int i;
 +      u16 sid = 0;
 +
 +      if (!irte)
 +              return -1;
 +
 +      for (i = 0; i < MAX_HPET_TBS; i++) {
 +              if (ir_hpet[i].id == id) {
 +                      sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
 +                      break;
 +              }
 +      }
 +
 +      if (sid == 0) {
 +              pr_warning("Failed to set source-id of HPET block (%d)\n", id);
 +              return -1;
 +      }
 +
 +      /*
 +       * Should really use SQ_ALL_16. Some platforms are broken.
 +       * While we figure out the right quirks for these broken platforms, use
 +       * SQ_13_IGNORE_3 for now.
 +       */
 +      set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);
 +
 +      return 0;
 +}
 +
  int set_msi_sid(struct irte *irte, struct pci_dev *dev)
  {
        struct pci_dev *bridge;
                return -1;
  
        /* PCIe device or Root Complex integrated PCI device */
 -      if (dev->is_pcie || !dev->bus->parent) {
 +      if (pci_is_pcie(dev) || !dev->bus->parent) {
                set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                             (dev->bus->number << 8) | dev->devfn);
                return 0;
  
        bridge = pci_find_upstream_pcie_bridge(dev);
        if (bridge) {
 -              if (bridge->is_pcie) /* this is a PCIE-to-PCI/PCIX bridge */
 +              if (pci_is_pcie(bridge))/* this is a PCIE-to-PCI/PCIX bridge */
                        set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
                                (bridge->bus->number << 8) | dev->bus->number);
                else /* this is a legacy PCI bridge */
@@@ -590,7 -548,8 +590,8 @@@ static int setup_intr_remapping(struct 
        if (!iommu->ir_table)
                return -ENOMEM;
  
-       pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
+       pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
+                                INTR_REMAP_PAGE_ORDER);
  
        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
@@@ -753,34 -712,6 +754,34 @@@ error
        return -1;
  }
  
 +static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
 +                                    struct intel_iommu *iommu)
 +{
 +      struct acpi_dmar_pci_path *path;
 +      u8 bus;
 +      int count;
 +
 +      bus = scope->bus;
 +      path = (struct acpi_dmar_pci_path *)(scope + 1);
 +      count = (scope->length - sizeof(struct acpi_dmar_device_scope))
 +              / sizeof(struct acpi_dmar_pci_path);
 +
 +      while (--count > 0) {
 +              /*
 +               * Access PCI directly due to the PCI
 +               * subsystem isn't initialized yet.
 +               */
 +              bus = read_pci_config_byte(bus, path->dev, path->fn,
 +                                         PCI_SECONDARY_BUS);
 +              path++;
 +      }
 +      ir_hpet[ir_hpet_num].bus   = bus;
 +      ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
 +      ir_hpet[ir_hpet_num].iommu = iommu;
 +      ir_hpet[ir_hpet_num].id    = scope->enumeration_id;
 +      ir_hpet_num++;
 +}
 +
  static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
                                      struct intel_iommu *iommu)
  {
        ir_ioapic_num++;
  }
  
 -static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
 -                               struct intel_iommu *iommu)
 +static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
 +                                    struct intel_iommu *iommu)
  {
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
                               drhd->address);
  
                        ir_parse_one_ioapic_scope(scope, iommu);
 +              } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
 +                      if (ir_hpet_num == MAX_HPET_TBS) {
 +                              printk(KERN_WARNING "Exceeded Max HPET blocks\n");
 +                              return -1;
 +                      }
 +
 +                      printk(KERN_INFO "HPET id %d under DRHD base"
 +                             " 0x%Lx\n", scope->enumeration_id,
 +                             drhd->address);
 +
 +                      ir_parse_one_hpet_scope(scope, iommu);
                }
                start += scope->length;
        }
@@@ -866,7 -786,7 +867,7 @@@ int __init parse_ioapics_under_ir(void
                struct intel_iommu *iommu = drhd->iommu;
  
                if (ecap_ir_support(iommu->ecap)) {
 -                      if (ir_parse_ioapic_scope(drhd->hdr, iommu))
 +                      if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
                                return -1;
  
                        ir_supported = 1;