Merge branches 'upstream/xenfs' and 'upstream/core' of git://git.kernel.org/pub/scm...
Linus Torvalds [Wed, 27 Oct 2010 01:20:19 +0000 (18:20 -0700)]
* 'upstream/xenfs' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen:
  xen/privcmd: make privcmd visible in domU
  xen/privcmd: move remap_domain_mfn_range() to core xen code and export.
  privcmd: MMAPBATCH: Fix error handling/reporting
  xenbus: export xen_store_interface for xenfs
  xen/privcmd: make sure vma is ours before doing anything to it
  xen/privcmd: print SIGBUS faults
  xen/xenfs: set_page_dirty is supposed to return true if it dirties
  xen/privcmd: create address space to allow writable mmaps
  xen: add privcmd driver
  xen: add variable hypercall caller
  xen: add xen_set_domain_pte()
  xen: add /proc/xen/xsd_{kva,port} to xenfs

* 'upstream/core' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen: (29 commits)
  xen: include xen/xen.h for definition of xen_initial_domain()
  xen: use host E820 map for dom0
  xen: correctly rebuild mfn list list after migration.
  xen: improvements to VIRQ_DEBUG output
  xen: set up IRQ before binding virq to evtchn
  xen: ensure that all event channels start off bound to VCPU 0
  xen/hvc: only notify if we actually sent something
  xen: don't add extra_pages for RAM after mem_end
  xen: add support for PAT
  xen: make sure xen_max_p2m_pfn is up to date
  xen: limit extra memory to a certain ratio of base
  xen: add extra pages for E820 RAM regions, even if beyond mem_end
  xen: make sure xen_extra_mem_start is beyond all non-RAM e820
  xen: implement "extra" memory to reserve space for pages not present at boot
  xen: Use host-provided E820 map
  xen: don't map missing memory
  xen: defer building p2m mfn structures until kernel is mapped
  xen: add return value to set_phys_to_machine()
  xen: convert p2m to a 3 level tree
  xen: make install_p2mtop_page() static
  ...

Fix up trivial conflict in arch/x86/xen/mmu.c, and fix the use of
'reserve_early()' - in the new memblock world order it is now
'memblock_x86_reserve_range()' instead. Pointed out by Jeremy.
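As a sketch of that rename (the new call is visible in the arch/x86/xen/setup.c
hunk below; both functions take the same (start, end, name) arguments):

    /* Before the merge: old early-reservation API */
    reserve_early(extra_start, extra_start + size, "XEN EXTRA");

    /* After the merge: memblock equivalent, as used in xen_add_extra_mem() */
    memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");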

20 files changed:
arch/x86/include/asm/xen/hypercall.h
arch/x86/include/asm/xen/page.h
arch/x86/xen/Kconfig
arch/x86/xen/enlighten.c
arch/x86/xen/mmu.c
arch/x86/xen/mmu.h
arch/x86/xen/setup.c
arch/x86/xen/xen-ops.h
drivers/char/hvc_xen.c
drivers/xen/events.c
drivers/xen/xenbus/xenbus_probe.c
drivers/xen/xenfs/Makefile
drivers/xen/xenfs/privcmd.c [new file with mode: 0644]
drivers/xen/xenfs/super.c
drivers/xen/xenfs/xenfs.h
drivers/xen/xenfs/xenstored.c [new file with mode: 0644]
include/xen/Kbuild
include/xen/interface/memory.h
include/xen/privcmd.h [new file with mode: 0644]
include/xen/xen-ops.h

diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 7fda040..a3c28ae 100644 (file)
@@ -200,6 +200,23 @@ extern struct { char _entry[32]; } hypercall_page[];
        (type)__res;                                                    \
 })
 
+static inline long
+privcmd_call(unsigned call,
+            unsigned long a1, unsigned long a2,
+            unsigned long a3, unsigned long a4,
+            unsigned long a5)
+{
+       __HYPERCALL_DECLS;
+       __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+
+       asm volatile("call *%[call]"
+                    : __HYPERCALL_5PARAM
+                    : [call] "a" (&hypercall_page[call])
+                    : __HYPERCALL_CLOBBER5);
+
+       return (long)__res;
+}
+
 static inline int
 HYPERVISOR_set_trap_table(struct trap_info *table)
 {
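The privcmd_call() added above issues an arbitrary hypercall by indexing into
hypercall_page, like the fixed HYPERVISOR_* wrappers but with the operation
number supplied at runtime. A minimal illustrative use (the real in-tree
caller is privcmd_ioctl_hypercall() in the new privcmd.c further down;
__HYPERVISOR_xen_version and XENVER_version are the standard constants from
xen/interface/xen.h and xen/interface/version.h):

    /* Illustrative only: query the hypervisor version via the
     * generic 5-argument path; unused arguments are passed as 0. */
    long ver = privcmd_call(__HYPERVISOR_xen_version,
                            XENVER_version, 0, 0, 0, 0);
    /* ver packs (major << 16) | minor on success. */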
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index bf5f7d3..dd8c141 100644 (file)
@@ -37,14 +37,21 @@ typedef struct xpaddr {
 
 
 extern unsigned long get_phys_to_machine(unsigned long pfn);
-extern void set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 
 static inline unsigned long pfn_to_mfn(unsigned long pfn)
 {
+       unsigned long mfn;
+
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return pfn;
 
-       return get_phys_to_machine(pfn) & ~FOREIGN_FRAME_BIT;
+       mfn = get_phys_to_machine(pfn);
+
+       if (mfn != INVALID_P2M_ENTRY)
+               mfn &= ~FOREIGN_FRAME_BIT;
+
+       return mfn;
 }
 
 static inline int phys_to_machine_mapping_valid(unsigned long pfn)
@@ -159,6 +166,7 @@ static inline pte_t __pte_ma(pteval_t x)
 
 #define pgd_val_ma(x)  ((x).pgd)
 
+void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid);
 
 xmaddr_t arbitrary_virt_to_machine(void *address);
 unsigned long arbitrary_virt_to_mfn(void *vaddr);
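The pfn_to_mfn() change above avoids stripping FOREIGN_FRAME_BIT from
INVALID_P2M_ENTRY, so a missing entry now survives the conversion intact and
callers can test for it directly (a sketch; compare pte_pfn_to_mfn() in the
mmu.c hunks below, which relies on exactly this):

    unsigned long mfn = pfn_to_mfn(pfn);
    if (mfn == INVALID_P2M_ENTRY)
            ; /* no machine frame backs this pfn */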
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 68128a1..90a7f5a 100644 (file)
@@ -19,15 +19,12 @@ config XEN_PVHVM
        depends on X86_LOCAL_APIC
 
 config XEN_MAX_DOMAIN_MEMORY
-       int "Maximum allowed size of a domain in gigabytes"
-       default 8 if X86_32
-       default 32 if X86_64
+       int
+       default 128
        depends on XEN
        help
-         The pseudo-physical to machine address array is sized
-         according to the maximum possible memory size of a Xen
-         domain.  This array uses 1 page per gigabyte, so there's no
-         need to be too stingy here.
+         This only affects the sizing of some bss arrays, the unused
+         portions of which are freed.
 
 config XEN_SAVE_RESTORE
        bool
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 63b83ce..44ab12d 100644 (file)
@@ -136,9 +136,6 @@ static void xen_vcpu_setup(int cpu)
        info.mfn = arbitrary_virt_to_mfn(vcpup);
        info.offset = offset_in_page(vcpup);
 
-       printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n",
-              cpu, vcpup, info.mfn, info.offset);
-
        /* Check to see if the hypervisor will put the vcpu_info
           structure where we want it, which allows direct access via
           a percpu-variable. */
@@ -152,9 +149,6 @@ static void xen_vcpu_setup(int cpu)
                /* This cpu is using the registered vcpu info, even if
                   later ones fail to. */
                per_cpu(xen_vcpu, cpu) = vcpup;
-
-               printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n",
-                      cpu, vcpup);
        }
 }
 
@@ -836,6 +830,11 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
                   Xen console noise. */
                break;
 
+       case MSR_IA32_CR_PAT:
+               if (smp_processor_id() == 0)
+                       xen_set_pat(((u64)high << 32) | low);
+               break;
+
        default:
                ret = native_write_msr_safe(msr, low, high);
        }
@@ -874,8 +873,6 @@ void xen_setup_vcpu_info_placement(void)
        /* xen_vcpu_setup managed to place the vcpu_info within the
           percpu area for all cpus, so make use of it */
        if (have_vcpu_info_placement) {
-               printk(KERN_INFO "Xen: using vcpu_info placement\n");
-
                pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
                pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
                pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
@@ -1189,6 +1186,9 @@ asmlinkage void __init xen_start_kernel(void)
        xen_raw_console_write("mapping kernel into physical memory\n");
        pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
 
+       /* Allocate and initialize top and mid mfn levels for p2m structure */
+       xen_build_mfn_list_list();
+
        init_mm.pgd = pgd;
 
        /* keep using Xen gdt for now; no urgent need to change it */
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index f72d18c..9631c90 100644 (file)
@@ -57,6 +57,7 @@
 #include <asm/linkage.h>
 #include <asm/page.h>
 #include <asm/init.h>
+#include <asm/pat.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -140,7 +141,8 @@ static inline void check_zero(void)
  * large enough to allocate page table pages to allocate the rest.
  * Each page can map 2MB.
  */
-static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
+#define LEVEL1_IDENT_ENTRIES   (PTRS_PER_PTE * 4)
+static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
 
 #ifdef CONFIG_X86_64
 /* l3 pud for userspace vsyscall mapping */
@@ -171,49 +173,182 @@ DEFINE_PER_CPU(unsigned long, xen_current_cr3);   /* actual vcpu cr3 */
  */
 #define USER_LIMIT     ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
 
+/*
+ * Xen leaves the responsibility for maintaining p2m mappings to the
+ * guests themselves, but it must also access and update the p2m array
+ * during suspend/resume when all the pages are reallocated.
+ *
+ * The p2m table is logically a flat array, but we implement it as a
+ * three-level tree to allow the address space to be sparse.
+ *
+ *                               Xen
+ *                                |
+ *     p2m_top              p2m_top_mfn
+ *       /  \                   /   \
+ * p2m_mid p2m_mid     p2m_mid_mfn p2m_mid_mfn
+ *    / \      / \         /           /
+ *  p2m p2m p2m p2m p2m p2m p2m ...
+ *
+ * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p.
+ *
+ * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the
+ * maximum representable pseudo-physical address space is:
+ *  P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
+ *
+ * P2M_PER_PAGE depends on the architecture, as an mfn is always
+ * unsigned long (8 bytes on 64-bit, 4 bytes on 32-bit), leading to
+ * 512 and 1024 entries respectively.
+ */
+
+unsigned long xen_max_p2m_pfn __read_mostly;
 
-#define P2M_ENTRIES_PER_PAGE   (PAGE_SIZE / sizeof(unsigned long))
-#define TOP_ENTRIES            (MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)
+#define P2M_PER_PAGE           (PAGE_SIZE / sizeof(unsigned long))
+#define P2M_MID_PER_PAGE       (PAGE_SIZE / sizeof(unsigned long *))
+#define P2M_TOP_PER_PAGE       (PAGE_SIZE / sizeof(unsigned long **))
 
-/* Placeholder for holes in the address space */
-static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
-               { [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };
+#define MAX_P2M_PFN            (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
 
- /* Array of pointers to pages containing p2m entries */
-static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
-               { [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };
+/* Placeholders for holes in the address space */
+static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE);
 
-/* Arrays of p2m arrays expressed in mfns used for save/restore */
-static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;
+static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);
 
-static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
-       __page_aligned_bss;
+RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
+RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
 
 static inline unsigned p2m_top_index(unsigned long pfn)
 {
-       BUG_ON(pfn >= MAX_DOMAIN_PAGES);
-       return pfn / P2M_ENTRIES_PER_PAGE;
+       BUG_ON(pfn >= MAX_P2M_PFN);
+       return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
+}
+
+static inline unsigned p2m_mid_index(unsigned long pfn)
+{
+       return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
 }
 
 static inline unsigned p2m_index(unsigned long pfn)
 {
-       return pfn % P2M_ENTRIES_PER_PAGE;
+       return pfn % P2M_PER_PAGE;
+}
+
+static void p2m_top_init(unsigned long ***top)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+               top[i] = p2m_mid_missing;
+}
+
+static void p2m_top_mfn_init(unsigned long *top)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+               top[i] = virt_to_mfn(p2m_mid_missing_mfn);
+}
+
+static void p2m_top_mfn_p_init(unsigned long **top)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+               top[i] = p2m_mid_missing_mfn;
+}
+
+static void p2m_mid_init(unsigned long **mid)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_MID_PER_PAGE; i++)
+               mid[i] = p2m_missing;
+}
+
+static void p2m_mid_mfn_init(unsigned long *mid)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_MID_PER_PAGE; i++)
+               mid[i] = virt_to_mfn(p2m_missing);
 }
 
-/* Build the parallel p2m_top_mfn structures */
+static void p2m_init(unsigned long *p2m)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_MID_PER_PAGE; i++)
+               p2m[i] = INVALID_P2M_ENTRY;
+}
+
+/*
+ * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
+ *
+ * This is called both at boot time, and after resuming from suspend:
+ * - At boot time we're called very early, and must use extend_brk()
+ *   to allocate memory.
+ *
+ * - After resume we're called from within stop_machine, but the mfn
+ *   tree should already be completely allocated.
+ */
 void xen_build_mfn_list_list(void)
 {
-       unsigned pfn, idx;
+       unsigned long pfn;
 
-       for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
-               unsigned topidx = p2m_top_index(pfn);
+       /* Pre-initialize p2m_top_mfn to be completely missing */
+       if (p2m_top_mfn == NULL) {
+               p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
+               p2m_mid_mfn_init(p2m_mid_missing_mfn);
+
+               p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+               p2m_top_mfn_p_init(p2m_top_mfn_p);
 
-               p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
+               p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
+               p2m_top_mfn_init(p2m_top_mfn);
+       } else {
+               /* Reinitialise: mfns all change after migration */
+               p2m_mid_mfn_init(p2m_mid_missing_mfn);
        }
 
-       for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
-               unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
-               p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
+       for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
+               unsigned topidx = p2m_top_index(pfn);
+               unsigned mididx = p2m_mid_index(pfn);
+               unsigned long **mid;
+               unsigned long *mid_mfn_p;
+
+               mid = p2m_top[topidx];
+               mid_mfn_p = p2m_top_mfn_p[topidx];
+
+               /* Don't bother allocating any mfn mid levels if
+                * they're just missing, just update the stored mfn,
+                * since all could have changed over a migrate.
+                */
+               if (mid == p2m_mid_missing) {
+                       BUG_ON(mididx);
+                       BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
+                       p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
+                       pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
+                       continue;
+               }
+
+               if (mid_mfn_p == p2m_mid_missing_mfn) {
+                       /*
+                        * XXX boot-time only!  We should never find
+                        * missing parts of the mfn tree after
+                        * runtime.  extend_brk() will BUG if we call
+                        * it too late.
+                        */
+                       mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+                       p2m_mid_mfn_init(mid_mfn_p);
+
+                       p2m_top_mfn_p[topidx] = mid_mfn_p;
+               }
+
+               p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
+               mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]);
        }
 }
 
@@ -222,8 +357,8 @@ void xen_setup_mfn_list_list(void)
        BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
 
        HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
-               virt_to_mfn(p2m_top_mfn_list);
-       HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
+               virt_to_mfn(p2m_top_mfn);
+       HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
 }
 
 /* Set up p2m_top to point to the domain-builder provided p2m pages */
@@ -231,98 +366,176 @@ void __init xen_build_dynamic_phys_to_machine(void)
 {
        unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
        unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
-       unsigned pfn;
+       unsigned long pfn;
+
+       xen_max_p2m_pfn = max_pfn;
 
-       for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
+       p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+       p2m_init(p2m_missing);
+
+       p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+       p2m_mid_init(p2m_mid_missing);
+
+       p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
+       p2m_top_init(p2m_top);
+
+       /*
+        * The domain builder gives us a pre-constructed p2m array in
+        * mfn_list for all the pages initially given to us, so we just
+        * need to graft that into our tree structure.
+        */
+       for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
                unsigned topidx = p2m_top_index(pfn);
+               unsigned mididx = p2m_mid_index(pfn);
 
-               p2m_top[topidx] = &mfn_list[pfn];
-       }
+               if (p2m_top[topidx] == p2m_mid_missing) {
+                       unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+                       p2m_mid_init(mid);
+
+                       p2m_top[topidx] = mid;
+               }
 
-       xen_build_mfn_list_list();
+               p2m_top[topidx][mididx] = &mfn_list[pfn];
+       }
 }
 
 unsigned long get_phys_to_machine(unsigned long pfn)
 {
-       unsigned topidx, idx;
+       unsigned topidx, mididx, idx;
 
-       if (unlikely(pfn >= MAX_DOMAIN_PAGES))
+       if (unlikely(pfn >= MAX_P2M_PFN))
                return INVALID_P2M_ENTRY;
 
        topidx = p2m_top_index(pfn);
+       mididx = p2m_mid_index(pfn);
        idx = p2m_index(pfn);
-       return p2m_top[topidx][idx];
+
+       return p2m_top[topidx][mididx][idx];
 }
 EXPORT_SYMBOL_GPL(get_phys_to_machine);
 
-/* install a  new p2m_top page */
-bool install_p2mtop_page(unsigned long pfn, unsigned long *p)
+static void *alloc_p2m_page(void)
 {
-       unsigned topidx = p2m_top_index(pfn);
-       unsigned long **pfnp, *mfnp;
-       unsigned i;
+       return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+}
 
-       pfnp = &p2m_top[topidx];
-       mfnp = &p2m_top_mfn[topidx];
+static void free_p2m_page(void *p)
+{
+       free_page((unsigned long)p);
+}
 
-       for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
-               p[i] = INVALID_P2M_ENTRY;
+/* 
+ * Fully allocate the p2m structure for a given pfn.  We need to check
+ * that both the top and mid levels are allocated, and make sure the
+ * parallel mfn tree is kept in sync.  We may race with other cpus, so
+ * the new pages are installed with cmpxchg; if we lose the race then
+ * simply free the page we allocated and use the one that's there.
+ */
+static bool alloc_p2m(unsigned long pfn)
+{
+       unsigned topidx, mididx;
+       unsigned long ***top_p, **mid;
+       unsigned long *top_mfn_p, *mid_mfn;
 
-       if (cmpxchg(pfnp, p2m_missing, p) == p2m_missing) {
-               *mfnp = virt_to_mfn(p);
-               return true;
+       topidx = p2m_top_index(pfn);
+       mididx = p2m_mid_index(pfn);
+
+       top_p = &p2m_top[topidx];
+       mid = *top_p;
+
+       if (mid == p2m_mid_missing) {
+               /* Mid level is missing, allocate a new one */
+               mid = alloc_p2m_page();
+               if (!mid)
+                       return false;
+
+               p2m_mid_init(mid);
+
+               if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)
+                       free_p2m_page(mid);
        }
 
-       return false;
-}
+       top_mfn_p = &p2m_top_mfn[topidx];
+       mid_mfn = p2m_top_mfn_p[topidx];
 
-static void alloc_p2m(unsigned long pfn)
-{
-       unsigned long *p;
+       BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
+
+       if (mid_mfn == p2m_mid_missing_mfn) {
+               /* Separately check the mid mfn level */
+               unsigned long missing_mfn;
+               unsigned long mid_mfn_mfn;
+
+               mid_mfn = alloc_p2m_page();
+               if (!mid_mfn)
+                       return false;
+
+               p2m_mid_mfn_init(mid_mfn);
+
+               missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
+               mid_mfn_mfn = virt_to_mfn(mid_mfn);
+               if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn)
+                       free_p2m_page(mid_mfn);
+               else
+                       p2m_top_mfn_p[topidx] = mid_mfn;
+       }
+
+       if (p2m_top[topidx][mididx] == p2m_missing) {
+               /* p2m leaf page is missing */
+               unsigned long *p2m;
+
+               p2m = alloc_p2m_page();
+               if (!p2m)
+                       return false;
 
-       p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
-       BUG_ON(p == NULL);
+               p2m_init(p2m);
+
+               if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing)
+                       free_p2m_page(p2m);
+               else
+                       mid_mfn[mididx] = virt_to_mfn(p2m);
+       }
 
-       if (!install_p2mtop_page(pfn, p))
-               free_page((unsigned long)p);
+       return true;
 }
 
 /* Try to install p2m mapping; fail if intermediate bits missing */
 bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
-       unsigned topidx, idx;
+       unsigned topidx, mididx, idx;
 
-       if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
+       if (unlikely(pfn >= MAX_P2M_PFN)) {
                BUG_ON(mfn != INVALID_P2M_ENTRY);
                return true;
        }
 
        topidx = p2m_top_index(pfn);
-       if (p2m_top[topidx] == p2m_missing) {
-               if (mfn == INVALID_P2M_ENTRY)
-                       return true;
-               return false;
-       }
-
+       mididx = p2m_mid_index(pfn);
        idx = p2m_index(pfn);
-       p2m_top[topidx][idx] = mfn;
+
+       if (p2m_top[topidx][mididx] == p2m_missing)
+               return mfn == INVALID_P2M_ENTRY;
+
+       p2m_top[topidx][mididx][idx] = mfn;
 
        return true;
 }
 
-void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
        if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
                BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-               return;
+               return true;
        }
 
        if (unlikely(!__set_phys_to_machine(pfn, mfn)))  {
-               alloc_p2m(pfn);
+               if (!alloc_p2m(pfn))
+                       return false;
 
                if (!__set_phys_to_machine(pfn, mfn))
-                       BUG();
+                       return false;
        }
+
+       return true;
 }
 
 unsigned long arbitrary_virt_to_mfn(void *vaddr)
@@ -399,7 +612,7 @@ static bool xen_iomap_pte(pte_t pte)
        return pte_flags(pte) & _PAGE_IOMAP;
 }
 
-static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
+void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
 {
        struct multicall_space mcs;
        struct mmu_update *u;
@@ -411,10 +624,16 @@ static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
        u->ptr = arbitrary_virt_to_machine(ptep).maddr;
        u->val = pte_val_ma(pteval);
 
-       MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_IO);
+       MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
 
        xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
+EXPORT_SYMBOL_GPL(xen_set_domain_pte);
+
+static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
+{
+       xen_set_domain_pte(ptep, pteval, DOMID_IO);
+}
 
 static void xen_extend_mmu_update(const struct mmu_update *update)
 {
@@ -561,7 +780,20 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
        if (val & _PAGE_PRESENT) {
                unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
                pteval_t flags = val & PTE_FLAGS_MASK;
-               val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
+               unsigned long mfn = pfn_to_mfn(pfn);
+
+               /*
+                * If there's no mfn for the pfn, then just create an
+                * empty non-present pte.  Unfortunately this loses
+                * information about the original pfn, so
+                * pte_mfn_to_pfn is asymmetric.
+                */
+               if (unlikely(mfn == INVALID_P2M_ENTRY)) {
+                       mfn = 0;
+                       flags = 0;
+               }
+
+               val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
        }
 
        return val;
@@ -583,10 +815,18 @@ static pteval_t iomap_pte(pteval_t val)
 
 pteval_t xen_pte_val(pte_t pte)
 {
-       if (xen_initial_domain() && (pte.pte & _PAGE_IOMAP))
-               return pte.pte;
+       pteval_t pteval = pte.pte;
+
+       /* If this is a WC pte, convert back from Xen WC to Linux WC */
+       if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
+               WARN_ON(!pat_enabled);
+               pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
+       }
 
-       return pte_mfn_to_pfn(pte.pte);
+       if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
+               return pteval;
+
+       return pte_mfn_to_pfn(pteval);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
 
@@ -596,10 +836,48 @@ pgdval_t xen_pgd_val(pgd_t pgd)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
 
+/*
+ * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
+ * are reserved for now, to correspond to the Intel-reserved PAT
+ * types.
+ *
+ * We expect Linux's PAT set as follows:
+ *
+ * Idx  PTE flags        Linux    Xen    Default
+ * 0                     WB       WB     WB
+ * 1            PWT      WC       WT     WT
+ * 2        PCD          UC-      UC-    UC-
+ * 3        PCD PWT      UC       UC     UC
+ * 4    PAT              WB       WC     WB
+ * 5    PAT     PWT      WC       WP     WT
+ * 6    PAT PCD          UC-      UC     UC-
+ * 7    PAT PCD PWT      UC       UC     UC
+ */
+
+void xen_set_pat(u64 pat)
+{
+       /* We expect Linux to use a PAT setting of
+        * UC UC- WC WB (ignoring the PAT flag) */
+       WARN_ON(pat != 0x0007010600070106ull);
+}
+
 pte_t xen_make_pte(pteval_t pte)
 {
        phys_addr_t addr = (pte & PTE_PFN_MASK);
 
+       /* If Linux is trying to set a WC pte, then map to the Xen WC.
+        * If _PAGE_PAT is set, then it probably means it is really
+        * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
+        * things work out OK...
+        *
+        * (We should never see kernel mappings with _PAGE_PSE set,
+        * but we could see hugetlbfs mappings, I think.).
+        */
+       if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
+               if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
+                       pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
+       }
+
        /*
         * Unprivileged domains are allowed to do IOMAPpings for
         * PCI passthrough, but not map ISA space.  The ISA
@@ -1712,6 +1990,9 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
        unsigned ident_pte;
        unsigned long pfn;
 
+       level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
+                                     PAGE_SIZE);
+
        ident_pte = 0;
        pfn = 0;
        for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
@@ -1722,7 +2003,7 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
                        pte_page = m2v(pmd[pmdidx].pmd);
                else {
                        /* Check for free pte pages */
-                       if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
+                       if (ident_pte == LEVEL1_IDENT_ENTRIES)
                                break;
 
                        pte_page = &level1_ident_pgt[ident_pte];
@@ -1837,13 +2118,15 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
        return pgd;
 }
 #else  /* !CONFIG_X86_64 */
-static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
+static RESERVE_BRK_ARRAY(pmd_t, level2_kernel_pgt, PTRS_PER_PMD);
 
 __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
                                         unsigned long max_pfn)
 {
        pmd_t *kernel_pmd;
 
+       level2_kernel_pgt = extend_brk(sizeof(pmd_t *) * PTRS_PER_PMD, PAGE_SIZE);
+
        max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
                                  xen_start_info->nr_pt_frames * PAGE_SIZE +
                                  512*1024);
@@ -2269,6 +2552,72 @@ void __init xen_hvm_init_mmu_ops(void)
 }
 #endif
 
+#define REMAP_BATCH_SIZE 16
+
+struct remap_data {
+       unsigned long mfn;
+       pgprot_t prot;
+       struct mmu_update *mmu_update;
+};
+
+static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
+                                unsigned long addr, void *data)
+{
+       struct remap_data *rmd = data;
+       pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
+
+       rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
+       rmd->mmu_update->val = pte_val_ma(pte);
+       rmd->mmu_update++;
+
+       return 0;
+}
+
+int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+                              unsigned long addr,
+                              unsigned long mfn, int nr,
+                              pgprot_t prot, unsigned domid)
+{
+       struct remap_data rmd;
+       struct mmu_update mmu_update[REMAP_BATCH_SIZE];
+       int batch;
+       unsigned long range;
+       int err = 0;
+
+       prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
+
+       vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+
+       rmd.mfn = mfn;
+       rmd.prot = prot;
+
+       while (nr) {
+               batch = min(REMAP_BATCH_SIZE, nr);
+               range = (unsigned long)batch << PAGE_SHIFT;
+
+               rmd.mmu_update = mmu_update;
+               err = apply_to_page_range(vma->vm_mm, addr, range,
+                                         remap_area_mfn_pte_fn, &rmd);
+               if (err)
+                       goto out;
+
+               err = -EFAULT;
+               if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
+                       goto out;
+
+               nr -= batch;
+               addr += range;
+       }
+
+       err = 0;
+out:
+
+       flush_tlb_all();
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+
 #ifdef CONFIG_XEN_DEBUG_FS
 
 static struct dentry *d_mmu_debug;
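To make the three-level lookup in get_phys_to_machine() above concrete, here
is the index arithmetic worked through for one pfn, assuming the 64-bit sizes
from the comment (P2M_PER_PAGE == P2M_MID_PER_PAGE == 512); a sketch, not
part of the patch:

    unsigned long pfn = 0x12345;            /* 74565                      */
    unsigned topidx = pfn / (512 * 512);    /* 0: first top-level slot    */
    unsigned mididx = (pfn / 512) % 512;    /* 145: slot in the mid page  */
    unsigned idx    = pfn % 512;            /* 325: slot in the leaf page */
    /* mfn = p2m_top[topidx][mididx][idx]; a hole at any level resolves
     * through p2m_mid_missing/p2m_missing to INVALID_P2M_ENTRY. */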
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
index fa938c4..537bb9a 100644 (file)
@@ -12,7 +12,6 @@ enum pt_level {
 
 
 bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-bool install_p2mtop_page(unsigned long pfn, unsigned long *p);
 
 void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
 
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 9729c90..105db25 100644 (file)
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 
+#include <xen/xen.h>
 #include <xen/page.h>
 #include <xen/interface/callback.h>
+#include <xen/interface/memory.h>
 #include <xen/interface/physdev.h>
-#include <xen/interface/memory.h>
 #include <xen/features.h>
@@ -34,6 +36,39 @@ extern void xen_sysenter_target(void);
 extern void xen_syscall_target(void);
 extern void xen_syscall32_target(void);
 
+/* Amount of extra memory space we add to the e820 ranges */
+phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
+
+/* 
+ * The maximum amount of extra memory compared to the base size.  The
+ * main scaling factor is the size of struct page.  At extreme ratios
+ * of base:extra, all the base memory can be filled with page
+ * structures for the extra memory, leaving no space for anything
+ * else.
+ * 
+ * 10x seems like a reasonable balance between scaling flexibility and
+ * leaving a practically usable system.
+ */
+#define EXTRA_MEM_RATIO                (10)
+
+static __init void xen_add_extra_mem(unsigned long pages)
+{
+       u64 size = (u64)pages * PAGE_SIZE;
+       u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;
+
+       if (!pages)
+               return;
+
+       e820_add_region(extra_start, size, E820_RAM);
+       sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+
+       memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");
+
+       xen_extra_mem_size += size;
+
+       xen_max_p2m_pfn = PFN_DOWN(extra_start + size);
+}
+
 static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
                                              phys_addr_t end_addr)
 {
@@ -105,16 +140,65 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
  **/
-
 char * __init xen_memory_setup(void)
 {
+       static struct e820entry map[E820MAX] __initdata;
+
        unsigned long max_pfn = xen_start_info->nr_pages;
+       unsigned long long mem_end;
+       int rc;
+       struct xen_memory_map memmap;
+       unsigned long extra_pages = 0;
+       unsigned long extra_limit;
+       int i;
+       int op;
 
        max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
+       mem_end = PFN_PHYS(max_pfn);
+
+       memmap.nr_entries = E820MAX;
+       set_xen_guest_handle(memmap.buffer, map);
+
+       op = xen_initial_domain() ?
+               XENMEM_machine_memory_map :
+               XENMEM_memory_map;
+       rc = HYPERVISOR_memory_op(op, &memmap);
+       if (rc == -ENOSYS) {
+               memmap.nr_entries = 1;
+               map[0].addr = 0ULL;
+               map[0].size = mem_end;
+               /* 8MB slack (to balance backend allocations). */
+               map[0].size += 8ULL << 20;
+               map[0].type = E820_RAM;
+               rc = 0;
+       }
+       BUG_ON(rc);
 
        e820.nr_map = 0;
+       xen_extra_mem_start = mem_end;
+       for (i = 0; i < memmap.nr_entries; i++) {
+               unsigned long long end = map[i].addr + map[i].size;
+
+               if (map[i].type == E820_RAM) {
+                       if (map[i].addr < mem_end && end > mem_end) {
+                               /* Truncate region to max_mem. */
+                               u64 delta = end - mem_end;
+
+                               map[i].size -= delta;
+                               extra_pages += PFN_DOWN(delta);
+
+                               end = mem_end;
+                       }
+               }
 
-       e820_add_region(0, PFN_PHYS((u64)max_pfn), E820_RAM);
+               if (end > xen_extra_mem_start)
+                       xen_extra_mem_start = end;
+
+               /* If region is non-RAM or below mem_end, add what remains */
+               if ((map[i].type != E820_RAM || map[i].addr < mem_end) &&
+                   map[i].size > 0)
+                       e820_add_region(map[i].addr, map[i].size, map[i].type);
+       }
 
        /*
         * Even though this is normal, usable memory under Xen, reserve
@@ -136,7 +220,29 @@ char * __init xen_memory_setup(void)
 
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 
-       xen_return_unused_memory(xen_start_info->nr_pages, &e820);
+       extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
+
+       /*
+        * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
+        * factor of the base size.  On non-highmem systems, the base
+        * size is the full initial memory allocation; on highmem it
+        * is limited to the max size of lowmem, so that it doesn't
+        * get completely filled.
+        *
+        * In principle there could be a problem in lowmem systems if
+        * the initial memory is also very large with respect to
+        * lowmem, but we won't try to deal with that here.
+        */
+       extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+                         max_pfn + extra_pages);
+
+       if (extra_limit >= max_pfn)
+               extra_pages = extra_limit - max_pfn;
+       else
+               extra_pages = 0;
+
+       if (!xen_initial_domain())
+               xen_add_extra_mem(extra_pages);
 
        return "Xen";
 }
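As a worked instance of the clamp above (illustrative numbers; 64-bit, so
PFN_DOWN(MAXMEM) does not bind): with a 1GB base allocation
(max_pfn = 262144 pages) and 4GB of truncated E820 RAM
(extra_pages = 1048576 pages),

    extra_limit = min(10 * 262144,          /* EXTRA_MEM_RATIO * base */
                      262144 + 1048576);    /* = 1310720              */
    /* extra_limit (1310720) >= max_pfn, so
     * extra_pages = 1310720 - 262144 = 1048576: the whole 4GB is
     * kept, comfortably inside the 10x ratio. */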
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 7c8ab86..6404474 100644 (file)
@@ -30,6 +30,9 @@ void xen_setup_machphys_mapping(void);
 pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
 void xen_ident_map_ISA(void);
 void xen_reserve_top(void);
+extern unsigned long xen_max_p2m_pfn;
+
+void xen_set_pat(u64);
 
 char * __init xen_memory_setup(void);
 void __init xen_arch_setup(void);
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
index 60446f8..6b8e6d1 100644 (file)
@@ -74,7 +74,8 @@ static int __write_console(const char *data, int len)
        wmb();                  /* write ring before updating pointer */
        intf->out_prod = prod;
 
-       notify_daemon();
+       if (sent)
+               notify_daemon();
        return sent;
 }
 
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 7d24b0d..347f17e 100644 (file)
@@ -261,7 +261,7 @@ static void init_evtchn_cpu_bindings(void)
        }
 #endif
 
-       memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
+       memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
 }
 
 static inline void clear_evtchn(int port)
@@ -377,7 +377,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
                irq = find_unbound_irq();
 
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
-                                             handle_edge_irq, "event");
+                                             handle_fasteoi_irq, "event");
 
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_evtchn_info(evtchn);
@@ -435,6 +435,11 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
        irq = per_cpu(virq_to_irq, cpu)[virq];
 
        if (irq == -1) {
+               irq = find_unbound_irq();
+
+               set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+                                             handle_percpu_irq, "virq");
+
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
@@ -442,11 +447,6 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
                        BUG();
                evtchn = bind_virq.port;
 
-               irq = find_unbound_irq();
-
-               set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
-                                             handle_percpu_irq, "virq");
-
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_virq_info(evtchn, virq);
 
@@ -578,41 +578,75 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 {
        struct shared_info *sh = HYPERVISOR_shared_info;
        int cpu = smp_processor_id();
+       unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu);
        int i;
        unsigned long flags;
        static DEFINE_SPINLOCK(debug_lock);
+       struct vcpu_info *v;
 
        spin_lock_irqsave(&debug_lock, flags);
 
-       printk("vcpu %d\n  ", cpu);
+       printk("\nvcpu %d\n  ", cpu);
 
        for_each_online_cpu(i) {
-               struct vcpu_info *v = per_cpu(xen_vcpu, i);
-               printk("%d: masked=%d pending=%d event_sel %08lx\n  ", i,
-                       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
-                       v->evtchn_upcall_pending,
-                       v->evtchn_pending_sel);
+               int pending;
+               v = per_cpu(xen_vcpu, i);
+               pending = (get_irq_regs() && i == cpu)
+                       ? xen_irqs_disabled(get_irq_regs())
+                       : v->evtchn_upcall_mask;
+               printk("%d: masked=%d pending=%d event_sel %0*lx\n  ", i,
+                      pending, v->evtchn_upcall_pending,
+                      (int)(sizeof(v->evtchn_pending_sel)*2),
+                      v->evtchn_pending_sel);
+       }
+       v = per_cpu(xen_vcpu, cpu);
+
+       printk("\npending:\n   ");
+       for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
+               printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
+                      sh->evtchn_pending[i],
+                      i % 8 == 0 ? "\n   " : " ");
+       printk("\nglobal mask:\n   ");
+       for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
+               printk("%0*lx%s",
+                      (int)(sizeof(sh->evtchn_mask[0])*2),
+                      sh->evtchn_mask[i],
+                      i % 8 == 0 ? "\n   " : " ");
+
+       printk("\nglobally unmasked:\n   ");
+       for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
+               printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
+                      sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
+                      i % 8 == 0 ? "\n   " : " ");
+
+       printk("\nlocal cpu%d mask:\n   ", cpu);
+       for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
+               printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
+                      cpu_evtchn[i],
+                      i % 8 == 0 ? "\n   " : " ");
+
+       printk("\nlocally unmasked:\n   ");
+       for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
+               unsigned long pending = sh->evtchn_pending[i]
+                       & ~sh->evtchn_mask[i]
+                       & cpu_evtchn[i];
+               printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
+                      pending, i % 8 == 0 ? "\n   " : " ");
        }
-       printk("pending:\n   ");
-       for(i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
-               printk("%08lx%s", sh->evtchn_pending[i],
-                       i % 8 == 0 ? "\n   " : " ");
-       printk("\nmasks:\n   ");
-       for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
-               printk("%08lx%s", sh->evtchn_mask[i],
-                       i % 8 == 0 ? "\n   " : " ");
-
-       printk("\nunmasked:\n   ");
-       for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
-               printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
-                       i % 8 == 0 ? "\n   " : " ");
 
        printk("\npending list:\n");
-       for(i = 0; i < NR_EVENT_CHANNELS; i++) {
+       for (i = 0; i < NR_EVENT_CHANNELS; i++) {
                if (sync_test_bit(i, sh->evtchn_pending)) {
-                       printk("  %d: event %d -> irq %d\n",
+                       int word_idx = i / BITS_PER_LONG;
+                       printk("  %d: event %d -> irq %d%s%s%s\n",
                               cpu_from_evtchn(i), i,
-                              evtchn_to_irq[i]);
+                              evtchn_to_irq[i],
+                              sync_test_bit(word_idx, &v->evtchn_pending_sel)
+                                            ? "" : " l2-clear",
+                              !sync_test_bit(i, sh->evtchn_mask)
+                                            ? "" : " globally-masked",
+                              sync_test_bit(i, cpu_evtchn)
+                                            ? "" : " locally-masked");
                }
        }
 
@@ -663,6 +697,9 @@ static void __xen_evtchn_do_upcall(void)
                                int irq = evtchn_to_irq[port];
                                struct irq_desc *desc;
 
+                               mask_evtchn(port);
+                               clear_evtchn(port);
+
                                if (irq != -1) {
                                        desc = irq_to_desc(irq);
                                        if (desc)
@@ -800,10 +837,10 @@ static void ack_dynirq(unsigned int irq)
 {
        int evtchn = evtchn_from_irq(irq);
 
-       move_native_irq(irq);
+       move_masked_irq(irq);
 
        if (VALID_EVTCHN(evtchn))
-               clear_evtchn(evtchn);
+               unmask_evtchn(evtchn);
 }
 
 static int retrigger_dynirq(unsigned int irq)
@@ -959,7 +996,7 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
        .mask           = disable_dynirq,
        .unmask         = enable_dynirq,
 
-       .ack            = ack_dynirq,
+       .eoi            = ack_dynirq,
        .set_affinity   = set_affinity_irq,
        .retrigger      = retrigger_dynirq,
 };
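Taken together, the events.c hunks above switch event channels from edge to
fasteoi handling: the upcall now masks and clears the port before dispatch,
and ack_dynirq(), rewired from the .ack to the .eoi hook, unmasks it
afterwards. In outline (a restatement of the merged flow, not new code):

    /* Per-event flow after this merge: */
    mask_evtchn(port);         /* in __xen_evtchn_do_upcall()        */
    clear_evtchn(port);
    /* ...the fasteoi handler runs the action, then calls .eoi: */
    move_masked_irq(irq);      /* safe to migrate while still masked */
    unmask_evtchn(evtchn);     /* re-enable delivery                 */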
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index d409495..132939f 100644 (file)
 
 
 int xen_store_evtchn;
-EXPORT_SYMBOL(xen_store_evtchn);
+EXPORT_SYMBOL_GPL(xen_store_evtchn);
 
 struct xenstore_domain_interface *xen_store_interface;
+EXPORT_SYMBOL_GPL(xen_store_interface);
+
 static unsigned long xen_store_mfn;
 
 static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
diff --git a/drivers/xen/xenfs/Makefile b/drivers/xen/xenfs/Makefile
index 25275c3..4fde944 100644 (file)
@@ -1,3 +1,4 @@
 obj-$(CONFIG_XENFS) += xenfs.o
 
-xenfs-objs = super.o xenbus.o
\ No newline at end of file
+xenfs-y                          = super.o xenbus.o privcmd.o
+xenfs-$(CONFIG_XEN_DOM0) += xenstored.o
diff --git a/drivers/xen/xenfs/privcmd.c b/drivers/xen/xenfs/privcmd.c
new file mode 100644 (file)
index 0000000..f80be7f
--- /dev/null
@@ -0,0 +1,404 @@
+/******************************************************************************
+ * privcmd.c
+ *
+ * Interface to privileged domain-0 commands.
+ *
+ * Copyright (c) 2002-2004, K A Fraser, B Dragovic
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <linux/swap.h>
+#include <linux/smp_lock.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/seq_file.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/tlb.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+
+#include <xen/xen.h>
+#include <xen/privcmd.h>
+#include <xen/interface/xen.h>
+#include <xen/features.h>
+#include <xen/page.h>
+#include <xen/xen-ops.h>
+
+#ifndef HAVE_ARCH_PRIVCMD_MMAP
+static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
+#endif
+
+static long privcmd_ioctl_hypercall(void __user *udata)
+{
+       struct privcmd_hypercall hypercall;
+       long ret;
+
+       if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
+               return -EFAULT;
+
+       ret = privcmd_call(hypercall.op,
+                          hypercall.arg[0], hypercall.arg[1],
+                          hypercall.arg[2], hypercall.arg[3],
+                          hypercall.arg[4]);
+
+       return ret;
+}
+
+static void free_page_list(struct list_head *pages)
+{
+       struct page *p, *n;
+
+       list_for_each_entry_safe(p, n, pages, lru)
+               __free_page(p);
+
+       INIT_LIST_HEAD(pages);
+}
+
+/*
+ * Given an array of items in userspace, return a list of pages
+ * containing the data.  If copying fails, either because of memory
+ * allocation failure or a problem reading user memory, return an
+ * error code; it's up to the caller to dispose of any partial list.
+ */
+static int gather_array(struct list_head *pagelist,
+                       unsigned nelem, size_t size,
+                       void __user *data)
+{
+       unsigned pageidx;
+       void *pagedata;
+       int ret;
+
+       if (size > PAGE_SIZE)
+               return 0;
+
+       pageidx = PAGE_SIZE;
+       pagedata = NULL;        /* quiet, gcc */
+       while (nelem--) {
+               if (pageidx > PAGE_SIZE-size) {
+                       struct page *page = alloc_page(GFP_KERNEL);
+
+                       ret = -ENOMEM;
+                       if (page == NULL)
+                               goto fail;
+
+                       pagedata = page_address(page);
+
+                       list_add_tail(&page->lru, pagelist);
+                       pageidx = 0;
+               }
+
+               ret = -EFAULT;
+               if (copy_from_user(pagedata + pageidx, data, size))
+                       goto fail;
+
+               data += size;
+               pageidx += size;
+       }
+
+       ret = 0;
+
+fail:
+       return ret;
+}
+
+/*
+ * Call function "fn" on each element of the array fragmented
+ * over a list of pages.
+ */
+static int traverse_pages(unsigned nelem, size_t size,
+                         struct list_head *pos,
+                         int (*fn)(void *data, void *state),
+                         void *state)
+{
+       void *pagedata;
+       unsigned pageidx;
+       int ret = 0;
+
+       BUG_ON(size > PAGE_SIZE);
+
+       pageidx = PAGE_SIZE;
+       pagedata = NULL;        /* hush, gcc */
+
+       while (nelem--) {
+               if (pageidx > PAGE_SIZE-size) {
+                       struct page *page;
+                       pos = pos->next;
+                       page = list_entry(pos, struct page, lru);
+                       pagedata = page_address(page);
+                       pageidx = 0;
+               }
+
+               ret = (*fn)(pagedata + pageidx, state);
+               if (ret)
+                       break;
+               pageidx += size;
+       }
+
+       return ret;
+}
+
+struct mmap_mfn_state {
+       unsigned long va;
+       struct vm_area_struct *vma;
+       domid_t domain;
+};
+
+static int mmap_mfn_range(void *data, void *state)
+{
+       struct privcmd_mmap_entry *msg = data;
+       struct mmap_mfn_state *st = state;
+       struct vm_area_struct *vma = st->vma;
+       int rc;
+
+       /* Do not allow range to wrap the address space. */
+       if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
+           ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
+               return -EINVAL;
+
+       /* Range chunks must be contiguous in va space. */
+       if ((msg->va != st->va) ||
+           ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
+               return -EINVAL;
+
+       rc = xen_remap_domain_mfn_range(vma,
+                                       msg->va & PAGE_MASK,
+                                       msg->mfn, msg->npages,
+                                       vma->vm_page_prot,
+                                       st->domain);
+       if (rc < 0)
+               return rc;
+
+       st->va += msg->npages << PAGE_SHIFT;
+
+       return 0;
+}
+
+static long privcmd_ioctl_mmap(void __user *udata)
+{
+       struct privcmd_mmap mmapcmd;
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       int rc;
+       LIST_HEAD(pagelist);
+       struct mmap_mfn_state state;
+
+       if (!xen_initial_domain())
+               return -EPERM;
+
+       if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
+               return -EFAULT;
+
+       rc = gather_array(&pagelist,
+                         mmapcmd.num, sizeof(struct privcmd_mmap_entry),
+                         mmapcmd.entry);
+
+       if (rc || list_empty(&pagelist))
+               goto out;
+
+       down_write(&mm->mmap_sem);
+
+       {
+               struct page *page = list_first_entry(&pagelist,
+                                                    struct page, lru);
+               struct privcmd_mmap_entry *msg = page_address(page);
+
+               vma = find_vma(mm, msg->va);
+               rc = -EINVAL;
+
+               if (!vma || (msg->va != vma->vm_start) ||
+                   !privcmd_enforce_singleshot_mapping(vma))
+                       goto out_up;
+       }
+
+       state.va = vma->vm_start;
+       state.vma = vma;
+       state.domain = mmapcmd.dom;
+
+       rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
+                           &pagelist,
+                           mmap_mfn_range, &state);
+
+
+out_up:
+       up_write(&mm->mmap_sem);
+
+out:
+       free_page_list(&pagelist);
+
+       return rc;
+}
+
+struct mmap_batch_state {
+       domid_t domain;
+       unsigned long va;
+       struct vm_area_struct *vma;
+       int err;
+
+       xen_pfn_t __user *user;
+};
+
+static int mmap_batch_fn(void *data, void *state)
+{
+       xen_pfn_t *mfnp = data;
+       struct mmap_batch_state *st = state;
+
+       if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
+                                      st->vma->vm_page_prot, st->domain) < 0) {
+               *mfnp |= 0xf0000000U;
+               st->err++;
+       }
+       st->va += PAGE_SIZE;
+
+       return 0;
+}
+
+static int mmap_return_errors(void *data, void *state)
+{
+       xen_pfn_t *mfnp = data;
+       struct mmap_batch_state *st = state;
+
+       put_user(*mfnp, st->user++);
+
+       return 0;
+}
+
+static struct vm_operations_struct privcmd_vm_ops;
+
+static long privcmd_ioctl_mmap_batch(void __user *udata)
+{
+       int ret;
+       struct privcmd_mmapbatch m;
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       unsigned long nr_pages;
+       LIST_HEAD(pagelist);
+       struct mmap_batch_state state;
+
+       if (!xen_initial_domain())
+               return -EPERM;
+
+       if (copy_from_user(&m, udata, sizeof(m)))
+               return -EFAULT;
+
+       nr_pages = m.num;
+       if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
+               return -EINVAL;
+
+       ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t),
+                          m.arr);
+
+       if (ret || list_empty(&pagelist))
+               goto out;
+
+       down_write(&mm->mmap_sem);
+
+       vma = find_vma(mm, m.addr);
+       ret = -EINVAL;
+       if (!vma ||
+           vma->vm_ops != &privcmd_vm_ops ||
+           (m.addr != vma->vm_start) ||
+           ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
+           !privcmd_enforce_singleshot_mapping(vma)) {
+               up_write(&mm->mmap_sem);
+               goto out;
+       }
+
+       state.domain = m.dom;
+       state.vma = vma;
+       state.va = m.addr;
+       state.err = 0;
+
+       ret = traverse_pages(m.num, sizeof(xen_pfn_t),
+                            &pagelist, mmap_batch_fn, &state);
+
+       up_write(&mm->mmap_sem);
+
+       if (state.err > 0) {
+               ret = 0;
+
+               state.user = m.arr;
+               traverse_pages(m.num, sizeof(xen_pfn_t),
+                              &pagelist,
+                              mmap_return_errors, &state);
+       }
+
+out:
+       free_page_list(&pagelist);
+
+       return ret;
+}
+
+static long privcmd_ioctl(struct file *file,
+                         unsigned int cmd, unsigned long data)
+{
+       int ret = -ENOSYS;
+       void __user *udata = (void __user *) data;
+
+       switch (cmd) {
+       case IOCTL_PRIVCMD_HYPERCALL:
+               ret = privcmd_ioctl_hypercall(udata);
+               break;
+
+       case IOCTL_PRIVCMD_MMAP:
+               ret = privcmd_ioctl_mmap(udata);
+               break;
+
+       case IOCTL_PRIVCMD_MMAPBATCH:
+               ret = privcmd_ioctl_mmap_batch(udata);
+               break;
+
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+#ifndef HAVE_ARCH_PRIVCMD_MMAP
+static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
+              vma, vma->vm_start, vma->vm_end,
+              vmf->pgoff, vmf->virtual_address);
+
+       return VM_FAULT_SIGBUS;
+}
+
+static struct vm_operations_struct privcmd_vm_ops = {
+       .fault = privcmd_fault
+};
+
+static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       /* Unsupported for auto-translate guests. */
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return -ENOSYS;
+
+       /* DONTCOPY is essential for Xen as copy_page_range is broken. */
+       vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
+       vma->vm_ops = &privcmd_vm_ops;
+       vma->vm_private_data = NULL;
+
+       return 0;
+}
+
+static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
+{
+       return (xchg(&vma->vm_private_data, (void *)1) == NULL);
+}
+#endif
+
+const struct file_operations privcmd_file_ops = {
+       .unlocked_ioctl = privcmd_ioctl,
+       .mmap = privcmd_mmap,
+};
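Once xenfs exposes this file as /proc/xen/privcmd, the toolstack drives it
through the ioctls handled above. A minimal userspace sketch (illustrative
only, error handling elided; assumes the xen/privcmd.h and xen/interface
headers are available to userspace):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <xen/privcmd.h>            /* IOCTL_PRIVCMD_HYPERCALL  */
    #include <xen/interface/xen.h>      /* __HYPERVISOR_xen_version */
    #include <xen/interface/version.h>  /* XENVER_version           */

    int main(void)
    {
            struct privcmd_hypercall call = {
                    .op  = __HYPERVISOR_xen_version,
                    .arg = { XENVER_version, 0, 0, 0, 0 },
            };
            int fd = open("/proc/xen/privcmd", O_RDWR);
            long ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
            /* ver packs (major << 16) | minor on success */
            return ver < 0;
    }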
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index bd96340..d6662b7 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/magic.h>
+#include <linux/mm.h>
+#include <linux/backing-dev.h>
 
 #include <xen/xen.h>
 
 MODULE_DESCRIPTION("Xen filesystem");
 MODULE_LICENSE("GPL");
 
+static int xenfs_set_page_dirty(struct page *page)
+{
+       return !TestSetPageDirty(page);
+}
+
+static const struct address_space_operations xenfs_aops = {
+       .set_page_dirty = xenfs_set_page_dirty,
+};
+
+static struct backing_dev_info xenfs_backing_dev_info = {
+       .ra_pages       = 0,    /* No readahead */
+       .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
+};
+
+static struct inode *xenfs_make_inode(struct super_block *sb, int mode)
+{
+       struct inode *ret = new_inode(sb);
+
+       if (ret) {
+               ret->i_mode = mode;
+               ret->i_mapping->a_ops = &xenfs_aops;
+               ret->i_mapping->backing_dev_info = &xenfs_backing_dev_info;
+               ret->i_uid = ret->i_gid = 0;
+               ret->i_blocks = 0;
+               ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
+       }
+       return ret;
+}
+
+static struct dentry *xenfs_create_file(struct super_block *sb,
+                                       struct dentry *parent,
+                                       const char *name,
+                                       const struct file_operations *fops,
+                                       void *data,
+                                       int mode)
+{
+       struct dentry *dentry;
+       struct inode *inode;
+
+       dentry = d_alloc_name(parent, name);
+       if (!dentry)
+               return NULL;
+
+       inode = xenfs_make_inode(sb, S_IFREG | mode);
+       if (!inode) {
+               dput(dentry);
+               return NULL;
+       }
+
+       inode->i_fop = fops;
+       inode->i_private = data;
+
+       d_add(dentry, inode);
+       return dentry;
+}
+
 static ssize_t capabilities_read(struct file *file, char __user *buf,
                                 size_t size, loff_t *off)
 {
@@ -44,10 +102,23 @@ static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
                [1] = {},
                { "xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR },
                { "capabilities", &capabilities_file_ops, S_IRUGO },
+               { "privcmd", &privcmd_file_ops, S_IRUSR|S_IWUSR },
                {""},
        };
+       int rc;
 
-       return simple_fill_super(sb, XENFS_SUPER_MAGIC, xenfs_files);
+       rc = simple_fill_super(sb, XENFS_SUPER_MAGIC, xenfs_files);
+       if (rc < 0)
+               return rc;
+
+       if (xen_initial_domain()) {
+               xenfs_create_file(sb, sb->s_root, "xsd_kva",
+                                 &xsd_kva_file_ops, NULL, S_IRUSR|S_IWUSR);
+               xenfs_create_file(sb, sb->s_root, "xsd_port",
+                                 &xsd_port_file_ops, NULL, S_IRUSR|S_IWUSR);
+       }
+
+       return rc;
 }
 
 static int xenfs_get_sb(struct file_system_type *fs_type,
@@ -66,11 +137,25 @@ static struct file_system_type xenfs_type = {
 
 static int __init xenfs_init(void)
 {
-       if (xen_domain())
-               return register_filesystem(&xenfs_type);
+       int err;
+       if (!xen_domain()) {
+               printk(KERN_INFO "xenfs: not registering filesystem on non-xen platform\n");
+               return 0;
+       }
+
+       err = register_filesystem(&xenfs_type);
+       if (err) {
+               printk(KERN_ERR "xenfs: unable to register filesystem\n");
+               goto out;
+       }
+
+       err = bdi_init(&xenfs_backing_dev_info);
+       if (err)
+               unregister_filesystem(&xenfs_type);
+
+ out:
 
-       printk(KERN_INFO "XENFS: not registering filesystem on non-xen platform\n");
-       return 0;
+       return err;
 }
 
 static void __exit xenfs_exit(void)
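
As a usage note (not in the patch): once xenfs_init() has registered the
filesystem type, userspace mounts it at the conventional /proc/xen location,
typically from an init script ("mount -t xenfs xenfs /proc/xen"). A minimal
sketch of the same operation via mount(2):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* the mount point is conventional, not enforced by the kernel */
        if (mount("xenfs", "/proc/xen", "xenfs", 0, NULL)) {
                perror("mount xenfs");
                return 1;
        }
        return 0;
}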
index 51f08b2..b68aa62 100644 (file)
@@ -2,5 +2,8 @@
 #define _XENFS_XENBUS_H
 
 extern const struct file_operations xenbus_file_ops;
+extern const struct file_operations privcmd_file_ops;
+extern const struct file_operations xsd_kva_file_ops;
+extern const struct file_operations xsd_port_file_ops;
 
 #endif /* _XENFS_XENBUS_H */
diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
new file mode 100644 (file)
index 0000000..fef20db
--- /dev/null
@@ -0,0 +1,68 @@
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+
+#include <xen/page.h>
+
+#include "xenfs.h"
+#include "../xenbus/xenbus_comms.h"
+
+static ssize_t xsd_read(struct file *file, char __user *buf,
+                           size_t size, loff_t *off)
+{
+       const char *str = (const char *)file->private_data;
+       return simple_read_from_buffer(buf, size, off, str, strlen(str));
+}
+
+static int xsd_release(struct inode *inode, struct file *file)
+{
+       kfree(file->private_data);
+       return 0;
+}
+
+static int xsd_kva_open(struct inode *inode, struct file *file)
+{
+       file->private_data = kasprintf(GFP_KERNEL, "0x%p",
+                                      xen_store_interface);
+       if (!file->private_data)
+               return -ENOMEM;
+       return 0;
+}
+
+static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       size_t size = vma->vm_end - vma->vm_start;
+
+       if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
+               return -EINVAL;
+
+       if (remap_pfn_range(vma, vma->vm_start,
+                           virt_to_pfn(xen_store_interface),
+                           size, vma->vm_page_prot))
+               return -EAGAIN;
+
+       return 0;
+}
+
+const struct file_operations xsd_kva_file_ops = {
+       .open = xsd_kva_open,
+       .mmap = xsd_kva_mmap,
+       .read = xsd_read,
+       .release = xsd_release,
+};
+
+static int xsd_port_open(struct inode *inode, struct file *file)
+{
+       file->private_data = kasprintf(GFP_KERNEL, "%d",
+                                      xen_store_evtchn);
+       if (!file->private_data)
+               return -ENOMEM;
+       return 0;
+}
+
+const struct file_operations xsd_port_file_ops = {
+       .open = xsd_port_open,
+       .read = xsd_read,
+       .release = xsd_release,
+};
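
A hypothetical sketch of the consumer side: the userspace xenstore daemon
reads its event-channel port from xsd_port and maps the shared ring page
through xsd_kva. Paths assume xenfs is mounted at /proc/xen; error handling
is abbreviated.

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        char buf[16];
        ssize_t n;
        int pfd, kfd, port;
        void *ring;

        pfd = open("/proc/xen/xsd_port", O_RDONLY);
        if (pfd < 0)
                return 1;
        n = read(pfd, buf, sizeof(buf) - 1);
        close(pfd);
        if (n <= 0)
                return 1;
        buf[n] = '\0';
        port = atoi(buf);               /* xsd_port_open() printed "%d" */

        kfd = open("/proc/xen/xsd_kva", O_RDWR);
        if (kfd < 0)
                return 1;
        /* xsd_kva_mmap() rejects mappings larger than one page */
        ring = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, kfd, 0);
        if (ring == MAP_FAILED)
                return 1;

        printf("xenstore ring at %p, event channel %d\n", ring, port);
        return 0;
}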
index 4e65c16..84ad8f0 100644 (file)
@@ -1 +1,2 @@
 header-y += evtchn.h
+header-y += privcmd.h
index d3938d3..d7a6c13 100644 (file)
@@ -186,6 +186,35 @@ struct xen_translate_gpfn_list {
 };
 DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list);
 
+/*
+ * Returns the pseudo-physical memory map as it was when the domain
+ * was started (specified by XENMEM_set_memory_map).
+ * arg == addr of struct xen_memory_map.
+ */
+#define XENMEM_memory_map           9
+struct xen_memory_map {
+    /*
+     * On call, the number of entries that can be stored in the
+     * buffer; on return, the number of entries that have been
+     * stored in the buffer.
+     */
+    unsigned int nr_entries;
+
+    /*
+     * Entries in the buffer are in the same format as returned by the
+     * BIOS INT 0x15 EAX=0xE820 call.
+     */
+    GUEST_HANDLE(void) buffer;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_memory_map);
+
+/*
+ * Returns the real physical (machine) memory map. Takes the same
+ * argument structure as XENMEM_memory_map.
+ * arg == addr of struct xen_memory_map.
+ */
+#define XENMEM_machine_memory_map   10
+
 
 /*
  * Prevent the balloon driver from changing the memory reservation
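
To illustrate the calling convention (a sketch, not code from this merge,
although the dom0 E820 work on the 'upstream/core' branch uses this
hypercall): the guest sets nr_entries to its buffer capacity, points the
guest handle at an E820-format array, and issues the memory_op. The function
and array names here are illustrative.

#include <asm/e820.h>                   /* struct e820entry, E820MAX */
#include <asm/xen/hypercall.h>          /* HYPERVISOR_memory_op() */
#include <xen/interface/memory.h>

static struct e820entry xen_e820[E820MAX] __initdata;

static int __init fetch_pseudo_phys_map(void)
{
        struct xen_memory_map memmap;
        int rc;

        memmap.nr_entries = E820MAX;    /* buffer capacity on call */
        set_xen_guest_handle(memmap.buffer, xen_e820);

        rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
        if (rc)
                return rc;              /* e.g. -ENOSYS from older hypervisors */

        /* memmap.nr_entries now holds the number of entries stored */
        return memmap.nr_entries;
}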
diff --git a/include/xen/privcmd.h b/include/xen/privcmd.h
new file mode 100644 (file)
index 0000000..b42cdfd
--- /dev/null
@@ -0,0 +1,80 @@
+/******************************************************************************
+ * privcmd.h
+ *
+ * Interface to /proc/xen/privcmd.
+ *
+ * Copyright (c) 2003-2005, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __LINUX_PUBLIC_PRIVCMD_H__
+#define __LINUX_PUBLIC_PRIVCMD_H__
+
+#include <linux/types.h>
+
+typedef unsigned long xen_pfn_t;
+
+#ifndef __user
+#define __user
+#endif
+
+struct privcmd_hypercall {
+       __u64 op;
+       __u64 arg[5];
+};
+
+struct privcmd_mmap_entry {
+       __u64 va;
+       __u64 mfn;
+       __u64 npages;
+};
+
+struct privcmd_mmap {
+       int num;
+       domid_t dom; /* target domain */
+       struct privcmd_mmap_entry __user *entry;
+};
+
+struct privcmd_mmapbatch {
+       int num;     /* number of pages to populate */
+       domid_t dom; /* target domain */
+       __u64 addr;  /* virtual address */
+       xen_pfn_t __user *arr; /* array of mfns; top nibble set on error */
+};
+
+/*
+ * @cmd: IOCTL_PRIVCMD_HYPERCALL
+ * @arg: pointer to struct privcmd_hypercall
+ * Return: value returned from execution of the specified hypercall.
+ */
+#define IOCTL_PRIVCMD_HYPERCALL                                        \
+       _IOC(_IOC_NONE, 'P', 0, sizeof(struct privcmd_hypercall))
+#define IOCTL_PRIVCMD_MMAP                                     \
+       _IOC(_IOC_NONE, 'P', 2, sizeof(struct privcmd_mmap))
+#define IOCTL_PRIVCMD_MMAPBATCH                                        \
+       _IOC(_IOC_NONE, 'P', 3, sizeof(struct privcmd_mmapbatch))
+
+#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
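
A hypothetical userspace sketch of IOCTL_PRIVCMD_HYPERCALL: the ioctl return
value is whatever the hypercall returned. The op and command numbers come
from the Xen public interface (xen.h / version.h), not from this header;
XENVER_version yields (major << 16) | minor.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xen/privcmd.h>

#define __HYPERVISOR_xen_version 17     /* xen/interface/xen.h */
#define XENVER_version            0     /* xen/interface/version.h */

int main(void)
{
        struct privcmd_hypercall call = {
                .op  = __HYPERVISOR_xen_version,
                .arg = { XENVER_version, 0, 0, 0, 0 },
        };
        long ver;
        int fd = open("/proc/xen/privcmd", O_RDWR);

        if (fd < 0)
                return 1;
        ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
        if (ver >= 0)
                printf("running on Xen %ld.%ld\n", ver >> 16, ver & 0xffff);
        close(fd);
        return 0;
}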
index 351f405..98b9215 100644 (file)
@@ -23,4 +23,9 @@ int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
 
 void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order);
 
+int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+                              unsigned long addr,
+                              unsigned long mfn, int nr,
+                              pgprot_t prot, unsigned domid);
+
 #endif /* INCLUDE_XEN_OPS_H */
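
For reference, the per-page callback that privcmd_ioctl_mmap_batch() drives
through traverse_pages() reduces to one call of this export per entry. The
sketch below is reconstructed from the state fields set up in that function
(domain, vma, va, err, user); the actual patch's version may differ in
detail.

struct mmap_batch_state {
        domid_t domain;
        struct vm_area_struct *vma;
        unsigned long va;
        int err;                /* incremented for each failed frame */
        xen_pfn_t __user *user; /* for writing errors back later */
};

static int mmap_batch_fn(void *data, void *state)
{
        xen_pfn_t *mfnp = data;
        struct mmap_batch_state *st = state;

        if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
                                       st->vma->vm_page_prot,
                                       st->domain) < 0) {
                *mfnp |= 0xf0000000U;   /* flag the failure to userspace */
                st->err++;
        }
        st->va += PAGE_SIZE;

        return 0;
}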