[PATCH] mm: introduce remap_vmalloc_range()
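
Add vmalloc_user() and vmalloc_32_user(), which return zeroed, virtually
contiguous memory whose vm area is marked VM_USERMAP, plus
remap_vmalloc_range(), which maps such a buffer into a userspace vma page
by page via vm_insert_page().

Below is a minimal sketch, not part of this patch, of how a driver's mmap
handler might use the new helpers.  The module, buffer name and size are
hypothetical, and the char-device registration is omitted:

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/fs.h>
	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	#define EXAMPLE_BUF_SIZE	(64 * 1024)	/* hypothetical size */

	static void *example_buf;	/* allocated with vmalloc_user() */

	static int example_mmap(struct file *file, struct vm_area_struct *vma)
	{
		/* Map the whole vma onto the buffer, starting at vm_pgoff. */
		return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
	}

	static struct file_operations example_fops = {
		.owner	= THIS_MODULE,
		.mmap	= example_mmap,
		/* hooking this into a char device is omitted here */
	};

	static int __init example_init(void)
	{
		/* Zeroed and flagged VM_USERMAP, so it may be mapped to userspace. */
		example_buf = vmalloc_user(EXAMPLE_BUF_SIZE);
		return example_buf ? 0 : -ENOMEM;
	}

	static void __exit example_exit(void)
	{
		vfree(example_buf);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");

---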
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c0504f1e34ebdbd2003ba8aaa63287f366b100cb..35f8553f893a7ffe0416732827c71385646c4ea3 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -256,6 +256,19 @@ struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int
        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node);
 }
 
+/* Caller must hold vmlist_lock */
+static struct vm_struct *__find_vm_area(void *addr)
+{
+       struct vm_struct *tmp;
+
+       for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
+               if (tmp->addr == addr)
+                       break;
+       }
+
+       return tmp;
+}
+
 /* Caller must hold vmlist_lock */
 struct vm_struct *__remove_vm_area(void *addr)
 {
@@ -498,10 +511,32 @@ EXPORT_SYMBOL(__vmalloc);
  */
 void *vmalloc(unsigned long size)
 {
-       return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+       return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 }
 EXPORT_SYMBOL(vmalloc);
 
+/**
+ *     vmalloc_user  -  allocate zeroed virtually contiguous memory for userspace
+ *     @size:          allocation size
+ *
+ *     The resulting memory area is zeroed so it can be mapped to userspace
+ *     without leaking data.
+ */
+void *vmalloc_user(unsigned long size)
+{
+       struct vm_struct *area;
+       void *ret;
+
+       ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+       if (ret) {
+               /* Mark the area as safe to map to userspace. */
+               write_lock(&vmlist_lock);
+               area = __find_vm_area(ret);
+               area->flags |= VM_USERMAP;
+               write_unlock(&vmlist_lock);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(vmalloc_user);
+
 /**
  *     vmalloc_node  -  allocate memory on a specific node
  *
@@ -516,7 +551,7 @@ EXPORT_SYMBOL(vmalloc);
  */
 void *vmalloc_node(unsigned long size, int node)
 {
-       return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
+       return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
 }
 EXPORT_SYMBOL(vmalloc_node);
 
@@ -556,6 +591,28 @@ void *vmalloc_32(unsigned long size)
 }
 EXPORT_SYMBOL(vmalloc_32);
 
+/**
+ *     vmalloc_32_user  -  allocate zeroed 32bit addressable memory for userspace
+ *     @size:          allocation size
+ *
+ *     The resulting memory area is 32bit addressable and zeroed so it can be
+ *     mapped to userspace without leaking data.
+ */
+void *vmalloc_32_user(unsigned long size)
+{
+       struct vm_struct *area;
+       void *ret;
+
+       ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+       if (ret) {
+               /* Mark the area as safe to map to userspace. */
+               write_lock(&vmlist_lock);
+               area = __find_vm_area(ret);
+               area->flags |= VM_USERMAP;
+               write_unlock(&vmlist_lock);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(vmalloc_32_user);
+
 long vread(char *buf, char *addr, unsigned long count)
 {
        struct vm_struct *tmp;
@@ -630,3 +687,64 @@ finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
 }
+
+/**
+ *     remap_vmalloc_range  -  map vmalloc pages to userspace
+ *     @vma:           vma to cover (map full range of vma)
+ *     @addr:          vmalloc memory
+ *     @pgoff:         number of pages into addr before first page to map
+ *
+ *     Returns 0 for success, -Exxx on failure.
+ *
+ *     This function checks that addr is a valid vmalloc'ed area and that it
+ *     is big enough to cover the vma.  It fails with -EINVAL if those
+ *     criteria are not met.
+ *
+ *     Similar to remap_pfn_range (see mm/memory.c)
+ */
+int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+                                               unsigned long pgoff)
+{
+       struct vm_struct *area;
+       unsigned long uaddr = vma->vm_start;
+       unsigned long usize = vma->vm_end - vma->vm_start;
+       int ret;
+
+       if ((PAGE_SIZE-1) & (unsigned long)addr)
+               return -EINVAL;
+
+       read_lock(&vmlist_lock);
+       area = __find_vm_area(addr);
+       if (!area)
+               goto out_einval_locked;
+
+       if (!(area->flags & VM_USERMAP))
+               goto out_einval_locked;
+
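+       /* area->size includes a trailing guard page; keep it out of the check. */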
+       if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
+               goto out_einval_locked;
+       read_unlock(&vmlist_lock);
+
+       addr += pgoff << PAGE_SHIFT;
+       do {
+               struct page *page = vmalloc_to_page(addr);
+               ret = vm_insert_page(vma, uaddr, page);
+               if (ret)
+                       return ret;
+
+               uaddr += PAGE_SIZE;
+               addr += PAGE_SIZE;
+               usize -= PAGE_SIZE;
+       } while (usize > 0);
+
+       /* Prevent "things" like memory migration? VM_flags need a cleanup... */
+       vma->vm_flags |= VM_RESERVED;
+
+       return ret;
+
+out_einval_locked:
+       read_unlock(&vmlist_lock);
+       return -EINVAL;
+}
+EXPORT_SYMBOL(remap_vmalloc_range);
+
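For reference, a hedged sketch of the matching userspace side, assuming the
buffer is exposed through a hypothetical character device /dev/example
(device registration is not part of this patch):

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		size_t len = 64 * 1024;	/* must fit inside the vmalloc_user() buffer */
		void *p;
		int fd = open("/dev/example", O_RDWR);

		if (fd < 0) {
			perror("open");
			return EXIT_FAILURE;
		}

		/* The kernel side satisfies this through remap_vmalloc_range(). */
		p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			close(fd);
			return EXIT_FAILURE;
		}

		/* vmalloc_user() zeroed the pages, so no stale kernel data leaks here. */
		((unsigned char *)p)[0] = 1;

		munmap(p, len);
		close(fd);
		return EXIT_SUCCESS;
	}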