mm: swap: implement generic handler for swap_activate
[linux-3.10.git] mm/vmalloc.c
index 21fdf46..2bb90b1 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -256,7 +256,7 @@ struct vmap_area {
        struct rb_node rb_node;         /* address sorted rbtree */
        struct list_head list;          /* address sorted list */
        struct list_head purge_list;    /* "lazy purge" list */
-       void *private;
+       struct vm_struct *vm;
        struct rcu_head rcu_head;
 };
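
The hunk above replaces the opaque "void *private" member with a typed "struct vm_struct *vm" pointer, so every lookup that used to cast the cookie back is now checked by the compiler. A minimal standalone sketch of what the stronger type buys (the _old/_new struct names are illustrative only, not the kernel's):

    /* With void *, every use site casts and hopes; with the typed
     * member, a wrong assignment is a compile error. */
    struct vm_struct;                      /* opaque; pointers suffice */

    struct vmap_area_old { void *private; };
    struct vmap_area_new { struct vm_struct *vm; };

    static struct vm_struct *vm_of_old(struct vmap_area_old *va)
    {
            return (struct vm_struct *)va->private;  /* unchecked cast */
    }

    static struct vm_struct *vm_of_new(struct vmap_area_new *va)
    {
            return va->vm;                           /* type-safe */
    }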
 
@@ -413,11 +413,11 @@ nocache:
                if (addr + size - 1 < addr)
                        goto overflow;
 
-               n = rb_next(&first->rb_node);
-               if (n)
-                       first = rb_entry(n, struct vmap_area, rb_node);
-               else
+               if (list_is_last(&first->list, &vmap_area_list))
                        goto found;
+
+               first = list_entry(first->list.next,
+                               struct vmap_area, list);
        }
 
 found:
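
The allocator walk above drops the rb_next() successor lookup in favour of stepping along vmap_area_list. Since that list is kept sorted by address (see the struct comment in the first hunk), the next list entry is exactly the next vmap_area by address, and a list hop is a single pointer chase with no tree traversal. A runnable userspace sketch of the list_is_last()/list_entry() pattern; the helpers mimic the kernel's but are reimplemented here:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define list_entry(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static int list_is_last(const struct list_head *l,
                            const struct list_head *head)
    {
            return l->next == head;          /* head is the sentinel */
    }

    struct area { unsigned long start; struct list_head list; };

    int main(void)
    {
            struct list_head head = { &head, &head };
            struct area a = { 0x1000, { NULL, NULL } };
            struct area b = { 0x2000, { NULL, NULL } };
            struct area *cur = &a;

            /* head <-> a <-> b, sorted by start address */
            head.next = &a.list; a.list.prev = &head;
            a.list.next = &b.list; b.list.prev = &a.list;
            b.list.next = &head; head.prev = &b.list;

            while (!list_is_last(&cur->list, &head))
                    cur = list_entry(cur->list.next, struct area, list);
            printf("last area starts at %#lx\n", cur->start);
            return 0;
    }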
@@ -904,6 +904,14 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 
        BUG_ON(size & ~PAGE_MASK);
        BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
+       if (WARN_ON(size == 0)) {
+               /*
+                * Allocating 0 bytes isn't what the caller wants, since
+                * get_order(0) returns a bogus result. Just warn and
+                * terminate early.
+                */
+               return NULL;
+       }
        order = get_order(size);
 
 again:
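
The new WARN_ON(size == 0) guard exists because get_order() is only meaningful for a positive size: the generic implementation effectively computes fls((size - 1) >> PAGE_SHIFT), so size == 0 underflows to ULONG_MAX and yields an absurd order. A runnable userspace model of that behaviour (this mirrors my understanding of the asm-generic logic of the era, reimplemented here as an assumption):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    static int fls_ul(unsigned long x)      /* highest set bit, 1-based */
    {
            int r = 0;

            while (x) {
                    r++;
                    x >>= 1;
            }
            return r;
    }

    static int get_order(unsigned long size)
    {
            size--;                 /* 0 underflows to ULONG_MAX ... */
            size >>= PAGE_SHIFT;
            return fls_ul(size);    /* ... so the order is nonsense  */
    }

    int main(void)
    {
            printf("get_order(4096) = %d\n", get_order(4096)); /* 0 */
            printf("get_order(8192) = %d\n", get_order(8192)); /* 1 */
            printf("get_order(0)    = %d\n", get_order(0));    /* 52 on 64-bit */
            return 0;
    }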
@@ -1185,9 +1193,10 @@ void __init vmalloc_init(void)
        /* Import existing vmlist entries. */
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
-               va->flags = tmp->flags | VM_VM_AREA;
+               va->flags = VM_VM_AREA;
                va->va_start = (unsigned long)tmp->addr;
                va->va_end = va->va_start + tmp->size;
+               va->vm = tmp;
                __insert_vmap_area(va);
        }
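
Two things change in the boot-time import above: each early vm_struct is now back-linked from its vmap_area through the new va->vm member, and va->flags is set to plain VM_VM_AREA instead of OR-ing in tmp->flags, whose VM_IOREMAP/VM_ALLOC-style bits live in a different namespace than the vmap_area state bits. A standalone sketch of the import loop's effect (struct layouts trimmed to the relevant fields):

    #include <stdio.h>
    #include <stdlib.h>

    struct vm_struct { void *addr; unsigned long size; struct vm_struct *next; };
    struct vmap_area { unsigned long va_start, va_end; struct vm_struct *vm; };

    int main(void)
    {
            struct vm_struct early = { (void *)0x1000, 0x2000, NULL };
            struct vm_struct *vmlist = &early, *tmp;

            for (tmp = vmlist; tmp; tmp = tmp->next) {
                    struct vmap_area *va = calloc(1, sizeof(*va));

                    if (!va)
                            return 1;
                    va->va_start = (unsigned long)tmp->addr;
                    va->va_end = va->va_start + tmp->size;
                    va->vm = tmp;           /* the new back-link */
                    printf("area %#lx-%#lx -> vm %p\n",
                           va->va_start, va->va_end, (void *)va->vm);
                    free(va);
            }
            return 0;
    }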
 
@@ -1279,13 +1288,13 @@ DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
-                             unsigned long flags, void *caller)
+                             unsigned long flags, const void *caller)
 {
        vm->flags = flags;
        vm->addr = (void *)va->va_start;
        vm->size = va->va_end - va->va_start;
        vm->caller = caller;
-       va->private = vm;
+       va->vm = vm;
        va->flags |= VM_VM_AREA;
 }
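
From here on, a series of hunks merely constify the caller cookie that vmalloc records for /proc/vmallocinfo. The cookie is a return address that is only ever read back for display, so const void * is the honest type and spares callers any casting. A small runnable illustration using the GCC/clang builtin that kernel callers typically pass:

    #include <stdio.h>

    static void record_caller(const void *caller)
    {
            printf("allocated from %p\n", caller);  /* read-only use */
    }

    static void my_alloc(void)
    {
            /* the kernel passes e.g. __builtin_return_address(0) */
            record_caller(__builtin_return_address(0));
    }

    int main(void)
    {
            my_alloc();
            return 0;
    }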
 
@@ -1305,7 +1314,7 @@ static void insert_vmalloc_vmlist(struct vm_struct *vm)
 }
 
 static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
-                             unsigned long flags, void *caller)
+                             unsigned long flags, const void *caller)
 {
        setup_vmalloc_vm(vm, va, flags, caller);
        insert_vmalloc_vmlist(vm);
@@ -1313,7 +1322,7 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
 
 static struct vm_struct *__get_vm_area_node(unsigned long size,
                unsigned long align, unsigned long flags, unsigned long start,
-               unsigned long end, int node, gfp_t gfp_mask, void *caller)
+               unsigned long end, int node, gfp_t gfp_mask, const void *caller)
 {
        struct vmap_area *va;
        struct vm_struct *area;
@@ -1374,7 +1383,7 @@ EXPORT_SYMBOL_GPL(__get_vm_area);
 
 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
                                       unsigned long start, unsigned long end,
-                                      void *caller)
+                                      const void *caller)
 {
        return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
                                  caller);
@@ -1396,19 +1405,27 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 }
 
 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
-                               void *caller)
+                               const void *caller)
 {
        return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
                                                -1, GFP_KERNEL, caller);
 }
 
-static struct vm_struct *find_vm_area(const void *addr)
+/**
+ *     find_vm_area  -  find a contiguous kernel virtual area
+ *     @addr:          base address
+ *
+ *     Search for the kernel VM area starting at @addr, and return it.
+ *     It is up to the caller to do all required locking to keep the returned
+ *     pointer valid.
+ */
+struct vm_struct *find_vm_area(const void *addr)
 {
        struct vmap_area *va;
 
        va = find_vmap_area((unsigned long)addr);
        if (va && va->flags & VM_VM_AREA)
-               return va->private;
+               return va->vm;
 
        return NULL;
 }
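
With the static dropped and kerneldoc added, find_vm_area() becomes usable by the rest of the kernel to map a vmalloc address back to its vm_struct; the new va->vm member is what makes the lookup a single dereference. A hypothetical in-kernel user, purely illustrative and not part of this patch:

    #include <linux/vmalloc.h>

    /* Hypothetical helper: how many pages back this vmalloc address? */
    static unsigned int pages_backing(const void *addr)
    {
            struct vm_struct *area = find_vm_area(addr);

            return area ? area->nr_pages : 0;
    }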
@@ -1427,7 +1444,7 @@ struct vm_struct *remove_vm_area(const void *addr)
 
        va = find_vmap_area((unsigned long)addr);
        if (va && va->flags & VM_VM_AREA) {
-               struct vm_struct *vm = va->private;
+               struct vm_struct *vm = va->vm;
 
                if (!(vm->flags & VM_UNLIST)) {
                        struct vm_struct *tmp, **p;
@@ -1567,9 +1584,9 @@ EXPORT_SYMBOL(vmap);
 
 static void *__vmalloc_node(unsigned long size, unsigned long align,
                            gfp_t gfp_mask, pgprot_t prot,
-                           int node, void *caller);
+                           int node, const void *caller);
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
-                                pgprot_t prot, int node, void *caller)
+                                pgprot_t prot, int node, const void *caller)
 {
        const int order = 0;
        struct page **pages;
@@ -1642,7 +1659,7 @@ fail:
  */
 void *__vmalloc_node_range(unsigned long size, unsigned long align,
                        unsigned long start, unsigned long end, gfp_t gfp_mask,
-                       pgprot_t prot, int node, void *caller)
+                       pgprot_t prot, int node, const void *caller)
 {
        struct vm_struct *area;
        void *addr;
@@ -1698,7 +1715,7 @@ fail:
  */
 static void *__vmalloc_node(unsigned long size, unsigned long align,
                            gfp_t gfp_mask, pgprot_t prot,
-                           int node, void *caller)
+                           int node, const void *caller)
 {
        return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
                                gfp_mask, prot, node, caller);
@@ -1906,9 +1923,9 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
                         * we can expect USER0 is not used (see vread/vwrite's
                         * function description)
                         */
-                       void *map = kmap_atomic(p, KM_USER0);
+                       void *map = kmap_atomic(p);
                        memcpy(buf, map + offset, length);
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                } else
                        memset(buf, 0, length);
 
@@ -1945,9 +1962,9 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
                         * we can expect USER0 is not used (see vread/vwrite's
                         * function description)
                         */
-                       void *map = kmap_atomic(p, KM_USER0);
+                       void *map = kmap_atomic(p);
                        memcpy(map + offset, buf, length);
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                }
                addr += length;
                buf += length;
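
The two hunks above are fallout from the tree-wide kmap_atomic() conversion: atomic kmaps became stack-like, so the explicit KM_USER0 slot argument disappeared and kunmap_atomic() simply takes the pointer returned by the matching kmap_atomic(). A kernel-style sketch of the new pairing (assuming only a valid page and a caller in atomic-safe context):

    #include <linux/highmem.h>
    #include <linux/string.h>

    static void copy_from_page(void *buf, struct page *p,
                               unsigned long offset, size_t len)
    {
            void *map = kmap_atomic(p);    /* no KM_* slot any more */

            memcpy(buf, map + offset, len);
            kunmap_atomic(map);            /* LIFO: last mapped, first unmapped */
    }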
@@ -1974,9 +1991,7 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
  *     IOREMAP area is treated as memory hole and no copy is done.
  *
  *     If [addr...addr+count) doesn't includes any intersects with alive
- *     vm_struct area, returns 0.
- *     @buf should be kernel's buffer. Because this function uses KM_USER0,
- *     the caller should guarantee KM_USER0 is not used.
+ *     vm_struct area, returns 0. @buf should be a kernel buffer.
  *
  *     Note: In usual ops, vread() is never necessary because the caller
  *     should know vmalloc() area is valid and can use memcpy().
@@ -2050,9 +2065,7 @@ finished:
  *     IOREMAP area is treated as memory hole and no copy is done.
  *
  *     If [addr...addr+count) doesn't includes any intersects with alive
- *     vm_struct area, returns 0.
- *     @buf should be kernel's buffer. Because this function uses KM_USER0,
- *     the caller should guarantee KM_USER0 is not used.
+ *     vm_struct area, returns 0. @buf should be a kernel buffer.
  *
  *     Note: In usual ops, vwrite() is never necessary because the caller
  *     should know vmalloc() area is valid and can use memcpy().
@@ -2375,10 +2388,10 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
                return NULL;
        }
 
-       vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
-       vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
+       vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
+       vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
        if (!vas || !vms)
-               goto err_free;
+               goto err_free2;
 
        for (area = 0; area < nr_vms; area++) {
                vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
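
Switching from kzalloc(sizeof(x[0]) * nr, ...) to kcalloc(nr, sizeof(x[0]), ...) keeps the zeroing but adds an overflow check on the multiplication, so a huge nr_vms cannot silently wrap into a short allocation. A runnable userspace model of the difference (checked_calloc stands in for kcalloc):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *checked_calloc(size_t n, size_t size)
    {
            if (size && n > SIZE_MAX / size)   /* n * size would wrap */
                    return NULL;
            return calloc(n, size);            /* zeroed, like kcalloc */
    }

    int main(void)
    {
            /* a wrapping request is refused instead of under-allocated */
            void *p = checked_calloc(SIZE_MAX / 2, 4);

            printf("overflowing request -> %p\n", p);   /* (nil) */
            free(p);
            return 0;
    }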
@@ -2476,11 +2489,10 @@ found:
 
 err_free:
        for (area = 0; area < nr_vms; area++) {
-               if (vas)
-                       kfree(vas[area]);
-               if (vms)
-                       kfree(vms[area]);
+               kfree(vas[area]);
+               kfree(vms[area]);
        }
+err_free2:
        kfree(vas);
        kfree(vms);
        return NULL;
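
The final hunk restructures the unwind path: if either array allocation failed, the old code still entered the per-element loop and relied on the "if (vas)"/"if (vms)" guards, while the new err_free2 label skips that loop entirely. Within err_free the guards can then go, because the arrays were zero-filled on allocation and kfree(NULL) is a no-op. The same two-label shape in a runnable userspace sketch:

    #include <stdlib.h>

    static int build_and_teardown(int n)
    {
            void **a = calloc(n, sizeof(*a));
            void **b = calloc(n, sizeof(*b));
            int ret = -1, i;

            if (!a || !b)
                    goto err_free2;   /* arrays missing: skip element pass */

            for (i = 0; i < n; i++) {
                    a[i] = malloc(16);
                    b[i] = malloc(16);
                    if (!a[i] || !b[i])
                            goto err_free;
            }
            ret = 0;                  /* "success"; tear down anyway here */

    err_free:
            /* unfilled slots are NULL (calloc) and free(NULL) is a no-op,
             * which is why the per-iteration NULL checks could be dropped */
            for (i = 0; i < n; i++) {
                    free(a[i]);
                    free(b[i]);
            }
    err_free2:
            free(a);
            free(b);
            return ret;
    }

    int main(void)
    {
            return build_and_teardown(4) ? 1 : 0;
    }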