infrastructure to debug (dynamic) objects
diff --git a/mm/nommu.c b/mm/nommu.c
index af87456..ef8c62c 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
  *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
  *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
  *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
+ *  Copyright (c) 2007      Paul Mundt <lethal@linux-sh.org>
  */
 
+#include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
 #include <linux/swap.h>
@@ -44,7 +46,7 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
 int heap_stack_gap = 0;
 
 EXPORT_SYMBOL(mem_map);
-EXPORT_SYMBOL(__vm_enough_memory);
+EXPORT_SYMBOL(num_physpages);
 
 /* list of shareable VMAs */
 struct rb_root nommu_vma_tree = RB_ROOT;
@@ -53,12 +55,6 @@ DECLARE_RWSEM(nommu_vma_sem);
 struct vm_operations_struct generic_file_vm_ops = {
 };
 
-EXPORT_SYMBOL(vfree);
-EXPORT_SYMBOL(vmalloc_to_page);
-EXPORT_SYMBOL(vmalloc_32);
-EXPORT_SYMBOL(vmap);
-EXPORT_SYMBOL(vunmap);
-
 /*
  * Handle all mappings that got truncated by a "truncate()"
  * system call.
@@ -109,7 +105,11 @@ unsigned int kobjsize(const void *objp)
 {
        struct page *page;
 
-       if (!objp || !((page = virt_to_page(objp))))
+       /*
+        * If the object we have should not have ksize performed on it,
+        * return size of 0
+        */
+       if (!objp || (unsigned long)objp >= memory_end || !((page = virt_to_page(objp))))
                return 0;
 
        if (PageSlab(page))
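
kobjsize() feeds the allocation bookkeeping used elsewhere in this file; a minimal sketch of that pattern, mirroring the realalloc/askedalloc accounting visible further down in this diff (illustrative, not part of the patch):

	struct vm_list_struct *vml;

	vml = kzalloc(sizeof(*vml), GFP_KERNEL);
	if (!vml)
		return -ENOMEM;
	realalloc += kobjsize(vml);	/* what the allocator actually handed out */
	askedalloc += sizeof(*vml);	/* what we asked for */
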
@@ -167,35 +167,58 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 finish_or_fault:
        return i ? : -EFAULT;
 }
-
 EXPORT_SYMBOL(get_user_pages);
 
 DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
-void vfree(void *addr)
+void vfree(const void *addr)
 {
        kfree(addr);
 }
+EXPORT_SYMBOL(vfree);
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
        /*
-        * kmalloc doesn't like __GFP_HIGHMEM for some reason
+        *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
+        * returns only a logical address.
         */
        return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
 }
+EXPORT_SYMBOL(__vmalloc);
 
-struct page * vmalloc_to_page(void *addr)
+void *vmalloc_user(unsigned long size)
+{
+       void *ret;
+
+       ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+                       PAGE_KERNEL);
+       if (ret) {
+               struct vm_area_struct *vma;
+
+               down_write(&current->mm->mmap_sem);
+               vma = find_vma(current->mm, (unsigned long)ret);
+               if (vma)
+                       vma->vm_flags |= VM_USERMAP;
+               up_write(&current->mm->mmap_sem);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(vmalloc_user);
+
+struct page *vmalloc_to_page(const void *addr)
 {
        return virt_to_page(addr);
 }
+EXPORT_SYMBOL(vmalloc_to_page);
 
-unsigned long vmalloc_to_pfn(void *addr)
+unsigned long vmalloc_to_pfn(const void *addr)
 {
        return page_to_pfn(virt_to_page(addr));
 }
-
+EXPORT_SYMBOL(vmalloc_to_pfn);
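
Because __vmalloc() is backed by kmalloc() here, vmalloc memory on nommu is physically contiguous, which is why vmalloc_to_page() and vmalloc_to_pfn() can fall straight through to virt_to_page(). A small illustration (not part of the patch):

	void *buf = vmalloc(2 * PAGE_SIZE);	/* really a kmalloc() on nommu */

	if (buf) {
		/* consecutive pages of a nommu vmalloc area have consecutive pfns */
		BUG_ON(vmalloc_to_pfn(buf + PAGE_SIZE) != vmalloc_to_pfn(buf) + 1);
		vfree(buf);
	}
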
 
 long vread(char *buf, char *addr, unsigned long count)
 {
@@ -236,9 +259,8 @@ void *vmalloc_node(unsigned long size, int node)
 }
 EXPORT_SYMBOL(vmalloc_node);
 
-/*
- *     vmalloc_32  -  allocate virtually continguos memory (32bit addressable)
- *
+/**
+ * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
  *     @size:          allocation size
  *
  *     Allocate enough 32bit PA addressable pages to cover @size from the
@@ -248,17 +270,55 @@ void *vmalloc_32(unsigned long size)
 {
        return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
 }
+EXPORT_SYMBOL(vmalloc_32);
+
+/**
+ * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
+ *     @size:          allocation size
+ *
+ * The resulting memory area is 32bit addressable and zeroed so it can be
+ * mapped to userspace without leaking data.
+ *
+ * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
+ * remap_vmalloc_range() are permissible.
+ */
+void *vmalloc_32_user(unsigned long size)
+{
+       /*
+        * We'll have to sort out the ZONE_DMA bits for 64-bit,
+        * but for now this can simply use vmalloc_user() directly.
+        */
+       return vmalloc_user(size);
+}
+EXPORT_SYMBOL(vmalloc_32_user);
 
 void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
 {
        BUG();
        return NULL;
 }
+EXPORT_SYMBOL(vmap);
 
-void vunmap(void *addr)
+void vunmap(const void *addr)
 {
        BUG();
 }
+EXPORT_SYMBOL(vunmap);
+
+/*
+ * Implement a stub for vmalloc_sync_all() if the architecture chose not to
+ * have one.
+ */
+void  __attribute__((weak)) vmalloc_sync_all(void)
+{
+}
+
+int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+                  struct page *page)
+{
+       return -EINVAL;
+}
+EXPORT_SYMBOL(vm_insert_page);
 
 /*
  *  sys_brk() for the most part doesn't need the global kernel
@@ -358,6 +418,11 @@ struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
        return find_vma(mm, addr);
 }
 
+int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+       return -ENOMEM;
+}
+
 /*
  * look up the first VMA that exactly matches addr
  * - should be called with mm->mmap_sem at least held readlocked
@@ -523,7 +588,7 @@ static int validate_mmap_request(struct file *file,
                 */
                mapping = file->f_mapping;
                if (!mapping)
-                       mapping = file->f_dentry->d_inode->i_mapping;
+                       mapping = file->f_path.dentry->d_inode->i_mapping;
 
                capabilities = 0;
                if (mapping && mapping->backing_dev_info)
@@ -532,7 +597,7 @@ static int validate_mmap_request(struct file *file,
                if (!capabilities) {
                        /* no explicit capabilities set, so assume some
                         * defaults */
-                       switch (file->f_dentry->d_inode->i_mode & S_IFMT) {
+                       switch (file->f_path.dentry->d_inode->i_mode & S_IFMT) {
                        case S_IFREG:
                        case S_IFBLK:
                                capabilities = BDI_CAP_MAP_COPY;
@@ -563,11 +628,11 @@ static int validate_mmap_request(struct file *file,
                            !(file->f_mode & FMODE_WRITE))
                                return -EACCES;
 
-                       if (IS_APPEND(file->f_dentry->d_inode) &&
+                       if (IS_APPEND(file->f_path.dentry->d_inode) &&
                            (file->f_mode & FMODE_WRITE))
                                return -EACCES;
 
-                       if (locks_verify_locked(file->f_dentry->d_inode))
+                       if (locks_verify_locked(file->f_path.dentry->d_inode))
                                return -EAGAIN;
 
                        if (!(capabilities & BDI_CAP_MAP_DIRECT))
@@ -598,7 +663,7 @@ static int validate_mmap_request(struct file *file,
 
                /* handle executable mappings and implied executable
                 * mappings */
-               if (file->f_vfsmnt->mnt_flags & MNT_NOEXEC) {
+               if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
                        if (prot & PROT_EXEC)
                                return -EPERM;
                }
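
The f_dentry and f_vfsmnt accesses above are updated because both fields were folded into file->f_path (a struct path); the equivalent accessors after this change are:

	struct inode *inode = file->f_path.dentry->d_inode;
	struct vfsmount *mnt = file->f_path.mnt;
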
@@ -630,7 +695,7 @@ static int validate_mmap_request(struct file *file,
        }
 
        /* allow the security API to have its say */
-       ret = security_file_mmap(file, reqprot, prot, flags);
+       ret = security_file_mmap(file, reqprot, prot, flags, addr, 0);
        if (ret < 0)
                return ret;
 
@@ -796,6 +861,9 @@ unsigned long do_mmap_pgoff(struct file *file,
        void *result;
        int ret;
 
+       if (!(flags & MAP_FIXED))
+               addr = round_hint_to_min(addr);
+
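
round_hint_to_min() is the mmap_min_addr helper from the security layer; as a rough sketch of its behaviour in this era (not part of this file), it bumps dangerously low hints up to the configured minimum:

static inline unsigned long round_hint_to_min(unsigned long hint)
{
	hint &= PAGE_MASK;
	if (((void *)hint != NULL) && (hint < mmap_min_addr))
		return PAGE_ALIGN(mmap_min_addr);
	return hint;
}
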
        /* decide whether we should attempt the mapping, and if so what sort of
         * mapping */
        ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
@@ -826,6 +894,11 @@ unsigned long do_mmap_pgoff(struct file *file,
                unsigned long pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                unsigned long vmpglen;
 
+               /* suppress VMA sharing for shared regions */
+               if (vm_flags & VM_SHARED &&
+                   capabilities & BDI_CAP_MAP_DIRECT)
+                       goto dont_share_VMAs;
+
                for (rb = rb_first(&nommu_vma_tree); rb; rb = rb_next(rb)) {
                        vma = rb_entry(rb, struct vm_area_struct, vm_rb);
 
@@ -833,7 +906,7 @@ unsigned long do_mmap_pgoff(struct file *file,
                                continue;
 
                        /* search for overlapping mappings on the same file */
-                       if (vma->vm_file->f_dentry->d_inode != file->f_dentry->d_inode)
+                       if (vma->vm_file->f_path.dentry->d_inode != file->f_path.dentry->d_inode)
                                continue;
 
                        if (vma->vm_pgoff >= pgoff + pglen)
@@ -859,6 +932,7 @@ unsigned long do_mmap_pgoff(struct file *file,
                        goto shared;
                }
 
+       dont_share_VMAs:
                vma = NULL;
 
                /* obtain the address at which to make a shared mapping
@@ -892,8 +966,13 @@ unsigned long do_mmap_pgoff(struct file *file,
 
        INIT_LIST_HEAD(&vma->anon_vma_node);
        atomic_set(&vma->vm_usage, 1);
-       if (file)
+       if (file) {
                get_file(file);
+               if (vm_flags & VM_EXECUTABLE) {
+                       added_exe_file_vma(current->mm);
+                       vma->vm_mm = current->mm;
+               }
+       }
        vma->vm_file    = file;
        vma->vm_flags   = vm_flags;
        vma->vm_start   = addr;
@@ -948,8 +1027,11 @@ unsigned long do_mmap_pgoff(struct file *file,
        up_write(&nommu_vma_sem);
        kfree(vml);
        if (vma) {
-               if (vma->vm_file)
+               if (vma->vm_file) {
                        fput(vma->vm_file);
+                       if (vma->vm_flags & VM_EXECUTABLE)
+                               removed_exe_file_vma(vma->vm_mm);
+               }
                kfree(vma);
        }
        return ret;
@@ -974,11 +1056,12 @@ unsigned long do_mmap_pgoff(struct file *file,
        show_free_areas();
        return -ENOMEM;
 }
+EXPORT_SYMBOL(do_mmap_pgoff);
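
Exporting do_mmap_pgoff() makes it callable from modules; as in the sys_mmap() paths, callers are expected to hold the target mm's mmap_sem for writing around the call. A hedged sketch (variables are illustrative):

	down_write(&current->mm->mmap_sem);
	addr = do_mmap_pgoff(file, 0, len, PROT_READ | PROT_WRITE,
			     MAP_SHARED, 0);
	up_write(&current->mm->mmap_sem);
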
 
 /*
  * handle mapping disposal for uClinux
  */
-static void put_vma(struct vm_area_struct *vma)
+static void put_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 {
        if (vma) {
                down_write(&nommu_vma_sem);
@@ -1000,8 +1083,11 @@ static void put_vma(struct vm_area_struct *vma)
                        realalloc -= kobjsize(vma);
                        askedalloc -= sizeof(*vma);
 
-                       if (vma->vm_file)
+                       if (vma->vm_file) {
                                fput(vma->vm_file);
+                               if (vma->vm_flags & VM_EXECUTABLE)
+                                       removed_exe_file_vma(mm);
+                       }
                        kfree(vma);
                }
 
@@ -1038,7 +1124,7 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
  found:
        vml = *parent;
 
-       put_vma(vml->vma);
+       put_vma(mm, vml->vma);
 
        *parent = vml->next;
        realalloc -= kobjsize(vml);
@@ -1054,6 +1140,7 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
 
        return 0;
 }
+EXPORT_SYMBOL(do_munmap);
 
 asmlinkage long sys_munmap(unsigned long addr, size_t len)
 {
@@ -1082,7 +1169,7 @@ void exit_mmap(struct mm_struct * mm)
 
                while ((tmp = mm->context.vmlist)) {
                        mm->context.vmlist = tmp->next;
-                       put_vma(tmp->vma);
+                       put_vma(mm, tmp->vma);
 
                        realalloc -= kobjsize(tmp);
                        askedalloc -= sizeof(*tmp);
@@ -1144,6 +1231,7 @@ unsigned long do_mremap(unsigned long addr,
 
        return vma->vm_start;
 }
+EXPORT_SYMBOL(do_mremap);
 
 asmlinkage unsigned long sys_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
@@ -1171,6 +1259,21 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 }
 EXPORT_SYMBOL(remap_pfn_range);
 
+int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+                       unsigned long pgoff)
+{
+       unsigned int size = vma->vm_end - vma->vm_start;
+
+       if (!(vma->vm_flags & VM_USERMAP))
+               return -EINVAL;
+
+       vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
+       vma->vm_end = vma->vm_start + size;
+
+       return 0;
+}
+EXPORT_SYMBOL(remap_vmalloc_range);
+
 void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
 {
 }
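
Together with vmalloc_user() above, remap_vmalloc_range() lets a driver hand a kernel buffer to userspace. A hypothetical mmap handler (identifiers are illustrative, not from this patch) would pair the two like this:

static void *example_buf;	/* allocated with vmalloc_user() at probe time */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/*
	 * remap_vmalloc_range() refuses VMAs that are not marked VM_USERMAP
	 * and then simply points vm_start/vm_end at the buffer, since there
	 * is no page-table remapping to do on nommu.
	 */
	return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}
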
@@ -1193,6 +1296,27 @@ void unmap_mapping_range(struct address_space *mapping,
 EXPORT_SYMBOL(unmap_mapping_range);
 
 /*
+ * ask for an unmapped area at which to create a mapping on a file
+ */
+unsigned long get_unmapped_area(struct file *file, unsigned long addr,
+                               unsigned long len, unsigned long pgoff,
+                               unsigned long flags)
+{
+       unsigned long (*get_area)(struct file *, unsigned long, unsigned long,
+                                 unsigned long, unsigned long);
+
+       get_area = current->mm->get_unmapped_area;
+       if (file && file->f_op && file->f_op->get_unmapped_area)
+               get_area = file->f_op->get_unmapped_area;
+
+       if (!get_area)
+               return -ENOSYS;
+
+       return get_area(file, addr, len, pgoff, flags);
+}
+EXPORT_SYMBOL(get_unmapped_area);
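
get_unmapped_area() defers to a file's own hook when one is provided, which is how nommu drivers typically expose driver memory to mmap(): the hook returns the address of the memory itself, since nothing is actually remapped. A hypothetical driver-side example (identifiers are illustrative):

static char example_dev_buffer[PAGE_SIZE];	/* illustrative driver memory */

static unsigned long example_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	return (unsigned long)example_dev_buffer + (pgoff << PAGE_SHIFT);
}

static const struct file_operations example_fops = {
	.get_unmapped_area	= example_get_unmapped_area,
};
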
+
+/*
  * Check that a process has enough memory to allocate a new virtual
  * mapping. 0 means there is enough memory for the allocation to
  * succeed and -ENOMEM implies there is not.
@@ -1208,7 +1332,7 @@ EXPORT_SYMBOL(unmap_mapping_range);
  * Note this is a helper function intended to be used by LSMs which
  * wish to use this logic.
  */
-int __vm_enough_memory(long pages, int cap_sys_admin)
+int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 {
        unsigned long free, allowed;
 
@@ -1299,12 +1423,12 @@ int in_gate_area_no_task(unsigned long addr)
        return 0;
 }
 
-struct page *filemap_nopage(struct vm_area_struct *area,
-                       unsigned long address, int *type)
+int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        BUG();
-       return NULL;
+       return 0;
 }
+EXPORT_SYMBOL(filemap_fault);
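
filemap_nopage() disappears because the ->nopage handler was superseded by the ->fault interface; on MMU kernels the generic file path now wires the handler up roughly as below (a sketch, using an illustrative name), while the nommu stub above simply BUG()s if it is ever reached:

struct vm_operations_struct example_file_vm_ops = {
	.fault	= filemap_fault,
};
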
 
 /*
  * Access another process' address space.