tree-wide: fix assorted typos all over the place
[linux-2.6.git] / drivers / char / mspec.c
index 049a46c..ecb89d7 100644 (file)
@@ -155,23 +155,22 @@ mspec_open(struct vm_area_struct *vma)
  * mspec_close
  *
  * Called when unmapping a device mapping. Frees all mspec pages
- * belonging to the vma.
+ * belonging to all the vmas sharing this vma_data structure.
  */
 static void
 mspec_close(struct vm_area_struct *vma)
 {
        struct vma_data *vdata;
-       int index, last_index, result;
+       int index, last_index;
        unsigned long my_page;
 
        vdata = vma->vm_private_data;
 
-       BUG_ON(vma->vm_start < vdata->vm_start || vma->vm_end > vdata->vm_end);
+       if (!atomic_dec_and_test(&vdata->refcnt))
+               return;
 
-       spin_lock(&vdata->lock);
-       index = (vma->vm_start - vdata->vm_start) >> PAGE_SHIFT;
-       last_index = (vma->vm_end - vdata->vm_start) >> PAGE_SHIFT;
-       for (; index < last_index; index++) {
+       last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
+       for (index = 0; index < last_index; index++) {
                if (vdata->maddr[index] == 0)
                        continue;
                /*
@@ -180,20 +179,12 @@ mspec_close(struct vm_area_struct *vma)
                 */
                my_page = vdata->maddr[index];
                vdata->maddr[index] = 0;
-               spin_unlock(&vdata->lock);
-               result = mspec_zero_block(my_page, PAGE_SIZE);
-               if (!result)
-                       uncached_free_page(my_page);
+               if (!mspec_zero_block(my_page, PAGE_SIZE))
+                       uncached_free_page(my_page, 1);
                else
                        printk(KERN_WARNING "mspec_close(): "
-                              "failed to zero page %i\n",
-                              result);
-               spin_lock(&vdata->lock);
+                              "failed to zero page %ld\n", my_page);
        }
-       spin_unlock(&vdata->lock);
-
-       if (!atomic_dec_and_test(&vdata->refcnt))
-               return;
 
        if (vdata->flags & VMD_VMALLOCED)
                vfree(vdata);
@@ -201,34 +192,31 @@ mspec_close(struct vm_area_struct *vma)
                kfree(vdata);
 }
 
-
 /*
- * mspec_nopfn
+ * mspec_fault
  *
  * Creates a mspec page and maps it to user space.
  */
-static unsigned long
-mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
+static int
+mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        unsigned long paddr, maddr;
        unsigned long pfn;
-       int index;
+       pgoff_t index = vmf->pgoff;
        struct vma_data *vdata = vma->vm_private_data;
 
-       BUG_ON(address < vdata->vm_start || address >= vdata->vm_end);
-       index = (address - vdata->vm_start) >> PAGE_SHIFT;
        maddr = (volatile unsigned long) vdata->maddr[index];
        if (maddr == 0) {
-               maddr = uncached_alloc_page(numa_node_id());
+               maddr = uncached_alloc_page(numa_node_id(), 1);
                if (maddr == 0)
-                       return NOPFN_OOM;
+                       return VM_FAULT_OOM;
 
                spin_lock(&vdata->lock);
                if (vdata->maddr[index] == 0) {
                        vdata->count++;
                        vdata->maddr[index] = maddr;
                } else {
-                       uncached_free_page(maddr);
+                       uncached_free_page(maddr, 1);
                        maddr = vdata->maddr[index];
                }
                spin_unlock(&vdata->lock);
@@ -241,19 +229,26 @@ mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
 
        pfn = paddr >> PAGE_SHIFT;
 
-       return pfn;
+       /*
+        * vm_insert_pfn can fail with -EBUSY, but in that case it will
+        * be because another thread has installed the pte first, so it
+        * is no problem.
+        */
+       vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+
+       return VM_FAULT_NOPAGE;
 }
 
-static struct vm_operations_struct mspec_vm_ops = {
+static const struct vm_operations_struct mspec_vm_ops = {
        .open = mspec_open,
        .close = mspec_close,
-       .nopfn = mspec_nopfn
+       .fault = mspec_fault,
 };
 
 /*
  * mspec_mmap
  *
- * Called when mmaping the device.  Initializes the vma with a fault handler
+ * Called when mmapping the device.  Initializes the vma with a fault handler
  * and private data structure necessary to allocate, track, and free the
  * underlying pages.
  */
@@ -293,7 +288,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
        vdata->refcnt = ATOMIC_INIT(1);
        vma->vm_private_data = vdata;
 
-       vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP);
+       vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
        if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &mspec_vm_ops;
@@ -372,12 +367,12 @@ mspec_init(void)
                is_sn2 = 1;
                if (is_shub2()) {
                        ret = -ENOMEM;
-                       for_each_online_node(nid) {
+                       for_each_node_state(nid, N_ONLINE) {
                                int actual_nid;
                                int nasid;
                                unsigned long phys;
 
-                               scratch_page[nid] = uncached_alloc_page(nid);
+                               scratch_page[nid] = uncached_alloc_page(nid, 1);
                                if (scratch_page[nid] == 0)
                                        goto free_scratch_pages;
                                phys = __pa(scratch_page[nid]);
@@ -424,7 +419,7 @@ mspec_init(void)
  free_scratch_pages:
        for_each_node(nid) {
                if (scratch_page[nid] != 0)
-                       uncached_free_page(scratch_page[nid]);
+                       uncached_free_page(scratch_page[nid], 1);
        }
        return ret;
 }
@@ -441,7 +436,7 @@ mspec_exit(void)
 
                for_each_node(nid) {
                        if (scratch_page[nid] != 0)
-                               uncached_free_page(scratch_page[nid]);
+                               uncached_free_page(scratch_page[nid], 1);
                }
        }
 }