hugepage: fix broken check for offset alignment in hugepage mappings
David Gibson [Fri, 31 Aug 2007 06:56:40 +0000 (23:56 -0700)]
For hugepage mappings, the file offset, like the address and size, needs to
be aligned to the size of a hugepage.

In commit 68589bc353037f233fe510ad9ff432338c95db66, the check for this was
moved into prepare_hugepage_range() along with the address and size checks.
 But since BenH's rework of the get_unmapped_area() paths leading up to
commit 4b1d89290b62bb2db476c94c82cf7442aab440c8, prepare_hugepage_range()
is only called for MAP_FIXED mappings, not for other mappings.  This means
we're no longer ever checking for an aligned offset - I've confirmed that
mmap() will (apparently) succeed with a misaligned offset on both powerpc
and i386 at least.
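
For reference, a minimal userspace sketch of that misaligned-offset case
(the hugetlbfs mount point, file name and 4MB hugepage size are assumptions
for illustration, not part of this patch):

    /* Map one hugepage at a file offset of one small page.  With the
     * offset check in place this mmap() should fail with EINVAL;
     * without it, it (apparently) succeeds.  Assumes hugetlbfs is
     * mounted at /mnt/huge and hugepages are available in the pool. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define HPAGE_SIZE (4UL * 1024 * 1024)   /* assumed hugepage size */

    int main(void)
    {
        int fd = open("/mnt/huge/offset-test", O_CREAT | O_RDWR, 0600);
        void *p;

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* getpagesize() bytes is not a multiple of HPAGE_SIZE. */
        p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
                 MAP_SHARED, fd, getpagesize());
        if (p == MAP_FAILED)
            perror("mmap");   /* expected once fixed: Invalid argument */
        else
            printf("mmap unexpectedly succeeded at %p\n", p);
        close(fd);
        return 0;
    }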

This patch restores the check, removing it from prepare_hugepage_range()
and putting it back into hugetlbfs_file_mmap().  I'm putting it there,
rather than in the get_unmapped_area() path, so the check only needs to go
in one place, rather than separately in the half-dozen or so arch-specific
implementations of hugetlb_get_unmapped_area().
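
As a note on the restored test: vm_pgoff gives the file offset in PAGE_SIZE
units, so a hugepage-aligned offset must have its low
(HPAGE_SHIFT - PAGE_SHIFT) bits clear.  A standalone sketch of the mask
arithmetic used in hugetlbfs_file_mmap() below, with illustrative constants
(4KB base pages, 4MB hugepages; not taken from this patch):

    #define PAGE_SHIFT   12
    #define HPAGE_SHIFT  22
    #define HPAGE_MASK   (~((1UL << HPAGE_SHIFT) - 1))

    /* Nonzero means the file offset described by vm_pgoff is not
     * hugepage aligned (or too large to express in bytes). */
    static int hugepage_offset_misaligned(unsigned long vm_pgoff)
    {
        /* ~(HPAGE_MASK >> PAGE_SHIFT) has the low
         * (HPAGE_SHIFT - PAGE_SHIFT) bits set, so any of them set in
         * vm_pgoff means the byte offset is not a hugepage multiple.
         * It also has the top PAGE_SHIFT bits set, which rejects a
         * pgoff whose byte offset would overflow an unsigned long. */
        return (vm_pgoff & ~(HPAGE_MASK >> PAGE_SHIFT)) != 0;
    }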

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Cc: Adam Litke <agl@us.ibm.com>
Cc: Andi Kleen <ak@suse.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

arch/i386/mm/hugetlbpage.c
arch/ia64/mm/hugetlbpage.c
arch/sparc64/mm/hugetlbpage.c
fs/hugetlbfs/inode.c
include/linux/hugetlb.h

diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c
index efdf95a..6c06d9c 100644
--- a/arch/i386/mm/hugetlbpage.c
+++ b/arch/i386/mm/hugetlbpage.c
@@ -367,7 +367,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                return -ENOMEM;
 
        if (flags & MAP_FIXED) {
-               if (prepare_hugepage_range(addr, len, pgoff))
+               if (prepare_hugepage_range(addr, len))
                        return -EINVAL;
                return addr;
        }
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index d22861c..a9ff685 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -75,10 +75,8 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
  * Don't actually need to do any preparation, but need to make sure
  * the address is in the right region.
  */
-int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
+int prepare_hugepage_range(unsigned long addr, unsigned long len)
 {
-       if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
-               return -EINVAL;
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
@@ -151,7 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
 
        /* Handle MAP_FIXED */
        if (flags & MAP_FIXED) {
-               if (prepare_hugepage_range(addr, len, pgoff))
+               if (prepare_hugepage_range(addr, len))
                        return -EINVAL;
                return addr;
        }
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index eaba9b7..6cfab2e 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -175,7 +175,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                return -ENOMEM;
 
        if (flags & MAP_FIXED) {
-               if (prepare_hugepage_range(addr, len, pgoff))
+               if (prepare_hugepage_range(addr, len))
                        return -EINVAL;
                return addr;
        }
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index c848a19..950c2fb 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -82,14 +82,19 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
        int ret;
 
        /*
-        * vma alignment has already been checked by prepare_hugepage_range.
-        * If you add any error returns here, do so after setting VM_HUGETLB,
-        * so is_vm_hugetlb_page tests below unmap_region go the right way
-        * when do_mmap_pgoff unwinds (may be important on powerpc and ia64).
+        * vma address alignment (but not the pgoff alignment) has
+        * already been checked by prepare_hugepage_range.  If you add
+        * any error returns here, do so after setting VM_HUGETLB, so
+        * is_vm_hugetlb_page tests below unmap_region go the right
+        * way when do_mmap_pgoff unwinds (may be important on powerpc
+        * and ia64).
         */
        vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
        vma->vm_ops = &hugetlb_vm_ops;
 
+       if (vma->vm_pgoff & ~(HPAGE_MASK >> PAGE_SHIFT))
+               return -EINVAL;
+
        vma_len = (loff_t)(vma->vm_end - vma->vm_start);
 
        mutex_lock(&inode->i_mutex);
@@ -132,7 +137,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                return -ENOMEM;
 
        if (flags & MAP_FIXED) {
-               if (prepare_hugepage_range(addr, len, pgoff))
+               if (prepare_hugepage_range(addr, len))
                        return -EINVAL;
                return addr;
        }
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index e6a71c8..3a19b03 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -66,11 +66,8 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
  */
-static inline int prepare_hugepage_range(unsigned long addr, unsigned long len,
-                                               pgoff_t pgoff)
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
 {
-       if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
-               return -EINVAL;
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
@@ -78,8 +75,7 @@ static inline int prepare_hugepage_range(unsigned long addr, unsigned long len,
        return 0;
 }
 #else
-int prepare_hugepage_range(unsigned long addr, unsigned long len,
-                                               pgoff_t pgoff);
+int prepare_hugepage_range(unsigned long addr, unsigned long len);
 #endif
 
 #ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
@@ -117,7 +113,7 @@ static inline unsigned long hugetlb_total_pages(void)
 #define hugetlb_report_meminfo(buf)            0
 #define hugetlb_report_node_meminfo(n, buf)    0
 #define follow_huge_pmd(mm, addr, pmd, write)  NULL
-#define prepare_hugepage_range(addr,len,pgoff) (-EINVAL)
+#define prepare_hugepage_range(addr,len)       (-EINVAL)
 #define pmd_huge(x)    0
 #define is_hugepage_only_range(mm, addr, len)  0
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })