[PATCH] mm: ptd_alloc inline and out
Hugh Dickins [Sun, 30 Oct 2005 01:16:22 +0000 (18:16 -0700)]
It seems odd to me that, whereas pud_alloc and pmd_alloc test inline, only
calling the out-of-line __pud_alloc or __pmd_alloc if allocation is needed,
pte_alloc_map and pte_alloc_kernel are entirely out-of-line.  Though it does
add a little to kernel size, change them to macros testing inline, calling
__pte_alloc or __pte_alloc_kernel to allocate out-of-line.  Mark none of them
as fastcalls, leave that to CONFIG_REGPARM or not.

It also seems more natural for the out-of-line functions to leave the offset
calculation and map to the inline, which has to do it anyway for the common
case.  At least mremap move wants __pte_alloc without _map.

Macros rather than inline functions, certainly to avoid the header file issues
which arise from CONFIG_HIGHPTE needing kmap_types.h, but also in case any
architectures I haven't built would have other such problems.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

include/asm-generic/4level-fixup.h
include/linux/mm.h
mm/memory.c
mm/mremap.c

index c20ec25..68c6fea 100644 (file)
 
 #define pud_t                          pgd_t
 
-#define pmd_alloc(mm, pud, address)                    \
-({     pmd_t *ret;                                     \
-       if (pgd_none(*pud))                             \
-               ret = __pmd_alloc(mm, pud, address);    \
-       else                                            \
-               ret = pmd_offset(pud, address);         \
-       ret;                                            \
-})
+#define pmd_alloc(mm, pud, address) \
+       ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
+               NULL: pmd_offset(pud, address))
 
 #define pud_alloc(mm, pgd, address)    (pgd)
 #define pud_offset(pgd, start)         (pgd)
index b9fa82b..22c2d69 100644 (file)
@@ -704,10 +704,6 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
 }
 
 extern int vmtruncate(struct inode * inode, loff_t offset);
-extern pud_t *FASTCALL(__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
-extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address));
-extern pte_t *FASTCALL(pte_alloc_kernel(pmd_t *pmd, unsigned long address));
-extern pte_t *FASTCALL(pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
 extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
 extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
 extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access);
@@ -760,32 +756,36 @@ struct shrinker;
 extern struct shrinker *set_shrinker(int, shrinker_t);
 extern void remove_shrinker(struct shrinker *shrinker);
 
-/*
- * On a two-level or three-level page table, this ends up being trivial. Thus
- * the inlining and the symmetry break with pte_alloc_map() that does all
- * of this out-of-line.
- */
+int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
+int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
+int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
+
 /*
  * The following ifdef needed to get the 4level-fixup.h header to work.
  * Remove it when 4level-fixup.h has been removed.
  */
-#ifdef CONFIG_MMU
-#ifndef __ARCH_HAS_4LEVEL_HACK 
+#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
 static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 {
-       if (pgd_none(*pgd))
-               return __pud_alloc(mm, pgd, address);
-       return pud_offset(pgd, address);
+       return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
+               NULL: pud_offset(pgd, address);
 }
 
 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 {
-       if (pud_none(*pud))
-               return __pmd_alloc(mm, pud, address);
-       return pmd_offset(pud, address);
+       return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
+               NULL: pmd_offset(pud, address);
 }
-#endif
-#endif /* CONFIG_MMU */
+#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
+
+#define pte_alloc_map(mm, pmd, address)                        \
+       ((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
+               NULL: pte_offset_map(pmd, address))
+
+#define pte_alloc_kernel(pmd, address)                 \
+       ((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
+               NULL: pte_offset_kernel(pmd, address))
 
 extern void free_area_init(unsigned long * zones_size);
 extern void free_area_init_node(int nid, pg_data_t *pgdat,
index 95a4553..4bdd118 100644 (file)
@@ -280,50 +280,39 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
        }
 }
 
-pte_t fastcall *pte_alloc_map(struct mm_struct *mm, pmd_t *pmd,
-                               unsigned long address)
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 {
-       if (!pmd_present(*pmd)) {
-               struct page *new;
+       struct page *new;
 
-               spin_unlock(&mm->page_table_lock);
-               new = pte_alloc_one(mm, address);
-               spin_lock(&mm->page_table_lock);
-               if (!new)
-                       return NULL;
-               /*
-                * Because we dropped the lock, we should re-check the
-                * entry, as somebody else could have populated it..
-                */
-               if (pmd_present(*pmd)) {
-                       pte_free(new);
-                       goto out;
-               }
+       spin_unlock(&mm->page_table_lock);
+       new = pte_alloc_one(mm, address);
+       spin_lock(&mm->page_table_lock);
+       if (!new)
+               return -ENOMEM;
+
+       if (pmd_present(*pmd))          /* Another has populated it */
+               pte_free(new);
+       else {
                mm->nr_ptes++;
                inc_page_state(nr_page_table_pages);
                pmd_populate(mm, pmd, new);
        }
-out:
-       return pte_offset_map(pmd, address);
+       return 0;
 }
 
-pte_t fastcall * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
+int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 {
-       if (!pmd_present(*pmd)) {
-               pte_t *new;
-
-               new = pte_alloc_one_kernel(&init_mm, address);
-               if (!new)
-                       return NULL;
-
-               spin_lock(&init_mm.page_table_lock);
-               if (pmd_present(*pmd))
-                       pte_free_kernel(new);
-               else
-                       pmd_populate_kernel(&init_mm, pmd, new);
-               spin_unlock(&init_mm.page_table_lock);
-       }
-       return pte_offset_kernel(pmd, address);
+       pte_t *new = pte_alloc_one_kernel(&init_mm, address);
+       if (!new)
+               return -ENOMEM;
+
+       spin_lock(&init_mm.page_table_lock);
+       if (pmd_present(*pmd))          /* Another has populated it */
+               pte_free_kernel(new);
+       else
+               pmd_populate_kernel(&init_mm, pmd, new);
+       spin_unlock(&init_mm.page_table_lock);
+       return 0;
 }
 
 static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
@@ -2093,7 +2082,7 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  * Allocate page upper directory.
  * We've already handled the fast-path in-line.
  */
-pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 {
        pud_t *new;
 
@@ -2103,19 +2092,17 @@ pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long addr
        if (!new) {
                if (mm != &init_mm)     /* Temporary bridging hack */
                        spin_lock(&mm->page_table_lock);
-               return NULL;
+               return -ENOMEM;
        }
 
        spin_lock(&mm->page_table_lock);
-       if (pgd_present(*pgd)) {
+       if (pgd_present(*pgd))          /* Another has populated it */
                pud_free(new);
-               goto out;
-       }
-       pgd_populate(mm, pgd, new);
- out:
+       else
+               pgd_populate(mm, pgd, new);
        if (mm == &init_mm)             /* Temporary bridging hack */
                spin_unlock(&mm->page_table_lock);
-       return pud_offset(pgd, address);
+       return 0;
 }
 #endif /* __PAGETABLE_PUD_FOLDED */
 
@@ -2124,7 +2111,7 @@ pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long addr
  * Allocate page middle directory.
  * We've already handled the fast-path in-line.
  */
-pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 {
        pmd_t *new;
 
@@ -2134,28 +2121,24 @@ pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr
        if (!new) {
                if (mm != &init_mm)     /* Temporary bridging hack */
                        spin_lock(&mm->page_table_lock);
-               return NULL;
+               return -ENOMEM;
        }
 
        spin_lock(&mm->page_table_lock);
 #ifndef __ARCH_HAS_4LEVEL_HACK
-       if (pud_present(*pud)) {
+       if (pud_present(*pud))          /* Another has populated it */
                pmd_free(new);
-               goto out;
-       }
-       pud_populate(mm, pud, new);
+       else
+               pud_populate(mm, pud, new);
 #else
-       if (pgd_present(*pud)) {
+       if (pgd_present(*pud))          /* Another has populated it */
                pmd_free(new);
-               goto out;
-       }
-       pgd_populate(mm, pud, new);
+       else
+               pgd_populate(mm, pud, new);
 #endif /* __ARCH_HAS_4LEVEL_HACK */
-
- out:
        if (mm == &init_mm)             /* Temporary bridging hack */
                spin_unlock(&mm->page_table_lock);
-       return pmd_offset(pud, address);
+       return 0;
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
 
index ccf4564..616facc 100644 (file)
@@ -51,7 +51,6 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;
-       pte_t *pte;
 
        /*
         * We do need page_table_lock: because allocators expect that.
@@ -66,12 +65,8 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
        if (!pmd)
                goto out;
 
-       pte = pte_alloc_map(mm, pmd, addr);
-       if (!pte) {
+       if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr))
                pmd = NULL;
-               goto out;
-       }
-       pte_unmap(pte);
 out:
        spin_unlock(&mm->page_table_lock);
        return pmd;