blob: dd744aa450fae4440584e76d908573e6c5458e38 [file] [log] [blame]
Thomas Gleixnerd2912cb2019-06-04 10:11:33 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Chris Zankel9a8fd552005-06-23 22:01:26 -07002/*
Chris Zankel66569202007-08-22 10:14:51 -07003 * include/asm-xtensa/pgalloc.h
Chris Zankel9a8fd552005-06-23 22:01:26 -07004 *
Chris Zankel66569202007-08-22 10:14:51 -07005 * Copyright (C) 2001-2007 Tensilica Inc.
Chris Zankel9a8fd552005-06-23 22:01:26 -07006 */
7
8#ifndef _XTENSA_PGALLOC_H
9#define _XTENSA_PGALLOC_H
10
Chris Zankel9a8fd552005-06-23 22:01:26 -070011#include <linux/highmem.h>
Chris Zankel4573e392010-05-02 01:05:13 -070012#include <linux/slab.h>
Chris Zankel9a8fd552005-06-23 22:01:26 -070013
14/*
15 * Allocating and freeing a pmd is trivial: the 1-entry pmd is
16 * inside the pgd, so has no extra memory associated with it.
17 */
18
/*
 * The single-entry pmd is folded into the pgd, so "populating" it just
 * means storing the pte table's kernel-virtual address in the entry.
 * All macro arguments are fully parenthesized in the expansion so that
 * callers may safely pass arbitrary expressions.
 */
#define pmd_populate_kernel(mm, pmdp, ptep) \
	(pmd_val(*(pmdp)) = ((unsigned long)(ptep)))
#define pmd_populate(mm, pmdp, page) \
	(pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
/* Recover the struct page backing the pte table referenced by a pmd. */
#define pmd_pgtable(pmd) pmd_page(pmd)
Chris Zankel9a8fd552005-06-23 22:01:26 -070024
25static inline pgd_t*
26pgd_alloc(struct mm_struct *mm)
27{
Chris Zankel66569202007-08-22 10:14:51 -070028 return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
Chris Zankel9a8fd552005-06-23 22:01:26 -070029}
30
Benjamin Herrenschmidt5e541972008-02-04 22:29:14 -080031static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
Chris Zankel66569202007-08-22 10:14:51 -070032{
33 free_page((unsigned long)pgd);
34}
Chris Zankel9a8fd552005-06-23 22:01:26 -070035
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -080036static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
Chris Zankel66569202007-08-22 10:14:51 -070037{
Kirill A. Shutemovf820e282013-11-14 14:31:50 -080038 pte_t *ptep;
39 int i;
40
Michal Hocko32d6bd92016-06-24 14:48:47 -070041 ptep = (pte_t *)__get_free_page(GFP_KERNEL);
Kirill A. Shutemovf820e282013-11-14 14:31:50 -080042 if (!ptep)
43 return NULL;
44 for (i = 0; i < 1024; i++)
45 pte_clear(NULL, 0, ptep + i);
46 return ptep;
Chris Zankel66569202007-08-22 10:14:51 -070047}
48
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -080049static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
Chris Zankel66569202007-08-22 10:14:51 -070050{
Kirill A. Shutemovf8c6d302013-11-14 14:31:19 -080051 pte_t *pte;
Martin Schwidefsky2f569af2008-02-08 04:22:04 -080052 struct page *page;
53
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -080054 pte = pte_alloc_one_kernel(mm);
Kirill A. Shutemovf8c6d302013-11-14 14:31:19 -080055 if (!pte)
56 return NULL;
57 page = virt_to_page(pte);
Kirill A. Shutemov8f431232013-11-14 14:31:48 -080058 if (!pgtable_page_ctor(page)) {
Kirill A. Shutemovf820e282013-11-14 14:31:50 -080059 __free_page(page);
Kirill A. Shutemov8f431232013-11-14 14:31:48 -080060 return NULL;
61 }
Martin Schwidefsky2f569af2008-02-08 04:22:04 -080062 return page;
Chris Zankel66569202007-08-22 10:14:51 -070063}
64
Benjamin Herrenschmidt5e541972008-02-04 22:29:14 -080065static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
Chris Zankel66569202007-08-22 10:14:51 -070066{
Kirill A. Shutemovf820e282013-11-14 14:31:50 -080067 free_page((unsigned long)pte);
Chris Zankel66569202007-08-22 10:14:51 -070068}
69
Martin Schwidefsky2f569af2008-02-08 04:22:04 -080070static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
Chris Zankel66569202007-08-22 10:14:51 -070071{
Martin Schwidefsky2f569af2008-02-08 04:22:04 -080072 pgtable_page_dtor(pte);
Kirill A. Shutemovf820e282013-11-14 14:31:50 -080073 __free_page(pte);
Chris Zankel66569202007-08-22 10:14:51 -070074}
/* (removed: duplicate definition of pmd_pgtable(); it is already defined earlier in this header) */
Chris Zankel9a8fd552005-06-23 22:01:26 -070076
Chris Zankel9a8fd552005-06-23 22:01:26 -070077#endif /* _XTENSA_PGALLOC_H */