/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011
 *  -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
 *   They are semantically the same although in different contexts:
 *   VALID marks that a TLB entry exists, which can only happen if PRESENT
 *  -Utilise some unused free bits to confine PTE flags to 12 bits
 *   This is a must for 4k pg-sz
 *
 * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
 *  -TLB Locking never really existed, except for initial specs
 *  -SILENT_xxx not needed for our port
 *  -Per my request, MMU V3 changes the layout of some of the bits
 *   to avoid a few shifts in TLB Miss handlers.
 *
 * vineetg: April 2010
 *  -PGD entry no longer contains any flags. If empty it is 0, otherwise has
 *   Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set( ) become simpler
 *
 * vineetg: April 2010
 *  -Switched from 8:11:13 split for page table lookup to 11:8:13
 *  -this speeds up page table allocation itself as we now have to memset 1K
 *   instead of 8K per page table.
 *  -TODO: Right now page table alloc is 8K and the rest 7K is unused;
 *   need to optimise it
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H

#include <linux/const.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>
#include <asm/mmu.h>	/* to propagate CONFIG_ARC_MMU_VER <n> */

/**************************************************************************
 * Page Table Flags
 *
 * ARC700 MMU only deals with software managed TLB entries.
 * Page Tables are purely for Linux VM's consumption and the bits below are
 * suited to that (uniqueness). Hence some are not implemented in the TLB and
 * some have different value in TLB.
 * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live
 *      in separate PD0 and PD1, which combined form a translation entry),
 *      while from the PTE perspective they are 8 and 9 respectively.
 * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
 *      (saves some bit shift ops in TLB Miss hdlrs)
 */

#if (CONFIG_ARC_MMU_VER <= 2)

#define _PAGE_ACCESSED      (1<<1)	/* Page is accessed (S) */
#define _PAGE_CACHEABLE     (1<<2)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
#define _PAGE_DIRTY         (1<<6)	/* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<7)
#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<10)	/* TLB entry is valid (H) */

#else	/* MMU v3 onwards */

#define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<1)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
#define _PAGE_ACCESSED      (1<<4)	/* Page is accessed (S) */
#define _PAGE_DIRTY         (1<<5)	/* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<6)

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_WTHRU         (1<<7)	/* Page cache mode write-thru (H) */
#endif

#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_HW_SZ         (1<<10)	/* Page Size indicator (H): 0 normal, 1 super */
#endif

#define _PAGE_SHARED_CODE   (1<<11)	/* Shared Code page with cmn vaddr
					   usable for shared TLB entries (H) */

#define _PAGE_UNUSED_BIT    (1<<12)
#endif
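
/*
 * For quick reference, the MMU v3/v4 flags above pack into PTE bits [12:0]:
 *
 *   12:UNUSED  11:SHARED_CODE  10:HW_SZ (v4)  9:PRESENT  8:GLOBAL
 *    7:WTHRU (v4)  6:SPECIAL  5:DIRTY  4:ACCESSED
 *    3:READ  2:WRITE  1:EXECUTE  0:CACHEABLE
 */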

/* vmalloc permissions */
#define _K_PAGE_PERMS  (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
			_PAGE_GLOBAL | _PAGE_PRESENT)

#ifndef CONFIG_ARC_CACHE_PAGES
#undef _PAGE_CACHEABLE
#define _PAGE_CACHEABLE 0
#endif

#ifndef _PAGE_HW_SZ
#define _PAGE_HW_SZ	0
#endif

/* Defaults for every user page */
#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)

/* More Abbreviated helpers */
#define PAGE_U_NONE     __pgprot(___DEF)
#define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
#define PAGE_U_W_R      __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
#define PAGE_U_X_R      __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
#define PAGE_U_X_W_R    __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
						       _PAGE_EXECUTE)

#define PAGE_SHARED	PAGE_U_W_R
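
/*
 * Worked example (assuming the MMU v3 bit positions and
 * CONFIG_ARC_CACHE_PAGES enabled): PAGE_U_X_R above works out to
 *
 *	_PAGE_PRESENT | _PAGE_CACHEABLE | _PAGE_READ | _PAGE_EXECUTE
 *	= (1<<9) | (1<<0) | (1<<3) | (1<<1) = 0x20b
 */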

/* While the kernel itself runs out of untranslated space, vmalloc/modules
 * use a chunk of user vaddr space - visible in all addr spaces, but kernel
 * mode only.
 * Thus Global, all-kernel-access, no-user-access, cached
 */
#define PAGE_KERNEL          __pgprot(_K_PAGE_PERMS | _PAGE_CACHEABLE)

/* ioremap */
#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)

/* Masks for actual TLB "PD"s */
#define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
#define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)

#ifdef CONFIG_ARC_HAS_PAE40
#define PTE_BITS_NON_RWX_IN_PD1	(0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
#else
#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
#endif

/**************************************************************************
 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
 *
 * Certain cases have 1:1 mapping
 * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
 * which directly corresponds to PAGE_U_X_R
 *
 * Other rules which cause the divergence from 1:1 mapping
 *
 * 1. Although ARC700 can do exclusive execute/write protection (meaning R
 *    can be tracked independently of X/W, unlike some other CPUs), still to
 *    keep things consistent with other archs:
 *      -Write implies Read:   W => R
 *      -Execute implies Read: X => R
 *
 * 2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
 *    This is to enable COW mechanism
 */
	/* xwr */
#define __P000  PAGE_U_NONE
#define __P001  PAGE_U_R
#define __P010  PAGE_U_R	/* Pvt-W => !W */
#define __P011  PAGE_U_R	/* Pvt-W => !W */
#define __P100  PAGE_U_X_R	/* X => R */
#define __P101  PAGE_U_X_R
#define __P110  PAGE_U_X_R	/* Pvt-W => !W and X => R */
#define __P111  PAGE_U_X_R	/* Pvt-W => !W */

#define __S000  PAGE_U_NONE
#define __S001  PAGE_U_R
#define __S010  PAGE_U_W_R	/* W => R */
#define __S011  PAGE_U_W_R
#define __S100  PAGE_U_X_R	/* X => R */
#define __S101  PAGE_U_X_R
#define __S110  PAGE_U_X_W_R	/* X => R */
#define __S111  PAGE_U_X_W_R
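
/*
 * These tables seed the generic protection_map[]: e.g. a MAP_PRIVATE
 * mmap() with PROT_READ|PROT_WRITE selects __P011 == PAGE_U_R, so the
 * first write faults and the COW machinery can copy the page before
 * _PAGE_WRITE is granted (via pte_mkwrite() below).
 */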

/****************************************************************
 * 2 tier (PGD:PTE) software page walker
 *
 * [31]                32 bit virtual address                 [0]
 * -------------------------------------------------------
 * |               | <------------ PGDIR_SHIFT ----------> |
 * |               |                                       |
 * | BITS_FOR_PGD  |  BITS_FOR_PTE  | <-- PAGE_SHIFT -->   |
 * -------------------------------------------------------
 *       |                  |                |
 *       |                  |                --> off in page frame
 *       |                  ---> index into Page Table
 *       ----> index into Page Directory
 *
 * In a single page size configuration, only PAGE_SHIFT is fixed
 * So both PGD and PTE sizing can be tweaked
 *  e.g. 8K page (PAGE_SHIFT 13) can have
 *  - PGDIR_SHIFT 21 -> 11:8:13 address split
 *  - PGDIR_SHIFT 24 -> 8:11:13 address split
 *
 * If Super Page is configured, PGDIR_SHIFT becomes fixed too,
 * so the sizing flexibility is gone.
 */

#if defined(CONFIG_ARC_HUGEPAGE_16M)
#define PGDIR_SHIFT	24
#elif defined(CONFIG_ARC_HUGEPAGE_2M)
#define PGDIR_SHIFT	21
#else
/*
 * Only Normal page support so "hackable" (see comment above)
 * Default value provides 11:8:13 (8K), 11:9:12 (4K)
 */
#define PGDIR_SHIFT	21
#endif

#define BITS_FOR_PTE	(PGDIR_SHIFT - PAGE_SHIFT)
#define BITS_FOR_PGD	(32 - PGDIR_SHIFT)

#define PGDIR_SIZE	_BITUL(PGDIR_SHIFT)	/* vaddr span, not PGD sz */
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define	PTRS_PER_PTE	_BITUL(BITS_FOR_PTE)
#define	PTRS_PER_PGD	_BITUL(BITS_FOR_PGD)
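
/*
 * Worked example of the split, assuming 8K pages (PAGE_SHIFT 13) and the
 * default PGDIR_SHIFT of 21 (i.e. 11:8:13), for vaddr 0x12345678:
 *
 *	pgd index: 0x12345678 >> 21          = 0x091
 *	pte index: (0x12345678 >> 13) & 0xff = 0x0a2
 *	offset   : 0x12345678 & 0x1fff       = 0x1678
 */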

/*
 * Number of entries a user land program can use.
 * TASK_SIZE is the maximum vaddr that can be used by a userland program.
 */
#define	USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/*
 * No special requirements for the lowest virtual address we permit any user
 * space mapping to be mapped at.
 */
#define FIRST_USER_ADDRESS      0UL


/****************************************************************
 * Bucket load of VM Helpers
 */

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* the zero page used for uninitialized and anonymous pages */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

#define set_pte(pteptr, pteval)	((*(pteptr)) = (pteval))
#define set_pmd(pmdptr, pmdval)	(*(pmdptr) = pmdval)

/* find the page descriptor of the Page Tbl ref by PMD entry */
#define pmd_page(pmd)		virt_to_page(pmd_val(pmd) & PAGE_MASK)

/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & PAGE_MASK)

/* In a 2 level sys, setup the PGD entry with PTE value */
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long)ptep;
}

#define pte_none(x)			(!pte_val(x))
#define pte_present(x)			(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep)	set_pte_at(mm, addr, ptep, __pte(0))

#define pmd_none(x)			(!pmd_val(x))
#define	pmd_bad(x)			((pmd_val(x) & ~PAGE_MASK))
#define pmd_present(x)			(pmd_val(x))
#define pmd_clear(xp)			do { pmd_val(*(xp)) = 0; } while (0)

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
#define pfn_pte(pfn, prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40 */
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

/*
 * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
 * and returns ptr to PTE entry corresponding to @addr
 */
#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
					 __pte_index(addr))

/* No mapping of Page Tables in high mem etc, so following same as above */
#define pte_offset_kernel(dir, addr)		pte_offset(dir, addr)
#define pte_offset_map(dir, addr)		pte_offset(dir, addr)
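
/*
 * Putting the pieces together, a bare-bones software walk of a user vaddr
 * looks roughly as below (a sketch only: no locking or validity checks,
 * and the intermediate levels are folded onto the PGD by pgtable-nopmd):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = (pmd_t *)pgd;	(levels are folded in 2-tier setup)
 *	pte_t *ptep = NULL;
 *
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd))
 *		ptep = pte_offset(pmd, addr);
 */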

/* Zoo of pte_xxx functions */
#define pte_read(pte)		(pte_val(pte) & _PAGE_READ)
#define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & _PAGE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte)	(pte_val(pte) & _PAGE_SPECIAL)

#define PTE_BIT_FUNC(fn, op) \
	static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(mknotpresent,	&= ~(_PAGE_PRESENT));
PTE_BIT_FUNC(wrprotect,	&= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite,	|= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean,	&= ~(_PAGE_DIRTY));
PTE_BIT_FUNC(mkdirty,	|= (_PAGE_DIRTY));
PTE_BIT_FUNC(mkold,	&= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung,	|= (_PAGE_ACCESSED));
PTE_BIT_FUNC(exprotect,	&= ~(_PAGE_EXECUTE));
PTE_BIT_FUNC(mkexec,	|= (_PAGE_EXECUTE));
PTE_BIT_FUNC(mkspecial,	|= (_PAGE_SPECIAL));
PTE_BIT_FUNC(mkhuge,	|= (_PAGE_HW_SZ));
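
/*
 * e.g. PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE)) above expands to:
 *
 *	static inline pte_t pte_wrprotect(pte_t pte)
 *	{
 *		pte_val(pte) &= ~(_PAGE_WRITE);
 *		return pte;
 *	}
 */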

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
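
/*
 * e.g. pte_modify(pte, PAGE_U_R) keeps the pfn bits (PAGE_MASK) and the
 * software managed ACCESSED/DIRTY/SPECIAL state, while the old RWX
 * permissions are dropped in favour of those in PAGE_U_R.
 */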

/* Macro to mark a page protection as uncacheable */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

/*
 * All kernel related VM pages are in init's mm.
 */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
#define pgd_offset(mm, addr)	(((mm)->pgd) + pgd_index(addr))

/*
 * Macro to quickly access the PGD entry, utilising the fact that some
 * arch may cache the pointer to Page Directory of "current" task
 * in a MMU register
 *
 * Thus task->mm->pgd (3 pointer dereferences, cache misses etc.) simply
 * becomes reading a register
 *
 * ********CAUTION*******:
 * Kernel code might be dealing with some mm_struct of NON "current"
 * Thus use this macro only when you are certain that "current" is current
 * e.g. when dealing with signal frame setup code etc
 */
#ifndef CONFIG_SMP
#define pgd_offset_fast(mm, addr)	\
({					\
	pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);  \
	pgd_base + pgd_index(addr);	\
})
#else
#define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)
#endif

extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep);

/* Encode swap {type,off} tuple into PTE
 * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
 * PAGE_PRESENT is zero in a PTE holding swap "identifier"
 */
#define __swp_entry(type, off)	((swp_entry_t) { \
					((type) & 0x1f) | ((off) << 13) })

/* Decode a PTE containing swap "identifier" into constituents */
#define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)

/* NOPs, to keep generic kernel happy */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
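
/*
 * e.g. __swp_entry(5, 0x100) yields val = 5 | (0x100 << 13) = 0x200005:
 * bits 12-5 are zero so pte_present() reads false, and __swp_type() and
 * __swp_offset() recover 5 and 0x100 respectively.
 */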

#define kern_addr_valid(addr)	(1)

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#include <asm/hugepage.h>
#endif

#include <asm-generic/pgtable.h>

/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()   do { } while (0)

#endif /* __ASSEMBLY__ */

#endif