blob: b0bb76a6864fce97a979335b1bba0e628eacb155 [file] [log] [blame]
Paul Mundt26ff6c12006-09-27 15:13:36 +09001/*
2 * This file contains the functions and defines necessary to modify and
3 * use the SuperH page table tree.
4 *
5 * Copyright (C) 1999 Niibe Yutaka
6 * Copyright (C) 2002 - 2005 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file "COPYING" in the main directory of this
10 * archive for more details.
11 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070012#ifndef __ASM_SH_PGTABLE_H
13#define __ASM_SH_PGTABLE_H
14
Paul Mundt26ff6c12006-09-27 15:13:36 +090015#include <asm-generic/pgtable-nopmd.h>
16#include <asm/page.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070017
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#ifndef __ASSEMBLY__
Linus Torvalds1da177e2005-04-16 15:20:36 -070019#include <asm/addrspace.h>
20#include <asm/fixmap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021
Linus Torvalds1da177e2005-04-16 15:20:36 -070022/*
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 * ZERO_PAGE is a global shared page that is always zero: used
24 * for zero-mapped memory areas etc..
25 */
Paul Mundt26ff6c12006-09-27 15:13:36 +090026extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
Linus Torvalds1da177e2005-04-16 15:20:36 -070027#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
28
29#endif /* !__ASSEMBLY__ */
30
/*
 * Effective and physical address definitions, to aid with sign
 * extension.
 */
#define NEFF		32
#define	NEFF_SIGN	(1LL << (NEFF - 1))
#define	NEFF_MASK	(-1LL << NEFF)

#ifdef CONFIG_29BIT
#define NPHYS		29
#else
#define NPHYS		32
#endif

#define	NPHYS_SIGN	(1LL << (NPHYS - 1))
#define	NPHYS_MASK	(-1LL << NPHYS)

/*
 * traditional two-level paging structure
 */
/* PTE bits */
#ifdef CONFIG_X2TLB
# define PTE_MAGNITUDE	3	/* 64-bit PTEs on extended mode SH-X2 TLB */
#else
# define PTE_MAGNITUDE	2	/* 32-bit PTEs */
#endif
#define PTE_SHIFT	PAGE_SHIFT
#define PTE_BITS	(PTE_SHIFT - PTE_MAGNITUDE)

/* PGD bits */
#define PGDIR_SHIFT	(PTE_SHIFT + PTE_BITS)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/* Entries per level */
#define PTRS_PER_PTE	(PAGE_SIZE / (1 << PTE_MAGNITUDE))
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))

#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#define PTE_PHYS_MASK		(0x20000000 - PAGE_SIZE)

/* vmalloc range: P3 segment on 32-bit parts, fixed base otherwise */
#ifdef CONFIG_SUPERH32
#define VMALLOC_START	(P3SEG)
#else
#define VMALLOC_START	(0xf0000000)
#endif
#define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
80
/*
 * Linux PTEL encoding.
 *
 * Hardware and software bit definitions for the PTEL value (see below for
 * notes on SH-X2 MMUs and 64-bit PTEs):
 *
 * - Bits 0 and 7 are reserved on SH-3 (_PAGE_WT and _PAGE_SZ1 on SH-4).
 *
 * - Bit 1 is the SH-bit, but is unused on SH-3 due to an MMU bug (the
 *   hardware PTEL value can't have the SH-bit set when MMUCR.IX is set,
 *   which is the default in cpu-sh3/mmu_context.h:MMU_CONTROL_INIT).
 *
 *   In order to keep this relatively clean, do not use these for defining
 *   SH-3 specific flags until all of the other unused bits have been
 *   exhausted.
 *
 * - Bit 9 is reserved by everyone and used by _PAGE_PROTNONE.
 *
 * - Bits 10 and 11 are low bits of the PPN that are reserved on >= 4K pages.
 *   Bit 10 is used for _PAGE_ACCESSED, bit 11 remains unused.
 *
 * - Bits 31, 30, and 29 remain unused by everyone and can be used for future
 *   software flags, although care must be taken to update _PAGE_CLEAR_FLAGS.
 *
 * XXX: Leave the _PAGE_FILE and _PAGE_WT overhaul for a rainy day.
 *
 * SH-X2 MMUs and extended PTEs
 *
 * SH-X2 supports an extended mode TLB with split data arrays due to the
 * number of bits needed for PR and SZ (now EPR and ESZ) encodings. The PR and
 * SZ bit placeholders still exist in data array 1, but are implemented as
 * reserved bits, with the real logic existing in data array 2.
 *
 * The downside to this is that we can no longer fit everything in to a 32-bit
 * PTE encoding, so a 64-bit pte_t is necessary for these parts. On the plus
 * side, this gives us quite a few spare bits to play with for future usage.
 */
/* Legacy and compat mode bits */
#define	_PAGE_WT	0x001		/* WT-bit on SH-4, 0 on SH-3 */
#define _PAGE_HW_SHARED	0x002		/* SH-bit  : shared among processes */
#define _PAGE_DIRTY	0x004		/* D-bit   : page changed */
#define _PAGE_CACHABLE	0x008		/* C-bit   : cachable */
#define _PAGE_SZ0	0x010		/* SZ0-bit : Size of page */
#define _PAGE_RW	0x020		/* PR0-bit : write access allowed */
#define _PAGE_USER	0x040		/* PR1-bit : user space access allowed*/
#define _PAGE_SZ1	0x080		/* SZ1-bit : Size of page (on SH-4) */
#define _PAGE_PRESENT	0x100		/* V-bit   : page is valid */
#define _PAGE_PROTNONE	0x200		/* software: if not present  */
#define _PAGE_ACCESSED	0x400		/* software: page referenced */
#define _PAGE_FILE	_PAGE_WT	/* software: pagecache or swap? */

#define _PAGE_SZ_MASK	(_PAGE_SZ0 | _PAGE_SZ1)
#define _PAGE_PR_MASK	(_PAGE_RW | _PAGE_USER)

/* Extended mode bits (live in the upper 32 bits of a 64-bit pte_t) */
#define _PAGE_EXT_ESZ0		0x0010	/* ESZ0-bit: Size of page */
#define _PAGE_EXT_ESZ1		0x0020	/* ESZ1-bit: Size of page */
#define _PAGE_EXT_ESZ2		0x0040	/* ESZ2-bit: Size of page */
#define _PAGE_EXT_ESZ3		0x0080	/* ESZ3-bit: Size of page */

#define _PAGE_EXT_USER_EXEC	0x0100	/* EPR0-bit: User space executable */
#define _PAGE_EXT_USER_WRITE	0x0200	/* EPR1-bit: User space writable */
#define _PAGE_EXT_USER_READ	0x0400	/* EPR2-bit: User space readable */

#define _PAGE_EXT_KERN_EXEC	0x0800	/* EPR3-bit: Kernel space executable */
#define _PAGE_EXT_KERN_WRITE	0x1000	/* EPR4-bit: Kernel space writable */
#define _PAGE_EXT_KERN_READ	0x2000	/* EPR5-bit: Kernel space readable */

/* Wrapper for extended mode pgprot twiddling */
#define _PAGE_EXT(x)		((unsigned long long)(x) << 32)

/* software: moves to PTEA.TC (Timing Control) */
#define _PAGE_PCC_AREA5	0x00000000	/* use BSC registers for area5 */
#define _PAGE_PCC_AREA6	0x80000000	/* use BSC registers for area6 */

/* software: moves to PTEA.SA[2:0] (Space Attributes) */
#define _PAGE_PCC_IODYN 0x00000001	/* IO space, dynamically sized bus */
#define _PAGE_PCC_IO8	0x20000000	/* IO space, 8 bit bus */
#define _PAGE_PCC_IO16	0x20000001	/* IO space, 16 bit bus */
#define _PAGE_PCC_COM8	0x40000000	/* Common Memory space, 8 bit bus */
#define _PAGE_PCC_COM16	0x40000001	/* Common Memory space, 16 bit bus */
#define _PAGE_PCC_ATR8	0x60000000	/* Attribute Memory space, 8 bit bus */
#define _PAGE_PCC_ATR16	0x60000001	/* Attribute Memory space, 16 bit bus */
164
/* Mask which drops unused bits from the PTEL value */
#if defined(CONFIG_CPU_SH3)
#define _PAGE_CLEAR_FLAGS	(_PAGE_PROTNONE | _PAGE_ACCESSED| \
				 _PAGE_FILE	| _PAGE_SZ1	| \
				 _PAGE_HW_SHARED)
#elif defined(CONFIG_X2TLB)
/* Get rid of the legacy PR/SZ bits when using extended mode */
#define _PAGE_CLEAR_FLAGS	(_PAGE_PROTNONE | _PAGE_ACCESSED | \
				 _PAGE_FILE | _PAGE_PR_MASK | _PAGE_SZ_MASK)
#else
#define _PAGE_CLEAR_FLAGS	(_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE)
#endif

#define _PAGE_FLAGS_HARDWARE_MASK	(0x1fffffff & ~(_PAGE_CLEAR_FLAGS))

/* Hardware flags, page size encoding */
#if defined(CONFIG_X2TLB)
# if defined(CONFIG_PAGE_SIZE_4KB)
#  define _PAGE_FLAGS_HARD	_PAGE_EXT(_PAGE_EXT_ESZ0)
# elif defined(CONFIG_PAGE_SIZE_8KB)
#  define _PAGE_FLAGS_HARD	_PAGE_EXT(_PAGE_EXT_ESZ1)
# elif defined(CONFIG_PAGE_SIZE_64KB)
#  define _PAGE_FLAGS_HARD	_PAGE_EXT(_PAGE_EXT_ESZ2)
# endif
#else
# if defined(CONFIG_PAGE_SIZE_4KB)
#  define _PAGE_FLAGS_HARD	_PAGE_SZ0
# elif defined(CONFIG_PAGE_SIZE_64KB)
#  define _PAGE_FLAGS_HARD	_PAGE_SZ1
# endif
#endif

/* Huge page size encoding, per TLB flavour */
#if defined(CONFIG_X2TLB)
# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#  define _PAGE_SZHUGE	(_PAGE_EXT_ESZ2)
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
#  define _PAGE_SZHUGE	(_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ2)
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#  define _PAGE_SZHUGE	(_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ1 | _PAGE_EXT_ESZ2)
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#  define _PAGE_SZHUGE	(_PAGE_EXT_ESZ3)
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
#  define _PAGE_SZHUGE	(_PAGE_EXT_ESZ2 | _PAGE_EXT_ESZ3)
# endif
#else
# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#  define _PAGE_SZHUGE	(_PAGE_SZ1)
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#  define _PAGE_SZHUGE	(_PAGE_SZ0 | _PAGE_SZ1)
# endif
#endif

/*
 * Stub out _PAGE_SZHUGE if we don't have a good definition for it,
 * to make pte_mkhuge() happy.
 */
#ifndef _PAGE_SZHUGE
# define _PAGE_SZHUGE	(_PAGE_FLAGS_HARD)
#endif

/* Bits preserved across pte_modify() */
#define _PAGE_CHG_MASK \
	(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227
#ifndef __ASSEMBLY__

/*
 * Protection value definitions, one set per TLB flavour:
 *
 *   - SH-X2 extended mode TLB (64-bit PTEs, split user/kernel permissions)
 *   - classic SH-X TLB (32-bit PTEs, combined PR bits)
 *   - no MMU (all protections collapse to 0)
 */
#if defined(CONFIG_X2TLB) /* SH-X2 TLB */
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
				 _PAGE_EXT(_PAGE_EXT_KERN_READ  | \
					   _PAGE_EXT_KERN_WRITE | \
					   _PAGE_EXT_USER_READ  | \
					   _PAGE_EXT_USER_WRITE))

#define PAGE_EXECREAD	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
				 _PAGE_EXT(_PAGE_EXT_KERN_EXEC | \
					   _PAGE_EXT_KERN_READ | \
					   _PAGE_EXT_USER_EXEC | \
					   _PAGE_EXT_USER_READ))

#define PAGE_COPY	PAGE_EXECREAD

#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
				 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
					   _PAGE_EXT_USER_READ))

#define PAGE_WRITEONLY	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
				 _PAGE_EXT(_PAGE_EXT_KERN_WRITE | \
					   _PAGE_EXT_USER_WRITE))

#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
				 _PAGE_EXT(_PAGE_EXT_KERN_WRITE | \
					   _PAGE_EXT_KERN_READ  | \
					   _PAGE_EXT_KERN_EXEC  | \
					   _PAGE_EXT_USER_WRITE | \
					   _PAGE_EXT_USER_READ  | \
					   _PAGE_EXT_USER_EXEC))

#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
				 _PAGE_DIRTY | _PAGE_ACCESSED | \
				 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
				 _PAGE_EXT(_PAGE_EXT_KERN_READ  | \
					   _PAGE_EXT_KERN_WRITE | \
					   _PAGE_EXT_KERN_EXEC))

#define PAGE_KERNEL_NOCACHE \
			__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
				 _PAGE_ACCESSED | _PAGE_HW_SHARED | \
				 _PAGE_FLAGS_HARD | \
				 _PAGE_EXT(_PAGE_EXT_KERN_READ  | \
					   _PAGE_EXT_KERN_WRITE | \
					   _PAGE_EXT_KERN_EXEC))

#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
				 _PAGE_DIRTY | _PAGE_ACCESSED | \
				 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
				 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
					   _PAGE_EXT_KERN_EXEC))

/*
 * Fix: the _PAGE_EXT() term must be OR-ed with the PCC area/type bits;
 * the '|' was previously missing, making any expansion a syntax error.
 */
#define PAGE_KERNEL_PCC(slot, type) \
			__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
				 _PAGE_EXT(_PAGE_EXT_KERN_READ  | \
					   _PAGE_EXT_KERN_WRITE | \
					   _PAGE_EXT_KERN_EXEC) | \
				 (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \
				 (type))

#elif defined(CONFIG_MMU) /* SH-X TLB */
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
				 _PAGE_CACHABLE | _PAGE_ACCESSED | \
				 _PAGE_FLAGS_HARD)

#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \
				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)

#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \
				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)

/* no execute/write granularity on legacy parts; alias the closest match */
#define PAGE_EXECREAD	PAGE_READONLY
#define PAGE_RWX	PAGE_SHARED
#define PAGE_WRITEONLY	PAGE_SHARED

#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | \
				 _PAGE_DIRTY | _PAGE_ACCESSED | \
				 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)

#define PAGE_KERNEL_NOCACHE \
			__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
				 _PAGE_ACCESSED | _PAGE_HW_SHARED | \
				 _PAGE_FLAGS_HARD)

#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
				 _PAGE_DIRTY | _PAGE_ACCESSED | \
				 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)

#define PAGE_KERNEL_PCC(slot, type) \
			__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
				 (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \
				 (type))
#else /* no mmu */
#define PAGE_NONE		__pgprot(0)
#define PAGE_SHARED		__pgprot(0)
#define PAGE_COPY		__pgprot(0)
#define PAGE_EXECREAD		__pgprot(0)
#define PAGE_RWX		__pgprot(0)
#define PAGE_READONLY		__pgprot(0)
#define PAGE_WRITEONLY		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define PAGE_KERNEL_NOCACHE	__pgprot(0)
#define PAGE_KERNEL_RO		__pgprot(0)

#define PAGE_KERNEL_PCC(slot, type) \
				__pgprot(0)
#endif

#endif /* __ASSEMBLY__ */
352
/*
 * SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page
 * protection for execute, and considers it the same as a read. Also, write
 * permission implies read permission. This is the closest we can get..
 *
 * SH-X2 (SH7785) and later parts take this to the opposite end of the extreme,
 * not only supporting separate execute, read, and write bits, but having
 * completely separate permission bits for user and kernel space.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_EXECREAD
#define __P101	PAGE_EXECREAD
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_WRITEONLY
#define __S011	PAGE_SHARED
#define __S100	PAGE_EXECREAD
#define __S101	PAGE_EXECREAD
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX
Linus Torvalds1da177e2005-04-16 15:20:36 -0700380
Paul Mundt26ff6c12006-09-27 15:13:36 +0900381#ifndef __ASSEMBLY__
382
383/*
384 * Certain architectures need to do special things when PTEs
385 * within a page table are directly modified. Thus, the following
386 * hook is made available.
387 */
Paul Mundt21440cf2006-11-20 14:30:26 +0900388#ifdef CONFIG_X2TLB
389static inline void set_pte(pte_t *ptep, pte_t pte)
390{
391 ptep->pte_high = pte.pte_high;
392 smp_wmb();
393 ptep->pte_low = pte.pte_low;
394}
395#else
Paul Mundt26ff6c12006-09-27 15:13:36 +0900396#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
Paul Mundt21440cf2006-11-20 14:30:26 +0900397#endif
398
Paul Mundt26ff6c12006-09-27 15:13:36 +0900399#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
400
401/*
402 * (pmds are folded into pgds so this doesn't get actually called,
403 * but the define is needed for a generic inline function.)
404 */
405#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
406
Paul Mundt21440cf2006-11-20 14:30:26 +0900407#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
Paul Mundt26ff6c12006-09-27 15:13:36 +0900408
Paul Mundtd04a0f72007-09-21 11:55:03 +0900409#define pfn_pte(pfn, prot) \
410 __pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
411#define pfn_pmd(pfn, prot) \
412 __pmd(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
413
414#define pte_none(x) (!pte_val(x))
415#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
416
Paul Mundt21440cf2006-11-20 14:30:26 +0900417#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418
419#define pmd_none(x) (!pmd_val(x))
Stuart Menefy99a596f2006-11-21 15:38:05 +0900420#define pmd_present(x) (pmd_val(x))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700421#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
Stuart Menefy99a596f2006-11-21 15:38:05 +0900422#define pmd_bad(x) (pmd_val(x) & ~PAGE_MASK)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423
424#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
Paul Mundtafca0352007-10-15 11:01:33 +0900425#define pte_page(x) pfn_to_page(pte_pfn(x))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700426
427/*
428 * The following only work if pte_present() is true.
429 * Undefined behaviour if not..
430 */
Paul Mundtd04a0f72007-09-21 11:55:03 +0900431#define pte_not_present(pte) (!((pte).pte_low & _PAGE_PRESENT))
432#define pte_dirty(pte) ((pte).pte_low & _PAGE_DIRTY)
433#define pte_young(pte) ((pte).pte_low & _PAGE_ACCESSED)
434#define pte_file(pte) ((pte).pte_low & _PAGE_FILE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700435
Paul Mundt21440cf2006-11-20 14:30:26 +0900436#ifdef CONFIG_X2TLB
Paul Mundt21440cf2006-11-20 14:30:26 +0900437#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE)
438#else
Paul Mundtd04a0f72007-09-21 11:55:03 +0900439#define pte_write(pte) ((pte).pte_low & _PAGE_RW)
Paul Mundtd2294012005-11-07 00:58:23 -0800440#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700441
Paul Mundt21440cf2006-11-20 14:30:26 +0900442#define PTE_BIT_FUNC(h,fn,op) \
443static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }
444
445#ifdef CONFIG_X2TLB
446/*
447 * We cheat a bit in the SH-X2 TLB case. As the permission bits are
448 * individually toggled (and user permissions are entirely decoupled from
449 * kernel permissions), we attempt to couple them a bit more sanely here.
450 */
Paul Mundt21440cf2006-11-20 14:30:26 +0900451PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE);
452PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
Paul Mundt21440cf2006-11-20 14:30:26 +0900453PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE);
454#else
Paul Mundt21440cf2006-11-20 14:30:26 +0900455PTE_BIT_FUNC(low, wrprotect, &= ~_PAGE_RW);
456PTE_BIT_FUNC(low, mkwrite, |= _PAGE_RW);
Paul Mundt21440cf2006-11-20 14:30:26 +0900457PTE_BIT_FUNC(low, mkhuge, |= _PAGE_SZHUGE);
458#endif
459
460PTE_BIT_FUNC(low, mkclean, &= ~_PAGE_DIRTY);
461PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY);
462PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED);
463PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED);
464
Linus Torvalds1da177e2005-04-16 15:20:36 -0700465/*
466 * Macro and implementation to make a page protection as uncachable.
467 */
Paul Mundtd04a0f72007-09-21 11:55:03 +0900468#define pgprot_writecombine(prot) \
469 __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700470
Paul Mundtd04a0f72007-09-21 11:55:03 +0900471#define pgprot_noncached pgprot_writecombine
Linus Torvalds1da177e2005-04-16 15:20:36 -0700472
473/*
474 * Conversion functions: convert a page and protection to a page entry,
475 * and a page entry and page directory to the page they refer to.
476 *
477 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
478 */
479#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
480
481static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
Paul Mundt21440cf2006-11-20 14:30:26 +0900482{
Paul Mundtd04a0f72007-09-21 11:55:03 +0900483 pte.pte_low &= _PAGE_CHG_MASK;
484 pte.pte_low |= pgprot_val(newprot);
485
486#ifdef CONFIG_X2TLB
487 pte.pte_high |= pgprot_val(newprot) >> 32;
488#endif
489
Paul Mundt21440cf2006-11-20 14:30:26 +0900490 return pte;
491}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700492
Paul Mundtd04a0f72007-09-21 11:55:03 +0900493#define pmd_page_vaddr(pmd) ((unsigned long)pmd_val(pmd))
Stuart Menefy99a596f2006-11-21 15:38:05 +0900494#define pmd_page(pmd) (virt_to_page(pmd_val(pmd)))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700495
496/* to find an entry in a page-table-directory. */
Paul Mundtd04a0f72007-09-21 11:55:03 +0900497#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
498#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700499
500/* to find an entry in a kernel page-table-directory */
Paul Mundtd04a0f72007-09-21 11:55:03 +0900501#define pgd_offset_k(address) pgd_offset(&init_mm, address)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700502
503/* Find an entry in the third-level page table.. */
Paul Mundtd04a0f72007-09-21 11:55:03 +0900504#define pte_index(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700505#define pte_offset_kernel(dir, address) \
Dave McCracken46a82b22006-09-25 23:31:48 -0700506 ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
Paul Mundtd04a0f72007-09-21 11:55:03 +0900507#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
508#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
509
Linus Torvalds1da177e2005-04-16 15:20:36 -0700510#define pte_unmap(pte) do { } while (0)
511#define pte_unmap_nested(pte) do { } while (0)
512
Paul Mundt21440cf2006-11-20 14:30:26 +0900513#ifdef CONFIG_X2TLB
514#define pte_ERROR(e) \
515 printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, \
516 &(e), (e).pte_high, (e).pte_low)
Paul Mundtd04a0f72007-09-21 11:55:03 +0900517#define pgd_ERROR(e) \
518 printk("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))
Paul Mundt21440cf2006-11-20 14:30:26 +0900519#else
Paul Mundt26ff6c12006-09-27 15:13:36 +0900520#define pte_ERROR(e) \
521 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
522#define pgd_ERROR(e) \
523 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
Paul Mundtd04a0f72007-09-21 11:55:03 +0900524#endif
Paul Mundt26ff6c12006-09-27 15:13:36 +0900525
Linus Torvalds1da177e2005-04-16 15:20:36 -0700526struct vm_area_struct;
527extern void update_mmu_cache(struct vm_area_struct * vma,
528 unsigned long address, pte_t pte);
529
/*
 * Encode and de-code a swap entry
 *
 * Constraints:
 *	_PAGE_FILE at bit 0
 *	_PAGE_PRESENT at bit 8
 *	_PAGE_PROTNONE at bit 9
 *
 * For the normal case, we encode the swap type into bits 0:7 and the
 * swap offset into bits 10:30. For the 64-bit PTE case, we keep the
 * preserved bits in the low 32-bits and use the upper 32 as the swap
 * offset (along with a 5-bit type), following the same approach as x86
 * PAE. This keeps the logic quite simple, and allows for a full 32
 * PTE_FILE_MAX_BITS, as opposed to the 29-bits we're constrained with
 * in the pte_low case.
 *
 * As is evident by the Alpha code, if we ever get a 64-bit unsigned
 * long (swp_entry_t) to match up with the 64-bit PTEs, this all becomes
 * much cleaner..
 *
 * NOTE: We should set ZEROs at the position of _PAGE_PRESENT
 *       and _PAGE_PROTNONE bits
 */
#ifdef CONFIG_X2TLB
#define __swp_type(x)			((x).val & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){ (type) | (offset) << 5})
#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t){ 0, (x).val })

/*
 * Encode and decode a nonlinear file mapping entry
 */
#define pte_to_pgoff(pte)		((pte).pte_high)
#define pgoff_to_pte(off)		((pte_t) { _PAGE_FILE, (off) })

#define PTE_FILE_MAX_BITS		32
#else
#define __swp_type(x)			((x).val & 0xff)
#define __swp_offset(x)			((x).val >> 10)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) <<10})

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 1 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 1 })

/*
 * Encode and decode a nonlinear file mapping entry
 */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 1)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 1) | _PAGE_FILE })
#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700582
583typedef pte_t *pte_addr_t;
584
Linus Torvalds1da177e2005-04-16 15:20:36 -0700585#define kern_addr_valid(addr) (1)
586
Linus Torvalds1da177e2005-04-16 15:20:36 -0700587#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
588 remap_pfn_range(vma, vaddr, pfn, size, prot)
589
Tim Schmielau8c65b4a2005-11-07 00:59:43 -0800590struct mm_struct;
591
Linus Torvalds1da177e2005-04-16 15:20:36 -0700592/*
593 * No page table caches to initialise
594 */
595#define pgtable_cache_init() do { } while (0)
596
597#ifndef CONFIG_MMU
598extern unsigned int kobjsize(const void *objp);
599#endif /* !CONFIG_MMU */
600
Paul Mundte7bd34a2007-07-31 17:07:28 +0900601#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
602 defined(CONFIG_SH7705_CACHE_32KB))
Paul Mundt39e688a2007-03-05 19:46:47 +0900603#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
604extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
605#endif
606
Paul Mundt21440cf2006-11-20 14:30:26 +0900607extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
608extern void paging_init(void);
609
Linus Torvalds1da177e2005-04-16 15:20:36 -0700610#include <asm-generic/pgtable.h>
611
Paul Mundt26ff6c12006-09-27 15:13:36 +0900612#endif /* !__ASSEMBLY__ */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700613#endif /* __ASM_SH_PAGE_H */