// SPDX-License-Identifier: GPL-2.0
/*
 * This file implements KASLR memory randomization for x86_64. It
 * randomizes the virtual address space of kernel memory regions
 * (physical memory mapping, vmalloc & vmemmap). This security feature
 * mitigates exploits relying on predictable kernel addresses.
 *
 * Entropy is generated using the KASLR early boot functions now shared
 * in the lib directory (originally written by Kees Cook). Randomization
 * is done on the PGD & P4D/PUD page table levels to increase the number
 * of possible addresses. The physical memory mapping code was adapted
 * to support P4D/PUD level virtual addresses. On the best configuration
 * this implementation provides about 30,000 possible virtual addresses
 * on average for each memory region. An additional low memory page is
 * used to ensure each CPU can start with a PGD aligned virtual address
 * (for realmode).
 *
 * The order of the memory regions is not changed. The feature looks at
 * the available space for the regions based on different configuration
 * options and randomizes the base and space between each. The size of
 * the physical memory mapping is the available physical memory.
 */
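
/*
 * Back-of-the-envelope illustration of the figure above (indicative
 * numbers only, the real values depend on the configuration): the
 * randomization window is vaddr_end - vaddr_start minus the combined
 * fixed size of the regions, and each base is PUD (1 GiB, 4-level) or
 * P4D (512 GiB, 5-level) aligned. A few dozen TiB of slack at 1 GiB
 * granularity gives tens of thousands of possible bases to spread
 * across the three regions.
 */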

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/kaslr.h>

#include "mm_internal.h"

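/* 1UL << TB_SHIFT is one terabyte (2^40 bytes) */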
#define TB_SHIFT 40

/*
 * Virtual address start and end range for randomization.
 *
 * The end address could depend on more configuration options to make
 * the highest amount of space available for randomization, but that is
 * too hard to keep straight and has already caused issues.
 */
static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;

/*
 * Memory regions randomized by KASLR (except modules, which use
 * separate logic earlier during boot). The list is ordered based on
 * virtual addresses. This order is kept after randomization.
 */
static __initdata struct kaslr_memory_region {
        unsigned long *base;
        unsigned long size_tb;
} kaslr_regions[] = {
        { &page_offset_base, 0 },
        { &vmalloc_base, VMALLOC_SIZE_TB },
        { &vmemmap_base, 1 },
};
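
/*
 * Note: the size of the physical memory mapping region
 * (kaslr_regions[0]) is left zero above; it is computed at runtime in
 * kernel_randomize_memory() from __PHYSICAL_MASK_SHIFT and then capped
 * to the available memory plus the configured padding.
 */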

/* Get size in bytes used by the memory region */
static inline unsigned long get_padding(struct kaslr_memory_region *region)
{
        return (region->size_tb << TB_SHIFT);
}

/*
 * Apply no randomization if KASLR was disabled at boot or if KASAN
 * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
 */
static inline bool kaslr_memory_enabled(void)
{
        return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
}

/* Initialize base and padding for each memory region randomized with KASLR */
void __init kernel_randomize_memory(void)
{
        size_t i;
        unsigned long vaddr = vaddr_start;
        unsigned long rand, memory_tb;
        struct rnd_state rand_state;
        unsigned long remain_entropy;

        /*
         * These BUILD_BUG_ON checks ensure the memory layout is consistent
         * with the vaddr_start/vaddr_end variables. These checks are very
         * limited....
         */
        BUILD_BUG_ON(vaddr_start >= vaddr_end);
        BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
        BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);

        if (!kaslr_memory_enabled())
                return;

        kaslr_regions[0].size_tb = 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT);
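
        /*
         * Illustration (hypothetical numbers): with 46 significant physical
         * address bits this evaluates to 1 << (46 - 40) = 64 TiB for the
         * maximum size of the physical mapping region; the actual value of
         * __PHYSICAL_MASK_SHIFT depends on the paging mode.
         */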

        /*
         * Update the physical memory mapping size to the available memory
         * and add padding if needed (especially for memory hotplug support).
         */
        BUG_ON(kaslr_regions[0].base != &page_offset_base);
        memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
                CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
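
        /*
         * Illustrative arithmetic (hypothetical values): with 5 TiB of RAM
         * and a padding of 10, memory_tb = DIV_ROUND_UP(5 TiB, 1 TiB) + 10
         * = 15, i.e. the mapping is sized in whole terabytes, rounded up,
         * plus the configured hotplug padding.
         */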

        /* Adapt physical memory region size based on available memory */
        if (memory_tb < kaslr_regions[0].size_tb)
                kaslr_regions[0].size_tb = memory_tb;

        /* Calculate entropy available between regions */
        remain_entropy = vaddr_end - vaddr_start;
        for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
                remain_entropy -= get_padding(&kaslr_regions[i]);

        prandom_seed_state(&rand_state, kaslr_get_random_long("Memory"));
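
        /*
         * The remaining slack is divided evenly among the regions still to
         * be placed: with e.g. 30 TiB left and 3 regions, the first base
         * may shift by up to 10 TiB; whatever it does not consume is
         * redistributed to the remaining two. (Numbers are only an
         * illustration of the scheme below.)
         */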
        for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) {
                unsigned long entropy;

                /*
                 * Select a random virtual address using the extra entropy
                 * available.
                 */
                entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
                prandom_bytes_state(&rand_state, &rand, sizeof(rand));
                if (IS_ENABLED(CONFIG_X86_5LEVEL))
                        entropy = (rand % (entropy + 1)) & P4D_MASK;
                else
                        entropy = (rand % (entropy + 1)) & PUD_MASK;
                vaddr += entropy;
                *kaslr_regions[i].base = vaddr;

                /*
                 * Jump over the region and add a minimum padding based on
                 * the randomization alignment.
                 */
                vaddr += get_padding(&kaslr_regions[i]);
                if (IS_ENABLED(CONFIG_X86_5LEVEL))
                        vaddr = round_up(vaddr + 1, P4D_SIZE);
                else
                        vaddr = round_up(vaddr + 1, PUD_SIZE);
                remain_entropy -= entropy;
        }
}

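/*
 * Copy the PUD level of the kernel's direct mapping of physical address 0
 * into a freshly allocated low page and hook it into trampoline_pgd_entry,
 * giving the real-mode trampoline a PGD aligned mapping (4-level paging
 * variant).
 */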
static void __meminit init_trampoline_pud(void)
{
        unsigned long paddr, paddr_next;
        pgd_t *pgd;
        pud_t *pud_page, *pud_page_tramp;
        int i;

        pud_page_tramp = alloc_low_page();

        paddr = 0;
        pgd = pgd_offset_k((unsigned long)__va(paddr));
        pud_page = (pud_t *) pgd_page_vaddr(*pgd);

        for (i = pud_index(paddr); i < PTRS_PER_PUD; i++, paddr = paddr_next) {
                pud_t *pud, *pud_tramp;
                unsigned long vaddr = (unsigned long)__va(paddr);

                pud_tramp = pud_page_tramp + pud_index(paddr);
                pud = pud_page + pud_index(vaddr);
                paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

                *pud_tramp = *pud;
        }

        set_pgd(&trampoline_pgd_entry,
                __pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
}

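/*
 * Same as init_trampoline_pud(), but one level up: with 5-level paging
 * the copy is done at the P4D level.
 */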
static void __meminit init_trampoline_p4d(void)
{
        unsigned long paddr, paddr_next;
        pgd_t *pgd;
        p4d_t *p4d_page, *p4d_page_tramp;
        int i;

        p4d_page_tramp = alloc_low_page();

        paddr = 0;
        pgd = pgd_offset_k((unsigned long)__va(paddr));
        p4d_page = (p4d_t *) pgd_page_vaddr(*pgd);

        for (i = p4d_index(paddr); i < PTRS_PER_P4D; i++, paddr = paddr_next) {
                p4d_t *p4d, *p4d_tramp;
                unsigned long vaddr = (unsigned long)__va(paddr);

                p4d_tramp = p4d_page_tramp + p4d_index(paddr);
                p4d = p4d_page + p4d_index(vaddr);
                paddr_next = (paddr & P4D_MASK) + P4D_SIZE;

                *p4d_tramp = *p4d;
        }

        set_pgd(&trampoline_pgd_entry,
                __pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
}

/*
 * Create a PGD aligned trampoline table to allow real mode initialization
 * of additional CPUs. Consumes only one low memory page.
 */
void __meminit init_trampoline(void)
{
        if (!kaslr_memory_enabled()) {
                init_trampoline_default();
                return;
        }

        if (IS_ENABLED(CONFIG_X86_5LEVEL))
                init_trampoline_p4d();
        else
                init_trampoline_pud();
}