// SPDX-License-Identifier: GPL-2.0
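/*
 * cpu_entry_area setup: map the per-CPU entry area (entry stack, GDT,
 * TSS, exception stacks and, on Intel, the debug store) at a fixed
 * virtual address for each possible CPU.  The area is shared between
 * the user and kernel page tables so the entry code can use it before
 * switching page tables.
 */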

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>

#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/desc.h>

static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
#endif

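/*
 * Return the entry area of @cpu.  The areas are laid out back to back,
 * CPU_ENTRY_AREA_SIZE apart, starting at CPU_ENTRY_AREA_PER_CPU, so
 * e.g. CPU 2 lives at CPU_ENTRY_AREA_PER_CPU + 2 * CPU_ENTRY_AREA_SIZE.
 */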
struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);

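/*
 * Install a single PTE that maps the physical address @pa at the entry
 * area address @cea_vaddr with protections @flags.
 */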
void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;
	pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

	/*
	 * The cpu_entry_area is shared between the user and kernel
	 * page tables.  All of its ptes can safely be global.
	 * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
	 * non-present PTEs, so be careful not to set it in that
	 * case to avoid confusion.
	 */
	if (boot_cpu_has(X86_FEATURE_PGE) &&
	    (pgprot_val(flags) & _PAGE_PRESENT))
		pte = pte_set_flags(pte, _PAGE_GLOBAL);

	set_pte_vaddr(va, pte);
}

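/*
 * Map @pages pages of the per-CPU allocation @ptr into the entry area
 * starting at @cea_vaddr, one PTE per page.
 */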
static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}

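/*
 * Map the per-CPU debug store (used by Intel PEBS/BTS) into the entry
 * area and pre-populate the page tables covering the not yet allocated
 * debug store buffers.
 */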
static void __init percpu_setup_debug_store(unsigned int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
	unsigned int npages;
	void *cea;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
	npages = sizeof(struct debug_store) / PAGE_SIZE;
	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
			     PAGE_KERNEL);

	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
	/*
	 * Force the population of PMDs for not yet allocated per cpu
	 * memory like debug store buffers.
	 */
	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
	for (; npages; npages--, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);
#endif
}

#ifdef CONFIG_X86_64

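/*
 * Map one exception stack from the per-CPU exception_stacks storage
 * into the matching slot of cea->estacks.
 */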
#define cea_map_stack(name) do {					\
	npages = sizeof(estacks->name## _stack) / PAGE_SIZE;		\
	cea_map_percpu_pages(cea->estacks.name## _stack,		\
			estacks->name## _stack, npages, PAGE_KERNEL);	\
	} while (0)

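/* Map the IST exception stacks of @cpu into its entry area. */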
static void __init percpu_setup_exception_stacks(unsigned int cpu)
{
	struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
	unsigned int npages;

	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);

	per_cpu(cea_exception_stacks, cpu) = &cea->estacks;

	/*
	 * The exception stack mappings in the per-CPU area are protected
	 * by guard pages, so each stack must be mapped separately.  DB2 is
	 * not mapped; it just exists to catch triple nesting of #DB.
	 */
	cea_map_stack(DF);
	cea_map_stack(NMI);
	cea_map_stack(DB1);
	cea_map_stack(DB);
	cea_map_stack(MCE);
}
#else
static inline void percpu_setup_exception_stacks(unsigned int cpu) {}
#endif

/* Set up the cpu_entry_area mappings only once per CPU */
static void __init setup_cpu_entry_area(unsigned int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
#ifdef CONFIG_X86_64
	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On native 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy.  If the
	 * GDT is read-only, that will triple fault.  The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 *
	 * On Xen PV, the GDT must be read-only because the hypervisor
	 * requires it.
	 */
	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
		PAGE_KERNEL_RO : PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif

	cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);

	cea_map_percpu_pages(&cea->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary. Assert that we're not doing that.
	 */
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = cea;
#endif

	percpu_setup_exception_stacks(cpu);

	percpu_setup_debug_store(cpu);
}

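/*
 * On 32-bit, pre-populate the page table entries covering the whole
 * entry area region; 64-bit needs no preparation here.
 */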
static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/* Careful here: start + PMD_SIZE might wrap around */
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}

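/* Set up the entry areas for every possible CPU. */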
void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pg_dir which needs
	 * to be synchronized to initial_page_table on 32-bit.
	 */
	sync_initial_page_table();
}