/*
 * machine_kexec.c - handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/apic.h>
#include <asm/cpufeature.h>
#include <asm/desc.h>
#include <asm/system.h>

#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))

#define L0_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define L1_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define L2_ATTR (_PAGE_PRESENT)

#define LEVEL0_SIZE (1UL << 12UL)
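/*
 * LEVEL0_SIZE is the 4 KiB page size (1 << 12).  Without PAE a page
 * directory entry covers 4 MiB (1 << 22); with PAE a page directory
 * entry covers 2 MiB (1 << 21) and a page directory pointer entry
 * covers 1 GiB (1 << 30), which is where the LEVEL1_SIZE and
 * LEVEL2_SIZE values below come from.
 */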

#ifndef CONFIG_X86_PAE
#define LEVEL1_SIZE (1UL << 22UL)
static u32 pgtable_level1[1024] PAGE_ALIGNED;

static void identity_map_page(unsigned long address)
{
	unsigned long level1_index, level2_index;
	u32 *pgtable_level2;

	/* Find the current page table */
	pgtable_level2 = __va(read_cr3());

	/* Find the indexes of the physical address to identity map */
	level1_index = (address % LEVEL1_SIZE) / LEVEL0_SIZE;
	level2_index = address / LEVEL1_SIZE;
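	/*
	 * Worked example: for address 0x00401000,
	 * level1_index = (0x00401000 % 0x400000) / 0x1000 = 1 and
	 * level2_index = 0x00401000 / 0x400000 = 1, i.e. the second
	 * page table entry under the second page directory entry.
	 */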

	/* Identity map the page table entry */
	pgtable_level1[level1_index] = address | L0_ATTR;
	pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR;

	/* Flush the tlb so the new mapping takes effect.
	 * Global tlb entries are not flushed but that is not an issue.
	 */
	load_cr3(pgtable_level2);
}

#else
#define LEVEL1_SIZE (1UL << 21UL)
#define LEVEL2_SIZE (1UL << 30UL)
static u64 pgtable_level1[512] PAGE_ALIGNED;
static u64 pgtable_level2[512] PAGE_ALIGNED;

static void identity_map_page(unsigned long address)
{
	unsigned long level1_index, level2_index, level3_index;
	u64 *pgtable_level3;

	/* Find the current page table */
	pgtable_level3 = __va(read_cr3());

	/* Find the indexes of the physical address to identity map */
	level1_index = (address % LEVEL1_SIZE) / LEVEL0_SIZE;
	level2_index = (address % LEVEL2_SIZE) / LEVEL1_SIZE;
	level3_index = address / LEVEL2_SIZE;

	/* Identity map the page table entry */
	pgtable_level1[level1_index] = address | L0_ATTR;
	pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR;
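	/* PAE entries are 64 bits wide, so the top-level entry is
	 * written with set_64bit() to keep the store atomic on a
	 * 32-bit CPU; a torn pair of 32-bit stores could briefly
	 * expose a bogus entry to the hardware page walker.
	 */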
	set_64bit(&pgtable_level3[level3_index],
		  __pa(pgtable_level2) | L2_ATTR);

	/* Flush the tlb so the new mapping takes effect.
	 * Global tlb entries are not flushed but that is not an issue.
	 */
	load_cr3(pgtable_level3);
}
#endif

static void set_idt(void *newidt, __u16 limit)
{
	struct Xgt_desc_struct curidt;

	/* ia32 supports unaligned loads & stores */
	curidt.size = limit;
	curidt.address = (unsigned long)newidt;

	load_idt(&curidt);
}


static void set_gdt(void *newgdt, __u16 limit)
{
	struct Xgt_desc_struct curgdt;

	/* ia32 supports unaligned loads & stores */
	curgdt.size = limit;
	curgdt.address = (unsigned long)newgdt;

	load_gdt(&curgdt);
}

static void load_segments(void)
{
#define __STR(X) #X
#define STR(X) __STR(X)

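	/* The ljmp reloads %cs (and its hidden descriptor cache) from
	 * the current GDT; the moves that follow do the same for the
	 * data segment registers, so nothing depends on the old
	 * descriptor contents afterwards.
	 */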
	__asm__ __volatile__ (
		"\tljmp $"STR(__KERNEL_CS)",$1f\n"
		"\t1:\n"
		"\tmovl $"STR(__KERNEL_DS)",%%eax\n"
		"\tmovl %%eax,%%ds\n"
		"\tmovl %%eax,%%es\n"
		"\tmovl %%eax,%%fs\n"
		"\tmovl %%eax,%%gs\n"
		"\tmovl %%eax,%%ss\n"
		::: "eax", "memory");
#undef STR
#undef __STR
}

typedef asmlinkage NORET_TYPE void (*relocate_new_kernel_t)(
					unsigned long indirection_page,
					unsigned long reboot_code_buffer,
					unsigned long start_address,
					unsigned int has_pae) ATTRIB_NORET;
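/* has_pae (cpu_has_pae at the call site below) presumably tells the
 * relocation stub whether the CPU is running with PAE paging, so it
 * can be cleared along with paging on the way down.
 */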

extern const unsigned char relocate_new_kernel[];
extern void relocate_new_kernel_end(void);
extern const unsigned int relocate_new_kernel_size;

/*
 * An architecture hook called to validate the
 * proposed image and prepare the control pages
 * as needed.  The pages for KEXEC_CONTROL_CODE_SIZE
 * have been allocated, but the segments have not yet
 * been copied into the kernel.
 *
 * Do whatever setup is needed on the image and the
 * reboot code buffer to allow us to avoid allocations
 * later.
 *
 * Currently nothing.
 */
int machine_kexec_prepare(struct kimage *image)
{
	return 0;
}

/*
 * Undo anything leftover by machine_kexec_prepare
 * when an image is freed.
 */
void machine_kexec_cleanup(struct kimage *image)
{
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
NORET_TYPE void machine_kexec(struct kimage *image)
{
	unsigned long page_list;
	unsigned long reboot_code_buffer;

	relocate_new_kernel_t rnk;

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	/* Compute some offsets */
	reboot_code_buffer = page_to_pfn(image->control_code_page)
						<< PAGE_SHIFT;
	page_list = image->head;

	/* Set up an identity mapping for the reboot_code_buffer */
	identity_map_page(reboot_code_buffer);
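	/* The relocation stub disables paging partway through, so it
	 * must run at an address whose virtual and physical addresses
	 * coincide; that is what the identity mapping above provides.
	 */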

	/* copy it out */
	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
	       relocate_new_kernel_size);

	/* The segment registers are funny things, they have both a
	 * visible and an invisible part.  Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * from a descriptor table in memory.  At no other time is the
	 * descriptor table in memory accessed.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/* The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);

	/* now call it */
	rnk = (relocate_new_kernel_t) reboot_code_buffer;
	(*rnk)(page_list, reboot_code_buffer, image->start, cpu_has_pae);
}

/* crashkernel=size@addr specifies the location to reserve for
 * a crash kernel.  By reserving this memory we guarantee
 * that Linux never sets it up as a DMA target.
 * Useful for holding code to do something appropriate
 * after a kernel panic.
 */
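/* For example, booting with "crashkernel=64M@16M" reserves 64 MiB
 * starting at physical address 16 MiB for the crash kernel.
 */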
static int __init parse_crashkernel(char *arg)
{
	unsigned long size, base;
	size = memparse(arg, &arg);
	if (*arg == '@') {
		base = memparse(arg+1, &arg);
		/* FIXME: Do I want a sanity check
		 * to validate the memory range?
		 */
		crashk_res.start = base;
		crashk_res.end = base + size - 1;
	}
	return 0;
}
early_param("crashkernel", parse_crashkernel);