/*
 * handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io.h>

#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
static u64 kexec_pgd[512] PAGE_ALIGNED;
static u64 kexec_pud0[512] PAGE_ALIGNED;
static u64 kexec_pmd0[512] PAGE_ALIGNED;
static u64 kexec_pte0[512] PAGE_ALIGNED;
static u64 kexec_pud1[512] PAGE_ALIGNED;
static u64 kexec_pmd1[512] PAGE_ALIGNED;
static u64 kexec_pte1[512] PAGE_ALIGNED;

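/*
 * Fill one pmd page with 2MB, executable, identity-mapped large-page
 * entries covering the PUD_SIZE region starting at addr.
 */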
static void init_level2_page(pmd_t *level2p, unsigned long addr)
{
	unsigned long end_addr;

	addr &= PAGE_MASK;
	end_addr = addr + PUD_SIZE;
	while (addr < end_addr) {
		set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
		addr += PMD_SIZE;
	}
}

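/*
 * Populate one pud page: allocate a pmd page from the image's control
 * pages for each PUD_SIZE slot below last_addr, fill it via
 * init_level2_page(), and clear any remaining entries.
 */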
static int init_level3_page(struct kimage *image, pud_t *level3p,
				unsigned long addr, unsigned long last_addr)
{
	unsigned long end_addr;
	int result;

	result = 0;
	addr &= PAGE_MASK;
	end_addr = addr + PGDIR_SIZE;
	while ((addr < last_addr) && (addr < end_addr)) {
		struct page *page;
		pmd_t *level2p;

		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		level2p = (pmd_t *)page_address(page);
		init_level2_page(level2p, addr);
		set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
		addr += PUD_SIZE;
	}
	/* clear the unused entries */
	while (addr < end_addr) {
		pud_clear(level3p++);
		addr += PUD_SIZE;
	}
out:
	return result;
}

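/*
 * Populate the top-level page table: allocate and fill a pud page per
 * PGDIR_SIZE slot below last_addr, clearing the unused pgd entries.
 */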
static int init_level4_page(struct kimage *image, pgd_t *level4p,
				unsigned long addr, unsigned long last_addr)
{
	unsigned long end_addr;
	int result;

	result = 0;
	addr &= PAGE_MASK;
	end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE);
	while ((addr < last_addr) && (addr < end_addr)) {
		struct page *page;
		pud_t *level3p;

		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		level3p = (pud_t *)page_address(page);
		result = init_level3_page(image, level3p, addr, last_addr);
		if (result)
			goto out;
		set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
		addr += PGDIR_SIZE;
	}
	/* clear the unused entries */
	while (addr < end_addr) {
		pgd_clear(level4p++);
		addr += PGDIR_SIZE;
	}
out:
	return result;
}

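/*
 * Build an identity mapping of all physical memory (0 .. end_pfn),
 * rooted at start_pgtable inside the image's control pages.
 */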
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
	pgd_t *level4p;
	level4p = (pgd_t *)__va(start_pgtable);
	return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
}

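/* Load the IDT register with a descriptor for newidt/limit. */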
static void set_idt(void *newidt, u16 limit)
{
	struct desc_ptr curidt;

	/* x86-64 supports unaligned loads & stores */
	curidt.size = limit;
	curidt.address = (unsigned long)newidt;

	__asm__ __volatile__ (
		"lidtq %0\n"
		: : "m" (curidt)
		);
}

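/* Load the GDT register with a descriptor for newgdt/limit. */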
static void set_gdt(void *newgdt, u16 limit)
{
	struct desc_ptr curgdt;

	/* x86-64 supports unaligned loads & stores */
	curgdt.size = limit;
	curgdt.address = (unsigned long)newgdt;

	__asm__ __volatile__ (
		"lgdtq %0\n"
		: : "m" (curgdt)
		);
}

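/*
 * Reload the data segment registers from __KERNEL_DS so their hidden
 * descriptor state no longer depends on the soon-to-be-zapped GDT.
 */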
static void load_segments(void)
{
	__asm__ __volatile__ (
		"\tmovl %0,%%ds\n"
		"\tmovl %0,%%es\n"
		"\tmovl %0,%%ss\n"
		"\tmovl %0,%%fs\n"
		"\tmovl %0,%%gs\n"
		: : "a" (__KERNEL_DS) : "memory"
		);
}

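/*
 * Called at image-load time: build the identity-mapped page table in
 * the image's control pages so the relocation code can run from them
 * later with paging enabled.
 */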
int machine_kexec_prepare(struct kimage *image)
{
	unsigned long start_pgtable;
	int result;

	/* Calculate the offsets */
	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;

	/* Set up the identity-mapped 64-bit page table */
	result = init_pgtable(image, start_pgtable);
	if (result)
		return result;

	return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
	return;
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
NORET_TYPE void machine_kexec(struct kimage *image)
{
	unsigned long page_list[PAGES_NR];
	void *control_page;

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	control_page = page_address(image->control_code_page) + PAGE_SIZE;
	memcpy(control_page, relocate_kernel, PAGE_SIZE);

	page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
	page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
	page_list[PA_PGD] = virt_to_phys(&kexec_pgd);
	page_list[VA_PGD] = (unsigned long)kexec_pgd;
	page_list[PA_PUD_0] = virt_to_phys(&kexec_pud0);
	page_list[VA_PUD_0] = (unsigned long)kexec_pud0;
	page_list[PA_PMD_0] = virt_to_phys(&kexec_pmd0);
	page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
	page_list[PA_PTE_0] = virt_to_phys(&kexec_pte0);
	page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
	page_list[PA_PUD_1] = virt_to_phys(&kexec_pud1);
	page_list[VA_PUD_1] = (unsigned long)kexec_pud1;
	page_list[PA_PMD_1] = virt_to_phys(&kexec_pmd1);
	page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
	page_list[PA_PTE_1] = virt_to_phys(&kexec_pte1);
	page_list[VA_PTE_1] = (unsigned long)kexec_pte1;

	page_list[PA_TABLE_PAGE] =
		(unsigned long)__pa(page_address(image->control_code_page));

	/* The segment registers are funny things: they have both a
	 * visible and an invisible part.  Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * from a table in memory.  At no other time is the descriptor
	 * table in memory accessed.
	 *
	 * We take advantage of this here by force-loading the
	 * segments before zapping the gdt with an invalid value.
	 */
	load_segments();
	/* The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);

	/* now call it */
	relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
			image->start);
}

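/*
 * Record arch-specific symbols in the vmcoreinfo note so crash-dump
 * tools can locate the NUMA node_data layout in the old kernel's memory.
 */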
void arch_crash_save_vmcoreinfo(void)
{
#ifdef CONFIG_ARCH_DISCONTIGMEM_ENABLE
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
}