2 * Copyright (c) 2008 Travis Geiselbrecht
3 * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files
7 * (the "Software"), to deal in the Software without restriction,
8 * including without limitation the rights to use, copy, modify, merge,
9 * publish, distribute, sublicense, and/or sell copies of the Software,
10 * and to permit persons to whom the Software is furnished to do so,
11 * subject to the following conditions:
13 * The above copyright notice and this permission notice shall be
14 * included in all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
20 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 #include <arch/arm/mmu_ldesc_macros.h>
31 #include <arch/arm/mmu_sdesc_macros.h>
47 #if !defined(WITH_MONITOR_BIN)
48 adr r3, __jumpback_addr
51 /* save boot params passed by the bootloader */
52 adr r3, __save_boot_regs
56 adr r3, __save_boot_cpsr
61 adr r3, __bootarg_addr
65 /* save load address/size of image */
66 adr r3, __load_phys_size
69 /* Derive the phys_base addr */
71 sub r1, r1, #(.Laddr - _start)
73 adr r3, __load_phys_base
76 /* save phys offset (for v -> p conversions) */
77 adr r3, __load_phys_offset
82 /* ensure phys size doesn't exceed virt size */
87 /* copies of phys size / phys base */
93 mrc p15, 0, r0, c1, c0, 0
94 /* XXX this is currently for arm926, revisit with armv6 cores */
95 /* new thumb behavior, low exception vectors, i/d cache disable, mmu disabled */
96 bic r0, r0, #(1<<15 | 1<<13 | 1<<12)
97 bic r0, r0, #(1<<2 | 1<<0)
98 bic r0, r0, #(1<<29 | 1<<28)
99 /* disable alignment faults */
100 /* user mode app/libs may have unaligned references */
102 mcr p15, 0, r0, c1, c0, 0
105 #if ARM_USE_MMU_RELOC
107 * Create page table(s), on entry:
113 sub r0, r0, r6 // second level phys
115 mmu_desc_create_null_entry r0, r1
116 mmu_desc_create_ident_mapping r0, r1, r2, r3
121 mmu_desc_setup_carveout_ident r0, r1, r2, r3, r11
123 #if defined(WITH_MONITOR_BIN)
125 * With a separate monitor binary, phys base with this VMEMBASE isn't
126 * guaranteed to be L2 block aligned (i.e. carveout size is reduced by
127 * the size of the monitor), so needs to be mapped with L3 mappings.
129 * Because the carveout size is arbitrary, we can't preallocate the
130 * number of L3 page tables needed, so it's taken dynamically from the
131 * carveout, reducing what's available for the kernel's heap.
133 mov r2, r4 // carveout in bytes
134 mov32 r0, ((1 << MMU_L3_MAP_SHIFT) - 1)
136 lsr r2, r2, #MMU_L3_MAP_SHIFT // roundup to L3 map size
137 lsl r2, r2, #MMU_L3_SIZE_SHIFT // bytes of L3 tables
139 /* update _early_heap_allocs for page tables */
140 adr r0, __early_heap_allocs
142 add r3, r5, r4 // pt ptr = phys_base + phys_size
143 sub r3, r3, r2 // pt ptr -= L3 pagetable size
146 mov r2, r4 // carveout in bytes
147 sub r8, r0, r6 // second level phys
151 mmu_desc_map_phys_l3 r0, r1, r2, r3, r8, r9, r10, r11
154 * Should be L2 block aligned as there was no preceding memory
155 * usage, like in the case of a separate monitor binary.
157 mmu_desc_phy_align r5, r4, r9
159 /* map VMEMBASE -> phys carveout */
160 mov r2, r4 // carveout in bytes
162 sub r3, r3, r6 // second level phys
163 mov32 r0, VMEMBASE // virt
165 mmu_desc_map_phys_l2 r0, r1, r2, r3, r9
167 /* map alias -> phys carveout */
168 mov r2, r4 // carveout in bytes
170 sub r3, r3, r6 // second level phys
173 mmu_desc_map_phys_l2 r0, r1, r2, r3, r9
176 /* load phys pointers to first/second level tables */
178 sub r8, r3, r6 // first level phys
180 sub r9, r3, r6 // second level phys
183 /* setup L1 entries */
187 mmu_desc_map_phys_l1 r0, r1, r2
190 mcr p15, 0, r2, c8, c7, 0
196 mmu_desc_init_mmu r0, r1, r2
204 #if WITH_CPU_EARLY_INIT
205 /* call platform/arch/etc specific init code */
209 /* see if we need to relocate */
210 ldr r0, __load_phys_base
212 sub r1, r1, r6 // compare is in phys addrs
216 /* we need to relocate ourselves to the proper spot */
225 /* we're relocated, jump to the right address */
226 ldr r0, =.Lstack_setup
232 /* set up the stack for irq, fiq, abort, undefined, system/user, and lastly supervisor mode */
236 ldr r2, =abort_stack_top
237 orr r1, r0, #0x12 // irq
239 ldr r13, =irq_save_spot /* save a pointer to a temporary dumping spot used during irq delivery */
241 orr r1, r0, #0x11 // fiq
245 orr r1, r0, #0x17 // abort
249 orr r1, r0, #0x1b // undefined
253 orr r1, r0, #0x1f // system
257 orr r1, r0, #0x13 // supervisor
261 /* copy the initialized data segment out of rom if necessary */
262 ldr r0, =__data_start_rom
263 ldr r1, =__data_start
276 /* clear out the bss */
288 .type __save_boot_regs, %object
289 .global __save_boot_regs
291 .long 0, 0, 0, 0, 0, 0, 0, 0, 0
292 .size __save_boot_regs, . - __save_boot_regs
294 .global __save_boot_cpsr
300 #if ARM_USE_MMU_RELOC
303 * Keep location of where the image was loaded, and how much
304 * contiguous memory has been made available.
306 .global __load_phys_base
309 .global __load_phys_size
312 .global __load_phys_offset
315 .global __early_heap_allocs
320 .global __jumpback_addr
323 .global __bootarg_addr
329 /* the abort stack is for unrecoverable errors.
330 * also note the initial working stack is set to here.
331 * when the threading system starts up it'll switch to a new
332 * dynamically allocated stack, so we don't need it for very long
336 .global abort_stack_top
342 /* define the heap end as read-only data containing the end defined in the
343 * linker script. other archs that use dynamic memory length discovery can make
344 * this read-write and update it during init.