/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/v7m.h>

@ Bad Abort numbers
@ -----------------
@
#define BAD_PREFETCH		0
#define BAD_DATA		1
#define BAD_ADDREXCPTN		2
#define BAD_IRQ			3
#define BAD_UNDEFINSTR		4

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
#define S_OFF			8
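
@
@ A rough sketch of the resulting SVC-mode stack layout (illustrative
@ only; the authoritative offsets are the S_* constants generated from
@ struct pt_regs by asm-offsets.c):
@
@	sp + S_OFF ...	: struct pt_regs { r0-r15, cpsr, old_r0 }
@	sp + 0 .. 7	: syscall args 5 and 6 (copies of r4, r5)
@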

/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif

	.macro	zero_fp
#ifdef CONFIG_FRAME_POINTER
	mov	fp, #0
#endif
	.endm

#ifdef CONFIG_ALIGNMENT_TRAP
#define ATRAP(x...) x
#else
#define ATRAP(x...)
#endif
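
@
@ ATRAP() wraps instructions that should only be assembled in when the
@ alignment trap is configured, e.g. (illustrative only):
@
@	ATRAP(	mrc	p15, 0, r7, c1, c0, 0)
@	ATRAP(	ldr	r8, .LCcralign)
@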

	.macro	alignment_trap, rtmp1, rtmp2, label
#ifdef CONFIG_ALIGNMENT_TRAP
	mrc	p15, 0, \rtmp2, c1, c0, 0	@ read the current SCTLR
	ldr	\rtmp1, \label			@ \label holds the address of
	ldr	\rtmp1, [\rtmp1]		@ the saved control register value
	teq	\rtmp1, \rtmp2			@ has it been changed?
	mcrne	p15, 0, \rtmp1, c1, c0, 0	@ if so, restore the saved value
#endif
	.endm

#ifdef CONFIG_CPU_V7M
/*
 * ARMv7-M exception entry/exit macros.
 *
 * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
 * automatically saved on the current stack (32 bytes) before
 * switching to the exception stack (SP_main).
 *
 * If the exception is taken while in user mode, SP_main is
 * empty. Otherwise, SP_main is aligned to 64 bits automatically
 * (CCR.STKALIGN set).
 *
 * Linux assumes that the interrupts are disabled when entering an
 * exception handler and it may BUG if this is not the case. Interrupts
 * are disabled during entry and reenabled in the exit macro.
 *
 * v7m_exception_slow_exit is used when returning from SVC or PendSV.
 * When returning to kernel mode, we don't return from exception.
 */
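
/*
 * Layout of the hardware-saved exception frame (lowest address first),
 * as defined by the ARMv7-M architecture:
 *
 *	r0, r1, r2, r3, r12, lr (r14), ReturnAddress(), xPSR
 *	[+ one pad word when the CPU realigned the stack, xPSR bit 9 set]
 */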
	.macro	v7m_exception_entry
	@ determine the location of the registers saved by the core during
	@ exception entry. Depending on the mode the cpu was in when the
	@ exception happened, that is either on the main or the process stack.
	@ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
	@ was used.
	tst	lr, #EXC_RET_STACK_MASK
	mrsne	r12, psp
	moveq	r12, sp

	@ we cannot rely on r0-r3 and r12 matching the value saved in the
	@ exception frame because of tail-chaining. So these have to be
	@ reloaded.
	ldmia	r12!, {r0-r3}

	@ Linux expects to have irqs off. Do it here before taking stack space
	cpsid	i

	sub	sp, #PT_REGS_SIZE-S_IP
	stmdb	sp!, {r0-r11}

	@ load saved r12, lr, return address and xPSR.
	@ r0-r7 are used for signals and never touched from now on. Clobbering
	@ r8-r12 is OK.
	mov	r9, r12
	ldmia	r9!, {r8, r10-r12}

	@ calculate the original stack pointer value.
	@ r9 currently points to the memory location just above the auto saved
	@ xPSR.
	@ The cpu might automatically 8-byte align the stack. Bit 9
	@ of the saved xPSR specifies if stack aligning took place. In this case
	@ another 32-bit value is included in the stack.

	tst	r12, V7M_xPSR_FRAMEPTRALIGN
	addne	r9, r9, #4

	@ store saved r12 using str to have a register to hold the base for stm
	str	r8, [sp, #S_IP]
	add	r8, sp, #S_SP
	@ store r13-r15, xPSR
	stmia	r8!, {r9-r12}
	@ store old_r0
	str	r0, [r8]
	.endm

/*
 * PENDSV and SVCALL are configured to have the same exception
 * priorities. As a kernel thread runs at SVCALL execution priority it
 * can never be preempted and so we will never have to return to a
 * kernel thread here.
 */
	.macro	v7m_exception_slow_exit ret_r0
	cpsid	i
	ldr	lr, =exc_ret
	ldr	lr, [lr]

	@ read original r12, sp, lr, pc and xPSR
	add	r12, sp, #S_IP
	ldmia	r12, {r1-r5}

	@ an exception frame is always 8-byte aligned. To tell the hardware if
	@ the sp to be restored is aligned or not, set bit 9 of the saved xPSR
	@ accordingly.
	tst	r2, #4
	subne	r2, r2, #4
	orrne	r5, V7M_xPSR_FRAMEPTRALIGN
	biceq	r5, V7M_xPSR_FRAMEPTRALIGN

	@ ensure bit 0 is cleared in the PC, otherwise behaviour is
	@ unpredictable
	bic	r4, #1

	@ write basic exception frame
	stmdb	r2!, {r1, r3-r5}
	ldmia	sp, {r1, r3-r5}
	.if	\ret_r0
	stmdb	r2!, {r0, r3-r5}
	.else
	stmdb	r2!, {r1, r3-r5}
	.endif

	@ restore process sp
	msr	psp, r2

	@ restore original r4-r11
	ldmia	sp!, {r0-r11}

	@ restore main sp
	add	sp, sp, #PT_REGS_SIZE-S_IP

	cpsie	i
	bx	lr
	.endm
#endif	/* CONFIG_CPU_V7M */

	@
	@ Store/load the USER SP and LR registers by switching to the SYS
	@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
	@ available. Should only be called from SVC mode.
	@
	.macro	store_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	str	sp, [\rd, #\offset]		@ save sp_usr
	str	lr, [\rd, #\offset + 4]		@ save lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm

	.macro	load_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	ldr	sp, [\rd, #\offset]		@ load sp_usr
	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm
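
@
@ Example (illustrative): loading the user sp/lr pair from a pt_regs
@ frame pointed to by r2, as done by the Thumb-2 restore path below:
@
@	mov	r2, sp
@	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
@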

	.macro	svc_exit, rpsr, irq = 0
	.if	\irq != 0
	@ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	.else
	@ IRQs off again before pulling preserved data off the stack
	disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	\rpsr, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	\rpsr, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	.endif
	ldr	r1, [sp, #SVC_ADDR_LIMIT]
	uaccess_restore
	str	r1, [tsk, #TI_ADDR_LIMIT]

#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode SVC restore
	msr	spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
	@ We must avoid clrex due to Cortex-A15 erratum #830321
	sub	r0, sp, #4			@ uninhabited address
	strex	r1, r2, [r0]			@ clear the exclusive monitor
#endif
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#else
	@ Thumb mode SVC restore
	ldr	lr, [sp, #S_SP]			@ top of the stack
	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc

	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor

	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
	ldmia	sp, {r0 - r12}
	mov	sp, lr
	ldr	lr, [sp], #4
	rfeia	sp!
#endif
	.endm
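
@
@ Example (illustrative, matching the callers in entry-armv.S): an IRQ
@ handler returning to SVC mode restores the saved PSR from pt_regs:
@
@	svc_exit r5, irq = 1		@ r5 holds the saved SPSR
@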

@
@ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
@
@ This macro acts in a similar manner to svc_exit but switches to FIQ
@ mode to restore the final part of the register state.
@
@ We cannot use the normal svc_exit procedure because that would
@ clobber spsr_svc (FIQ could be delivered during the first few
@ instructions of vector_swi meaning its contents have not been
@ saved anywhere).
@
@ Note that, unlike svc_exit, this macro also does not allow a caller
@ supplied rpsr. This is because the FIQ exceptions are not re-entrant
@ and the handlers cannot call into the scheduler (meaning the value
@ on the stack remains correct).
@
	.macro	svc_exit_via_fiq
	ldr	r1, [sp, #SVC_ADDR_LIMIT]
	uaccess_restore
	str	r1, [tsk, #TI_ADDR_LIMIT]
#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode restore
	mov	r0, sp
	ldmib	r0, {r1 - r14}	@ abort is deadly from here onward (it will
				@ clobber state restored below)
	msr	cpsr_c, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
	add	r8, r0, #S_PC
	ldr	r9, [r0, #S_PSR]
	msr	spsr_cxsf, r9
	ldr	r0, [r0, #S_R0]
	ldmia	r8, {pc}^
#else
	@ Thumb mode restore
	add	r0, sp, #S_R2
	ldr	lr, [sp, #S_LR]
	ldr	sp, [sp, #S_SP]	@ abort is deadly from here onward (it will
				@ clobber state restored below)
	ldmia	r0, {r2 - r12}
	mov	r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
	msr	cpsr_c, r1
	sub	r0, #S_R2
	add	r8, r0, #S_PC
	ldmia	r0, {r0 - r1}
	rfeia	r8
#endif
	.endm

	.macro	restore_user_regs, fast = 0, offset = 0
	uaccess_enable r1, isb=0
#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode restore
	mov	r2, sp
	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
	tst	r1, #PSR_I_BIT | 0x0f
	bne	1f
	msr	spsr_cxsf, r1			@ save in spsr_svc
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r1, r2, [r2]			@ clear the exclusive monitor
#endif
	.if	\fast
	ldmdb	r2, {r1 - lr}^			@ get calling r1 - lr
	.else
	ldmdb	r2, {r0 - lr}^			@ get calling r0 - lr
	.endif
	mov	r0, r0				@ ARMv5T and earlier require a nop
						@ after ldm {}^
	add	sp, sp, #\offset + PT_REGS_SIZE
	movs	pc, lr				@ return & move spsr_svc into cpsr
1:	bug	"Returning to usermode but unexpected PSR bits set?", \@
#elif defined(CONFIG_CPU_V7M)
	@ V7M restore.
	@ Note that we don't need to do clrex here as clearing the local
	@ monitor is part of the exception entry and exit sequence.
	.if	\offset
	add	sp, #\offset
	.endif
	v7m_exception_slow_exit ret_r0 = \fast
#else
	@ Thumb mode restore
	mov	r2, sp
	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]	@ get pc
	add	sp, sp, #\offset + S_SP
	tst	r1, #PSR_I_BIT | 0x0f
	bne	1f
	msr	spsr_cxsf, r1			@ save in spsr_svc

	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r1, r2, [sp]			@ clear the exclusive monitor

	.if	\fast
	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
	.else
	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
	.endif
	add	sp, sp, #PT_REGS_SIZE - S_SP
	movs	pc, lr				@ return & move spsr_svc into cpsr
1:	bug	"Returning to usermode but unexpected PSR bits set?", \@
#endif	/* !CONFIG_THUMB2_KERNEL */
	.endm
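
@
@ Example (illustrative): a fast syscall return path skips restoring r0
@ (it holds the return value) and pops the two stacked syscall args:
@
@	restore_user_regs fast = 1, offset = S_OFF
@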

/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
	.if	\save
	stmdb   sp!, {r0-r3, ip, lr}
	bl	context_tracking_user_exit
	ldmia	sp!, {r0-r3, ip, lr}
	.else
	bl	context_tracking_user_exit
	.endif
#endif
	.endm

	.macro ct_user_enter, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
	.if	\save
	stmdb   sp!, {r0-r3, ip, lr}
	bl	context_tracking_user_enter
	ldmia	sp!, {r0-r3, ip, lr}
	.else
	bl	context_tracking_user_enter
	.endif
#endif
	.endm
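
@
@ The \save argument exists because these helpers are invoked both from
@ paths where r0-r3/ip/lr still hold live caller state (save = 1, the
@ registers are preserved around the C call) and from paths that have
@ already saved everything (save = 0). Illustrative call site:
@
@	ct_user_enter save = 0
@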

	.macro	invoke_syscall, table, nr, tmp, ret, reload=0
#ifdef CONFIG_CPU_SPECTRE
	mov	\tmp, \nr
	cmp	\tmp, #NR_syscalls		@ check upper syscall limit
	movcs	\tmp, #0
	csdb
	badr	lr, \ret			@ return address
	.if	\reload
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmiacc	r1, {r0 - r6}			@ reload r0-r6
	stmiacc	sp, {r4, r5}			@ update stack arguments
	.endif
	ldrcc	pc, [\table, \tmp, lsl #2]	@ call sys_* routine
#else
	cmp	\nr, #NR_syscalls		@ check upper syscall limit
	badr	lr, \ret			@ return address
	.if	\reload
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmiacc	r1, {r0 - r6}			@ reload r0-r6
	stmiacc	sp, {r4, r5}			@ update stack arguments
	.endif
	ldrcc	pc, [\table, \nr, lsl #2]	@ call sys_* routine
#endif
	.endm
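
/*
 * The CONFIG_CPU_SPECTRE path above is a Spectre-v1 mitigation: the
 * syscall number is clamped to 0 on out-of-range values *before* the
 * speculation barrier (csdb), so a mispredicted bounds check cannot
 * speculatively index past the end of the syscall table. A rough C
 * equivalent of the idea (illustrative sketch only, not kernel code):
 *
 *	unsigned int idx = nr;
 *	if (idx >= NR_syscalls)
 *		idx = 0;	// clamp instead of only branching away
 *	csdb();			// barrier: no speculative use of idx
 *	return sys_call_table[idx](...);
 */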

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number for thumb mode.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r7		@ syscall number
tbl	.req	r8		@ syscall table pointer
why	.req	r8		@ Linux syscall (!= 0)
tsk	.req	r9		@ current thread_info