/*
 * This file contains idle entry/exit functions for POWER7,
 * POWER8 and POWER9 CPUs.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>

#undef DEBUG

/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 */
#define _SDR1	GPR3
#define _RPR	GPR4
#define _SPURR	GPR5
#define _PURR	GPR6
#define _TSCR	GPR7
#define _DSCR	GPR8
#define _AMOR	GPR9
#define _WORT	GPR10
#define _WORC	GPR11
#define _PTCR	GPR12

#define PSSCR_HV_TEMPLATE	(PSSCR_ESL | PSSCR_EC |		\
				 PSSCR_PSLL_MASK | PSSCR_TR_MASK |	\
				 PSSCR_MTL_MASK)
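/*
 * Note on the template above (field meanings as per the ISA v3.0
 * PSSCR description): ESL and EC make stop exit through the system
 * reset vector in the hypervisor, while PSLL, TR and MTL are set to
 * their maximum values so that only the requested level (OR-ed in
 * from r3 by power9_idle_stop below) limits how deep the stop goes.
 */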

/* Idle state entry routines */

#define	IDLE_STATE_ENTER_SEQ(IDLE_INST)				\
	/* Magic NAP/SLEEP/WINKLE mode enter sequence */	\
	std	r0,0(r1);					\
	ptesync;						\
	ld	r0,0(r1);					\
1:	cmp	cr0,r0,r0;					\
	bne	1b;						\
	IDLE_INST;						\
	b	.
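
/*
 * The store/ptesync/load/compare-spin pattern in the sequence above
 * makes sure previously saved state is visible before the power-saving
 * instruction is issued: the compare of r0 with itself always succeeds,
 * so the bne is never taken and exists only to make execution dependent
 * on the completed load.  The trailing "b ." catches the case where
 * execution continues past the idle instruction instead of waking at
 * the reset vector.
 */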

	.text

/*
 * Used by threads before entering deep idle states. Saves SPRs
 * in interrupt stack frame
 */
save_sprs_to_stack:
	/*
	 * Note that all registers, i.e. per-core, per-subcore and
	 * per-thread, are saved here since any thread in the core
	 * might wake up first.
	 */
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_PTCR
	std	r3,_PTCR(r1)
	/*
	 * Note - SDR1 is dropped in Power ISA v3. Hence it is not
	 * saved here.
	 */
FTR_SECTION_ELSE
	mfspr	r3,SPRN_SDR1
	std	r3,_SDR1(r1)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mfspr	r3,SPRN_RPR
	std	r3,_RPR(r1)
	mfspr	r3,SPRN_SPURR
	std	r3,_SPURR(r1)
	mfspr	r3,SPRN_PURR
	std	r3,_PURR(r1)
	mfspr	r3,SPRN_TSCR
	std	r3,_TSCR(r1)
	mfspr	r3,SPRN_DSCR
	std	r3,_DSCR(r1)
	mfspr	r3,SPRN_AMOR
	std	r3,_AMOR(r1)
	mfspr	r3,SPRN_WORT
	std	r3,_WORT(r1)
	mfspr	r3,SPRN_WORC
	std	r3,_WORC(r1)

	blr

/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 */

core_idle_lock_held:
	HMT_LOW
3:	lwz	r15,0(r14)
	andi.	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	bne	3b
	HMT_MEDIUM
	lwarx	r15,0,r14
	blr
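	/*
	 * The final lwarx above reloads core_idle_state and establishes
	 * a fresh reservation, so the stwcx. that the caller issues
	 * after returning here operates on the value observed once the
	 * lock bit was clear.
	 */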

/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE on POWER8
 *	   - requested stop state on POWER9
 *
 * Whether to check IRQ_HAPPENED is passed in r4:
 *	0 - don't check
 *	1 - check
 *
 * The address to 'rfid' to is passed in r5.
 */
_GLOBAL(pnv_powersave_common)
	/* Use r3 to pass state nap/sleep/winkle */
	/*
	 * NAP is a state loss, so we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-INT_FRAME_SIZE(r1)
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)

	/* Hard disable interrupts */
	mfmsr	r9
	rldicl	r9,r9,48,1
	rotldi	r9,r9,16
	mtmsrd	r9,1			/* hard-disable interrupts */
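	/*
	 * The rldicl/rotldi pair above rotates MSR_EE into bit 0,
	 * clears it, and rotates the value back, leaving every other
	 * MSR bit untouched; the mtmsrd with L=1 then updates only the
	 * EE/RI bits from r9.
	 */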

	/* Check if something happened while soft-disabled */
	lbz	r0,PACAIRQHAPPENED(r13)
	andi.	r0,r0,~PACA_IRQ_HARD_DIS@l
	beq	1f
	cmpwi	cr0,r4,0
	beq	1f
	addi	r1,r1,INT_FRAME_SIZE
	ld	r0,16(r1)
	li	r3,0			/* Return 0 (no nap) */
	mtlr	r0
	blr

1:	/*
	 * We mark irqs hard disabled as this is the state we'll
	 * be in when returning and we need to tell
	 * arch_local_irq_restore() about it.
	 */
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr	r4
	std	r4,_CCR(r1)
	std	r9,_MSR(r1)
	std	r1,PACAR1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif

	/*
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
	li	r6, MSR_RI
	andc	r6, r9, r6
	mtmsrd	r6, 1		/* clear RI before setting SRR0/1 */
	mtspr	SPRN_SRR0, r5
	mtspr	SPRN_SRR1, r7
	rfid
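	/*
	 * The rfid above transfers control to the routine passed in r5
	 * (pnv_enter_arch207_idle_mode or power_enter_stop) with
	 * MSR_IDLE, i.e. with translation off, so the idle instruction
	 * itself is executed in real mode.
	 */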

	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP
	bge	cr3,2f
	IDLE_STATE_ENTER_SEQ(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
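	/*
	 * core_idle_state keeps one bit per thread (the
	 * PNV_CORE_IDLE_THREAD_BITS field, set while a thread is
	 * running and cleared once it has committed to sleep/winkle)
	 * plus a lock bit used to serialize the fastsleep workaround
	 * and the wakeup-time restore.
	 */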
lwarx_loop1:
	lwarx	r15,0,r14

	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bnel	core_idle_lock_held

	andc	r15,r15,r7			/* Clear thread bit */

	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS

	/*
	 * If cr0 = 0, the current thread is the last thread of the core
	 * entering sleep. The last thread needs to execute the hardware
	 * bug workaround code if required by the platform.
	 * Make the workaround call unconditionally here. The branch below
	 * is patched out during idle state discovery if the platform does
	 * not require it.
	 */
	.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq	fastsleep_workaround_at_entry

	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync
common_enter: /* common code for all the threads entering sleep or winkle */
	bgt	cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ(PPC_SLEEP)

fastsleep_workaround_at_entry:
	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

	/* Fast sleep workaround */
	li	r3,1
	li	r4,1
	li	r0,OPAL_CONFIG_CPU_IDLE_STATE
	bl	opal_call_realmode

	/* Clear Lock bit */
	li	r0,0
	lwsync
	stw	r0,0(r14)
	b	common_enter

enter_winkle:
	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ(PPC_WINKLE)

/*
 * r3 - requested stop state
 */
power_enter_stop:
	/*
	 * Check if the requested state is a deep idle state.
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
	cmpd	r3,r4
	bge	2f
	IDLE_STATE_ENTER_SEQ(PPC_STOP)
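	/*
	 * Stop states shallower than pnv_first_deep_stop_state are
	 * assumed not to lose hypervisor state, so the sequence above
	 * enters them directly, without the SPR save and core
	 * bookkeeping used for the deep states below.
	 */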
2:
	/*
	 * Entering deep idle state.
	 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
	 * stack and enter stop
	 */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)

lwarx_loop_stop:
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bnel	core_idle_lock_held
	andc	r15,r15,r7			/* Clear thread bit */

	stwcx.	r15,0,r14
	bne-	lwarx_loop_stop
	isync

	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ(PPC_STOP)

_GLOBAL(power7_idle)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDRBASE(r3,powersave_nap)
	lwz	r4,ADDROFF(powersave_nap)(r3)
	cmpwi	0,r4,0
	beqlr
	li	r3, 1
	/* fall through */

_GLOBAL(power7_nap)
	mr	r4,r3
	li	r3,PNV_THREAD_NAP
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

_GLOBAL(power7_sleep)
	li	r3,PNV_THREAD_SLEEP
	li	r4,1
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

_GLOBAL(power7_winkle)
	li	r3,PNV_THREAD_WINKLE
	li	r4,1
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

#define CHECK_HMI_INTERRUPT						\
	mfspr	r0,SPRN_SRR1;						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xf;  /* extract wake reason field (P8) */	\
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xe;  /* P7 wake reason field is 3 bits */	\
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi	r0,0xa;			/* Hypervisor maintenance ? */	\
	bne	20f;							\
	/* Invoke opal call to handle hmi */				\
	ld	r2,PACATOC(r13);					\
	ld	r1,PACAR1(r13);						\
	std	r3,ORIG_GPR3(r1);	/* Save original r3 */		\
	li	r0,OPAL_HANDLE_HMI;	/* Pass opal token argument */	\
	bl	opal_call_realmode;					\
	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
20:	nop;
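
/*
 * CHECK_HMI_INTERRUPT is used in the wakeup paths below (under
 * CPU_FTR_HVMODE): it extracts the wake reason from SRR1 and, when the
 * wakeup was caused by a Hypervisor Maintenance Interrupt, hands it to
 * OPAL via OPAL_HANDLE_HMI before normal wakeup handling continues.
 */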

/*
 * r3 - requested stop state
 */
_GLOBAL(power9_idle_stop)
	LOAD_REG_IMMEDIATE(r4, PSSCR_HV_TEMPLATE)
	or	r4,r4,r3
	mtspr	SPRN_PSSCR, r4
	li	r4, 1
	LOAD_REG_ADDR(r5,power_enter_stop)
	b	pnv_powersave_common
	/* No return */
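	/*
	 * r4 = 1 asks pnv_powersave_common to check PACAIRQHAPPENED
	 * before committing to idle, and r5 selects power_enter_stop as
	 * the real-mode routine that will execute the stop instruction.
	 */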
/*
 * Called from reset vector. Check whether we have woken up with
 * hypervisor state loss. If yes, restore hypervisor state and return
 * to the reset vector.
 *
 * r13 - Contents of HSPRG0
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 */
_GLOBAL(pnv_restore_hyp_resource)
	ld	r2,PACATOC(r13)
BEGIN_FTR_SECTION
	/*
	 * POWER ISA 3.0. Use PSSCR to determine if we
	 * are waking up from a deep idle state.
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)

	mfspr	r5,SPRN_PSSCR
	/*
	 * Bits 0-3 hold the Power-Saving Level Status, which indicates
	 * the idle state we are waking up from.
	 */
	rldicl	r5,r5,4,60
	cmpd	cr4,r5,r4
	bge	cr4,pnv_wakeup_tb_loss
	/*
	 * Waking up without hypervisor state loss. Return to
	 * reset vector.
	 */
	blr

END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	/*
	 * POWER ISA 2.07 or less.
	 * Check if the last bit of HSPRG0 is set. This indicates whether
	 * we are waking up from winkle.
	 */
	clrldi	r5,r13,63
	clrrdi	r13,r13,1
	cmpwi	cr4,r5,1
	mtspr	SPRN_HSPRG0,r13
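	/*
	 * The low bit of HSPRG0 is set elsewhere before entering winkle,
	 * so r5 now carries the "came from winkle" flag, the bit has
	 * been stripped to recover the real PACA pointer, and cr4 is
	 * 'eq' when we winkled (i.e. complete state loss).
	 */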

	lbz	r0,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr2,r0,PNV_THREAD_NAP
	bgt	cr2,pnv_wakeup_tb_loss	/* Either sleep or Winkle */

	/*
	 * We fall through here if PACA_THREAD_IDLE_STATE shows we are
	 * waking up from nap. At this stage cr3 shouldn't contain 'gt',
	 * since that would indicate waking from nap with hypervisor
	 * state loss.
	 */
	bgt	cr3,.

	blr	/* Return to the System Reset vector from where
		   pnv_restore_hyp_resource was invoked */

/*
 * Called if waking up from an idle state which can cause either partial or
 * complete hypervisor state loss.
 * In POWER8, called if waking up from fastsleep or winkle.
 * In POWER9, called if waking up from a stop state >= pnv_first_deep_stop_state.
 *
 * r13 - PACA
 * cr3 - gt if waking up with partial/complete hypervisor state loss
 * cr4 - eq if waking up from complete hypervisor state loss.
 */
_GLOBAL(pnv_wakeup_tb_loss)
	ld	r1,PACAR1(r13)
	/*
	 * Before entering any idle state, the NVGPRs are saved in the stack
	 * and they are restored before switching to the process context. Hence
	 * until they are restored, they are free to be used.
	 *
	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
	 * opal_call_realmode (called in CHECK_HMI_INTERRUPT). SRR1 is required
	 * to determine the wakeup reason if we branch to kvm_start_guest. LR
	 * is required to return to the reset vector after hypervisor state
	 * restore is complete.
	 */
	mflr	r17
	mfspr	r16,SPRN_SRR1
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop2:
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	/*
	 * The lock bit is set in one of two cases:
	 * a. In the sleep/winkle enter path, the last thread is executing
	 *    the fastsleep workaround code.
	 * b. In the wake up path, another thread is executing the fastsleep
	 *    workaround undo code, resyncing the timebase or restoring context.
	 * In either case loop until the lock bit is cleared.
	 */
	bnel	core_idle_lock_held

	cmpwi	cr2,r15,0

	/*
	 * At this stage
	 * cr2 - eq if first thread to wake up in the core
	 * cr3 - gt if waking up with partial/complete hypervisor state loss
	 * cr4 - eq if waking up from complete hypervisor state loss.
	 */

	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	stwcx.	r15,0,r14
	bne-	lwarx_loop2
	isync
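
	/*
	 * The lock bit is now held by this thread. It serializes the
	 * subcore/core state restore and the timebase resync below
	 * against the other threads of the core waking up, and is
	 * released at clear_lock.
	 */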

BEGIN_FTR_SECTION
	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and	r4,r4,r15
	cmpwi	r4,0	/* Check if first in subcore */

	or	r15,r15,r7	/* Set thread bit */
	beq	first_thread_in_subcore
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	or	r15,r15,r7	/* Set thread bit */
	beq	cr2,first_thread_in_core

	/* Not first thread in core or subcore to wake up */
	b	clear_lock

first_thread_in_subcore:
	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore
	 */
	bne	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld	r4,_SDR1(r1)
	mtspr	SPRN_SDR1,r4

	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne	cr2,clear_lock

first_thread_in_core:

	/*
	 * First thread in the core waking up from any state which can cause
	 * partial or complete hypervisor state loss. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The below branch instruction will
	 * be patched out if the platform does not have fastsleep or does not
	 * require the workaround. Patching will be performed during the
	 * discovery of idle states.
	 */
	.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b	fastsleep_workaround_at_exit

timebase_resync:
	/*
	 * Use cr3, which indicates that we are waking up with at least
	 * partial hypervisor state loss, to determine if a timebase
	 * resync is needed.
	 */
	ble	cr3,clear_lock
	/* Time base re-sync */
	li	r0,OPAL_RESYNC_TIMEBASE
	bl	opal_call_realmode
	/* TODO: Check r3 for failure */

	/*
	 * If waking up from sleep, per-core state is not lost, skip to
	 * clear_lock.
	 */
	bne	cr4,clear_lock

	/*
	 * First thread in the core to wake up, and it is waking up with
	 * complete hypervisor state loss. Restore per-core hypervisor
	 * state.
	 */
BEGIN_FTR_SECTION
	ld	r4,_PTCR(r1)
	mtspr	SPRN_PTCR,r4
	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4,_TSCR(r1)
	mtspr	SPRN_TSCR,r4
	ld	r4,_WORC(r1)
	mtspr	SPRN_WORC,r4

clear_lock:
	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS
	lwsync
	stw	r15,0(r14)

common_exit:
	/*
	 * Common to all threads.
	 *
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
	 */
	bne	cr4,hypervisor_state_restored

	/* Waking up from winkle */

BEGIN_MMU_FTR_SECTION
	b	no_segments
END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
	/* Restore SLB from PACA */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
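	/*
	 * The loop above walks the SLB_NUM_BOLTED entries of the SLB
	 * shadow area and re-installs with slbmte only those whose
	 * valid bit (SLB_ESID_V) is set; LDX_BE is used because the
	 * shadow buffer is kept in big-endian format.
	 */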
no_segments:

	/* Restore per thread state */

	ld	r4,_SPURR(r1)
	mtspr	SPRN_SPURR,r4
	ld	r4,_PURR(r1)
	mtspr	SPRN_PURR,r4
	ld	r4,_DSCR(r1)
	mtspr	SPRN_DSCR,r4
	ld	r4,_WORT(r1)
	mtspr	SPRN_WORT,r4

	/* Call cur_cpu_spec->cpu_restore() */
	LOAD_REG_ADDR(r4, cur_cpu_spec)
	ld	r4,0(r4)
	ld	r12,CPU_SPEC_RESTORE(r4)
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r12)
#endif
	mtctr	r12
	bctrl
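	/*
	 * cpu_restore() reinitializes the CPU-specific SPRs (e.g. the
	 * HID registers) that are lost across winkle; with ELF ABI v1
	 * the extra load above dereferences the function descriptor to
	 * get the actual entry point before the indirect call.
	 */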

hypervisor_state_restored:

	mtspr	SPRN_SRR1,r16
	mtlr	r17
	blr	/* Return to the System Reset vector from where
		   pnv_restore_hyp_resource was invoked */

fastsleep_workaround_at_exit:
	li	r3,1
	li	r4,0
	li	r0,OPAL_CONFIG_CPU_IDLE_STATE
	bl	opal_call_realmode
	b	timebase_resync

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
_GLOBAL(pnv_wakeup_loss)
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
_GLOBAL(pnv_wakeup_noloss)
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	pnv_wakeup_loss
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r1,PACAR1(r13)
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid