// SPDX-License-Identifier: GPL-2.0
#include <linux/highmem.h>
#include <linux/kdebug.h>
#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/uprobes.h>

#include <asm/branch.h>
#include <asm/cpu-features.h>
#include <asm/ptrace.h>

#include "probes-common.h"

static inline int insn_has_delay_slot(const union mips_instruction insn)
{
	return __insn_has_delay_slot(insn);
}

/**
 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
 * @aup: the probepoint information.
 * @mm: the probed address space.
 * @addr: virtual address at which to install the probepoint.
 *
 * Return 0 on success or a negative error number on failure.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *aup,
	struct mm_struct *mm, unsigned long addr)
{
	union mips_instruction inst;

	/*
	 * For the time being this also blocks attempts to use uprobes with
	 * MIPS16 and microMIPS.
	 */
	if (addr & 0x03)
		return -EINVAL;

	inst.word = aup->insn[0];

	if (__insn_is_compact_branch(inst)) {
		pr_notice("Uprobes for compact branches are not supported\n");
		return -EINVAL;
	}

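	/*
	 * Set up the out-of-line (XOL) slot.  If the probed instruction has
	 * a delay slot, copy the delay-slot instruction (insn[1]) into the
	 * slot instead: the branch itself is emulated by
	 * __compute_return_epc_for_insn() in arch_uprobe_pre_xol(), so only
	 * the delay-slot instruction needs to be single-stepped out of line.
	 */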
	aup->ixol[0] = aup->insn[insn_has_delay_slot(inst)];
	aup->ixol[1] = UPROBE_BRK_UPROBE_XOL;	/* break: trap back into the kernel after the XOL step */

	return 0;
}

/**
 * is_trap_insn - check if the instruction is a trap variant
 * @insn: instruction to be checked.
 * Returns true if @insn is a trap variant.
 *
 * This definition overrides the weak definition in kernel/events/uprobes.c
 * and is needed for the case where an architecture has multiple trap
 * instructions (like PowerPC or MIPS).  We treat BREAK just like the more
 * modern conditional trap instructions.
 */
bool is_trap_insn(uprobe_opcode_t *insn)
{
	union mips_instruction inst;

	inst.word = *insn;

	switch (inst.i_format.opcode) {
	case spec_op:
		switch (inst.r_format.func) {
		case break_op:
		case teq_op:
		case tge_op:
		case tgeu_op:
		case tlt_op:
		case tltu_op:
		case tne_op:
			return true;
		}
		break;

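	/*
	 * The immediate trap instructions (TEQI, TGEI, ...) are encoded
	 * under the REGIMM (bcond) opcode, with the trap type in the rt
	 * field.
	 */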
	case bcond_op:	/* Yes, really ...  */
		switch (inst.u_format.rt) {
		case teqi_op:
		case tgei_op:
		case tgeiu_op:
		case tlti_op:
		case tltiu_op:
		case tnei_op:
			return true;
		}
		break;
	}

	return false;
}

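/*
 * Sentinel stored in thread.trap_nr while single-stepping out of line.
 * Any real trap taken during the step overwrites it, which is what
 * arch_uprobe_xol_was_trapped() checks for.
 */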
#define UPROBE_TRAP_NR	ULONG_MAX

/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 * @aup: the probepoint information.
 * @regs: reflects the saved user state of current task.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/*
	 * Now find the EPC at which to resume after the breakpoint has been
	 * dealt with.  This may require emulation of a branch.
	 */
	aup->resume_epc = regs->cp0_epc + 4;
	if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) {
		__compute_return_epc_for_insn(regs,
			(union mips_instruction) aup->insn[0]);
		aup->resume_epc = regs->cp0_epc;
	}
	utask->autask.saved_trap_nr = current->thread.trap_nr;
	current->thread.trap_nr = UPROBE_TRAP_NR;
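	/* Point the EPC at the XOL slot so the copied instruction runs there. */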
	regs->cp0_epc = current->utask->xol_vaddr;

	return 0;
}

int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	regs->cp0_epc = aup->resume_epc;

	return 0;
}

/*
 * If the xol insn itself traps and generates a signal (say,
 * SIGILL/SIGSEGV/etc), then detect the case where a single-stepped
 * instruction jumps back to its own address.  It is assumed that anything
 * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
 *
 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
 * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
 */
bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
{
	if (tsk->thread.trap_nr != UPROBE_TRAP_NR)
		return true;

	return false;
}

int arch_uprobe_exception_notify(struct notifier_block *self,
	unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;

	/* regs == NULL is a kernel bug */
	if (WARN_ON(!regs))
		return NOTIFY_DONE;

	/* We are only interested in userspace traps */
	if (!user_mode(regs))
		return NOTIFY_DONE;

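	/*
	 * DIE_UPROBE is raised when the uprobe breakpoint (UPROBE_SWBP_INSN)
	 * at the probed address is hit; DIE_UPROBE_XOL when the
	 * single-stepped copy reaches the trailing UPROBE_BRK_UPROBE_XOL
	 * break in the XOL slot.
	 */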
	switch (val) {
	case DIE_UPROBE:
		if (uprobe_pre_sstep_notifier(regs))
			return NOTIFY_STOP;
		break;
	case DIE_UPROBE_XOL:
		if (uprobe_post_sstep_notifier(regs))
			return NOTIFY_STOP;
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

/*
 * This function gets called when the XOL instruction either gets trapped or
 * the thread has a fatal signal.  Reset the instruction pointer to its
 * probed address for the potential restart or for post mortem analysis.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *aup,
	struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	instruction_pointer_set(regs, utask->vaddr);
}

unsigned long arch_uretprobe_hijack_return_addr(
	unsigned long trampoline_vaddr, struct pt_regs *regs)
{
	unsigned long ra;

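	/* On MIPS, register $31 ($ra) holds the caller's return address. */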
	ra = regs->regs[31];

	/* Replace the return address with the trampoline address */
	regs->regs[31] = trampoline_vaddr;

	return ra;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 *
 * This version overrides the weak version in kernel/events/uprobes.c.
 * It is required to handle MIPS16 and microMIPS.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
	unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
				  void *src, unsigned long len)
{
	unsigned long kaddr, kstart;

	/* Initialize the slot */
	kaddr = (unsigned long)kmap_atomic(page);
	kstart = kaddr + (vaddr & ~PAGE_MASK);
	memcpy((void *)kstart, src, len);
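	/* Flush the icache so the freshly written instructions are fetched. */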
	flush_icache_range(kstart, kstart + len);
	kunmap_atomic((void *)kaddr);
}

/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 *
 * This overrides the weak version in kernel/events/uprobes.c.
 */
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

/*
 * See if the instruction can be emulated.
 * Returns true if instruction was emulated, false otherwise.
 *
 * For now we never emulate, so this function just returns false.
 */
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	return false;
}