[PATCH] Kprobes/IA64: support kprobe on branch/call instructions
linux-2.6.git: arch/ia64/kernel/kprobes.c
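For context on what this change enables: with branch and call slots now accepted, a generic kprobes client can place a probe on a br/br.call instruction just like on any other slot. The sketch below is illustrative only and is not part of the patch; probe_address, my_pre_handler and the example_init/example_exit names are hypothetical, and the address passed in would have to point at a valid bundle slot (see arch_prepare_kprobe() in the file below).

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* hypothetical parameter: address of an IA-64 bundle slot holding a
 * branch or call instruction, e.g. taken from /proc/kallsyms */
static unsigned long probe_address;
module_param(probe_address, ulong, 0);

static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        printk(KERN_INFO "kprobe hit at %p\n", p->addr);
        return 0;       /* let kprobes single step the original instruction */
}

static struct kprobe kp = {
        .pre_handler = my_pre_handler,
};

static int __init example_init(void)
{
        kp.addr = (kprobe_opcode_t *) probe_address;
        return register_kprobe(&kp);
}

static void __exit example_exit(void)
{
        unregister_kprobe(&kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");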
/*
 *  Kernel Probes (KProbes)
 *  arch/ia64/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Intel Corporation, 2005
 *
 * 2005-Apr     Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
 *              <anil.s.keshavamurthy@intel.com> adapted from i386
 */

#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/moduleloader.h>

#include <asm/pgtable.h>
#include <asm/kdebug.h>

extern void jprobe_inst_return(void);

/* kprobe_status settings */
#define KPROBE_HIT_ACTIVE       0x00000001
#define KPROBE_HIT_SS           0x00000002

static struct kprobe *current_kprobe;
static unsigned long kprobe_status;
static struct pt_regs jprobe_saved_regs;

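/*
 * An IA-64 bundle is 128 bits: a 5-bit template plus three 41-bit
 * instruction slots.  The table below maps each template value to the
 * execution-unit type of its three slots (M=memory, I=integer,
 * F=floating point, B=branch, L/X=long extended, u=unused/reserved).
 */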
enum instruction_type {A, I, M, F, B, L, X, u};
static enum instruction_type bundle_encoding[32][3] = {
  { M, I, I },                          /* 00 */
  { M, I, I },                          /* 01 */
  { M, I, I },                          /* 02 */
  { M, I, I },                          /* 03 */
  { M, L, X },                          /* 04 */
  { M, L, X },                          /* 05 */
  { u, u, u },                          /* 06 */
  { u, u, u },                          /* 07 */
  { M, M, I },                          /* 08 */
  { M, M, I },                          /* 09 */
  { M, M, I },                          /* 0A */
  { M, M, I },                          /* 0B */
  { M, F, I },                          /* 0C */
  { M, F, I },                          /* 0D */
  { M, M, F },                          /* 0E */
  { M, M, F },                          /* 0F */
  { M, I, B },                          /* 10 */
  { M, I, B },                          /* 11 */
  { M, B, B },                          /* 12 */
  { M, B, B },                          /* 13 */
  { u, u, u },                          /* 14 */
  { u, u, u },                          /* 15 */
  { B, B, B },                          /* 16 */
  { B, B, B },                          /* 17 */
  { M, M, B },                          /* 18 */
  { M, M, B },                          /* 19 */
  { u, u, u },                          /* 1A */
  { u, u, u },                          /* 1B */
  { M, F, B },                          /* 1C */
  { M, F, B },                          /* 1D */
  { u, u, u },                          /* 1E */
  { u, u, u },                          /* 1F */
};

int arch_prepare_kprobe(struct kprobe *p)
{
        unsigned long addr = (unsigned long) p->addr;
        unsigned long bundle_addr = addr & ~0xFULL;
        unsigned long slot = addr & 0xf;
        bundle_t bundle;
        unsigned long template;

        /*
         * TODO: Verify that a probe is not being inserted
         *       in sensitive regions of code
         * TODO: Verify that the memory holding the probe is rwx
         * TODO: Verify this is a kernel address
         */
        memcpy(&bundle, (unsigned long *)bundle_addr, sizeof(bundle_t));
        template = bundle.quad0.template;
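        /*
         * A bundle has only slots 0-2, and for an MLX template the L+X
         * instruction is addressed through slot 1, so slot 2 can never be
         * a valid probe address there.
         */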
        if (((bundle_encoding[template][1] == L) && slot > 1) || (slot > 2)) {
                printk(KERN_WARNING "Attempting to insert unaligned kprobe at 0x%lx\n", addr);
                return -EINVAL;
        }
        return 0;
}

void arch_copy_kprobe(struct kprobe *p)
{
        unsigned long addr = (unsigned long)p->addr;
        unsigned long bundle_addr = addr & ~0xFULL;

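        /*
         * Keep two copies of the 16-byte bundle: ainsn.insn is the copy
         * that gets single stepped out of line, while p->opcode preserves
         * the original bundle so arch_disarm_kprobe() can restore it.
         */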
        memcpy(&p->ainsn.insn.bundle, (unsigned long *)bundle_addr,
                                sizeof(bundle_t));
        memcpy(&p->opcode.bundle, &p->ainsn.insn.bundle, sizeof(bundle_t));
}

void arch_arm_kprobe(struct kprobe *p)
{
        unsigned long addr = (unsigned long)p->addr;
        unsigned long arm_addr = addr & ~0xFULL;
        unsigned long slot = addr & 0xf;
        unsigned long template;
        unsigned long major_opcode = 0;
        unsigned long lx_type_inst = 0;
        unsigned long kprobe_inst = 0;
        bundle_t bundle;

        p->ainsn.inst_flag = 0;
        p->ainsn.target_br_reg = 0;

        memcpy(&bundle, &p->ainsn.insn.bundle, sizeof(bundle_t));
        template = bundle.quad0.template;
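        /*
         * An L+X instruction (MLX template) is addressed via slot 1, but
         * its major opcode is held in slot 2 of the bundle, so extract it
         * from there.
         */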
        if (slot == 1 && bundle_encoding[template][1] == L) {
                lx_type_inst = 1;
                slot = 2;
        }

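        /*
         * Pull the original instruction and its major opcode out of the
         * probed slot, then replace that slot with the break instruction.
         */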
        switch (slot) {
        case 0:
                major_opcode = (bundle.quad0.slot0 >> SLOT0_OPCODE_SHIFT);
                kprobe_inst = bundle.quad0.slot0;
                bundle.quad0.slot0 = BREAK_INST;
                break;
        case 1:
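                /*
                 * Slot 1 straddles the two 64-bit halves of the bundle:
                 * its low 18 bits (64 - 46) live in quad0 and the upper
                 * 23 bits in quad1, hence the split below.
                 */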
                major_opcode = (bundle.quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
                kprobe_inst = (bundle.quad0.slot1_p0 |
                                (bundle.quad1.slot1_p1 << (64-46)));
                bundle.quad0.slot1_p0 = BREAK_INST;
                bundle.quad1.slot1_p1 = (BREAK_INST >> (64-46));
                break;
        case 2:
                major_opcode = (bundle.quad1.slot2 >> SLOT2_OPCODE_SHIFT);
                kprobe_inst = bundle.quad1.slot2;
                bundle.quad1.slot2 = BREAK_INST;
                break;
        }
        /*
         * Look for IP-relative branch, IP-relative call, or IP-relative
         * predicate instructions
         */
        if (bundle_encoding[template][slot] == B) {
                switch (major_opcode) {
                        case INDIRECT_CALL_OPCODE:
                                p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
                                p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
                                break;
                        case IP_RELATIVE_PREDICT_OPCODE:
                        case IP_RELATIVE_BRANCH_OPCODE:
                                p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
                                break;
                        case IP_RELATIVE_CALL_OPCODE:
                                p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
                                p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
                                p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
                                break;
                        default:
                                /* Do nothing */
                                break;
                }
        } else if (lx_type_inst) {
                switch (major_opcode) {
                        case LONG_CALL_OPCODE:
                                p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
                                p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
                                break;
                        default:
                                /* Do nothing */
                                break;
                }
        }

        /* Flush icache for the instruction at the emulated address */
        flush_icache_range((unsigned long)&p->ainsn.insn.bundle,
                        (unsigned long)&p->ainsn.insn.bundle +
                        sizeof(bundle_t));
        /*
         * Patch the original instruction with the probe instruction
         * and flush the instruction cache
         */
        memcpy((char *) arm_addr, (char *) &bundle, sizeof(bundle_t));
        flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
}

void arch_disarm_kprobe(struct kprobe *p)
{
        unsigned long addr = (unsigned long)p->addr;
        unsigned long arm_addr = addr & ~0xFULL;

        /* p->opcode contains the original unaltered bundle */
        memcpy((char *) arm_addr, (char *) &p->opcode.bundle, sizeof(bundle_t));
        flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
}

void arch_remove_kprobe(struct kprobe *p)
{
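        /* Nothing to free: the copied bundle is embedded in struct kprobe itself */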
}

/*
 * We are resuming execution after a single step fault, so the pt_regs
 * structure reflects the register state after we executed the instruction
 * located in the kprobe (p->ainsn.insn.bundle).  We still need to adjust
 * the ip to point back to the original probe address.  While doing so,
 * handle the cases where the relative IP address and/or the branch
 * register need fixing up.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
        unsigned long bundle_addr = ((unsigned long) (&p->ainsn.insn.bundle)) & ~0xFULL;
        unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
        unsigned long template;
        int slot = ((unsigned long)p->addr & 0xf);

        template = p->opcode.bundle.quad0.template;

        if (slot == 1 && bundle_encoding[template][1] == L)
                slot = 2;

        if (p->ainsn.inst_flag) {

                if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
                        /* Fix relative IP address */
                        regs->cr_iip = (regs->cr_iip - bundle_addr) + resume_addr;
                }

                if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) {
                /*
                 * Fix the target branch register; software convention is
                 * to use either b0, b6 or b7, so we only check those
                 * registers
                 */
                        switch (p->ainsn.target_br_reg) {
                        case 0:
                                if ((regs->b0 == bundle_addr) ||
                                        (regs->b0 == bundle_addr + 0x10)) {
                                        regs->b0 = (regs->b0 - bundle_addr) +
                                                resume_addr;
                                }
                                break;
                        case 6:
                                if ((regs->b6 == bundle_addr) ||
                                        (regs->b6 == bundle_addr + 0x10)) {
                                        regs->b6 = (regs->b6 - bundle_addr) +
                                                resume_addr;
                                }
                                break;
                        case 7:
                                if ((regs->b7 == bundle_addr) ||
                                        (regs->b7 == bundle_addr + 0x10)) {
                                        regs->b7 = (regs->b7 - bundle_addr) +
                                                resume_addr;
                                }
                                break;
                        } /* end switch */
                }
                goto turn_ss_off;
        }

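        /*
         * The probed instruction was not a branch and simply fell through:
         * for a slot 2 (or L+X) instruction IIP has advanced to the bundle
         * after the out-of-line copy, otherwise it is still on the copy with
         * only the slot number bumped.  Map IIP back onto the original bundle.
         */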
        if (slot == 2) {
                if (regs->cr_iip == bundle_addr + 0x10) {
                        regs->cr_iip = resume_addr + 0x10;
                }
        } else {
                if (regs->cr_iip == bundle_addr) {
                        regs->cr_iip = resume_addr;
                }
        }

turn_ss_off:
        /* Turn off Single Step bit */
        ia64_psr(regs)->ss = 0;
}

static void prepare_ss(struct kprobe *p, struct pt_regs *regs)
{
        unsigned long bundle_addr = (unsigned long) &p->ainsn.insn.bundle;
        unsigned long slot = (unsigned long)p->addr & 0xf;

        /* Update instruction pointer (IIP) and slot number (IPSR.ri) */
        regs->cr_iip = bundle_addr & ~0xFULL;

        if (slot > 2)
                slot = 0;

        ia64_psr(regs)->ri = slot;

        /* turn on single stepping */
        ia64_psr(regs)->ss = 1;
}

static int pre_kprobes_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        int ret = 0;
        kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);

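        /* We don't want to be preempted for the entire duration of kprobe processing */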
        preempt_disable();

        /* Handle recursion cases */
        if (kprobe_running()) {
                p = get_kprobe(addr);
                if (p) {
                        if (kprobe_status == KPROBE_HIT_SS) {
                                unlock_kprobes();
                                goto no_kprobe;
                        }
                        arch_disarm_kprobe(p);
                        ret = 1;
                } else {
                        /*
                         * The jprobe-instrumented function has just completed
                         */
                        p = current_kprobe;
                        if (p->break_handler && p->break_handler(p, regs)) {
                                goto ss_probe;
                        }
                }
        }

        lock_kprobes();
        p = get_kprobe(addr);
        if (!p) {
                unlock_kprobes();
                goto no_kprobe;
        }

        kprobe_status = KPROBE_HIT_ACTIVE;
        current_kprobe = p;

        if (p->pre_handler && p->pre_handler(p, regs))
                /*
                 * Our pre-handler is specifically requesting that we just
                 * do a return.  This is handling the case where the
                 * pre-handler is really our special jprobe pre-handler.
                 */
                return 1;

ss_probe:
        prepare_ss(p, regs);
        kprobe_status = KPROBE_HIT_SS;
        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}

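/*
 * Called on the single step trap (DIE_SS) that follows execution of the
 * copied bundle: run the user's post_handler, fix up IIP and any branch
 * register, then release the kprobe lock.
 */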
static int post_kprobes_handler(struct pt_regs *regs)
{
        if (!kprobe_running())
                return 0;

        if (current_kprobe->post_handler)
                current_kprobe->post_handler(current_kprobe, regs, 0);

        resume_execution(current_kprobe, regs);

        unlock_kprobes();
        preempt_enable_no_resched();
        return 1;
}

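/*
 * Called for page faults taken while a kprobe is active: give the user's
 * fault_handler first shot, and if the fault occurred while single stepping
 * the copied bundle, back out of single step mode before letting the normal
 * fault handling run.
 */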
static int kprobes_fault_handler(struct pt_regs *regs, int trapnr)
{
        if (!kprobe_running())
                return 0;

        if (current_kprobe->fault_handler &&
            current_kprobe->fault_handler(current_kprobe, regs, trapnr))
                return 1;

        if (kprobe_status & KPROBE_HIT_SS) {
                resume_execution(current_kprobe, regs);
                unlock_kprobes();
                preempt_enable_no_resched();
        }

        return 0;
}

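/*
 * Wrapper routine registered on the die notifier chain: dispatch break
 * faults, single step traps and page faults to the kprobes handlers above.
 */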
int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
                             void *data)
{
        struct die_args *args = (struct die_args *)data;
        switch(val) {
        case DIE_BREAK:
                if (pre_kprobes_handler(args->regs))
                        return NOTIFY_STOP;
                break;
        case DIE_SS:
                if (post_kprobes_handler(args->regs))
                        return NOTIFY_STOP;
                break;
        case DIE_PAGE_FAULT:
                if (kprobes_fault_handler(args->regs, args->trapnr))
                        return NOTIFY_STOP;
        default:
                break;
        }
        return NOTIFY_DONE;
}

int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct jprobe *jp = container_of(p, struct jprobe, kp);
        unsigned long addr = ((struct fnptr *)(jp->entry))->ip;

        /* save architectural state */
        jprobe_saved_regs = *regs;

        /* after rfi, execute the jprobe instrumented function */
        regs->cr_iip = addr & ~0xFULL;
        ia64_psr(regs)->ri = addr & 0xf;
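        /*
         * jp->entry is an ia64 function descriptor (struct fnptr): load the
         * target function's global pointer into r1 before branching to it
         */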
        regs->r1 = ((struct fnptr *)(jp->entry))->gp;

        /*
         * fix the return address to our jprobe_inst_return() function
         * in the jprobes.S file
         */
        regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip;

        return 1;
}

int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        *regs = jprobe_saved_regs;
        return 1;
}