MIPS, Tracing: Clean up prepare_ftrace_return()
arch/mips/kernel/ftrace.c
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks go to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/uasm.h>

#include <asm-generic/sections.h>

#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000          /* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff    /* op_code|addr : 31..26|25..0 */
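/*
 * A jal instruction carries a 26-bit word index; the CPU forms the
 * target as (pc & 0xf0000000) | (index << 2), so caller and callee
 * must sit in the same 256MB region. With an illustrative address,
 * INSN_JAL(0x80100000) == 0x0c000000 | ((0x80100000 >> 2) & 0x03ffffff)
 * == 0x0c040000.
 */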

#define INSN_B_1F_4 0x10000004  /* b 1f; offset = 4 */
#define INSN_B_1F_5 0x10000005  /* b 1f; offset = 5 */
#define INSN_NOP 0x00000000     /* nop */
#define INSN_JAL(addr)  \
        ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))

static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
        u32 *buf;
        unsigned int v1;

        /* lui v1, hi16_mcount */
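        /* $v1 is general-purpose register 3 in the MIPS ABI */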
        v1 = 3;
        buf = (u32 *)&insn_lui_v1_hi16_mcount;
        UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);

        /* jal (ftrace_caller + 8), jump over the first two instructions */
        buf = (u32 *)&insn_jal_ftrace_caller;
        uasm_i_jal(&buf, (FTRACE_ADDR + 8));

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* j ftrace_graph_caller */
        buf = (u32 *)&insn_j_ftrace_graph_caller;
        uasm_i_j(&buf, (unsigned long)ftrace_graph_caller);
#endif
}

/*
 * Check if the address is in kernel space.
 *
 * A clone of core_kernel_text() from kernel/extable.c, but without the
 * init_kernel_text() check, because ftrace does not trace functions in
 * init sections.
 */
static inline int in_kernel_space(unsigned long ip)
{
        if (ip >= (unsigned long)_stext &&
            ip <= (unsigned long)_etext)
                return 1;
        return 0;
}

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
        int faulted;

        /* *(unsigned int *)ip = new_code; */
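        /*
         * safe_store_code() does the store with an exception-table fixup,
         * so a bad text address sets "faulted" instead of faulting the
         * kernel.
         */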
        safe_store_code(new_code, ip, faulted);

        if (unlikely(faulted))
                return -EFAULT;

        flush_icache_range(ip, ip + 8);

        return 0;
}

int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int new;
        unsigned long ip = rec->ip;

        /*
         * If ip is in kernel space, no long call is needed; otherwise a
         * long call is required.
         */
        if (in_kernel_space(ip)) {
                /*
                 * move at, ra
                 * jal _mcount          --> nop
                 */
                new = INSN_NOP;
        } else {
#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
                /*
                 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
                 * addiu v1, v1, low_16bit_of_mcount
                 * move at, ra
                 * move $12, ra_address
                 * jalr v1
                 *  sub sp, sp, 8
                 *                                  1: offset = 5 instructions
                 */
                new = INSN_B_1F_5;
#else
                /*
                 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
                 * addiu v1, v1, low_16bit_of_mcount
                 * move at, ra
                 * jalr v1
                 *  nop | move $12, ra_address | sub sp, sp, 8
                 *                                  1: offset = 4 instructions
                 */
                new = INSN_B_1F_4;
#endif
        }
        return ftrace_modify_code(ip, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int new;
        unsigned long ip = rec->ip;

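        /*
         * A call site in kernel space is close enough for a single jal to
         * reach ftrace_caller; for a module we instead restore the first
         * instruction of the long call sequence,
         * "lui v1, hi_16bit_of_mcount".
         */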
        new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
                insn_lui_v1_hi16_mcount;

        return ftrace_modify_code(ip, new);
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned int new;

        new = INSN_JAL((unsigned long)func);

        return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void *data)
{
        /* Encode the instructions when booting */
        ftrace_dyn_arch_init_insns();

        /* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
        ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

        /* The return code is returned via data */
        *(unsigned long *)data = 0;

        return 0;
}
#endif  /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP    ((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
        return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
                        insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif  /* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP (0xafbf << 16)  /* s{d,w} ra, offset(sp) */
#define S_R_SP  (0xafb0 << 16)  /* s{d,w} R, offset(sp) */
#define OFFSET_MASK     0xffff  /* stack offset range: 0 ~ PT_SIZE */
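/*
 * 0xafbf0000 decodes as "sw ra, 0(sp)": opcode 0x2b (sw) << 26 |
 * base $29 (sp) << 21 | rt $31 (ra) << 16; used as a mask below, it
 * also matches the 64-bit sd form (opcode 0x3f). S_R_SP keeps only
 * the top bit of the rt field, so it matches a store of any of
 * $16..$31 to the stack.
 */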

unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra,
                unsigned long old_parent_ra, unsigned long parent_ra_addr,
                unsigned long fp)
{
        unsigned long sp, ip, tmp;
        unsigned int code;
        int faulted;

        /*
         * For a module, move ip back from the return address past the
         * instruction "lui v1, hi_16bit_of_mcount" (offset is 24); for the
         * kernel, move back past the instruction "move at, ra" (offset is
         * 16).
         */
        ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);

        /*
         * Search backwards through the text until we find either a
         * non-store instruction or the "s{d,w} ra, offset(sp)" instruction.
         */
        do {
                /* get the code at "ip": code = *(unsigned int *)ip; */
                safe_load_code(code, ip, faulted);

                if (unlikely(faulted))
                        return 0;
                /*
                 * If we hit a non-store instruction before finding where
                 * the ra is stored, then this is a leaf function and it
                 * does not store the ra on the stack.
                 */
                if ((code & S_R_SP) != S_R_SP)
                        return parent_ra_addr;

                /* Move backwards to the previous instruction */
                ip -= 4;
        } while ((code & S_RA_SP) != S_RA_SP);

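        /*
         * The low 16 bits of the matched store are the offset of the ra
         * save slot from the frame pointer that _mcount passed in.
         */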
        sp = fp + (code & OFFSET_MASK);

        /* tmp = *(unsigned long *)sp; */
        safe_load_stack(tmp, sp, faulted);
        if (unlikely(faulted))
                return 0;

        if (tmp == old_parent_ra)
                return sp;
        return 0;
}

#endif  /* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it onto the stack of return
 * addresses in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
                           unsigned long fp)
{
        unsigned long old_parent_ra;
        struct ftrace_graph_ent trace;
        unsigned long return_hooker = (unsigned long)&return_to_handler;
        int faulted;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        /*
         * "parent_ra_addr" is the stack address where the return address
         * of the caller of _mcount is saved.
         *
         * If gcc < 4.5, a leaf function does not save the return address
         * on the stack, so we "emulate" one in _mcount's stack space and
         * hijack it directly. A non-leaf function saves the return address
         * in its own stack space, so we cannot hijack it directly; we have
         * to find the real stack address, which is what
         * ftrace_get_parent_ra_addr() does.
         *
         * If gcc >= 4.5, with the new -mmcount-ra-address option, for a
         * non-leaf function the location of the return address is passed
         * to us in $12, and for a leaf function a zero is put in $12
         * instead. This is done in ftrace_graph_caller of mcount.S.
         */

        /* old_parent_ra = *parent_ra_addr; */
        safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
        if (unlikely(faulted))
                goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
        parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
                        old_parent_ra, (unsigned long)parent_ra_addr, fp);
        /*
         * If we fail to get the stack address of the non-leaf function's
         * ra, stop the function graph tracer and return.
         */
        if (parent_ra_addr == 0)
                goto out;
#endif
        /* *parent_ra_addr = return_hooker; */
        safe_store_stack(return_hooker, parent_ra_addr, faulted);
        if (unlikely(faulted))
                goto out;

        if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
            == -EBUSY) {
                *parent_ra_addr = old_parent_ra;
                return;
        }

        trace.func = self_ra;

        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace)) {
                current->curr_ret_stack--;
                *parent_ra_addr = old_parent_ra;
        }
        return;
out:
        ftrace_graph_stop();
        WARN_ON(1);
}
#endif  /* CONFIG_FUNCTION_GRAPH_TRACER */