perf: Fix unexported generic perf_arch_fetch_caller_regs
kernel/trace/trace_event_perf.c
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs);

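/*
 * perf_arch_fetch_caller_regs() has a generic __weak implementation
 * that, unlike the arch-specific overrides, is not exported where it
 * is defined. Export it from here, a caller built only when both
 * PERF_EVENTS and EVENT_TRACING are enabled, so that modular trace
 * events can link against it.
 */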
EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);

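/*
 * Two scratch buffers, each PERF_MAX_TRACE_SIZE bytes per CPU: one for
 * normal/irq context and a separate one for NMI context, since an NMI
 * can fire while a trace record is being built in the first buffer.
 */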
static char *perf_trace_buf;
static char *perf_trace_buf_nmi;

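/*
 * perf_trace_t is simply a PERF_MAX_TRACE_SIZE byte array; using it as
 * the type passed to alloc_percpu() reserves one full trace buffer on
 * each CPU.
 */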
typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int      total_ref_count;

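/*
 * Enable an event for perf: the per-cpu buffers are allocated lazily,
 * on the first event enabled system-wide, and torn down again when
 * total_ref_count drops back to zero in perf_trace_event_disable().
 */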
static int perf_trace_event_enable(struct ftrace_event_call *event)
{
        char *buf;
        int ret = -ENOMEM;

        if (event->perf_refcount++ > 0)
                return 0;

        if (!total_ref_count) {
                buf = (char *)alloc_percpu(perf_trace_t);
                if (!buf)
                        goto fail_buf;

                rcu_assign_pointer(perf_trace_buf, buf);

                buf = (char *)alloc_percpu(perf_trace_t);
                if (!buf)
                        goto fail_buf_nmi;

                rcu_assign_pointer(perf_trace_buf_nmi, buf);
        }

        ret = event->perf_event_enable(event);
        if (!ret) {
                total_ref_count++;
                return 0;
        }

fail_buf_nmi:
        if (!total_ref_count) {
                free_percpu(perf_trace_buf_nmi);
                free_percpu(perf_trace_buf);
                perf_trace_buf_nmi = NULL;
                perf_trace_buf = NULL;
        }
fail_buf:
        event->perf_refcount--;

        return ret;
}

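/*
 * Look up the event by id under event_mutex and enable it for perf.
 * try_module_get() pins the module owning the event so it cannot be
 * unloaded while the event is in use.
 */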
int perf_trace_enable(int event_id)
{
        struct ftrace_event_call *event;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id && event->perf_event_enable &&
                    try_module_get(event->mod)) {
                        ret = perf_trace_event_enable(event);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        return ret;
}

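/*
 * Disable counterpart: when the last reference in the whole system is
 * dropped, unpublish the buffer pointers, wait for all in-flight users
 * to finish, and only then free the buffers.
 */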
static void perf_trace_event_disable(struct ftrace_event_call *event)
{
        char *buf, *nmi_buf;

        if (--event->perf_refcount > 0)
                return;

        event->perf_event_disable(event);

        if (!--total_ref_count) {
                buf = perf_trace_buf;
                rcu_assign_pointer(perf_trace_buf, NULL);

                nmi_buf = perf_trace_buf_nmi;
                rcu_assign_pointer(perf_trace_buf_nmi, NULL);

                /*
                 * Ensure every event still being profiled has finished
                 * before releasing the buffers
                 */
                synchronize_sched();

                free_percpu(buf);
                free_percpu(nmi_buf);
        }
}

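/*
 * Mirror of perf_trace_enable(); the module_put() drops the reference
 * taken by try_module_get() on the enable side.
 */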
void perf_trace_disable(int event_id)
{
        struct ftrace_event_call *event;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id) {
                        perf_trace_event_disable(event);
                        module_put(event->mod);
                        break;
                }
        }
        mutex_unlock(&event_mutex);
}

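/*
 * Reserve space for one trace record in the per-cpu buffer that matches
 * the current context (NMI or not) and initialize its common fields.
 * On success the caller returns with interrupts disabled and a swevent
 * recursion context held; it must call perf_swevent_put_recursion_context()
 * and local_irq_restore() once the record has been submitted, exactly as
 * the error path below does.
 */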
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
                                       int *rctxp, unsigned long *irq_flags)
{
        struct trace_entry *entry;
        char *trace_buf, *raw_data;
        int pc, cpu;

        pc = preempt_count();

        /* Protect the per cpu buffer, begin the rcu read side */
        local_irq_save(*irq_flags);

        *rctxp = perf_swevent_get_recursion_context();
        if (*rctxp < 0)
                goto err_recursion;

        cpu = smp_processor_id();

        if (in_nmi())
                trace_buf = rcu_dereference(perf_trace_buf_nmi);
        else
                trace_buf = rcu_dereference(perf_trace_buf);

        if (!trace_buf)
                goto err;

        raw_data = per_cpu_ptr(trace_buf, cpu);

        /* zero the alignment padding so no stack data leaks to userspace */
        *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

        entry = (struct trace_entry *)raw_data;
        tracing_generic_entry_update(entry, *irq_flags, pc);
        entry->type = type;

        return raw_data;
err:
        perf_swevent_put_recursion_context(*rctxp);
err_recursion:
        local_irq_restore(*irq_flags);
        return NULL;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
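
/*
 * Usage sketch (illustrative only, not part of this file): a perf probe
 * generated from the TRACE_EVENT() machinery uses perf_trace_buf_prepare()
 * roughly as follows. The record layout, event id, and the perf_tp_event()
 * hand-off are hypothetical placeholders here.
 */
#if 0
static void perf_trace_example(int value)
{
        struct {
                struct trace_entry      ent;
                int                     value;
        } *entry;
        unsigned long irq_flags;
        int rctx;
        /* round up so the trailing u64 zeroing in prepare() stays in bounds */
        int size = ALIGN(sizeof(*entry), sizeof(u64));

        entry = perf_trace_buf_prepare(size, /* event id */ 1,
                                       &rctx, &irq_flags);
        if (!entry)
                return;

        entry->value = value;

        /* hand the record to perf here (e.g. via perf_tp_event()) ... */

        /* ... then undo what prepare() set up, as in its error path */
        perf_swevent_put_recursion_context(rctx);
        local_irq_restore(irq_flags);
}
#endif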