linux-2.6.git: kernel/trace/trace_event_profile.c
/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 *
 */

#include <linux/module.h>
#include "trace.h"

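/*
 * Per-cpu scratch buffers in which a trace entry is assembled before it
 * is handed to perf. A separate buffer is kept for NMI context, so an
 * NMI arriving in the middle of a trace event cannot corrupt the buffer
 * it interrupted. Both pointers are RCU-managed; see the enable/disable
 * paths below.
 */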
struct perf_trace_buf *perf_trace_buf;
EXPORT_SYMBOL_GPL(perf_trace_buf);

struct perf_trace_buf *perf_trace_buf_nmi;
EXPORT_SYMBOL_GPL(perf_trace_buf_nmi);

/* Count the events in use (per event id, not per instance) */
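/* Serialized by event_mutex: every caller of the helpers below holds it. */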
static int      total_profile_count;

static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
        struct perf_trace_buf *buf;
        int ret = -ENOMEM;

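        /*
         * profile_count starts at -1 (set in the event definition), so
         * only the first enabler sees a zero return here and does the
         * one-time setup; later enablers just take another reference.
         */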
        if (atomic_inc_return(&event->profile_count))
                return 0;

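        /*
         * The first enabler allocates the buffers shared by all events.
         * rcu_assign_pointer() publishes them, so a concurrent reader
         * sees either NULL or a fully-allocated buffer.
         */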
        if (!total_profile_count) {
                buf = alloc_percpu(struct perf_trace_buf);
                if (!buf)
                        goto fail_buf;

                rcu_assign_pointer(perf_trace_buf, buf);

                buf = alloc_percpu(struct perf_trace_buf);
                if (!buf)
                        goto fail_buf_nmi;

                rcu_assign_pointer(perf_trace_buf_nmi, buf);
        }

        ret = event->profile_enable(event);
        if (!ret) {
                total_profile_count++;
                return 0;
        }

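        /*
         * Unwind: note that a profile_enable() failure also falls
         * through to here. The buffers are freed only while no other
         * event is using them, i.e. total_profile_count is still zero.
         */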
fail_buf_nmi:
        if (!total_profile_count) {
                free_percpu(perf_trace_buf_nmi);
                free_percpu(perf_trace_buf);
                perf_trace_buf_nmi = NULL;
                perf_trace_buf = NULL;
        }
fail_buf:
        atomic_dec(&event->profile_count);

        return ret;
}

int ftrace_profile_enable(int event_id)
{
        struct ftrace_event_call *event;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id && event->profile_enable &&
                    try_module_get(event->mod)) {
                        ret = ftrace_profile_enable_event(event);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        return ret;
}
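
/*
 * Note (assumption, based on the perf tracepoint glue of this era):
 * perf_event_open() on a tracepoint event reaches this via
 * tp_perf_event_init() in kernel/perf_event.c, with event_id taken
 * from perf_event_attr.config.
 */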
static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
        struct perf_trace_buf *buf, *nmi_buf;

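        /*
         * Mirror of the inc in the enable path: only the last user,
         * taking the count back to -1, goes on to disable the event.
         */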
        if (!atomic_add_negative(-1, &event->profile_count))
                return;

        event->profile_disable(event);

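        /*
         * The last profiled event tears down the shared buffers:
         * unpublish both pointers, wait for current readers, then free.
         */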
        if (!--total_profile_count) {
                buf = perf_trace_buf;
                rcu_assign_pointer(perf_trace_buf, NULL);

                nmi_buf = perf_trace_buf_nmi;
                rcu_assign_pointer(perf_trace_buf_nmi, NULL);

                /*
                 * Ensure all ongoing profiling events have finished
                 * before releasing the buffers
                 */
                synchronize_sched();

                free_percpu(buf);
                free_percpu(nmi_buf);
        }
}
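
/*
 * Example (editor's sketch, not part of the original file): how a
 * tracing probe is expected to consume these buffers, modeled on the
 * generated probes in include/trace/ftrace.h of this era. The buf and
 * recursion fields are assumed to match struct perf_trace_buf as
 * defined in trace.h; the payload copy and the call into perf are
 * elided.
 */
static void __maybe_unused perf_trace_buf_usage_sketch(void)
{
        struct perf_trace_buf *trace_buf;
        unsigned long irq_flags;
        char *raw_data;

        /* Readers run with irqs off; synchronize_sched() above waits for them. */
        local_irq_save(irq_flags);

        /* Pick the buffer that matches the current context. */
        if (in_nmi())
                trace_buf = rcu_dereference(perf_trace_buf_nmi);
        else
                trace_buf = rcu_dereference(perf_trace_buf);

        /* NULL means profiling is disabled or being torn down: drop the event. */
        if (!trace_buf)
                goto out;

        trace_buf = per_cpu_ptr(trace_buf, smp_processor_id());

        /* Per-cpu recursion guard: bail out if we re-entered on this cpu. */
        if (trace_buf->recursion++)
                goto out_recursion;

        raw_data = trace_buf->buf;
        /* ... assemble the event record in raw_data, hand it to perf ... */
        (void)raw_data;

out_recursion:
        trace_buf->recursion--;
out:
        local_irq_restore(irq_flags);
}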

void ftrace_profile_disable(int event_id)
{
        struct ftrace_event_call *event;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id) {
                        ftrace_profile_disable_event(event);
                        module_put(event->mod);
                        break;
                }
        }
        mutex_unlock(&event_mutex);
}