tracing: Use the perf recursion protection from trace event
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index fdfae88..aba8227 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1774,14 +1774,27 @@ static int perf_event_read_size(struct perf_event *event)
        return size;
 }
 
-u64 perf_event_read_value(struct perf_event *event)
+u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
 {
        struct perf_event *child;
        u64 total = 0;
 
+       *enabled = 0;
+       *running = 0;
+
+       mutex_lock(&event->child_mutex);
        total += perf_event_read(event);
-       list_for_each_entry(child, &event->child_list, child_list)
+       *enabled += event->total_time_enabled +
+                       atomic64_read(&event->child_total_time_enabled);
+       *running += event->total_time_running +
+                       atomic64_read(&event->child_total_time_running);
+
+       list_for_each_entry(child, &event->child_list, child_list) {
                total += perf_event_read(child);
+               *enabled += child->total_time_enabled;
+               *running += child->total_time_running;
+       }
+       mutex_unlock(&event->child_mutex);
 
        return total;
 }
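
The enabled/running pair that perf_event_read_value() now returns under child_mutex is exactly what userspace needs to scale counts for time-multiplexed events. A minimal userspace-side sketch of that scaling (hypothetical helper, not part of this patch):

	/* Hypothetical userspace helper: estimate the true count of a
	 * multiplexed event from the kernel-reported times. */
	static inline unsigned long long
	scale_count(unsigned long long count,
		    unsigned long long enabled,
		    unsigned long long running)
	{
		if (!running)
			return count;	/* event never ran; nothing to scale */
		return (unsigned long long)((double)count * enabled / running);
	}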
@@ -1791,21 +1804,19 @@ static int perf_event_read_group(struct perf_event *event,
                                   u64 read_format, char __user *buf)
 {
        struct perf_event *leader = event->group_leader, *sub;
-       int n = 0, size = 0, ret = 0;
+       int n = 0, size = 0, ret = -EFAULT;
+       struct perf_event_context *ctx = leader->ctx;
        u64 values[5];
-       u64 count;
+       u64 count, enabled, running;
 
-       count = perf_event_read_value(leader);
+       mutex_lock(&ctx->mutex);
+       count = perf_event_read_value(leader, &enabled, &running);
 
        values[n++] = 1 + leader->nr_siblings;
-       if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
-               values[n++] = leader->total_time_enabled +
-                       atomic64_read(&leader->child_total_time_enabled);
-       }
-       if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
-               values[n++] = leader->total_time_running +
-                       atomic64_read(&leader->child_total_time_running);
-       }
+       if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+               values[n++] = enabled;
+       if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+               values[n++] = running;
        values[n++] = count;
        if (read_format & PERF_FORMAT_ID)
                values[n++] = primary_event_id(leader);
@@ -1813,24 +1824,28 @@ static int perf_event_read_group(struct perf_event *event,
        size = n * sizeof(u64);
 
        if (copy_to_user(buf, values, size))
-               return -EFAULT;
+               goto unlock;
 
-       ret += size;
+       ret = size;
 
        list_for_each_entry(sub, &leader->sibling_list, group_entry) {
                n = 0;
 
-               values[n++] = perf_event_read_value(sub);
+               values[n++] = perf_event_read_value(sub, &enabled, &running);
                if (read_format & PERF_FORMAT_ID)
                        values[n++] = primary_event_id(sub);
 
                size = n * sizeof(u64);
 
-               if (copy_to_user(buf + size, values, size))
-                       return -EFAULT;
+               if (copy_to_user(buf + size, values, size)) {
+                       ret = -EFAULT;
+                       goto unlock;
+               }
 
                ret += size;
        }
+unlock:
+       mutex_unlock(&ctx->mutex);
 
        return ret;
 }
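
For reference, the record layout perf_event_read_group() emits, reconstructed from the values[] ordering above with all read_format bits assumed set (a field is simply omitted when its bit is clear). Note also that each sibling record is copied to buf + size after size has been recomputed for the sibling, so the destination offset does not accumulate past the leader block; the offset handling in this path was reworked in later kernels.

	/* Sketch of the PERF_FORMAT_GROUP read layout, all format bits set: */
	struct group_read {
		unsigned long long nr;			/* 1 + leader->nr_siblings */
		unsigned long long time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
		unsigned long long time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
		struct {
			unsigned long long value;
			unsigned long long id;		/* PERF_FORMAT_ID */
		} cnt[];				/* leader first, then siblings */
	};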
@@ -1838,18 +1853,15 @@ static int perf_event_read_group(struct perf_event *event,
 static int perf_event_read_one(struct perf_event *event,
                                 u64 read_format, char __user *buf)
 {
+       u64 enabled, running;
        u64 values[4];
        int n = 0;
 
-       values[n++] = perf_event_read_value(event);
-       if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
-               values[n++] = event->total_time_enabled +
-                       atomic64_read(&event->child_total_time_enabled);
-       }
-       if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
-               values[n++] = event->total_time_running +
-                       atomic64_read(&event->child_total_time_running);
-       }
+       values[n++] = perf_event_read_value(event, &enabled, &running);
+       if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+               values[n++] = enabled;
+       if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+               values[n++] = running;
        if (read_format & PERF_FORMAT_ID)
                values[n++] = primary_event_id(event);
 
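The single-event path mirrors this; the layout it emits, again with all format bits assumed set:

	/* Sketch of the non-group read layout from perf_event_read_one(): */
	struct one_read {
		unsigned long long value;
		unsigned long long time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
		unsigned long long time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
		unsigned long long id;			/* PERF_FORMAT_ID */
	};
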
@@ -1880,12 +1892,10 @@ perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
                return -ENOSPC;
 
        WARN_ON_ONCE(event->ctx->parent_ctx);
-       mutex_lock(&event->child_mutex);
        if (read_format & PERF_FORMAT_GROUP)
                ret = perf_event_read_group(event, read_format, buf);
        else
                ret = perf_event_read_one(event, read_format, buf);
-       mutex_unlock(&event->child_mutex);
 
        return ret;
 }
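
With child_mutex gone from perf_read_hw(), the locking is pushed down into the helpers. The resulting nesting, summarized from the hunks above in a kernel-comment sketch:

	/*
	 * Lock nesting for a group read after this change:
	 *
	 *   perf_read_hw()                          - takes no locks itself
	 *     perf_event_read_group()
	 *       mutex_lock(&ctx->mutex)             - pins the sibling list
	 *         perf_event_read_value()
	 *           mutex_lock(&event->child_mutex) - pins the child list
	 */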
@@ -2330,7 +2340,7 @@ perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
        }
 
        if (!data->watermark)
-               data->watermark = max_t(long, PAGE_SIZE, max_size / 2);
+               data->watermark = max_size / 2;
 
 
        rcu_assign_pointer(event->data, data);
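
Dropping the PAGE_SIZE floor means small ring buffers now get a proportionally small default wakeup watermark (half the buffer). Userspace can still pick its own threshold; a minimal sketch, assuming the attr is later passed to perf_event_open():

	#include <string.h>
	#include <linux/perf_event.h>

	/* Hypothetical setup overriding the default watermark
	 * (max_size / 2 per the hunk above). */
	static void init_attr_with_watermark(struct perf_event_attr *attr)
	{
		memset(attr, 0, sizeof(*attr));
		attr->size = sizeof(*attr);
		attr->watermark = 1;		/* wakeup_watermark is in bytes */
		attr->wakeup_watermark = 8192;	/* wake poll() after 8K of data */
	}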
@@ -3870,34 +3880,42 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx,
        }
 }
 
-static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx)
+/*
+ * Must be called with preemption disabled
+ */
+int perf_swevent_get_recursion_context(int **recursion)
 {
+       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+
        if (in_nmi())
-               return &cpuctx->recursion[3];
+               *recursion = &cpuctx->recursion[3];
+       else if (in_irq())
+               *recursion = &cpuctx->recursion[2];
+       else if (in_softirq())
+               *recursion = &cpuctx->recursion[1];
+       else
+               *recursion = &cpuctx->recursion[0];
 
-       if (in_irq())
-               return &cpuctx->recursion[2];
+       if (**recursion)
+               return -1;
 
-       if (in_softirq())
-               return &cpuctx->recursion[1];
+       (**recursion)++;
 
-       return &cpuctx->recursion[0];
+       return 0;
 }
 
-static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
-                                   u64 nr, int nmi,
-                                   struct perf_sample_data *data,
-                                   struct pt_regs *regs)
+void perf_swevent_put_recursion_context(int *recursion)
 {
-       struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
-       int *recursion = perf_swevent_recursion_context(cpuctx);
-       struct perf_event_context *ctx;
-
-       if (*recursion)
-               goto out;
+       (*recursion)--;
+}
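
The get/put pair above is the API this commit exports so that callers outside this file (notably the trace event path) can reuse the same protection. The intended calling pattern is exactly what do_perf_sw_event() does below; a kernel-side sketch for a hypothetical external user:

	/* Sketch: guard an event emission with the exported pair.
	 * Caller is hypothetical; preemption must already be disabled. */
	static void example_emit_guarded(void)
	{
		int *recursion;

		preempt_disable();
		if (perf_swevent_get_recursion_context(&recursion))
			goto out;	/* already emitting at this level: drop */

		/* ... emit the event; re-entry from softirq/irq/NMI lands
		 * in a different recursion[] slot and is still allowed ... */

		perf_swevent_put_recursion_context(recursion);
	out:
		preempt_enable();
	}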
 
-       (*recursion)++;
-       barrier();
+static void __do_perf_sw_event(enum perf_type_id type, u32 event_id,
+                              u64 nr, int nmi,
+                              struct perf_sample_data *data,
+                              struct pt_regs *regs)
+{
+       struct perf_event_context *ctx;
+       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 
        rcu_read_lock();
        perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
@@ -3910,12 +3928,25 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
        if (ctx)
                perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs);
        rcu_read_unlock();
+}
 
-       barrier();
-       (*recursion)--;
+static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
+                                   u64 nr, int nmi,
+                                   struct perf_sample_data *data,
+                                   struct pt_regs *regs)
+{
+       int *recursion;
+
+       preempt_disable();
+
+       if (perf_swevent_get_recursion_context(&recursion))
+               goto out;
+
+       __do_perf_sw_event(type, event_id, nr, nmi, data, regs);
 
+       perf_swevent_put_recursion_context(recursion);
 out:
-       put_cpu_var(perf_cpu_context);
+       preempt_enable();
 }
 
 void __perf_sw_event(u32 event_id, u64 nr, int nmi,
@@ -4149,7 +4180,8 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
        if (!regs)
                regs = task_pt_regs(current);
 
-       do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
+       /* Trace events already protected against recursion */
+       __do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
                                &data, regs);
 }
 EXPORT_SYMBOL_GPL(perf_tp_event);
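
Since tracepoint callbacks already run with preemption disabled and can take the recursion context themselves before assembling the sample, perf_tp_event() can call the unprotected __do_perf_sw_event() directly. A hedged sketch of the trace-event side this enables (hypothetical function name; the real glue lives in the trace event profiling code, not in this file):

	static void example_trace_profile_handler(void)
	{
		int *recursion;

		/* tracepoint handlers run with preemption disabled */
		if (perf_swevent_get_recursion_context(&recursion))
			return;		/* recursing: drop this event */

		/* ... build the raw sample and call perf_tp_event(),
		 * which now skips the swevent recursion check ... */

		perf_swevent_put_recursion_context(recursion);
	}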