Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt...
[linux-2.6.git] / kernel / trace / trace.c
index c1752da..4b1122d 100644 (file)
@@ -344,7 +344,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 /* trace_flags holds trace_options default values */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
        TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
-       TRACE_ITER_GRAPH_TIME;
+       TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD;
 
 static int trace_stop_count;
 static DEFINE_SPINLOCK(tracing_start_lock);
@@ -428,6 +428,7 @@ static const char *trace_options[] = {
        "latency-format",
        "sleep-time",
        "graph-time",
+       "record-cmd",
        NULL
 };
 
@@ -659,6 +660,10 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
                return;
 
        WARN_ON_ONCE(!irqs_disabled());
+       if (!current_trace->use_max_tr) {
+               WARN_ON_ONCE(1);
+               return;
+       }
        arch_spin_lock(&ftrace_max_lock);
 
        tr->buffer = max_tr.buffer;
@@ -685,6 +690,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
                return;
 
        WARN_ON_ONCE(!irqs_disabled());
+       if (!current_trace->use_max_tr) {
+               WARN_ON_ONCE(1);
+               return;
+       }
+
        arch_spin_lock(&ftrace_max_lock);
 
        ftrace_disable_cpu();
@@ -729,7 +739,7 @@ __acquires(kernel_lock)
                return -1;
        }
 
-       if (strlen(type->name) > MAX_TRACER_SIZE) {
+       if (strlen(type->name) >= MAX_TRACER_SIZE) {
                pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
                return -1;
        }
@@ -2508,6 +2518,9 @@ static void set_tracer_flags(unsigned int mask, int enabled)
                trace_flags |= mask;
        else
                trace_flags &= ~mask;
+
+       if (mask == TRACE_ITER_RECORD_CMD)
+               trace_event_enable_cmd_record(enabled);
 }
 
 static ssize_t
@@ -2746,6 +2759,9 @@ static int tracing_resize_ring_buffer(unsigned long size)
        if (ret < 0)
                return ret;
 
+       if (!current_trace->use_max_tr)
+               goto out;
+
        ret = ring_buffer_resize(max_tr.buffer, size);
        if (ret < 0) {
                int r;
@@ -2773,11 +2789,14 @@ static int tracing_resize_ring_buffer(unsigned long size)
                return ret;
        }
 
+       max_tr.entries = size;
+ out:
        global_trace.entries = size;
 
        return ret;
 }
 
+
 /**
  * tracing_update_buffers - used by tracing facility to expand ring buffers
  *
@@ -2838,12 +2857,26 @@ static int tracing_set_tracer(const char *buf)
        trace_branch_disable();
        if (current_trace && current_trace->reset)
                current_trace->reset(tr);
-
+       if (current_trace && current_trace->use_max_tr) {
+       /*
+                * We don't free the ring buffer; instead, we resize it because
+                * the max_tr ring buffer has some state (e.g. ring->clock) and
+                * we want to preserve it.
+                */
+               ring_buffer_resize(max_tr.buffer, 1);
+               max_tr.entries = 1;
+       }
        destroy_trace_option_files(topts);
 
        current_trace = t;
 
        topts = create_trace_option_files(current_trace);
+       if (current_trace->use_max_tr) {
+               ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
+               if (ret < 0)
+                       goto out;
+               max_tr.entries = global_trace.entries;
+       }
 
        if (t->init) {
                ret = tracer_init(t, tr);
@@ -3426,7 +3459,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
        }
 
        tracing_start();
-       max_tr.entries = global_trace.entries;
        mutex_unlock(&trace_types_lock);
 
        return cnt;
@@ -4531,16 +4563,14 @@ __init static int tracer_alloc_buffers(void)
 
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-       max_tr.buffer = ring_buffer_alloc(ring_buf_size,
-                                            TRACE_BUFFER_FLAGS);
+       max_tr.buffer = ring_buffer_alloc(1, TRACE_BUFFER_FLAGS);
        if (!max_tr.buffer) {
                printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
                WARN_ON(1);
                ring_buffer_free(global_trace.buffer);
                goto out_free_cpumask;
        }
-       max_tr.entries = ring_buffer_size(max_tr.buffer);
-       WARN_ON(max_tr.entries != global_trace.entries);
+       max_tr.entries = 1;
 #endif
 
        /* Allocate the first page for all buffers */