diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index b588fd8..aa8f5f4 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -13,6 +13,7 @@
  * Tracer plugins will choose a default from these clocks.
  */
 #include <linux/spinlock.h>
+#include <linux/irqflags.h>
 #include <linux/hardirq.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
@@ -28,7 +29,6 @@
  */
 u64 notrace trace_clock_local(void)
 {
-       unsigned long flags;
        u64 clock;
 
        /*
@@ -36,15 +36,16 @@ u64 notrace trace_clock_local(void)
         * lockless clock. It is not guaranteed to be coherent across
         * CPUs, nor across CPU idle events.
         */
-       raw_local_irq_save(flags);
+       preempt_disable_notrace();
        clock = sched_clock();
-       raw_local_irq_restore(flags);
+       preempt_enable_notrace();
 
        return clock;
 }
+EXPORT_SYMBOL_GPL(trace_clock_local);
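
The hunk above makes two changes: raw_local_irq_save()/restore() around sched_clock() is replaced by preempt_disable_notrace()/preempt_enable_notrace(), since sched_clock() only needs the caller to stay on one CPU, not IRQs off, which is cheaper in the tracing fast path; and the new EXPORT_SYMBOL_GPL() makes the clock callable from modules. A minimal module sketch (module and symbol names invented, not part of this patch) taking same-CPU timestamps:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/trace_clock.h>

static int __init tclk_demo_init(void)
{
	u64 t0, t1;

	get_cpu();		/* pin to one CPU: deltas are per-CPU only */
	t0 = trace_clock_local();
	/* ... code being timed ... */
	t1 = trace_clock_local();
	put_cpu();

	pr_info("tclk_demo: delta %llu ns\n", (unsigned long long)(t1 - t0));
	return 0;
}

static void __exit tclk_demo_exit(void)
{
}

module_init(tclk_demo_init);
module_exit(tclk_demo_exit);
MODULE_LICENSE("GPL");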
 
 /*
- * trace_clock(): 'inbetween' trace clock. Not completely serialized,
+ * trace_clock(): 'between' trace clock. Not completely serialized,
  * but not completely incorrect when crossing CPUs either.
  *
  * This is based on cpu_clock(), which will allow at most ~1 jiffy of
@@ -53,7 +54,7 @@ u64 notrace trace_clock_local(void)
  */
 u64 notrace trace_clock(void)
 {
-       return cpu_clock(raw_smp_processor_id());
+       return local_clock();
 }
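
local_clock() is the canonical helper for "the clock of this CPU": it is equivalent to the old cpu_clock(raw_smp_processor_id()) but reads the processor id and applies any stability fixups internally. As the comment above notes, values taken on different CPUs can disagree by up to about a jiffy, so cross-CPU arithmetic must tolerate apparent backward motion; a small hypothetical helper (not in this file) illustrating that:

#include <linux/types.h>

/* Clamp a cross-CPU timestamp delta that may come out "negative". */
static inline u64 cross_cpu_delta(u64 earlier, u64 later)
{
	return ((s64)(later - earlier) < 0) ? 0 : later - earlier;
}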
 
 
@@ -66,10 +67,14 @@ u64 notrace trace_clock(void)
  * Used by plugins that need globally coherent timestamps.
  */
 
-static u64 prev_trace_clock_time;
-
-static raw_spinlock_t trace_clock_lock ____cacheline_aligned_in_smp =
-       (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+/* keep prev_time and lock in the same cacheline. */
+static struct {
+       u64 prev_time;
+       arch_spinlock_t lock;
+} trace_clock_struct ____cacheline_aligned_in_smp =
+       {
+               .lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
+       };
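
Two things happen in this hunk. First, the old low-level lock type was renamed, raw_spinlock_t becoming arch_spinlock_t, after raw_spinlock_t was repurposed for the preemption-aware lock; the code therefore switches to arch_spin_lock()/arch_spin_unlock(), the lowest-level variant with no lockdep, preemption, or IRQ bookkeeping. Second, prev_time is folded into one ____cacheline_aligned_in_smp struct together with the lock that guards it, so a writer dirties a single cacheline instead of two. The same pattern in isolation, with hypothetical names:

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static struct {
	u64 seq;
	arch_spinlock_t lock;
} hot ____cacheline_aligned_in_smp = {
	.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
};

/* Caller must have IRQs disabled, as trace_clock_global() ensures. */
static u64 hot_next(void)
{
	u64 v;

	arch_spin_lock(&hot.lock);
	v = ++hot.seq;
	arch_spin_unlock(&hot.lock);
	return v;
}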
 
 u64 notrace trace_clock_global(void)
 {
@@ -77,10 +82,10 @@ u64 notrace trace_clock_global(void)
        int this_cpu;
        u64 now;
 
-       raw_local_irq_save(flags);
+       local_irq_save(flags);
 
        this_cpu = raw_smp_processor_id();
-       now = cpu_clock(this_cpu);
+       now = sched_clock_cpu(this_cpu);
        /*
        * If in an NMI context then don't risk lockups and return the
        * sched_clock_cpu() time:
@@ -88,22 +93,34 @@ u64 notrace trace_clock_global(void)
        if (unlikely(in_nmi()))
                goto out;
 
-       __raw_spin_lock(&trace_clock_lock);
+       arch_spin_lock(&trace_clock_struct.lock);
 
        /*
         * TODO: if this happens often then maybe we should reset
-        * my_scd->clock to prev_trace_clock_time+1, to make sure
+        * my_scd->clock to prev_time+1, to make sure
         * we start ticking with the local clock from now on?
         */
-       if ((s64)(now - prev_trace_clock_time) < 0)
-               now = prev_trace_clock_time + 1;
+       if ((s64)(now - trace_clock_struct.prev_time) < 0)
+               now = trace_clock_struct.prev_time + 1;
 
-       prev_trace_clock_time = now;
+       trace_clock_struct.prev_time = now;
 
-       __raw_spin_unlock(&trace_clock_lock);
+       arch_spin_unlock(&trace_clock_struct.lock);
 
  out:
-       raw_local_irq_restore(flags);
+       local_irq_restore(flags);
 
        return now;
 }
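
The (s64)(now - prev_time) < 0 test above is the usual wrap-safe ordering check for free-running 64-bit timestamps: the subtraction happens in unsigned arithmetic and only the sign of the result is inspected, so the comparison stays correct even across counter wraparound. As a standalone sketch (hypothetical helper name):

#include <linux/types.h>

static inline bool ts_before(u64 a, u64 b)
{
	return (s64)(a - b) < 0;	/* true if a is ordered before b */
}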
+
+static atomic64_t trace_counter;
+
+/*
+ * trace_clock_counter(): simply an atomic counter.
+ * Use the trace_counter "counter" for cases where you do not care
+ * about timings, but are interested in strict ordering.
+ */
+u64 notrace trace_clock_counter(void)
+{
+       return atomic64_add_return(1, &trace_counter);
+}
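
For context, trace_clock_local(), trace_clock(), trace_clock_global(), and the new trace_clock_counter() are the callbacks the ftrace core wires into its clock table; roughly paraphrased from the trace_clocks[] array in kernel/trace/trace.c (not part of this diff, fields simplified):

#include <linux/trace_clock.h>

static struct {
	u64 (*func)(void);
	const char *name;
} trace_clocks[] = {
	{ trace_clock_local,	"local" },	/* fast, per-CPU, may skew across CPUs */
	{ trace_clock_global,	"global" },	/* cross-CPU coherent, takes a lock */
	{ trace_clock_counter,	"counter" },	/* no time at all, strict ordering only */
};

The active clock is then chosen at run time, e.g. with echo global > /sys/kernel/debug/tracing/trace_clock.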