perf_event: Fix raw event processing
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index de93a26..4655e16 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1,4 +1,5 @@
 #include "builtin.h"
+#include "perf.h"
 
 #include "util/util.h"
 #include "util/cache.h"
 #include "util/header.h"
 
 #include "util/parse-options.h"
+#include "util/trace-event.h"
 
-#include "perf.h"
 #include "util/debug.h"
+#include "util/data_map.h"
 
-#include "util/trace-event.h"
 #include <sys/types.h>
+#include <sys/prctl.h>
 
-static char                    const *input_name = "perf.data";
-static int                     input;
-static unsigned long           page_size;
-static unsigned long           mmap_window = 32;
-
-static unsigned long           total_comm = 0;
+#include <semaphore.h>
+#include <pthread.h>
+#include <math.h>
 
-static struct rb_root          threads;
-static struct thread           *last_match;
+static char                    const *input_name = "perf.data";
 
 static struct perf_header      *header;
 static u64                     sample_type;
 
+static char                    default_sort_order[] = "avg, max, switch, runtime";
+static char                    *sort_order = default_sort_order;
 
-/*
- * Scheduler benchmarks
- */
-#include <sys/resource.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <sys/prctl.h>
+static int                     profile_cpu = -1;
 
-#include <linux/unistd.h>
+#define PR_SET_NAME            15               /* Set process name */
+#define MAX_CPUS               4096
 
-#include <semaphore.h>
-#include <pthread.h>
-#include <signal.h>
-#include <values.h>
-#include <string.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <assert.h>
-#include <fcntl.h>
-#include <time.h>
-#include <math.h>
+static u64                     run_measurement_overhead;
+static u64                     sleep_measurement_overhead;
+
+#define COMM_LEN               20
+#define SYM_LEN                        129
+
+#define MAX_PID                        65536
+
+static unsigned long           nr_tasks;
+
+struct sched_atom;
+
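+/*
+ * Per-task replay descriptor: the recorded schedule as an array of
+ * atoms, plus the thread and semaphores used to re-execute it:
+ */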
+struct task_desc {
+       unsigned long           nr;
+       unsigned long           pid;
+       char                    comm[COMM_LEN];
+
+       unsigned long           nr_events;
+       unsigned long           curr_event;
+       struct sched_atom       **atoms;
+
+       pthread_t               thread;
+       sem_t                   sleep_sem;
+
+       sem_t                   ready_for_work;
+       sem_t                   work_done_sem;
+
+       u64                     cpu_usage;
+};
+
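+/*
+ * The kinds of atom a replayed task can execute; MIGRATION is
+ * recorded but is a no-op in process_sched_event():
+ */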
+enum sched_event_type {
+       SCHED_EVENT_RUN,
+       SCHED_EVENT_SLEEP,
+       SCHED_EVENT_WAKEUP,
+       SCHED_EVENT_MIGRATION,
+};
 
-#include <stdio.h>
+struct sched_atom {
+       enum sched_event_type   type;
+       u64                     timestamp;
+       u64                     duration;
+       unsigned long           nr;
+       int                     specific_wait;
+       sem_t                   *wait_sem;
+       struct task_desc        *wakee;
+};
 
-#define PR_SET_NAME    15               /* Set process name */
+static struct task_desc                *pid_to_task[MAX_PID];
 
-#define BUG_ON(x)      assert(!(x))
+static struct task_desc                **tasks;
 
-#define DEBUG          0
+static pthread_mutex_t         start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
+static u64                     start_time;
+
+static pthread_mutex_t         work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static unsigned long           nr_run_events;
+static unsigned long           nr_sleep_events;
+static unsigned long           nr_wakeup_events;
 
-typedef unsigned long long nsec_t;
+static unsigned long           nr_sleep_corrections;
+static unsigned long           nr_run_events_optimized;
 
-static nsec_t run_measurement_overhead;
-static nsec_t sleep_measurement_overhead;
+static unsigned long           targetless_wakeups;
+static unsigned long           multitarget_wakeups;
+
+static u64                     cpu_usage;
+static u64                     runavg_cpu_usage;
+static u64                     parent_cpu_usage;
+static u64                     runavg_parent_cpu_usage;
+
+static unsigned long           nr_runs;
+static u64                     sum_runtime;
+static u64                     sum_fluct;
+static u64                     run_avg;
+
+static unsigned long           replay_repeat = 10;
+static unsigned long           nr_timestamps;
+static unsigned long           nr_unordered_timestamps;
+static unsigned long           nr_state_machine_bugs;
+static unsigned long           nr_context_switch_bugs;
+static unsigned long           nr_events;
+static unsigned long           nr_lost_chunks;
+static unsigned long           nr_lost_events;
+
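+/* One letter per task state, as reported in sched_switch's prev_state: */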
+#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
+
+enum thread_state {
+       THREAD_SLEEPING = 0,
+       THREAD_WAIT_CPU,
+       THREAD_SCHED_IN,
+       THREAD_IGNORE
+};
 
-static nsec_t get_nsecs(void)
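+/*
+ * One wait->run episode of a thread, reconstructed from the
+ * sched_wakeup/sched_switch stream by the latency code below:
+ */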
+struct work_atom {
+       struct list_head        list;
+       enum thread_state       state;
+       u64                     sched_out_time;
+       u64                     wake_up_time;
+       u64                     sched_in_time;
+       u64                     runtime;
+};
+
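+/*
+ * Per-thread rbtree node: the list of its work atoms plus the
+ * aggregated latency/runtime totals used for sorting and output:
+ */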
+struct work_atoms {
+       struct list_head        work_list;
+       struct thread           *thread;
+       struct rb_node          node;
+       u64                     max_lat;
+       u64                     total_lat;
+       u64                     nb_atoms;
+       u64                     total_runtime;
+};
+
+typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
+
+static struct rb_root          atom_root, sorted_atom_root;
+
+static u64                     all_runtime;
+static u64                     all_count;
+
+
+static u64 get_nsecs(void)
 {
        struct timespec ts;
 
@@ -73,16 +163,16 @@ static nsec_t get_nsecs(void)
        return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
 }
 
-static void burn_nsecs(nsec_t nsecs)
+static void burn_nsecs(u64 nsecs)
 {
-       nsec_t T0 = get_nsecs(), T1;
+       u64 T0 = get_nsecs(), T1;
 
        do {
                T1 = get_nsecs();
        } while (T1 + run_measurement_overhead < T0 + nsecs);
 }
 
-static void sleep_nsecs(nsec_t nsecs)
+static void sleep_nsecs(u64 nsecs)
 {
        struct timespec ts;
 
@@ -94,7 +184,7 @@ static void sleep_nsecs(nsec_t nsecs)
 
 static void calibrate_run_measurement_overhead(void)
 {
-       nsec_t T0, T1, delta, min_delta = 1000000000ULL;
+       u64 T0, T1, delta, min_delta = 1000000000ULL;
        int i;
 
        for (i = 0; i < 10; i++) {
@@ -111,7 +201,7 @@ static void calibrate_run_measurement_overhead(void)
 
 static void calibrate_sleep_measurement_overhead(void)
 {
-       nsec_t T0, T1, delta, min_delta = 1000000000ULL;
+       u64 T0, T1, delta, min_delta = 1000000000ULL;
        int i;
 
        for (i = 0; i < 10; i++) {
@@ -127,69 +217,10 @@ static void calibrate_sleep_measurement_overhead(void)
        printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
 }
 
-#define COMM_LEN       20
-#define SYM_LEN                129
-
-#define MAX_PID                65536
-
-static unsigned long nr_tasks;
-
-struct sched_event;
-
-struct task_desc {
-       unsigned long           nr;
-       unsigned long           pid;
-       char                    comm[COMM_LEN];
-
-       unsigned long           nr_events;
-       unsigned long           curr_event;
-       struct sched_event      **events;
-
-       pthread_t               thread;
-       sem_t                   sleep_sem;
-
-       sem_t                   ready_for_work;
-       sem_t                   work_done_sem;
-
-       nsec_t                  cpu_usage;
-};
-
-enum sched_event_type {
-       SCHED_EVENT_RUN,
-       SCHED_EVENT_SLEEP,
-       SCHED_EVENT_WAKEUP,
-};
-
-struct sched_event {
-       enum sched_event_type   type;
-       nsec_t                  timestamp;
-       nsec_t                  duration;
-       unsigned long           nr;
-       int                     specific_wait;
-       sem_t                   *wait_sem;
-       struct task_desc        *wakee;
-};
-
-static struct task_desc                *pid_to_task[MAX_PID];
-
-static struct task_desc                **tasks;
-
-static pthread_mutex_t         start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
-static nsec_t                  start_time;
-
-static pthread_mutex_t         work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-static unsigned long           nr_run_events;
-static unsigned long           nr_sleep_events;
-static unsigned long           nr_wakeup_events;
-
-static unsigned long           nr_sleep_corrections;
-static unsigned long           nr_run_events_optimized;
-
-static struct sched_event *
-get_new_event(struct task_desc *task, nsec_t timestamp)
+static struct sched_atom *
+get_new_event(struct task_desc *task, u64 timestamp)
 {
-       struct sched_event *event = calloc(1, sizeof(*event));
+       struct sched_atom *event = zalloc(sizeof(*event));
        unsigned long idx = task->nr_events;
        size_t size;
 
@@ -197,27 +228,27 @@ get_new_event(struct task_desc *task, nsec_t timestamp)
        event->nr = idx;
 
        task->nr_events++;
-       size = sizeof(struct sched_event *) * task->nr_events;
-       task->events = realloc(task->events, size);
-       BUG_ON(!task->events);
+       size = sizeof(struct sched_atom *) * task->nr_events;
+       task->atoms = realloc(task->atoms, size);
+       BUG_ON(!task->atoms);
 
-       task->events[idx] = event;
+       task->atoms[idx] = event;
 
        return event;
 }
 
-static struct sched_event *last_event(struct task_desc *task)
+static struct sched_atom *last_event(struct task_desc *task)
 {
        if (!task->nr_events)
                return NULL;
 
-       return task->events[task->nr_events - 1];
+       return task->atoms[task->nr_events - 1];
 }
 
 static void
-add_sched_event_run(struct task_desc *task, nsec_t timestamp, u64 duration)
+add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
 {
-       struct sched_event *event, *curr_event = last_event(task);
+       struct sched_atom *event, *curr_event = last_event(task);
 
        /*
         * optimize an existing RUN event by merging this one
@@ -237,14 +268,11 @@ add_sched_event_run(struct task_desc *task, nsec_t timestamp, u64 duration)
        nr_run_events++;
 }
 
-static unsigned long targetless_wakeups;
-static unsigned long multitarget_wakeups;
-
 static void
-add_sched_event_wakeup(struct task_desc *task, nsec_t timestamp,
+add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
                       struct task_desc *wakee)
 {
-       struct sched_event *event, *wakee_event;
+       struct sched_atom *event, *wakee_event;
 
        event = get_new_event(task, timestamp);
        event->type = SCHED_EVENT_WAKEUP;
@@ -260,7 +288,7 @@ add_sched_event_wakeup(struct task_desc *task, nsec_t timestamp,
                return;
        }
 
-       wakee_event->wait_sem = calloc(1, sizeof(*wakee_event->wait_sem));
+       wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
        sem_init(wakee_event->wait_sem, 0, 0);
        wakee_event->specific_wait = 1;
        event->wait_sem = wakee_event->wait_sem;
@@ -269,10 +297,10 @@ add_sched_event_wakeup(struct task_desc *task, nsec_t timestamp,
 }
 
 static void
-add_sched_event_sleep(struct task_desc *task, nsec_t timestamp,
+add_sched_event_sleep(struct task_desc *task, u64 timestamp,
                      u64 task_state __used)
 {
-       struct sched_event *event = get_new_event(task, timestamp);
+       struct sched_atom *event = get_new_event(task, timestamp);
 
        event->type = SCHED_EVENT_SLEEP;
 
@@ -290,7 +318,7 @@ static struct task_desc *register_pid(unsigned long pid, const char *comm)
        if (task)
                return task;
 
-       task = calloc(1, sizeof(*task));
+       task = zalloc(sizeof(*task));
        task->pid = pid;
        task->nr = nr_tasks;
        strcpy(task->comm, comm);
@@ -341,38 +369,40 @@ static void add_cross_task_wakeups(void)
 }
 
 static void
-process_sched_event(struct task_desc *this_task __used, struct sched_event *event)
+process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
 {
        int ret = 0;
-       nsec_t now;
+       u64 now;
        long long delta;
 
        now = get_nsecs();
-       delta = start_time + event->timestamp - now;
+       delta = start_time + atom->timestamp - now;
 
-       switch (event->type) {
+       switch (atom->type) {
                case SCHED_EVENT_RUN:
-                       burn_nsecs(event->duration);
+                       burn_nsecs(atom->duration);
                        break;
                case SCHED_EVENT_SLEEP:
-                       if (event->wait_sem)
-                               ret = sem_wait(event->wait_sem);
+                       if (atom->wait_sem)
+                               ret = sem_wait(atom->wait_sem);
                        BUG_ON(ret);
                        break;
                case SCHED_EVENT_WAKEUP:
-                       if (event->wait_sem)
-                               ret = sem_post(event->wait_sem);
+                       if (atom->wait_sem)
+                               ret = sem_post(atom->wait_sem);
                        BUG_ON(ret);
                        break;
+               case SCHED_EVENT_MIGRATION:
+                       break;
                default:
                        BUG_ON(1);
        }
 }
 
-static nsec_t get_cpu_usage_nsec_parent(void)
+static u64 get_cpu_usage_nsec_parent(void)
 {
        struct rusage ru;
-       nsec_t sum;
+       u64 sum;
        int err;
 
        err = getrusage(RUSAGE_SELF, &ru);
@@ -384,12 +414,12 @@ static nsec_t get_cpu_usage_nsec_parent(void)
        return sum;
 }
 
-static nsec_t get_cpu_usage_nsec_self(void)
+static u64 get_cpu_usage_nsec_self(void)
 {
        char filename [] = "/proc/1234567890/sched";
        unsigned long msecs, nsecs;
        char *line = NULL;
-       nsec_t total = 0;
+       u64 total = 0;
        size_t len = 0;
        ssize_t chars;
        FILE *file;
@@ -417,7 +447,7 @@ static nsec_t get_cpu_usage_nsec_self(void)
 static void *thread_func(void *ctx)
 {
        struct task_desc *this_task = ctx;
-       nsec_t cpu_usage_0, cpu_usage_1;
+       u64 cpu_usage_0, cpu_usage_1;
        unsigned long i, ret;
        char comm2[22];
 
@@ -436,7 +466,7 @@ again:
 
        for (i = 0; i < this_task->nr_events; i++) {
                this_task->curr_event = i;
-               process_sched_event(this_task, this_task->events[i]);
+               process_sched_event(this_task, this_task->atoms[i]);
        }
 
        cpu_usage_1 = get_cpu_usage_nsec_self();
@@ -479,14 +509,9 @@ static void create_tasks(void)
        }
 }
 
-static nsec_t cpu_usage;
-static nsec_t runavg_cpu_usage;
-static nsec_t parent_cpu_usage;
-static nsec_t runavg_parent_cpu_usage;
-
 static void wait_for_tasks(void)
 {
-       nsec_t cpu_usage_0, cpu_usage_1;
+       u64 cpu_usage_0, cpu_usage_1;
        struct task_desc *task;
        unsigned long i, ret;
 
@@ -537,33 +562,9 @@ static void wait_for_tasks(void)
        }
 }
 
-static int __cmd_sched(void);
-
-static void parse_trace(void)
-{
-       __cmd_sched();
-
-       printf("nr_run_events:        %ld\n", nr_run_events);
-       printf("nr_sleep_events:      %ld\n", nr_sleep_events);
-       printf("nr_wakeup_events:     %ld\n", nr_wakeup_events);
-
-       if (targetless_wakeups)
-               printf("target-less wakeups:  %ld\n", targetless_wakeups);
-       if (multitarget_wakeups)
-               printf("multi-target wakeups: %ld\n", multitarget_wakeups);
-       if (nr_run_events_optimized)
-               printf("run events optimized: %ld\n",
-                       nr_run_events_optimized);
-}
-
-static unsigned long nr_runs;
-static nsec_t sum_runtime;
-static nsec_t sum_fluct;
-static nsec_t run_avg;
-
 static void run_one_test(void)
 {
-       nsec_t T0, T1, delta, avg_delta, fluct, std_dev;
+       u64 T0, T1, delta, avg_delta, fluct, std_dev;
 
        T0 = get_nsecs();
        wait_for_tasks();
@@ -587,10 +588,6 @@ static void run_one_test(void)
        printf("#%-3ld: %0.3f, ",
                nr_runs, (double)delta/1000000.0);
 
-#if 0
-       printf("%0.2f +- %0.2f, ",
-               (double)avg_delta/1e6, (double)std_dev/1e6);
-#endif
        printf("ravg: %0.2f, ",
                (double)run_avg/1e6);
 
@@ -616,7 +613,7 @@ static void run_one_test(void)
 
 static void test_calibrations(void)
 {
-       nsec_t T0, T1;
+       u64 T0, T1;
 
        T0 = get_nsecs();
        burn_nsecs(1e6);
@@ -631,27 +628,63 @@ static void test_calibrations(void)
        printf("the sleep test took %Ld nsecs\n", T1-T0);
 }
 
-static int
-process_comm_event(event_t *event, unsigned long offset, unsigned long head)
-{
-       struct thread *thread;
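+/* One copied PERF_SAMPLE_RAW payload: its size plus the raw tracepoint data. */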
+struct raw_event_sample {
+       u32 size;
+       char data[0];
+};
 
-       thread = threads__findnew(event->comm.pid, &threads, &last_match);
+#define FILL_FIELD(ptr, field, event, data)    \
+       ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)
 
-       dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
-               (void *)(offset + head),
-               (void *)(long)(event->header.size),
-               event->comm.comm, event->comm.pid);
+#define FILL_ARRAY(ptr, array, event, data)                    \
+do {                                                           \
+       void *__array = raw_field_ptr(event, #array, data);     \
+       memcpy(ptr.array, __array, sizeof(ptr.array));  \
+} while(0)
 
-       if (thread == NULL ||
-           thread__set_comm(thread, event->comm.comm)) {
-               dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n");
-               return -1;
-       }
-       total_comm++;
+#define FILL_COMMON_FIELDS(ptr, event, data)                   \
+do {                                                           \
+       FILL_FIELD(ptr, common_type, event, data);              \
+       FILL_FIELD(ptr, common_flags, event, data);             \
+       FILL_FIELD(ptr, common_preempt_count, event, data);     \
+       FILL_FIELD(ptr, common_pid, event, data);               \
+       FILL_FIELD(ptr, common_tgid, event, data);              \
+} while (0)
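+/*
+ * The FILL_* helpers pull named fields out of the raw payload, e.g.
+ * FILL_FIELD(ev, pid, event, data) expands to:
+ *
+ *	ev.pid = (typeof(ev.pid)) raw_field_value(event, "pid", data);
+ *
+ * so the trace_*_event structs below only need to name the fields
+ * exposed by each tracepoint's format, not match its binary layout.
+ */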
 
-       return 0;
-}
+
+
+struct trace_switch_event {
+       u32 size;
+
+       u16 common_type;
+       u8 common_flags;
+       u8 common_preempt_count;
+       u32 common_pid;
+       u32 common_tgid;
+
+       char prev_comm[16];
+       u32 prev_pid;
+       u32 prev_prio;
+       u64 prev_state;
+       char next_comm[16];
+       u32 next_pid;
+       u32 next_prio;
+};
+
+struct trace_runtime_event {
+       u32 size;
+
+       u16 common_type;
+       u8 common_flags;
+       u8 common_preempt_count;
+       u32 common_pid;
+       u32 common_tgid;
+
+       char comm[16];
+       u32 pid;
+       u64 runtime;
+       u64 vruntime;
+};
 
 struct trace_wakeup_event {
        u32 size;
@@ -670,9 +703,76 @@ struct trace_wakeup_event {
        u32 cpu;
 };
 
+struct trace_fork_event {
+       u32 size;
+
+       u16 common_type;
+       u8 common_flags;
+       u8 common_preempt_count;
+       u32 common_pid;
+       u32 common_tgid;
+
+       char parent_comm[16];
+       u32 parent_pid;
+       char child_comm[16];
+       u32 child_pid;
+};
+
+struct trace_migrate_task_event {
+       u32 size;
+
+       u16 common_type;
+       u8 common_flags;
+       u8 common_preempt_count;
+       u32 common_pid;
+       u32 common_tgid;
+
+       char comm[16];
+       u32 pid;
+
+       u32 prio;
+       u32 cpu;
+};
+
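+/*
+ * Dispatch table: each sub-command (replay, latency, map) plugs its
+ * own handlers for the sched tracepoints in here:
+ */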
+struct trace_sched_handler {
+       void (*switch_event)(struct trace_switch_event *,
+                            struct event *,
+                            int cpu,
+                            u64 timestamp,
+                            struct thread *thread);
+
+       void (*runtime_event)(struct trace_runtime_event *,
+                             struct event *,
+                             int cpu,
+                             u64 timestamp,
+                             struct thread *thread);
+
+       void (*wakeup_event)(struct trace_wakeup_event *,
+                            struct event *,
+                            int cpu,
+                            u64 timestamp,
+                            struct thread *thread);
+
+       void (*fork_event)(struct trace_fork_event *,
+                          struct event *,
+                          int cpu,
+                          u64 timestamp,
+                          struct thread *thread);
+
+       void (*migrate_task_event)(struct trace_migrate_task_event *,
+                          struct event *,
+                          int cpu,
+                          u64 timestamp,
+                          struct thread *thread);
+};
+
+
 static void
-process_sched_wakeup_event(struct trace_wakeup_event *wakeup_event, struct event *event,
-                 int cpu __used, u64 timestamp __used, struct thread *thread __used)
+replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
+                   struct event *event,
+                   int cpu __used,
+                   u64 timestamp __used,
+                   struct thread *thread __used)
 {
        struct task_desc *waker, *wakee;
 
@@ -691,31 +791,14 @@ process_sched_wakeup_event(struct trace_wakeup_event *wakeup_event, struct event
        add_sched_event_wakeup(waker, timestamp, wakee);
 }
 
-struct trace_switch_event {
-       u32 size;
-
-       u16 common_type;
-       u8 common_flags;
-       u8 common_preempt_count;
-       u32 common_pid;
-       u32 common_tgid;
-
-       char prev_comm[16];
-       u32 prev_pid;
-       u32 prev_prio;
-       u64 prev_state;
-       char next_comm[16];
-       u32 next_pid;
-       u32 next_prio;
-};
-
-#define MAX_CPUS 4096
-
-unsigned long cpu_last_switched[MAX_CPUS];
+static u64 cpu_last_switched[MAX_CPUS];
 
 static void
-process_sched_switch_event(struct trace_switch_event *switch_event, struct event *event,
-                 int cpu __used, u64 timestamp __used, struct thread *thread __used)
+replay_switch_event(struct trace_switch_event *switch_event,
+                   struct event *event,
+                   int cpu,
+                   u64 timestamp,
+                   struct thread *thread __used)
 {
        struct task_desc *prev, *next;
        u64 timestamp0;
@@ -752,24 +835,13 @@ process_sched_switch_event(struct trace_switch_event *switch_event, struct event
        add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
 }
 
-struct trace_fork_event {
-       u32 size;
-
-       u16 common_type;
-       u8 common_flags;
-       u8 common_preempt_count;
-       u32 common_pid;
-       u32 common_tgid;
-
-       char parent_comm[16];
-       u32 parent_pid;
-       char child_comm[16];
-       u32 child_pid;
-};
 
 static void
-process_sched_fork_event(struct trace_fork_event *fork_event, struct event *event,
-                 int cpu __used, u64 timestamp __used, struct thread *thread __used)
+replay_fork_event(struct trace_fork_event *fork_event,
+                 struct event *event,
+                 int cpu __used,
+                 u64 timestamp __used,
+                 struct thread *thread __used)
 {
        if (verbose) {
                printf("sched_fork event %p\n", event);
@@ -780,284 +852,1085 @@ process_sched_fork_event(struct trace_fork_event *fork_event, struct event *even
        register_pid(fork_event->child_pid, fork_event->child_comm);
 }
 
-static void process_sched_exit_event(struct event *event,
-                 int cpu __used, u64 timestamp __used, struct thread *thread __used)
+static struct trace_sched_handler replay_ops  = {
+       .wakeup_event           = replay_wakeup_event,
+       .switch_event           = replay_switch_event,
+       .fork_event             = replay_fork_event,
+};
+
+struct sort_dimension {
+       const char              *name;
+       sort_fn_t               cmp;
+       struct list_head        list;
+};
+
+static LIST_HEAD(cmp_pid);
+
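+/* Compare two work_atoms on each sort key in the list, in order. */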
+static int
+thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
+{
+       struct sort_dimension *sort;
+       int ret = 0;
+
+       BUG_ON(list_empty(list));
+
+       list_for_each_entry(sort, list, list) {
+               ret = sort->cmp(l, r);
+               if (ret)
+                       return ret;
+       }
+
+       return ret;
+}
+
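+/* rbtree lookup of a thread's work_atoms, keyed by the given sort list. */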
+static struct work_atoms *
+thread_atoms_search(struct rb_root *root, struct thread *thread,
+                        struct list_head *sort_list)
 {
-       if (verbose)
-               printf("sched_exit event %p\n", event);
+       struct rb_node *node = root->rb_node;
+       struct work_atoms key = { .thread = thread };
+
+       while (node) {
+               struct work_atoms *atoms;
+               int cmp;
+
+               atoms = container_of(node, struct work_atoms, node);
+
+               cmp = thread_lat_cmp(sort_list, &key, atoms);
+               if (cmp > 0)
+                       node = node->rb_left;
+               else if (cmp < 0)
+                       node = node->rb_right;
+               else {
+                       BUG_ON(thread != atoms->thread);
+                       return atoms;
+               }
+       }
+       return NULL;
 }
 
 static void
-process_raw_event(event_t *raw_event __used, void *more_data,
-                 int cpu, u64 timestamp, struct thread *thread)
+__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
+                        struct list_head *sort_list)
 {
-       struct {
-               u32 size;
-               char data[0];
-       } *raw = more_data;
-       struct event *event;
-       int type;
+       struct rb_node **new = &(root->rb_node), *parent = NULL;
 
-       type = trace_parse_common_type(raw->data);
-       event = trace_find_event(type);
+       while (*new) {
+               struct work_atoms *this;
+               int cmp;
 
-       if (!strcmp(event->name, "sched_switch"))
-               process_sched_switch_event(more_data, event, cpu, timestamp, thread);
-       if (!strcmp(event->name, "sched_wakeup"))
-               process_sched_wakeup_event(more_data, event, cpu, timestamp, thread);
-       if (!strcmp(event->name, "sched_wakeup_new"))
-               process_sched_wakeup_event(more_data, event, cpu, timestamp, thread);
-       if (!strcmp(event->name, "sched_process_fork"))
-               process_sched_fork_event(more_data, event, cpu, timestamp, thread);
-       if (!strcmp(event->name, "sched_process_exit"))
-               process_sched_exit_event(event, cpu, timestamp, thread);
+               this = container_of(*new, struct work_atoms, node);
+               parent = *new;
+
+               cmp = thread_lat_cmp(sort_list, data, this);
+
+               if (cmp > 0)
+                       new = &((*new)->rb_left);
+               else
+                       new = &((*new)->rb_right);
+       }
+
+       rb_link_node(&data->node, parent, new);
+       rb_insert_color(&data->node, root);
 }
 
-static int
-process_sample_event(event_t *event, unsigned long offset, unsigned long head)
+static void thread_atoms_insert(struct thread *thread)
 {
-       char level;
-       int show = 0;
-       struct dso *dso = NULL;
-       struct thread *thread;
-       u64 ip = event->ip.ip;
-       u64 timestamp = -1;
-       u32 cpu = -1;
-       u64 period = 1;
-       void *more_data = event->ip.__more_data;
-       int cpumode;
-
-       thread = threads__findnew(event->ip.pid, &threads, &last_match);
-
-       if (sample_type & PERF_SAMPLE_TIME) {
-               timestamp = *(u64 *)more_data;
-               more_data += sizeof(u64);
-       }
+       struct work_atoms *atoms = zalloc(sizeof(*atoms));
+       if (!atoms)
+               die("No memory");
+
+       atoms->thread = thread;
+       INIT_LIST_HEAD(&atoms->work_list);
+       __thread_latency_insert(&atom_root, atoms, &cmp_pid);
+}
+
+static void
+latency_fork_event(struct trace_fork_event *fork_event __used,
+                  struct event *event __used,
+                  int cpu __used,
+                  u64 timestamp __used,
+                  struct thread *thread __used)
+{
+       /* should insert the newcomer */
+}
 
-       if (sample_type & PERF_SAMPLE_CPU) {
-               cpu = *(u32 *)more_data;
-               more_data += sizeof(u32);
-               more_data += sizeof(u32); /* reserved */
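+/*
+ * Map sched_switch's prev_state to its letter. Note this treats
+ * prev_state as a small index into the string, not as a bitmask.
+ */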
+__used
+static char sched_out_state(struct trace_switch_event *switch_event)
+{
+       const char *str = TASK_STATE_TO_CHAR_STR;
+
+       return str[switch_event->prev_state];
+}
+
+static void
+add_sched_out_event(struct work_atoms *atoms,
+                   char run_state,
+                   u64 timestamp)
+{
+       struct work_atom *atom = zalloc(sizeof(*atom));
+       if (!atom)
+               die("No memory");
+
+       atom->sched_out_time = timestamp;
+
+       if (run_state == 'R') {
+               atom->state = THREAD_WAIT_CPU;
+               atom->wake_up_time = atom->sched_out_time;
        }
 
-       if (sample_type & PERF_SAMPLE_PERIOD) {
-               period = *(u64 *)more_data;
-               more_data += sizeof(u64);
+       list_add_tail(&atom->list, &atoms->work_list);
+}
+
+static void
+add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)
+{
+       struct work_atom *atom;
+
+       BUG_ON(list_empty(&atoms->work_list));
+
+       atom = list_entry(atoms->work_list.prev, struct work_atom, list);
+
+       atom->runtime += delta;
+       atoms->total_runtime += delta;
+}
+
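+/*
+ * A sched-in completes the last atom: the wake_up_time -> sched_in_time
+ * delta is the wakeup latency that feeds total_lat/max_lat.
+ */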
+static void
+add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
+{
+       struct work_atom *atom;
+       u64 delta;
+
+       if (list_empty(&atoms->work_list))
+               return;
+
+       atom = list_entry(atoms->work_list.prev, struct work_atom, list);
+
+       if (atom->state != THREAD_WAIT_CPU)
+               return;
+
+       if (timestamp < atom->wake_up_time) {
+               atom->state = THREAD_IGNORE;
+               return;
        }
 
-       dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
-               (void *)(offset + head),
-               (void *)(long)(event->header.size),
-               event->header.misc,
-               event->ip.pid, event->ip.tid,
-               (void *)(long)ip,
-               (long long)period);
+       atom->state = THREAD_SCHED_IN;
+       atom->sched_in_time = timestamp;
 
-       dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+       delta = atom->sched_in_time - atom->wake_up_time;
+       atoms->total_lat += delta;
+       if (delta > atoms->max_lat)
+               atoms->max_lat = delta;
+       atoms->nb_atoms++;
+}
 
-       if (thread == NULL) {
-               eprintf("problem processing %d event, skipping it.\n",
-                       event->header.type);
-               return -1;
+static void
+latency_switch_event(struct trace_switch_event *switch_event,
+                    struct event *event __used,
+                    int cpu,
+                    u64 timestamp,
+                    struct thread *thread __used)
+{
+       struct work_atoms *out_events, *in_events;
+       struct thread *sched_out, *sched_in;
+       u64 timestamp0;
+       s64 delta;
+
+       BUG_ON(cpu >= MAX_CPUS || cpu < 0);
+
+       timestamp0 = cpu_last_switched[cpu];
+       cpu_last_switched[cpu] = timestamp;
+       if (timestamp0)
+               delta = timestamp - timestamp0;
+       else
+               delta = 0;
+
+       if (delta < 0)
+               die("hm, delta: %Ld < 0 ?\n", delta);
+
+
+       sched_out = threads__findnew(switch_event->prev_pid);
+       sched_in = threads__findnew(switch_event->next_pid);
+
+       out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
+       if (!out_events) {
+               thread_atoms_insert(sched_out);
+               out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
+               if (!out_events)
+                       die("out-event: Internal tree error");
+       }
+       add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);
+
+       in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
+       if (!in_events) {
+               thread_atoms_insert(sched_in);
+               in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
+               if (!in_events)
+                       die("in-event: Internal tree error");
+               /*
+                * A task came in that we have not heard about yet:
+                * add an initial atom in runnable state:
+                */
+               add_sched_out_event(in_events, 'R', timestamp);
        }
+       add_sched_in_event(in_events, timestamp);
+}
 
-       cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;
+static void
+latency_runtime_event(struct trace_runtime_event *runtime_event,
+                    struct event *event __used,
+                    int cpu,
+                    u64 timestamp,
+                    struct thread *this_thread __used)
+{
+       struct thread *thread = threads__findnew(runtime_event->pid);
+       struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
+
+       BUG_ON(cpu >= MAX_CPUS || cpu < 0);
+       if (!atoms) {
+               thread_atoms_insert(thread);
+               atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
+               if (!atoms)
+                       die("in-event: Internal tree error");
+               add_sched_out_event(atoms, 'R', timestamp);
+       }
 
-       if (cpumode == PERF_EVENT_MISC_KERNEL) {
-               show = SHOW_KERNEL;
-               level = 'k';
+       add_runtime_event(atoms, runtime_event->runtime, timestamp);
+}
 
-               dso = kernel_dso;
+static void
+latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
+                    struct event *__event __used,
+                    int cpu __used,
+                    u64 timestamp,
+                    struct thread *thread __used)
+{
+       struct work_atoms *atoms;
+       struct work_atom *atom;
+       struct thread *wakee;
 
-               dump_printf(" ...... dso: %s\n", dso->name);
+       /* Note for later, it may be interesting to observe the failing cases */
+       if (!wakeup_event->success)
+               return;
 
-       } else if (cpumode == PERF_EVENT_MISC_USER) {
+       wakee = threads__findnew(wakeup_event->pid);
+       atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
+       if (!atoms) {
+               thread_atoms_insert(wakee);
+               atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
+               if (!atoms)
+                       die("wakeup-event: Internal tree error");
+               add_sched_out_event(atoms, 'S', timestamp);
+       }
 
-               show = SHOW_USER;
-               level = '.';
+       BUG_ON(list_empty(&atoms->work_list));
 
-       } else {
-               show = SHOW_HV;
-               level = 'H';
+       atom = list_entry(atoms->work_list.prev, struct work_atom, list);
 
-               dso = hypervisor_dso;
+       /*
+        * You WILL be missing events if you've recorded only
+        * one CPU, or are only looking at one, so don't
+        * make useless noise.
+        */
+       if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
+               nr_state_machine_bugs++;
+
+       nr_timestamps++;
+       if (atom->sched_out_time > timestamp) {
+               nr_unordered_timestamps++;
+               return;
+       }
+
+       atom->state = THREAD_WAIT_CPU;
+       atom->wake_up_time = timestamp;
+}
+
+static void
+latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
+                    struct event *__event __used,
+                    int cpu __used,
+                    u64 timestamp,
+                    struct thread *thread __used)
+{
+       struct work_atoms *atoms;
+       struct work_atom *atom;
+       struct thread *migrant;
 
-               dump_printf(" ...... dso: [hypervisor]\n");
+       /*
+        * Only need to worry about migration when profiling one CPU.
+        */
+       if (profile_cpu == -1)
+               return;
+
+       migrant = threads__findnew(migrate_task_event->pid);
+       atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
+       if (!atoms) {
+               thread_atoms_insert(migrant);
+               register_pid(migrant->pid, migrant->comm);
+               atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
+               if (!atoms)
+                       die("migration-event: Internal tree error");
+               add_sched_out_event(atoms, 'R', timestamp);
        }
 
-       if (sample_type & PERF_SAMPLE_RAW)
-               process_raw_event(event, more_data, cpu, timestamp, thread);
+       BUG_ON(list_empty(&atoms->work_list));
+
+       atom = list_entry(atoms->work_list.prev, struct work_atom, list);
+       atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
+
+       nr_timestamps++;
+
+       if (atom->sched_out_time > timestamp)
+               nr_unordered_timestamps++;
+}
+
+static struct trace_sched_handler lat_ops  = {
+       .wakeup_event           = latency_wakeup_event,
+       .switch_event           = latency_switch_event,
+       .runtime_event          = latency_runtime_event,
+       .fork_event             = latency_fork_event,
+       .migrate_task_event     = latency_migrate_task_event,
+};
+
+static void output_lat_thread(struct work_atoms *work_list)
+{
+       int i;
+       int ret;
+       u64 avg;
+
+       if (!work_list->nb_atoms)
+               return;
+       /*
+        * Ignore idle threads:
+        */
+       if (!strcmp(work_list->thread->comm, "swapper"))
+               return;
+
+       all_runtime += work_list->total_runtime;
+       all_count += work_list->nb_atoms;
+
+       ret = printf("  %s:%d ", work_list->thread->comm, work_list->thread->pid);
+
+       for (i = 0; i < 24 - ret; i++)
+               printf(" ");
+
+       avg = work_list->total_lat / work_list->nb_atoms;
+
+       printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms |\n",
+             (double)work_list->total_runtime / 1e6,
+                work_list->nb_atoms, (double)avg / 1e6,
+                (double)work_list->max_lat / 1e6);
+}
+
+static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
+{
+       if (l->thread->pid < r->thread->pid)
+               return -1;
+       if (l->thread->pid > r->thread->pid)
+               return 1;
 
        return 0;
 }
 
-static int
-process_event(event_t *event, unsigned long offset, unsigned long head)
+static struct sort_dimension pid_sort_dimension = {
+       .name                   = "pid",
+       .cmp                    = pid_cmp,
+};
+
+static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
 {
-       trace_event(event);
+       u64 avgl, avgr;
 
-       switch (event->header.type) {
-       case PERF_EVENT_MMAP ... PERF_EVENT_LOST:
-               return 0;
+       if (!l->nb_atoms)
+               return -1;
 
-       case PERF_EVENT_COMM:
-               return process_comm_event(event, offset, head);
+       if (!r->nb_atoms)
+               return 1;
 
-       case PERF_EVENT_EXIT ... PERF_EVENT_READ:
-               return 0;
+       avgl = l->total_lat / l->nb_atoms;
+       avgr = r->total_lat / r->nb_atoms;
+
+       if (avgl < avgr)
+               return -1;
+       if (avgl > avgr)
+               return 1;
 
-       case PERF_EVENT_SAMPLE:
-               return process_sample_event(event, offset, head);
+       return 0;
+}
 
-       case PERF_EVENT_MAX:
-       default:
+static struct sort_dimension avg_sort_dimension = {
+       .name                   = "avg",
+       .cmp                    = avg_cmp,
+};
+
+static int max_cmp(struct work_atoms *l, struct work_atoms *r)
+{
+       if (l->max_lat < r->max_lat)
                return -1;
-       }
+       if (l->max_lat > r->max_lat)
+               return 1;
 
        return 0;
 }
 
-static int __cmd_sched(void)
+static struct sort_dimension max_sort_dimension = {
+       .name                   = "max",
+       .cmp                    = max_cmp,
+};
+
+static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
 {
-       int ret, rc = EXIT_FAILURE;
-       unsigned long offset = 0;
-       unsigned long head = 0;
-       struct stat perf_stat;
-       event_t *event;
-       uint32_t size;
-       char *buf;
+       if (l->nb_atoms < r->nb_atoms)
+               return -1;
+       if (l->nb_atoms > r->nb_atoms)
+               return 1;
 
-       trace_report();
-       register_idle_thread(&threads, &last_match);
+       return 0;
+}
 
-       input = open(input_name, O_RDONLY);
-       if (input < 0) {
-               perror("failed to open file");
-               exit(-1);
-       }
+static struct sort_dimension switch_sort_dimension = {
+       .name                   = "switch",
+       .cmp                    = switch_cmp,
+};
 
-       ret = fstat(input, &perf_stat);
-       if (ret < 0) {
-               perror("failed to stat file");
-               exit(-1);
-       }
+static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
+{
+       if (l->total_runtime < r->total_runtime)
+               return -1;
+       if (l->total_runtime > r->total_runtime)
+               return 1;
 
-       if (!perf_stat.st_size) {
-               fprintf(stderr, "zero-sized file, nothing to do!\n");
-               exit(0);
-       }
-       header = perf_header__read(input);
-       head = header->data_offset;
-       sample_type = perf_header__sample_type(header);
+       return 0;
+}
 
-       if (!(sample_type & PERF_SAMPLE_RAW))
-               die("No trace sample to read. Did you call perf record "
-                   "without -R?");
+static struct sort_dimension runtime_sort_dimension = {
+       .name                   = "runtime",
+       .cmp                    = runtime_cmp,
+};
 
-       if (load_kernel() < 0) {
-               perror("failed to load kernel symbols");
-               return EXIT_FAILURE;
+static struct sort_dimension *available_sorts[] = {
+       &pid_sort_dimension,
+       &avg_sort_dimension,
+       &max_sort_dimension,
+       &switch_sort_dimension,
+       &runtime_sort_dimension,
+};
+
+#define NB_AVAILABLE_SORTS     (int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))
+
+static LIST_HEAD(sort_list);
+
+static int sort_dimension__add(const char *tok, struct list_head *list)
+{
+       int i;
+
+       for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
+               if (!strcmp(available_sorts[i]->name, tok)) {
+                       list_add_tail(&available_sorts[i]->list, list);
+
+                       return 0;
+               }
        }
 
-remap:
-       buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
-                          MAP_SHARED, input, offset);
-       if (buf == MAP_FAILED) {
-               perror("failed to mmap file");
-               exit(-1);
+       return -1;
+}
+
+static void setup_sorting(void);
+
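+/* Move every node from the pid-keyed tree into the user-sorted tree. */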
+static void sort_lat(void)
+{
+       struct rb_node *node;
+
+       for (;;) {
+               struct work_atoms *data;
+               node = rb_first(&atom_root);
+               if (!node)
+                       break;
+
+               rb_erase(node, &atom_root);
+               data = rb_entry(node, struct work_atoms, node);
+               __thread_latency_insert(&sorted_atom_root, data, &sort_list);
        }
+}
+
+static struct trace_sched_handler *trace_handler;
+
+static void
+process_sched_wakeup_event(struct raw_event_sample *raw,
+                          struct event *event,
+                          int cpu __used,
+                          u64 timestamp __used,
+                          struct thread *thread __used)
+{
+       struct trace_wakeup_event wakeup_event;
+
+       FILL_COMMON_FIELDS(wakeup_event, event, raw->data);
+
+       FILL_ARRAY(wakeup_event, comm, event, raw->data);
+       FILL_FIELD(wakeup_event, pid, event, raw->data);
+       FILL_FIELD(wakeup_event, prio, event, raw->data);
+       FILL_FIELD(wakeup_event, success, event, raw->data);
+       FILL_FIELD(wakeup_event, cpu, event, raw->data);
+
+       if (trace_handler->wakeup_event)
+               trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
+}
+
+/*
+ * Track the current task - that way we can know whether there's any
+ * weird events, such as a task being switched away that is not current.
+ */
+static int max_cpu;
+
+static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };
+
+static struct thread *curr_thread[MAX_CPUS];
+
+static char next_shortname1 = 'A';
+static char next_shortname2 = '0';
+
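+/*
+ * Print one line per context switch: a column per CPU, '*' marking the
+ * CPU that switched, each thread shown by a two-character shortname.
+ */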
+static void
+map_switch_event(struct trace_switch_event *switch_event,
+                struct event *event __used,
+                int this_cpu,
+                u64 timestamp,
+                struct thread *thread __used)
+{
+       struct thread *sched_out, *sched_in;
+       int new_shortname;
+       u64 timestamp0;
+       s64 delta;
+       int cpu;
+
+       BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
+
+       if (this_cpu > max_cpu)
+               max_cpu = this_cpu;
+
+       timestamp0 = cpu_last_switched[this_cpu];
+       cpu_last_switched[this_cpu] = timestamp;
+       if (timestamp0)
+               delta = timestamp - timestamp0;
+       else
+               delta = 0;
+
+       if (delta < 0)
+               die("hm, delta: %Ld < 0 ?\n", delta);
 
-more:
-       event = (event_t *)(buf + head);
 
-       size = event->header.size;
-       if (!size)
-               size = 8;
+       sched_out = threads__findnew(switch_event->prev_pid);
+       sched_in = threads__findnew(switch_event->next_pid);
 
-       if (head + event->header.size >= page_size * mmap_window) {
-               unsigned long shift = page_size * (head / page_size);
-               int res;
+       curr_thread[this_cpu] = sched_in;
 
-               res = munmap(buf, page_size * mmap_window);
-               assert(res == 0);
+       printf("  ");
 
-               offset += shift;
-               head -= shift;
-               goto remap;
+       new_shortname = 0;
+       if (!sched_in->shortname[0]) {
+               sched_in->shortname[0] = next_shortname1;
+               sched_in->shortname[1] = next_shortname2;
+
+               if (next_shortname1 < 'Z') {
+                       next_shortname1++;
+               } else {
+                       next_shortname1 = 'A';
+                       if (next_shortname2 < '9') {
+                               next_shortname2++;
+                       } else {
+                               next_shortname2 = '0';
+                       }
+               }
+               new_shortname = 1;
+       }
+
+       for (cpu = 0; cpu <= max_cpu; cpu++) {
+               if (cpu != this_cpu)
+                       printf(" ");
+               else
+                       printf("*");
+
+               if (curr_thread[cpu]) {
+                       if (curr_thread[cpu]->pid)
+                               printf("%2s ", curr_thread[cpu]->shortname);
+                       else
+                               printf(".  ");
+               } else
+                       printf("   ");
+       }
+
+       printf("  %12.6f secs ", (double)timestamp/1e9);
+       if (new_shortname) {
+               printf("%s => %s:%d\n",
+                       sched_in->shortname, sched_in->comm, sched_in->pid);
+       } else {
+               printf("\n");
        }
+}
 
-       size = event->header.size;
 
+static void
+process_sched_switch_event(struct raw_event_sample *raw,
+                          struct event *event,
+                          int this_cpu,
+                          u64 timestamp __used,
+                          struct thread *thread __used)
+{
+       struct trace_switch_event switch_event;
 
-       if (!size || process_event(event, offset, head) < 0) {
+       FILL_COMMON_FIELDS(switch_event, event, raw->data);
 
+       FILL_ARRAY(switch_event, prev_comm, event, raw->data);
+       FILL_FIELD(switch_event, prev_pid, event, raw->data);
+       FILL_FIELD(switch_event, prev_prio, event, raw->data);
+       FILL_FIELD(switch_event, prev_state, event, raw->data);
+       FILL_ARRAY(switch_event, next_comm, event, raw->data);
+       FILL_FIELD(switch_event, next_pid, event, raw->data);
+       FILL_FIELD(switch_event, next_prio, event, raw->data);
+
+       if (curr_pid[this_cpu] != (u32)-1) {
                /*
-                * assume we lost track of the stream, check alignment, and
-                * increment a single u64 in the hope to catch on again 'soon'.
+                * Are we trying to switch away a PID that is
+                * not current?
                 */
+               if (curr_pid[this_cpu] != switch_event.prev_pid)
+                       nr_context_switch_bugs++;
+       }
+       if (trace_handler->switch_event)
+               trace_handler->switch_event(&switch_event, event, this_cpu, timestamp, thread);
+
+       curr_pid[this_cpu] = switch_event.next_pid;
+}
+
+static void
+process_sched_runtime_event(struct raw_event_sample *raw,
+                          struct event *event,
+                          int cpu __used,
+                          u64 timestamp __used,
+                          struct thread *thread __used)
+{
+       struct trace_runtime_event runtime_event;
+
+       FILL_ARRAY(runtime_event, comm, event, raw->data);
+       FILL_FIELD(runtime_event, pid, event, raw->data);
+       FILL_FIELD(runtime_event, runtime, event, raw->data);
+       FILL_FIELD(runtime_event, vruntime, event, raw->data);
+
+       if (trace_handler->runtime_event)
+               trace_handler->runtime_event(&runtime_event, event, cpu, timestamp, thread);
+}
+
+static void
+process_sched_fork_event(struct raw_event_sample *raw,
+                        struct event *event,
+                        int cpu __used,
+                        u64 timestamp __used,
+                        struct thread *thread __used)
+{
+       struct trace_fork_event fork_event;
+
+       FILL_COMMON_FIELDS(fork_event, event, raw->data);
+
+       FILL_ARRAY(fork_event, parent_comm, event, raw->data);
+       FILL_FIELD(fork_event, parent_pid, event, raw->data);
+       FILL_ARRAY(fork_event, child_comm, event, raw->data);
+       FILL_FIELD(fork_event, child_pid, event, raw->data);
+
+       if (trace_handler->fork_event)
+               trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
+}
+
+static void
+process_sched_exit_event(struct event *event,
+                        int cpu __used,
+                        u64 timestamp __used,
+                        struct thread *thread __used)
+{
+       if (verbose)
+               printf("sched_exit event %p\n", event);
+}
+
+static void
+process_sched_migrate_task_event(struct raw_event_sample *raw,
+                          struct event *event,
+                          int cpu __used,
+                          u64 timestamp __used,
+                          struct thread *thread __used)
+{
+       struct trace_migrate_task_event migrate_task_event;
 
-               if (unlikely(head & 7))
-                       head &= ~7ULL;
+       FILL_COMMON_FIELDS(migrate_task_event, event, raw->data);
 
-               size = 8;
+       FILL_ARRAY(migrate_task_event, comm, event, raw->data);
+       FILL_FIELD(migrate_task_event, pid, event, raw->data);
+       FILL_FIELD(migrate_task_event, prio, event, raw->data);
+       FILL_FIELD(migrate_task_event, cpu, event, raw->data);
+
+       if (trace_handler->migrate_task_event)
+               trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread);
+}
+
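+/*
+ * Copy the raw sample and demultiplex it to the handlers above by
+ * tracepoint name.
+ */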
+static void
+process_raw_event(event_t *raw_event __used, u32 size, void *data,
+                 int cpu, u64 timestamp, struct thread *thread)
+{
+       struct raw_event_sample *raw;
+       struct event *event;
+       int type;
+
+       raw = malloc_or_die(sizeof(*raw)+size);
+       raw->size = size;
+       memcpy(raw->data, data, size);
+
+       type = trace_parse_common_type(raw->data);
+       event = trace_find_event(type);
+
+       if (!strcmp(event->name, "sched_switch"))
+               process_sched_switch_event(raw, event, cpu, timestamp, thread);
+       if (!strcmp(event->name, "sched_stat_runtime"))
+               process_sched_runtime_event(raw, event, cpu, timestamp, thread);
+       if (!strcmp(event->name, "sched_wakeup"))
+               process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
+       if (!strcmp(event->name, "sched_wakeup_new"))
+               process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
+       if (!strcmp(event->name, "sched_process_fork"))
+               process_sched_fork_event(raw, event, cpu, timestamp, thread);
+       if (!strcmp(event->name, "sched_process_exit"))
+               process_sched_exit_event(event, cpu, timestamp, thread);
+       if (!strcmp(event->name, "sched_migrate_task"))
+               process_sched_migrate_task_event(raw, event, cpu, timestamp, thread);
+}
+
+static int process_sample_event(event_t *event)
+{
+       struct sample_data data;
+       struct thread *thread;
+
+       if (!(sample_type & PERF_SAMPLE_RAW))
+               return 0;
+
+       memset(&data, 0, sizeof(data));
+       data.time = -1;
+       data.cpu = -1;
+       data.period = -1;
+
+       event__parse_sample(event, sample_type, &data);
+
+       dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
+               event->header.misc,
+               data.pid, data.tid,
+               (void *)(long)data.ip,
+               (long long)data.period);
+
+       thread = threads__findnew(data.pid);
+       if (thread == NULL) {
+               pr_debug("problem processing %d event, skipping it.\n",
+                        event->header.type);
+               return -1;
        }
 
-       head += size;
+       dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+
+       if (profile_cpu != -1 && profile_cpu != (int)data.cpu)
+               return 0;
+
+       process_raw_event(event, data.raw_size, data.raw_data, data.cpu,
+                         data.time, thread);
 
-       if (offset + head < (unsigned long)perf_stat.st_size)
-               goto more;
+       return 0;
+}
 
-       rc = EXIT_SUCCESS;
-       close(input);
+static int process_lost_event(event_t *event __used)
+{
+       nr_lost_chunks++;
+       nr_lost_events += event->lost.lost;
 
-       return rc;
+       return 0;
 }
 
-static const char * const annotate_usage[] = {
-       "perf trace [<options>] <command>",
-       NULL
-};
+static int sample_type_check(u64 type)
+{
+       sample_type = type;
 
-static const struct option options[] = {
-       OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
-                   "dump raw trace in ASCII"),
-       OPT_BOOLEAN('v', "verbose", &verbose,
-                   "be more verbose (show symbol address, etc)"),
-       OPT_END()
+       if (!(sample_type & PERF_SAMPLE_RAW)) {
+               fprintf(stderr,
+                       "No trace sample to read. Did you call perf record "
+                       "without -R?");
+               return -1;
+       }
+
+       return 0;
+}
+
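+/* Callbacks for the generic perf.data reader declared in util/data_map.h. */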
+static struct perf_file_handler file_handler = {
+       .process_sample_event   = process_sample_event,
+       .process_comm_event     = event__process_comm,
+       .process_lost_event     = process_lost_event,
+       .sample_type_check      = sample_type_check,
 };
 
-int cmd_sched(int argc, const char **argv, const char *prefix __used)
+static int read_events(void)
 {
-       long nr_iterations = 10, i;
+       register_idle_thread();
+       register_perf_file_handler(&file_handler);
 
-       symbol__init();
-       page_size = getpagesize();
+       return mmap_dispatch_perf_file(&header, input_name, 0, 0,
+                                      &event__cwdlen, &event__cwd);
+}
 
-       argc = parse_options(argc, argv, options, annotate_usage, 0);
-       if (argc) {
-               /*
-                * Special case: if there's an argument left then assume tha
-                * it's a symbol filter:
-                */
-               if (argc > 1)
-                       usage_with_options(annotate_usage, options);
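+/* Summarize the inconsistencies counted while processing the trace. */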
+static void print_bad_events(void)
+{
+       if (nr_unordered_timestamps && nr_timestamps) {
+               printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
+                       (double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
+                       nr_unordered_timestamps, nr_timestamps);
+       }
+       if (nr_lost_events && nr_events) {
+               printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
+                       (double)nr_lost_events/(double)nr_events*100.0,
+                       nr_lost_events, nr_events, nr_lost_chunks);
+       }
+       if (nr_state_machine_bugs && nr_timestamps) {
+               printf("  INFO: %.3f%% state machine bugs (%ld out of %ld)",
+                       (double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
+                       nr_state_machine_bugs, nr_timestamps);
+               if (nr_lost_events)
+                       printf(" (due to lost events?)");
+               printf("\n");
+       }
+       if (nr_context_switch_bugs && nr_timestamps) {
+               printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
+                       (double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
+                       nr_context_switch_bugs, nr_timestamps);
+               if (nr_lost_events)
+                       printf(" (due to lost events?)");
+               printf("\n");
+       }
+}
+
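+/*
+ * 'perf sched latency': print a per-task table of runtime, context
+ * switches and average/maximum scheduling delay.
+ */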
+static void __cmd_lat(void)
+{
+       struct rb_node *next;
+
+       setup_pager();
+       read_events();
+       sort_lat();
+
+       printf("\n -----------------------------------------------------------------------------------------\n");
+       printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms |\n");
+       printf(" -----------------------------------------------------------------------------------------\n");
+
+       next = rb_first(&sorted_atom_root);
+
+       while (next) {
+               struct work_atoms *work_list;
+
+               work_list = rb_entry(next, struct work_atoms, node);
+               output_lat_thread(work_list);
+               next = rb_next(next);
        }
 
-//     setup_pager();
+       printf(" -----------------------------------------------------------------------------------------\n");
+       printf("  TOTAL:                |%11.3f ms |%9Ld |\n",
+               (double)all_runtime/1e6, all_count);
+
+       printf(" ---------------------------------------------------\n");
+
+       print_bad_events();
+       printf("\n");
+}
+
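+/* The map view only consumes context switch events; the rest are ignored. */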
+static struct trace_sched_handler map_ops  = {
+       .wakeup_event           = NULL,
+       .switch_event           = map_switch_event,
+       .runtime_event          = NULL,
+       .fork_event             = NULL,
+};
+
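+/* 'perf sched map': render the trace as a per-CPU context switch map. */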
+static void __cmd_map(void)
+{
+       max_cpu = sysconf(_SC_NPROCESSORS_CONF);
+
+       setup_pager();
+       read_events();
+       print_bad_events();
+}
+
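+/*
+ * 'perf sched replay': re-create the recorded tasks as real threads and
+ * replay the traced workload replay_repeat times.
+ */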
+static void __cmd_replay(void)
+{
+       unsigned long i;
 
        calibrate_run_measurement_overhead();
        calibrate_sleep_measurement_overhead();
 
        test_calibrations();
 
-       parse_trace();
+       read_events();
+
+       printf("nr_run_events:        %ld\n", nr_run_events);
+       printf("nr_sleep_events:      %ld\n", nr_sleep_events);
+       printf("nr_wakeup_events:     %ld\n", nr_wakeup_events);
+
+       if (targetless_wakeups)
+               printf("target-less wakeups:  %ld\n", targetless_wakeups);
+       if (multitarget_wakeups)
+               printf("multi-target wakeups: %ld\n", multitarget_wakeups);
+       if (nr_run_events_optimized)
+               printf("run atoms optimized: %ld\n",
+                       nr_run_events_optimized);
+
        print_task_traces();
        add_cross_task_wakeups();
 
        create_tasks();
        printf("------------------------------------------------------------\n");
-       for (i = 0; i < nr_iterations; i++)
+       for (i = 0; i < replay_repeat; i++)
                run_one_test();
+}
+
+static const char * const sched_usage[] = {
+       "perf sched [<options>] {record|latency|map|replay|trace}",
+       NULL
+};
+
+static const struct option sched_options[] = {
+       OPT_STRING('i', "input", &input_name, "file",
+                   "input file name"),
+       OPT_BOOLEAN('v', "verbose", &verbose,
+                   "be more verbose (show symbol address, etc)"),
+       OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+                   "dump raw trace in ASCII"),
+       OPT_END()
+};
+
+static const char * const latency_usage[] = {
+       "perf sched latency [<options>]",
+       NULL
+};
+
+static const struct option latency_options[] = {
+       OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
+                  "sort by key(s): runtime, switch, avg, max"),
+       OPT_BOOLEAN('v', "verbose", &verbose,
+                   "be more verbose (show symbol address, etc)"),
+       OPT_INTEGER('C', "CPU", &profile_cpu,
+                   "CPU to profile on"),
+       OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+                   "dump raw trace in ASCII"),
+       OPT_END()
+};
+
+static const char * const replay_usage[] = {
+       "perf sched replay [<options>]",
+       NULL
+};
+
+static const struct option replay_options[] = {
+       OPT_INTEGER('r', "repeat", &replay_repeat,
+                   "repeat the workload replay N times (-1: infinite)"),
+       OPT_BOOLEAN('v', "verbose", &verbose,
+                   "be more verbose (show symbol address, etc)"),
+       OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+                   "dump raw trace in ASCII"),
+       OPT_END()
+};
+
+static void setup_sorting(void)
+{
+       char *tmp, *tok, *str = strdup(sort_order);
+
+       for (tok = strtok_r(str, ", ", &tmp);
+                       tok; tok = strtok_r(NULL, ", ", &tmp)) {
+               if (sort_dimension__add(tok, &sort_list) < 0) {
+                       error("Unknown --sort key: `%s'", tok);
+                       usage_with_options(latency_usage, latency_options);
+               }
+       }
+
+       free(str);
+
+       sort_dimension__add("pid", &cmp_pid);
+}
+
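+/*
+ * 'perf sched record' is a wrapper around 'perf record' that enables
+ * system-wide (-a), raw (-R) sampling of all scheduler tracepoints.
+ */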
+static const char *record_args[] = {
+       "record",
+       "-a",
+       "-R",
+       "-M",
+       "-f",
+       "-m", "1024",
+       "-c", "1",
+       "-e", "sched:sched_switch:r",
+       "-e", "sched:sched_stat_wait:r",
+       "-e", "sched:sched_stat_sleep:r",
+       "-e", "sched:sched_stat_iowait:r",
+       "-e", "sched:sched_stat_runtime:r",
+       "-e", "sched:sched_process_exit:r",
+       "-e", "sched:sched_process_fork:r",
+       "-e", "sched:sched_wakeup:r",
+       "-e", "sched:sched_migrate_task:r",
+};
+
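+/* Build the full argument vector and hand it to cmd_record(). */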
+static int __cmd_record(int argc, const char **argv)
+{
+       unsigned int rec_argc, i, j;
+       const char **rec_argv;
+
+       rec_argc = ARRAY_SIZE(record_args) + argc - 1;
+       rec_argv = calloc(rec_argc + 1, sizeof(char *));
+       if (rec_argv == NULL)
+               return -ENOMEM;
+
+       for (i = 0; i < ARRAY_SIZE(record_args); i++)
+               rec_argv[i] = strdup(record_args[i]);
+
+       for (j = 1; j < (unsigned int)argc; j++, i++)
+               rec_argv[i] = argv[j];
+
+       BUG_ON(i != rec_argc);
+
+       return cmd_record(i, rec_argv, NULL);
+}
+
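+/*
+ * Subcommand dispatcher: record, latency, map, replay, or the 'trace'
+ * alias; abbreviated forms like 'rec', 'lat' and 'rep' are accepted.
+ */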
+int cmd_sched(int argc, const char **argv, const char *prefix __used)
+{
+       argc = parse_options(argc, argv, sched_options, sched_usage,
+                            PARSE_OPT_STOP_AT_NON_OPTION);
+       if (!argc)
+               usage_with_options(sched_usage, sched_options);
+
+       /*
+        * Aliased to 'perf trace' for now:
+        */
+       if (!strcmp(argv[0], "trace"))
+               return cmd_trace(argc, argv, prefix);
+
+       symbol__init(0);
+       if (!strncmp(argv[0], "rec", 3)) {
+               return __cmd_record(argc, argv);
+       } else if (!strncmp(argv[0], "lat", 3)) {
+               trace_handler = &lat_ops;
+               if (argc > 1) {
+                       argc = parse_options(argc, argv, latency_options, latency_usage, 0);
+                       if (argc)
+                               usage_with_options(latency_usage, latency_options);
+               }
+               setup_sorting();
+               __cmd_lat();
+       } else if (!strcmp(argv[0], "map")) {
+               trace_handler = &map_ops;
+               setup_sorting();
+               __cmd_map();
+       } else if (!strncmp(argv[0], "rep", 3)) {
+               trace_handler = &replay_ops;
+               if (argc) {
+                       argc = parse_options(argc, argv, replay_options, replay_usage, 0);
+                       if (argc)
+                               usage_with_options(replay_usage, replay_options);
+               }
+               __cmd_replay();
+       } else {
+               usage_with_options(sched_usage, sched_options);
+       }
 
        return 0;
 }