perf: Fix unexported generic perf_arch_fetch_caller_regs
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index c4e90b8..574ee58 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -56,21 +56,6 @@ static atomic_t nr_task_events __read_mostly;
  */
 int sysctl_perf_event_paranoid __read_mostly = 1;
 
-static inline bool perf_paranoid_tracepoint_raw(void)
-{
-       return sysctl_perf_event_paranoid > -1;
-}
-
-static inline bool perf_paranoid_cpu(void)
-{
-       return sysctl_perf_event_paranoid > 0;
-}
-
-static inline bool perf_paranoid_kernel(void)
-{
-       return sysctl_perf_event_paranoid > 1;
-}
-
 int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
 
 /*
@@ -96,13 +81,10 @@ extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
 void __weak hw_perf_disable(void)              { barrier(); }
 void __weak hw_perf_enable(void)               { barrier(); }
 
-void __weak hw_perf_event_setup(int cpu)       { barrier(); }
-void __weak hw_perf_event_setup_online(int cpu)        { barrier(); }
-
 int __weak
 hw_perf_group_sched_in(struct perf_event *group_leader,
               struct perf_cpu_context *cpuctx,
-              struct perf_event_context *ctx, int cpu)
+              struct perf_event_context *ctx)
 {
        return 0;
 }
@@ -111,25 +93,15 @@ void __weak perf_event_print_debug(void)   { }
 
 static DEFINE_PER_CPU(int, perf_disable_count);
 
-void __perf_disable(void)
-{
-       __get_cpu_var(perf_disable_count)++;
-}
-
-bool __perf_enable(void)
-{
-       return !--__get_cpu_var(perf_disable_count);
-}
-
 void perf_disable(void)
 {
-       __perf_disable();
-       hw_perf_disable();
+       if (!__get_cpu_var(perf_disable_count)++)
+               hw_perf_disable();
 }
 
 void perf_enable(void)
 {
-       if (__perf_enable())
+       if (!--__get_cpu_var(perf_disable_count))
                hw_perf_enable();
 }
 
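For context: this hunk folds the old __perf_disable()/__perf_enable() helpers into perf_disable()/perf_enable() themselves, so the per-CPU nesting count lives in one place and only the outermost pair touches the hardware. A minimal usage sketch (illustrative, not part of the patch):

        perf_disable();         /* count 0 -> 1: hw_perf_disable() runs */
        perf_disable();         /* count 1 -> 2: no hardware access     */
        perf_enable();          /* count 2 -> 1: no hardware access     */
        perf_enable();          /* count 1 -> 0: hw_perf_enable() runs  */
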
@@ -248,7 +220,7 @@ static void perf_unpin_context(struct perf_event_context *ctx)
 
 static inline u64 perf_clock(void)
 {
-       return cpu_clock(smp_processor_id());
+       return cpu_clock(raw_smp_processor_id());
 }
 
 /*
@@ -632,14 +604,13 @@ void perf_event_disable(struct perf_event *event)
 static int
 event_sched_in(struct perf_event *event,
                 struct perf_cpu_context *cpuctx,
-                struct perf_event_context *ctx,
-                int cpu)
+                struct perf_event_context *ctx)
 {
        if (event->state <= PERF_EVENT_STATE_OFF)
                return 0;
 
        event->state = PERF_EVENT_STATE_ACTIVE;
-       event->oncpu = cpu;     /* TODO: put 'cpu' into cpuctx->cpu */
+       event->oncpu = smp_processor_id();
        /*
         * The new state must be visible before we turn it on in the hardware:
         */
@@ -666,8 +637,7 @@ event_sched_in(struct perf_event *event,
 static int
 group_sched_in(struct perf_event *group_event,
               struct perf_cpu_context *cpuctx,
-              struct perf_event_context *ctx,
-              int cpu)
+              struct perf_event_context *ctx)
 {
        struct perf_event *event, *partial_group;
        int ret;
@@ -675,18 +645,18 @@ group_sched_in(struct perf_event *group_event,
        if (group_event->state == PERF_EVENT_STATE_OFF)
                return 0;
 
-       ret = hw_perf_group_sched_in(group_event, cpuctx, ctx, cpu);
+       ret = hw_perf_group_sched_in(group_event, cpuctx, ctx);
        if (ret)
                return ret < 0 ? ret : 0;
 
-       if (event_sched_in(group_event, cpuctx, ctx, cpu))
+       if (event_sched_in(group_event, cpuctx, ctx))
                return -EAGAIN;
 
        /*
         * Schedule in siblings as one group (if any):
         */
        list_for_each_entry(event, &group_event->sibling_list, group_entry) {
-               if (event_sched_in(event, cpuctx, ctx, cpu)) {
+               if (event_sched_in(event, cpuctx, ctx)) {
                        partial_group = event;
                        goto group_error;
                }
@@ -760,7 +730,6 @@ static void __perf_install_in_context(void *info)
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
        struct perf_event *leader = event->group_leader;
-       int cpu = smp_processor_id();
        int err;
 
        /*
@@ -807,7 +776,7 @@ static void __perf_install_in_context(void *info)
        if (!group_can_go_on(event, cpuctx, 1))
                err = -EEXIST;
        else
-               err = event_sched_in(event, cpuctx, ctx, cpu);
+               err = event_sched_in(event, cpuctx, ctx);
 
        if (err) {
                /*
@@ -949,11 +918,9 @@ static void __perf_event_enable(void *info)
        } else {
                perf_disable();
                if (event == leader)
-                       err = group_sched_in(event, cpuctx, ctx,
-                                            smp_processor_id());
+                       err = group_sched_in(event, cpuctx, ctx);
                else
-                       err = event_sched_in(event, cpuctx, ctx,
-                                              smp_processor_id());
+                       err = event_sched_in(event, cpuctx, ctx);
                perf_enable();
        }
 
@@ -1049,8 +1016,15 @@ static int perf_event_refresh(struct perf_event *event, int refresh)
        return 0;
 }
 
-static void __perf_event_sched_out(struct perf_event_context *ctx,
-                                  struct perf_cpu_context *cpuctx)
+enum event_type_t {
+       EVENT_FLEXIBLE = 0x1,
+       EVENT_PINNED = 0x2,
+       EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+};
+
+static void ctx_sched_out(struct perf_event_context *ctx,
+                         struct perf_cpu_context *cpuctx,
+                         enum event_type_t event_type)
 {
        struct perf_event *event;
 
@@ -1061,13 +1035,18 @@ static void __perf_event_sched_out(struct perf_event_context *ctx,
        update_context_time(ctx);
 
        perf_disable();
-       if (ctx->nr_active) {
+       if (!ctx->nr_active)
+               goto out_enable;
+
+       if (event_type & EVENT_PINNED)
                list_for_each_entry(event, &ctx->pinned_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
 
+       if (event_type & EVENT_FLEXIBLE)
                list_for_each_entry(event, &ctx->flexible_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
-       }
+
+ out_enable:
        perf_enable();
  out:
        raw_spin_unlock(&ctx->lock);
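For context: the new event_type_t mask lets callers schedule out only part of a context instead of all of it. The two call patterns this patch uses later (illustrative):

        ctx_sched_out(ctx, cpuctx, EVENT_ALL);      /* task switch: flush pinned and flexible */
        ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); /* tick rotation: pinned groups stay put  */
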
@@ -1229,15 +1208,13 @@ void perf_event_task_sched_out(struct task_struct *task,
        rcu_read_unlock();
 
        if (do_switch) {
-               __perf_event_sched_out(ctx, cpuctx);
+               ctx_sched_out(ctx, cpuctx, EVENT_ALL);
                cpuctx->task_ctx = NULL;
        }
 }
 
-/*
- * Called with IRQs disabled
- */
-static void __perf_event_task_sched_out(struct perf_event_context *ctx)
+static void task_ctx_sched_out(struct perf_event_context *ctx,
+                              enum event_type_t event_type)
 {
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 
@@ -1247,47 +1224,41 @@ static void __perf_event_task_sched_out(struct perf_event_context *ctx)
        if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
                return;
 
-       __perf_event_sched_out(ctx, cpuctx);
+       ctx_sched_out(ctx, cpuctx, event_type);
        cpuctx->task_ctx = NULL;
 }
 
 /*
  * Called with IRQs disabled
  */
-static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
+static void __perf_event_task_sched_out(struct perf_event_context *ctx)
+{
+       task_ctx_sched_out(ctx, EVENT_ALL);
+}
+
+/*
+ * Called with IRQs disabled
+ */
+static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+                             enum event_type_t event_type)
 {
-       __perf_event_sched_out(&cpuctx->ctx, cpuctx);
+       ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
 }
 
 static void
-__perf_event_sched_in(struct perf_event_context *ctx,
-                       struct perf_cpu_context *cpuctx)
+ctx_pinned_sched_in(struct perf_event_context *ctx,
+                   struct perf_cpu_context *cpuctx)
 {
-       int cpu = smp_processor_id();
        struct perf_event *event;
-       int can_add_hw = 1;
-
-       raw_spin_lock(&ctx->lock);
-       ctx->is_active = 1;
-       if (likely(!ctx->nr_events))
-               goto out;
-
-       ctx->timestamp = perf_clock();
 
-       perf_disable();
-
-       /*
-        * First go through the list and put on any pinned groups
-        * in order to give them the best chance of going on.
-        */
        list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
                if (event->state <= PERF_EVENT_STATE_OFF)
                        continue;
-               if (event->cpu != -1 && event->cpu != cpu)
+               if (event->cpu != -1 && event->cpu != smp_processor_id())
                        continue;
 
                if (group_can_go_on(event, cpuctx, 1))
-                       group_sched_in(event, cpuctx, ctx, cpu);
+                       group_sched_in(event, cpuctx, ctx);
 
                /*
                 * If this pinned group hasn't been scheduled,
@@ -1298,6 +1269,14 @@ __perf_event_sched_in(struct perf_event_context *ctx,
                        event->state = PERF_EVENT_STATE_ERROR;
                }
        }
+}
+
+static void
+ctx_flexible_sched_in(struct perf_event_context *ctx,
+                     struct perf_cpu_context *cpuctx)
+{
+       struct perf_event *event;
+       int can_add_hw = 1;
 
        list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
                /* Ignore events in OFF or ERROR state */
@@ -1307,18 +1286,66 @@ __perf_event_sched_in(struct perf_event_context *ctx,
                 * Listen to the 'cpu' scheduling filter constraint
                 * of events:
                 */
-               if (event->cpu != -1 && event->cpu != cpu)
+               if (event->cpu != -1 && event->cpu != smp_processor_id())
                        continue;
 
                if (group_can_go_on(event, cpuctx, can_add_hw))
-                       if (group_sched_in(event, cpuctx, ctx, cpu))
+                       if (group_sched_in(event, cpuctx, ctx))
                                can_add_hw = 0;
        }
+}
+
+static void
+ctx_sched_in(struct perf_event_context *ctx,
+            struct perf_cpu_context *cpuctx,
+            enum event_type_t event_type)
+{
+       raw_spin_lock(&ctx->lock);
+       ctx->is_active = 1;
+       if (likely(!ctx->nr_events))
+               goto out;
+
+       ctx->timestamp = perf_clock();
+
+       perf_disable();
+
+       /*
+        * First go through the list and put on any pinned groups
+        * in order to give them the best chance of going on.
+        */
+       if (event_type & EVENT_PINNED)
+               ctx_pinned_sched_in(ctx, cpuctx);
+
+       /* Then walk through the lower prio flexible groups */
+       if (event_type & EVENT_FLEXIBLE)
+               ctx_flexible_sched_in(ctx, cpuctx);
+
        perf_enable();
  out:
        raw_spin_unlock(&ctx->lock);
 }
 
+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+                            enum event_type_t event_type)
+{
+       struct perf_event_context *ctx = &cpuctx->ctx;
+
+       ctx_sched_in(ctx, cpuctx, event_type);
+}
+
+static void task_ctx_sched_in(struct task_struct *task,
+                             enum event_type_t event_type)
+{
+       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+       struct perf_event_context *ctx = task->perf_event_ctxp;
+
+       if (likely(!ctx))
+               return;
+       if (cpuctx->task_ctx == ctx)
+               return;
+       ctx_sched_in(ctx, cpuctx, event_type);
+       cpuctx->task_ctx = ctx;
+}
 /*
  * Called from scheduler to add the events of the current task
  * with interrupts disabled.
@@ -1337,31 +1364,121 @@ void perf_event_task_sched_in(struct task_struct *task)
 
        if (likely(!ctx))
                return;
+
        if (cpuctx->task_ctx == ctx)
                return;
-       __perf_event_sched_in(ctx, cpuctx);
+
+       /*
+        * We want to keep the following priority order:
+        * cpu pinned (that don't need to move), task pinned,
+        * cpu flexible, task flexible.
+        */
+       cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+
+       ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
+       cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
+       ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
+
        cpuctx->task_ctx = ctx;
 }
 
-static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx)
+#define MAX_INTERRUPTS (~0ULL)
+
+static void perf_log_throttle(struct perf_event *event, int enable);
+
+static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
-       struct perf_event_context *ctx = &cpuctx->ctx;
+       u64 frequency = event->attr.sample_freq;
+       u64 sec = NSEC_PER_SEC;
+       u64 divisor, dividend;
+
+       int count_fls, nsec_fls, frequency_fls, sec_fls;
 
-       __perf_event_sched_in(ctx, cpuctx);
+       count_fls = fls64(count);
+       nsec_fls = fls64(nsec);
+       frequency_fls = fls64(frequency);
+       sec_fls = 30;
+
+       /*
+        * We got @count in @nsec, with a target of sample_freq HZ
+        * the target period becomes:
+        *
+        *             @count * 10^9
+        * period = -------------------
+        *          @nsec * sample_freq
+        *
+        */
+
+       /*
+        * Reduce accuracy by one bit such that @a and @b converge
+        * to a similar magnitude.
+        */
+#define REDUCE_FLS(a, b)               \
+do {                                   \
+       if (a##_fls > b##_fls) {        \
+               a >>= 1;                \
+               a##_fls--;              \
+       } else {                        \
+               b >>= 1;                \
+               b##_fls--;              \
+       }                               \
+} while (0)
+
+       /*
+        * Reduce accuracy until either term fits in a u64, then proceed with
+        * the other, so that finally we can do a u64/u64 division.
+        */
+       while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
+               REDUCE_FLS(nsec, frequency);
+               REDUCE_FLS(sec, count);
+       }
+
+       if (count_fls + sec_fls > 64) {
+               divisor = nsec * frequency;
+
+               while (count_fls + sec_fls > 64) {
+                       REDUCE_FLS(count, sec);
+                       divisor >>= 1;
+               }
+
+               dividend = count * sec;
+       } else {
+               dividend = count * sec;
+
+               while (nsec_fls + frequency_fls > 64) {
+                       REDUCE_FLS(nsec, frequency);
+                       dividend >>= 1;
+               }
+
+               divisor = nsec * frequency;
+       }
+
+       return div64_u64(dividend, divisor);
 }
 
-#define MAX_INTERRUPTS (~0ULL)
+static void perf_event_stop(struct perf_event *event)
+{
+       if (!event->pmu->stop)
+               return event->pmu->disable(event);
 
-static void perf_log_throttle(struct perf_event *event, int enable);
+       return event->pmu->stop(event);
+}
+
+static int perf_event_start(struct perf_event *event)
+{
+       if (!event->pmu->start)
+               return event->pmu->enable(event);
+
+       return event->pmu->start(event);
+}
 
-static void perf_adjust_period(struct perf_event *event, u64 events)
+static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
 {
        struct hw_perf_event *hwc = &event->hw;
        u64 period, sample_period;
        s64 delta;
 
-       events *= hwc->sample_period;
-       period = div64_u64(events, event->attr.sample_freq);
+       period = perf_calculate_period(event, nsec, count);
 
        delta = (s64)(period - hwc->sample_period);
        delta = (delta + 7) / 8; /* low pass filter */
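For context, a worked example of the formula implemented by perf_calculate_period() above, with illustrative numbers: at sample_freq = 1000 Hz, if count = 2,000,000 events were observed over nsec = 1,000,000 ns (an event rate of 2e9/sec), then

        period = count * 10^9 / (nsec * sample_freq)
               = 2,000,000 * 10^9 / (1,000,000 * 1000)
               = 2,000,000 events per sample, i.e. ~1000 samples/sec.

The REDUCE_FLS() loops only shed precision when one of the intermediate products (count * sec, nsec * frequency) would overflow a u64; in this example both fit and no bits are dropped.
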
@@ -1372,13 +1489,22 @@ static void perf_adjust_period(struct perf_event *event, u64 events)
                sample_period = 1;
 
        hwc->sample_period = sample_period;
+
+       if (atomic64_read(&hwc->period_left) > 8*sample_period) {
+               perf_disable();
+               perf_event_stop(event);
+               atomic64_set(&hwc->period_left, 0);
+               perf_event_start(event);
+               perf_enable();
+       }
 }
 
 static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 {
        struct perf_event *event;
        struct hw_perf_event *hwc;
-       u64 interrupts, freq;
+       u64 interrupts, now;
+       s64 delta;
 
        raw_spin_lock(&ctx->lock);
        list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
@@ -1398,45 +1524,23 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
                 */
                if (interrupts == MAX_INTERRUPTS) {
                        perf_log_throttle(event, 1);
+                       perf_disable();
                        event->pmu->unthrottle(event);
-                       interrupts = 2*sysctl_perf_event_sample_rate/HZ;
+                       perf_enable();
                }
 
                if (!event->attr.freq || !event->attr.sample_freq)
                        continue;
 
-               /*
-                * if the specified freq < HZ then we need to skip ticks
-                */
-               if (event->attr.sample_freq < HZ) {
-                       freq = event->attr.sample_freq;
-
-                       hwc->freq_count += freq;
-                       hwc->freq_interrupts += interrupts;
-
-                       if (hwc->freq_count < HZ)
-                               continue;
-
-                       interrupts = hwc->freq_interrupts;
-                       hwc->freq_interrupts = 0;
-                       hwc->freq_count -= HZ;
-               } else
-                       freq = HZ;
-
-               perf_adjust_period(event, freq * interrupts);
+               perf_disable();
+               event->pmu->read(event);
+               now = atomic64_read(&event->count);
+               delta = now - hwc->freq_count_stamp;
+               hwc->freq_count_stamp = now;
 
-               /*
-                * In order to avoid being stalled by an (accidental) huge
-                * sample period, force reset the sample period if we didn't
-                * get any events in this freq period.
-                */
-               if (!interrupts) {
-                       perf_disable();
-                       event->pmu->disable(event);
-                       atomic64_set(&hwc->period_left, 0);
-                       event->pmu->enable(event);
-                       perf_enable();
-               }
+               if (delta > 0)
+                       perf_adjust_period(event, TICK_NSEC, delta);
+               perf_enable();
        }
        raw_spin_unlock(&ctx->lock);
 }
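For context: the removed branch estimated throughput from interrupt counts and had to accumulate ticks for events with sample_freq < HZ; the replacement reads the true count delta every tick. The same code as the hunk above, with comments added:

        event->pmu->read(event);                  /* refresh event->count           */
        now = atomic64_read(&event->count);
        delta = now - hwc->freq_count_stamp;      /* events counted since last tick */
        hwc->freq_count_stamp = now;

        if (delta > 0)                            /* nothing counted? nothing to do */
                perf_adjust_period(event, TICK_NSEC, delta);
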
@@ -1446,18 +1550,11 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
  */
 static void rotate_ctx(struct perf_event_context *ctx)
 {
-       if (!ctx->nr_events)
-               return;
-
        raw_spin_lock(&ctx->lock);
 
        /* Rotate the first entry last of non-pinned groups */
-       perf_disable();
-
        list_rotate_left(&ctx->flexible_groups);
 
-       perf_enable();
-
        raw_spin_unlock(&ctx->lock);
 }
 
@@ -1465,28 +1562,40 @@ void perf_event_task_tick(struct task_struct *curr)
 {
        struct perf_cpu_context *cpuctx;
        struct perf_event_context *ctx;
+       int rotate = 0;
 
        if (!atomic_read(&nr_events))
                return;
 
        cpuctx = &__get_cpu_var(perf_cpu_context);
+       if (cpuctx->ctx.nr_events &&
+           cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
+               rotate = 1;
+
        ctx = curr->perf_event_ctxp;
+       if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
+               rotate = 1;
 
        perf_ctx_adjust_freq(&cpuctx->ctx);
        if (ctx)
                perf_ctx_adjust_freq(ctx);
 
-       perf_event_cpu_sched_out(cpuctx);
+       if (!rotate)
+               return;
+
+       perf_disable();
+       cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
        if (ctx)
-               __perf_event_task_sched_out(ctx);
+               task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
 
        rotate_ctx(&cpuctx->ctx);
        if (ctx)
                rotate_ctx(ctx);
 
-       perf_event_cpu_sched_in(cpuctx);
+       cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
        if (ctx)
-               perf_event_task_sched_in(curr);
+               task_ctx_sched_in(curr, EVENT_FLEXIBLE);
+       perf_enable();
 }
 
 static int event_enable_on_exec(struct perf_event *event,
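For context: the tick now skips rotation unless some context is over-committed. nr_events != nr_active means at least one group could not get PMU space, which is the only situation round-robining the flexible groups can improve; the hunk above applies the same test to both the cpu and task contexts:

        if (ctx->nr_events && ctx->nr_events != ctx->nr_active)
                rotate = 1;     /* somebody is parked: rotate flexible groups */
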
@@ -2481,7 +2590,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
        if (user_locked > user_lock_limit)
                extra = user_locked - user_lock_limit;
 
-       lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+       lock_limit = rlimit(RLIMIT_MEMLOCK);
        lock_limit >>= PAGE_SHIFT;
        locked = vma->vm_mm->locked_vm + extra;
 
@@ -2677,6 +2786,13 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
        return NULL;
 }
 
+#ifdef CONFIG_EVENT_TRACING
+__weak
+void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
+{
+}
+#endif
+
 /*
  * Output
  */
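For context, this is the change the subject line refers to: the generic __weak perf_arch_fetch_caller_regs() stub is now built whenever CONFIG_EVENT_TRACING is set, so configurations whose architecture supplies no override still get a linkable symbol. An architecture overrides it simply by defining a strong symbol with the same signature; a hypothetical sketch (the field name is x86-style and purely illustrative):

        void perf_arch_fetch_caller_regs(struct pt_regs *regs,
                                         unsigned long ip, int skip)
        {
                memset(regs, 0, sizeof(*regs));
                regs->ip = ip;  /* record the callsite; real code fills more */
        }
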
@@ -3278,8 +3394,6 @@ static void perf_event_task_output(struct perf_event *event,
        task_event->event_id.tid = perf_event_tid(event, task);
        task_event->event_id.ptid = perf_event_tid(event, current);
 
-       task_event->event_id.time = perf_clock();
-
        perf_output_put(&handle, task_event->event_id);
 
        perf_output_end(&handle);
@@ -3287,6 +3401,9 @@ static void perf_event_task_output(struct perf_event *event,
 
 static int perf_event_task_match(struct perf_event *event)
 {
+       if (event->state < PERF_EVENT_STATE_INACTIVE)
+               return 0;
+
        if (event->cpu != -1 && event->cpu != smp_processor_id())
                return 0;
 
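For context: an event that is still being created or is already being torn down must not be matched for side-band output records; the same guard is added to the comm and mmap matchers in the hunks below. The test relies on the state ordering from include/linux/perf_event.h (shown for reference):

        enum perf_event_active_state {
                PERF_EVENT_STATE_ERROR          = -2,
                PERF_EVENT_STATE_OFF            = -1,
                PERF_EVENT_STATE_INACTIVE       =  0,
                PERF_EVENT_STATE_ACTIVE         =  1,
        };
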
@@ -3316,7 +3433,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
        cpuctx = &get_cpu_var(perf_cpu_context);
        perf_event_task_ctx(&cpuctx->ctx, task_event);
        if (!ctx)
-               ctx = rcu_dereference(task_event->task->perf_event_ctxp);
+               ctx = rcu_dereference(current->perf_event_ctxp);
        if (ctx)
                perf_event_task_ctx(ctx, task_event);
        put_cpu_var(perf_cpu_context);
@@ -3347,6 +3464,7 @@ static void perf_event_task(struct task_struct *task,
                        /* .ppid */
                        /* .tid  */
                        /* .ptid */
+                       .time = perf_clock(),
                },
        };
 
@@ -3396,6 +3514,9 @@ static void perf_event_comm_output(struct perf_event *event,
 
 static int perf_event_comm_match(struct perf_event *event)
 {
+       if (event->state < PERF_EVENT_STATE_INACTIVE)
+               return 0;
+
        if (event->cpu != -1 && event->cpu != smp_processor_id())
                return 0;
 
@@ -3513,6 +3634,9 @@ static void perf_event_mmap_output(struct perf_event *event,
 static int perf_event_mmap_match(struct perf_event *event,
                                   struct perf_mmap_event *mmap_event)
 {
+       if (event->state < PERF_EVENT_STATE_INACTIVE)
+               return 0;
+
        if (event->cpu != -1 && event->cpu != smp_processor_id())
                return 0;
 
@@ -3619,7 +3743,7 @@ void __perf_event_mmap(struct vm_area_struct *vma)
                        /* .tid */
                        .start  = vma->vm_start,
                        .len    = vma->vm_end - vma->vm_start,
-                       .pgoff  = vma->vm_pgoff,
+                       .pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
                },
        };
 
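For context, a worked example of the .pgoff change (illustrative, assuming 4 KiB pages, i.e. PAGE_SHIFT = 12): a mapping at file page offset vm_pgoff = 3 is now reported as 3 << 12 = 12288 bytes. Reporting bytes instead of pages means userspace tools no longer need to know the kernel's page size to interpret PERF_RECORD_MMAP events.
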
@@ -3699,12 +3823,12 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
 
        if (event->attr.freq) {
                u64 now = perf_clock();
-               s64 delta = now - hwc->freq_stamp;
+               s64 delta = now - hwc->freq_time_stamp;
 
-               hwc->freq_stamp = now;
+               hwc->freq_time_stamp = now;
 
-               if (delta > 0 && delta < TICK_NSEC)
-                       perf_adjust_period(event, NSEC_PER_SEC / (int)delta);
+               if (delta > 0 && delta < 2*TICK_NSEC)
+                       perf_adjust_period(event, delta, hwc->last_period);
        }
 
        /*
@@ -3986,8 +4110,7 @@ void __perf_sw_event(u32 event_id, u64 nr, int nmi,
        if (rctx < 0)
                return;
 
-       data.addr = addr;
-       data.raw  = NULL;
+       perf_sample_data_init(&data, addr);
 
        do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
 
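For context: perf_sample_data_init() is introduced elsewhere in this series (in include/linux/perf_event.h); its shape is roughly the following, the point being that ->raw is always reset so a stale pointer can never leak into sample output:

        static inline void perf_sample_data_init(struct perf_sample_data *data,
                                                 u64 addr)
        {
                data->addr = addr;
                data->raw  = NULL;
        }
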
@@ -4032,11 +4155,10 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
        struct perf_event *event;
        u64 period;
 
-       event   = container_of(hrtimer, struct perf_event, hw.hrtimer);
+       event = container_of(hrtimer, struct perf_event, hw.hrtimer);
        event->pmu->read(event);
 
-       data.addr = 0;
-       data.raw = NULL;
+       perf_sample_data_init(&data, 0);
        data.period = event->hw.last_period;
        regs = get_irq_regs();
        /*
@@ -4198,26 +4320,20 @@ static const struct pmu perf_ops_task_clock = {
 #ifdef CONFIG_EVENT_TRACING
 
 void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
-                         int entry_size)
+                  int entry_size, struct pt_regs *regs)
 {
+       struct perf_sample_data data;
        struct perf_raw_record raw = {
                .size = entry_size,
                .data = record,
        };
 
-       struct perf_sample_data data = {
-               .addr = addr,
-               .raw = &raw,
-       };
-
-       struct pt_regs *regs = get_irq_regs();
-
-       if (!regs)
-               regs = task_pt_regs(current);
+       perf_sample_data_init(&data, addr);
+       data.raw = &raw;
 
        /* Trace events already protected against recursion */
        do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
-                               &data, regs);
+                        &data, regs);
 }
 EXPORT_SYMBOL_GPL(perf_tp_event);
 
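For context: perf_tp_event() no longer reconstructs registers with get_irq_regs()/task_pt_regs(); callers in the trace-event glue pass a snapshot taken at the tracepoint itself, which is what the __weak perf_arch_fetch_caller_regs() added earlier supports. A hypothetical caller sketch (only the names already appearing in this patch are real):

        struct pt_regs regs;

        perf_arch_fetch_caller_regs(&regs, ip, 0);  /* ip = tracepoint callsite */
        perf_tp_event(event_id, addr, count, record, entry_size, &regs);
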
@@ -4233,7 +4349,7 @@ static int perf_tp_event_match(struct perf_event *event,
 
 static void tp_perf_event_destroy(struct perf_event *event)
 {
-       ftrace_profile_disable(event->attr.config);
+       perf_trace_disable(event->attr.config);
 }
 
 static const struct pmu *tp_perf_event_init(struct perf_event *event)
@@ -4247,7 +4363,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
                        !capable(CAP_SYS_ADMIN))
                return ERR_PTR(-EPERM);
 
-       if (ftrace_profile_enable(event->attr.config))
+       if (perf_trace_enable(event->attr.config))
                return NULL;
 
        event->destroy = tp_perf_event_destroy;
@@ -4326,8 +4442,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
        struct perf_sample_data sample;
        struct pt_regs *regs = data;
 
-       sample.raw = NULL;
-       sample.addr = bp->attr.bp_addr;
+       perf_sample_data_init(&sample, bp->attr.bp_addr);
 
        if (!perf_exclude_event(bp, regs))
                perf_swevent_add(bp, 1, 1, &sample, regs);
@@ -4590,7 +4705,7 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
        if (attr->type >= PERF_TYPE_MAX)
                return -EINVAL;
 
-       if (attr->__reserved_1 || attr->__reserved_2)
+       if (attr->__reserved_1)
                return -EINVAL;
 
        if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
@@ -4881,8 +4996,15 @@ inherit_event(struct perf_event *parent_event,
        else
                child_event->state = PERF_EVENT_STATE_OFF;
 
-       if (parent_event->attr.freq)
-               child_event->hw.sample_period = parent_event->hw.sample_period;
+       if (parent_event->attr.freq) {
+               u64 sample_period = parent_event->hw.sample_period;
+               struct hw_perf_event *hwc = &child_event->hw;
+
+               hwc->sample_period = sample_period;
+               hwc->last_period   = sample_period;
+
+               atomic64_set(&hwc->period_left, sample_period);
+       }
 
        child_event->overflow_handler = parent_event->overflow_handler;
 
@@ -5248,18 +5370,26 @@ int perf_event_init_task(struct task_struct *child)
        return ret;
 }
 
+static void __init perf_event_init_all_cpus(void)
+{
+       int cpu;
+       struct perf_cpu_context *cpuctx;
+
+       for_each_possible_cpu(cpu) {
+               cpuctx = &per_cpu(perf_cpu_context, cpu);
+               __perf_event_init_context(&cpuctx->ctx, NULL);
+       }
+}
+
 static void __cpuinit perf_event_init_cpu(int cpu)
 {
        struct perf_cpu_context *cpuctx;
 
        cpuctx = &per_cpu(perf_cpu_context, cpu);
-       __perf_event_init_context(&cpuctx->ctx, NULL);
 
        spin_lock(&perf_resource_lock);
        cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
        spin_unlock(&perf_resource_lock);
-
-       hw_perf_event_setup(cpu);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -5299,11 +5429,6 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
                perf_event_init_cpu(cpu);
                break;
 
-       case CPU_ONLINE:
-       case CPU_ONLINE_FROZEN:
-               hw_perf_event_setup_online(cpu);
-               break;
-
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                perf_event_exit_cpu(cpu);
@@ -5326,6 +5451,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
 
 void __init perf_event_init(void)
 {
+       perf_event_init_all_cpus();
        perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
                        (void *)(long)smp_processor_id());
        perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
@@ -5333,13 +5459,16 @@ void __init perf_event_init(void)
        register_cpu_notifier(&perf_cpu_nb);
 }
 
-static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
+static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
+                                       struct sysdev_class_attribute *attr,
+                                       char *buf)
 {
        return sprintf(buf, "%d\n", perf_reserved_percpu);
 }
 
 static ssize_t
 perf_set_reserve_percpu(struct sysdev_class *class,
+                       struct sysdev_class_attribute *attr,
                        const char *buf,
                        size_t count)
 {
@@ -5368,13 +5497,17 @@ perf_set_reserve_percpu(struct sysdev_class *class,
        return count;
 }
 
-static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
+static ssize_t perf_show_overcommit(struct sysdev_class *class,
+                                   struct sysdev_class_attribute *attr,
+                                   char *buf)
 {
        return sprintf(buf, "%d\n", perf_overcommit);
 }
 
 static ssize_t
-perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
+perf_set_overcommit(struct sysdev_class *class,
+                   struct sysdev_class_attribute *attr,
+                   const char *buf, size_t count)
 {
        unsigned long val;
        int err;