Merge remote branch 'tip/perf/core' into oprofile/core
Robert Richter [Fri, 15 Oct 2010 10:45:00 +0000 (12:45 +0200)]
Conflicts:
arch/arm/oprofile/common.c
kernel/perf_event.c

arch/arm/kernel/perf_event.c
arch/sh/kernel/perf_event.c
drivers/oprofile/oprofile_perf.c
include/linux/perf_event.h
kernel/perf_event.c

@@@ -123,12 -123,6 +123,12 @@@ armpmu_get_max_events(void
  }
  EXPORT_SYMBOL_GPL(armpmu_get_max_events);
  
 +int perf_num_counters(void)
 +{
 +      return armpmu_get_max_events();
 +}
 +EXPORT_SYMBOL_GPL(perf_num_counters);
 +
  #define HW_OP_UNSUPPORTED             0xFFFF
  
  #define C(_x) \
@@@ -227,46 -221,56 +227,56 @@@ again
  }
  
  static void
- armpmu_disable(struct perf_event *event)
+ armpmu_read(struct perf_event *event)
  {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
-       int idx = hwc->idx;
-       WARN_ON(idx < 0);
-       clear_bit(idx, cpuc->active_mask);
-       armpmu->disable(hwc, idx);
-       barrier();
  
-       armpmu_event_update(event, hwc, idx);
-       cpuc->events[idx] = NULL;
-       clear_bit(idx, cpuc->used_mask);
+       /* Don't read disabled counters! */
+       if (hwc->idx < 0)
+               return;
  
-       perf_event_update_userpage(event);
+       armpmu_event_update(event, hwc, hwc->idx);
  }
  
  static void
- armpmu_read(struct perf_event *event)
+ armpmu_stop(struct perf_event *event, int flags)
  {
        struct hw_perf_event *hwc = &event->hw;
  
-       /* Don't read disabled counters! */
-       if (hwc->idx < 0)
+       if (!armpmu)
                return;
  
-       armpmu_event_update(event, hwc, hwc->idx);
+       /*
+        * ARM pmu always has to update the counter, so ignore
+        * PERF_EF_UPDATE, see comments in armpmu_start().
+        */
+       if (!(hwc->state & PERF_HES_STOPPED)) {
+               armpmu->disable(hwc, hwc->idx);
+               barrier(); /* why? */
+               armpmu_event_update(event, hwc, hwc->idx);
+               hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+       }
  }
  
  static void
- armpmu_unthrottle(struct perf_event *event)
+ armpmu_start(struct perf_event *event, int flags)
  {
        struct hw_perf_event *hwc = &event->hw;
  
+       if (!armpmu)
+               return;
+       /*
+        * ARM pmu always has to reprogram the period, so ignore
+        * PERF_EF_RELOAD, see the comment below.
+        */
+       if (flags & PERF_EF_RELOAD)
+               WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+       hwc->state = 0;
        /*
         * Set the period again. Some counters can't be stopped, so when we
-        * were throttled we simply disabled the IRQ source and the counter
+        * were stopped we simply disabled the IRQ source and the counter
         * may have been left counting. If we don't do this step then we may
         * get an interrupt too soon or *way* too late if the overflow has
         * happened since disabling.
        armpmu->enable(hwc, hwc->idx);
  }
  
+ static void
+ armpmu_del(struct perf_event *event, int flags)
+ {
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct hw_perf_event *hwc = &event->hw;
+       int idx = hwc->idx;
+       WARN_ON(idx < 0);
+       clear_bit(idx, cpuc->active_mask);
+       armpmu_stop(event, PERF_EF_UPDATE);
+       cpuc->events[idx] = NULL;
+       clear_bit(idx, cpuc->used_mask);
+       perf_event_update_userpage(event);
+ }
  static int
- armpmu_enable(struct perf_event *event)
+ armpmu_add(struct perf_event *event, int flags)
  {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int err = 0;
  
+       perf_pmu_disable(event->pmu);
        /* If we don't have a space for the counter then finish early. */
        idx = armpmu->get_event_idx(cpuc, hwc);
        if (idx < 0) {
        cpuc->events[idx] = event;
        set_bit(idx, cpuc->active_mask);
  
-       /* Set the period for the event. */
-       armpmu_event_set_period(event, hwc, idx);
-       /* Enable the event. */
-       armpmu->enable(hwc, idx);
+       hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+       if (flags & PERF_EF_START)
+               armpmu_start(event, PERF_EF_RELOAD);
  
        /* Propagate our changes to the userspace mapping. */
        perf_event_update_userpage(event);
  
  out:
+       perf_pmu_enable(event->pmu);
        return err;
  }
  
- static struct pmu pmu = {
-       .enable     = armpmu_enable,
-       .disable    = armpmu_disable,
-       .unthrottle = armpmu_unthrottle,
-       .read       = armpmu_read,
- };
+ static struct pmu pmu;
  
  static int
  validate_event(struct cpu_hw_events *cpuc,
@@@ -497,20 -514,29 +520,29 @@@ __hw_perf_event_init(struct perf_event 
        return err;
  }
  
- const struct pmu *
- hw_perf_event_init(struct perf_event *event)
+ static int armpmu_event_init(struct perf_event *event)
  {
        int err = 0;
  
+       switch (event->attr.type) {
+       case PERF_TYPE_RAW:
+       case PERF_TYPE_HARDWARE:
+       case PERF_TYPE_HW_CACHE:
+               break;
+       default:
+               return -ENOENT;
+       }
        if (!armpmu)
-               return ERR_PTR(-ENODEV);
+               return -ENODEV;
  
        event->destroy = hw_perf_event_destroy;
  
        if (!atomic_inc_not_zero(&active_events)) {
-               if (atomic_read(&active_events) > perf_max_events) {
+               if (atomic_read(&active_events) > armpmu->num_events) {
                        atomic_dec(&active_events);
-                       return ERR_PTR(-ENOSPC);
+                       return -ENOSPC;
                }
  
                mutex_lock(&pmu_reserve_mutex);
        }
  
        if (err)
-               return ERR_PTR(err);
+               return err;
  
        err = __hw_perf_event_init(event);
        if (err)
                hw_perf_event_destroy(event);
  
-       return err ? ERR_PTR(err) : &pmu;
+       return err;
  }
  
- void
- hw_perf_enable(void)
+ static void armpmu_enable(struct pmu *pmu)
  {
        /* Enable all of the perf events on hardware. */
        int idx;
        armpmu->start();
  }
  
- void
- hw_perf_disable(void)
+ static void armpmu_disable(struct pmu *pmu)
  {
        if (armpmu)
                armpmu->stop();
  }
  
+ static struct pmu pmu = {
+       .pmu_enable     = armpmu_enable,
+       .pmu_disable    = armpmu_disable,
+       .event_init     = armpmu_event_init,
+       .add            = armpmu_add,
+       .del            = armpmu_del,
+       .start          = armpmu_start,
+       .stop           = armpmu_stop,
+       .read           = armpmu_read,
+ };
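
For reference, the core drives these new hooks in a fixed order. A minimal sketch of that lifecycle (illustrative only, assuming event is an already-initialized perf_event on this PMU) shows why the ARM callbacks above can ignore PERF_EF_UPDATE and PERF_EF_RELOAD and simply always update the count and reprogram the period:

/* Sketch of the call sequence the core perf layer now issues: */
pmu->add(event, PERF_EF_START);    /* schedule the counter, start it right away   */
pmu->stop(event, PERF_EF_UPDATE);  /* e.g. throttling: halt and fold in the count */
pmu->start(event, PERF_EF_RELOAD); /* unthrottle: reprogram the period, restart   */
pmu->del(event, 0);                /* unschedule; armpmu_del() does a final stop  */
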
  /*
   * ARMv6 Performance counter handling code.
   *
@@@ -2939,14 -2974,12 +2980,12 @@@ init_hw_perf_events(void
                        armpmu = &armv6pmu;
                        memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
                                        sizeof(armv6_perf_cache_map));
-                       perf_max_events = armv6pmu.num_events;
                        break;
                case 0xB020:    /* ARM11mpcore */
                        armpmu = &armv6mpcore_pmu;
                        memcpy(armpmu_perf_cache_map,
                               armv6mpcore_perf_cache_map,
                               sizeof(armv6mpcore_perf_cache_map));
-                       perf_max_events = armv6mpcore_pmu.num_events;
                        break;
                case 0xC080:    /* Cortex-A8 */
                        armv7pmu.id = ARM_PERF_PMU_ID_CA8;
                        /* Reset PMNC and read the nb of CNTx counters
                            supported */
                        armv7pmu.num_events = armv7_reset_read_pmnc();
-                       perf_max_events = armv7pmu.num_events;
                        break;
                case 0xC090:    /* Cortex-A9 */
                        armv7pmu.id = ARM_PERF_PMU_ID_CA9;
                        /* Reset PMNC and read the nb of CNTx counters
                            supported */
                        armv7pmu.num_events = armv7_reset_read_pmnc();
-                       perf_max_events = armv7pmu.num_events;
                        break;
                }
        /* Intel CPUs [xscale]. */
                        armpmu = &xscale1pmu;
                        memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
                                        sizeof(xscale_perf_cache_map));
-                       perf_max_events = xscale1pmu.num_events;
                        break;
                case 2:
                        armpmu = &xscale2pmu;
                        memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
                                        sizeof(xscale_perf_cache_map));
-                       perf_max_events = xscale2pmu.num_events;
                        break;
                }
        }
                                arm_pmu_names[armpmu->id], armpmu->num_events);
        } else {
                pr_info("no hardware support available\n");
-               perf_max_events = -1;
        }
  
+       perf_pmu_register(&pmu);
        return 0;
  }
  arch_initcall(init_hw_perf_events);
  /*
   * Callchain handling code.
   */
- static inline void
- callchain_store(struct perf_callchain_entry *entry,
-               u64 ip)
- {
-       if (entry->nr < PERF_MAX_STACK_DEPTH)
-               entry->ip[entry->nr++] = ip;
- }
  
  /*
   * The registers we're interested in are at the end of the variable
@@@ -3045,7 -3068,7 +3074,7 @@@ user_backtrace(struct frame_tail *tail
        if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
                return NULL;
  
-       callchain_store(entry, buftail.lr);
+       perf_callchain_store(entry, buftail.lr);
  
        /*
         * Frame pointers should strictly progress back up the stack
        return buftail.fp - 1;
  }
  
- static void
- perf_callchain_user(struct pt_regs *regs,
-                   struct perf_callchain_entry *entry)
+ void
+ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
  {
        struct frame_tail *tail;
  
-       callchain_store(entry, PERF_CONTEXT_USER);
-       if (!user_mode(regs))
-               regs = task_pt_regs(current);
  
        tail = (struct frame_tail *)regs->ARM_fp - 1;
  
@@@ -3084,56 -3102,18 +3108,18 @@@ callchain_trace(struct stackframe *fr
                void *data)
  {
        struct perf_callchain_entry *entry = data;
-       callchain_store(entry, fr->pc);
+       perf_callchain_store(entry, fr->pc);
        return 0;
  }
  
- static void
- perf_callchain_kernel(struct pt_regs *regs,
-                     struct perf_callchain_entry *entry)
+ void
+ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
  {
        struct stackframe fr;
  
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
        fr.fp = regs->ARM_fp;
        fr.sp = regs->ARM_sp;
        fr.lr = regs->ARM_lr;
        fr.pc = regs->ARM_pc;
        walk_stackframe(&fr, callchain_trace, entry);
  }
- static void
- perf_do_callchain(struct pt_regs *regs,
-                 struct perf_callchain_entry *entry)
- {
-       int is_user;
-       if (!regs)
-               return;
-       is_user = user_mode(regs);
-       if (!current || !current->pid)
-               return;
-       if (is_user && current->state != TASK_RUNNING)
-               return;
-       if (!is_user)
-               perf_callchain_kernel(regs, entry);
-       if (current->mm)
-               perf_callchain_user(regs, entry);
- }
- static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
- struct perf_callchain_entry *
- perf_callchain(struct pt_regs *regs)
- {
-       struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-       entry->nr = 0;
-       perf_do_callchain(regs, entry);
-       return entry;
- }
@@@ -59,24 -59,6 +59,24 @@@ static inline int sh_pmu_initialized(vo
        return !!sh_pmu;
  }
  
 +const char *perf_pmu_name(void)
 +{
 +      if (!sh_pmu)
 +              return NULL;
 +
 +      return sh_pmu->name;
 +}
 +EXPORT_SYMBOL_GPL(perf_pmu_name);
 +
 +int perf_num_counters(void)
 +{
 +      if (!sh_pmu)
 +              return 0;
 +
 +      return sh_pmu->num_events;
 +}
 +EXPORT_SYMBOL_GPL(perf_num_counters);
 +
  /*
   * Release the PMU if this is the last perf_event.
   */
@@@ -224,50 -206,80 +224,80 @@@ again
        local64_add(delta, &event->count);
  }
  
- static void sh_pmu_disable(struct perf_event *event)
+ static void sh_pmu_stop(struct perf_event *event, int flags)
  {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
  
-       clear_bit(idx, cpuc->active_mask);
-       sh_pmu->disable(hwc, idx);
+       if (!(event->hw.state & PERF_HES_STOPPED)) {
+               sh_pmu->disable(hwc, idx);
+               cpuc->events[idx] = NULL;
+               event->hw.state |= PERF_HES_STOPPED;
+       }
+       if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
+               sh_perf_event_update(event, &event->hw, idx);
+               event->hw.state |= PERF_HES_UPTODATE;
+       }
+ }
  
-       barrier();
+ static void sh_pmu_start(struct perf_event *event, int flags)
+ {
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct hw_perf_event *hwc = &event->hw;
+       int idx = hwc->idx;
  
-       sh_perf_event_update(event, &event->hw, idx);
+       if (WARN_ON_ONCE(idx == -1))
+               return;
+       if (flags & PERF_EF_RELOAD)
+               WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+       cpuc->events[idx] = event;
+       event->hw.state = 0;
+       sh_pmu->enable(hwc, idx);
+ }
+ static void sh_pmu_del(struct perf_event *event, int flags)
+ {
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  
-       cpuc->events[idx] = NULL;
-       clear_bit(idx, cpuc->used_mask);
+       sh_pmu_stop(event, PERF_EF_UPDATE);
+       __clear_bit(event->hw.idx, cpuc->used_mask);
  
        perf_event_update_userpage(event);
  }
  
- static int sh_pmu_enable(struct perf_event *event)
+ static int sh_pmu_add(struct perf_event *event, int flags)
  {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
+       int ret = -EAGAIN;
+       perf_pmu_disable(event->pmu);
  
-       if (test_and_set_bit(idx, cpuc->used_mask)) {
+       if (__test_and_set_bit(idx, cpuc->used_mask)) {
                idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
                if (idx == sh_pmu->num_events)
-                       return -EAGAIN;
+                       goto out;
  
-               set_bit(idx, cpuc->used_mask);
+               __set_bit(idx, cpuc->used_mask);
                hwc->idx = idx;
        }
  
        sh_pmu->disable(hwc, idx);
  
-       cpuc->events[idx] = event;
-       set_bit(idx, cpuc->active_mask);
-       sh_pmu->enable(hwc, idx);
+       event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+       if (flags & PERF_EF_START)
+               sh_pmu_start(event, PERF_EF_RELOAD);
  
        perf_event_update_userpage(event);
-       return 0;
+       ret = 0;
+ out:
+       perf_pmu_enable(event->pmu);
+       return ret;
  }
  
  static void sh_pmu_read(struct perf_event *event)
        sh_perf_event_update(event, &event->hw, event->hw.idx);
  }
  
- static const struct pmu pmu = {
-       .enable         = sh_pmu_enable,
-       .disable        = sh_pmu_disable,
-       .read           = sh_pmu_read,
- };
- const struct pmu *hw_perf_event_init(struct perf_event *event)
+ static int sh_pmu_event_init(struct perf_event *event)
  {
-       int err = __hw_perf_event_init(event);
+       int err;
+       switch (event->attr.type) {
+       case PERF_TYPE_RAW:
+       case PERF_TYPE_HW_CACHE:
+       case PERF_TYPE_HARDWARE:
+               err = __hw_perf_event_init(event);
+               break;
+       default:
+               return -ENOENT;
+       }
        if (unlikely(err)) {
                if (event->destroy)
                        event->destroy(event);
-               return ERR_PTR(err);
        }
  
-       return &pmu;
+       return err;
+ }
+ static void sh_pmu_enable(struct pmu *pmu)
+ {
+       if (!sh_pmu_initialized())
+               return;
+       sh_pmu->enable_all();
+ }
+ static void sh_pmu_disable(struct pmu *pmu)
+ {
+       if (!sh_pmu_initialized())
+               return;
+       sh_pmu->disable_all();
  }
  
+ static struct pmu pmu = {
+       .pmu_enable     = sh_pmu_enable,
+       .pmu_disable    = sh_pmu_disable,
+       .event_init     = sh_pmu_event_init,
+       .add            = sh_pmu_add,
+       .del            = sh_pmu_del,
+       .start          = sh_pmu_start,
+       .stop           = sh_pmu_stop,
+       .read           = sh_pmu_read,
+ };
  static void sh_pmu_setup(int cpu)
  {
        struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
@@@ -317,32 -361,17 +379,17 @@@ sh_pmu_notifier(struct notifier_block *
        return NOTIFY_OK;
  }
  
- void hw_perf_enable(void)
- {
-       if (!sh_pmu_initialized())
-               return;
-       sh_pmu->enable_all();
- }
- void hw_perf_disable(void)
- {
-       if (!sh_pmu_initialized())
-               return;
-       sh_pmu->disable_all();
- }
- int __cpuinit register_sh_pmu(struct sh_pmu *pmu)
+ int __cpuinit register_sh_pmu(struct sh_pmu *_pmu)
  {
        if (sh_pmu)
                return -EBUSY;
-       sh_pmu = pmu;
+       sh_pmu = _pmu;
  
-       pr_info("Performance Events: %s support registered\n", pmu->name);
+       pr_info("Performance Events: %s support registered\n", _pmu->name);
  
-       WARN_ON(pmu->num_events > MAX_HWEVENTS);
+       WARN_ON(_pmu->num_events > MAX_HWEVENTS);
  
+       perf_pmu_register(&pmu);
        perf_cpu_notifier(sh_pmu_notifier);
        return 0;
  }
index b17235a,0000000..79c0005
mode 100644,000000..100644
--- /dev/null
@@@ -1,323 -1,0 +1,323 @@@
 +/*
 + * Copyright 2010 ARM Ltd.
 + *
 + * Perf-events backend for OProfile.
 + */
 +#include <linux/perf_event.h>
 +#include <linux/oprofile.h>
 +#include <linux/slab.h>
 +
 +/*
 + * Per performance monitor configuration as set via oprofilefs.
 + */
 +struct op_counter_config {
 +      unsigned long count;
 +      unsigned long enabled;
 +      unsigned long event;
 +      unsigned long unit_mask;
 +      unsigned long kernel;
 +      unsigned long user;
 +      struct perf_event_attr attr;
 +};
 +
 +static int oprofile_perf_enabled;
 +static DEFINE_MUTEX(oprofile_perf_mutex);
 +
 +static struct op_counter_config *counter_config;
 +static struct perf_event **perf_events[nr_cpumask_bits];
 +static int num_counters;
 +
 +/*
 + * Overflow callback for oprofile.
 + */
 +static void op_overflow_handler(struct perf_event *event, int unused,
 +                      struct perf_sample_data *data, struct pt_regs *regs)
 +{
 +      int id;
 +      u32 cpu = smp_processor_id();
 +
 +      for (id = 0; id < num_counters; ++id)
 +              if (perf_events[cpu][id] == event)
 +                      break;
 +
 +      if (id != num_counters)
 +              oprofile_add_sample(regs, id);
 +      else
 +              pr_warning("oprofile: ignoring spurious overflow "
 +                              "on cpu %u\n", cpu);
 +}
 +
 +/*
 + * Called by oprofile_perf_setup to create perf attributes to mirror the oprofile
 + * settings in counter_config. Attributes are created as `pinned' events and
 + * so are permanently scheduled on the PMU.
 + */
 +static void op_perf_setup(void)
 +{
 +      int i;
 +      u32 size = sizeof(struct perf_event_attr);
 +      struct perf_event_attr *attr;
 +
 +      for (i = 0; i < num_counters; ++i) {
 +              attr = &counter_config[i].attr;
 +              memset(attr, 0, size);
 +              attr->type              = PERF_TYPE_RAW;
 +              attr->size              = size;
 +              attr->config            = counter_config[i].event;
 +              attr->sample_period     = counter_config[i].count;
 +              attr->pinned            = 1;
 +      }
 +}
 +
 +static int op_create_counter(int cpu, int event)
 +{
 +      struct perf_event *pevent;
 +
 +      if (!counter_config[event].enabled || perf_events[cpu][event])
 +              return 0;
 +
 +      pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
-                                                 cpu, -1,
++                                                cpu, NULL,
 +                                                op_overflow_handler);
 +
 +      if (IS_ERR(pevent))
 +              return PTR_ERR(pevent);
 +
 +      if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
 +              perf_event_release_kernel(pevent);
 +              pr_warning("oprofile: failed to enable event %d "
 +                              "on CPU %d\n", event, cpu);
 +              return -EBUSY;
 +      }
 +
 +      perf_events[cpu][event] = pevent;
 +
 +      return 0;
 +}
 +
 +static void op_destroy_counter(int cpu, int event)
 +{
 +      struct perf_event *pevent = perf_events[cpu][event];
 +
 +      if (pevent) {
 +              perf_event_release_kernel(pevent);
 +              perf_events[cpu][event] = NULL;
 +      }
 +}
 +
 +/*
 + * Called by oprofile_perf_start to create active perf events based on the
 + * previously configured attributes.
 + */
 +static int op_perf_start(void)
 +{
 +      int cpu, event, ret = 0;
 +
 +      for_each_online_cpu(cpu) {
 +              for (event = 0; event < num_counters; ++event) {
 +                      ret = op_create_counter(cpu, event);
 +                      if (ret)
 +                              return ret;
 +              }
 +      }
 +
 +      return ret;
 +}
 +
 +/*
 + * Called by oprofile_perf_stop at the end of a profiling run.
 + */
 +static void op_perf_stop(void)
 +{
 +      int cpu, event;
 +
 +      for_each_online_cpu(cpu)
 +              for (event = 0; event < num_counters; ++event)
 +                      op_destroy_counter(cpu, event);
 +}
 +
 +static int oprofile_perf_create_files(struct super_block *sb, struct dentry *root)
 +{
 +      unsigned int i;
 +
 +      for (i = 0; i < num_counters; i++) {
 +              struct dentry *dir;
 +              char buf[4];
 +
 +              snprintf(buf, sizeof buf, "%d", i);
 +              dir = oprofilefs_mkdir(sb, root, buf);
 +              oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
 +              oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
 +              oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
 +              oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
 +              oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
 +              oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
 +      }
 +
 +      return 0;
 +}
 +
 +static int oprofile_perf_setup(void)
 +{
 +      spin_lock(&oprofilefs_lock);
 +      op_perf_setup();
 +      spin_unlock(&oprofilefs_lock);
 +      return 0;
 +}
 +
 +static int oprofile_perf_start(void)
 +{
 +      int ret = -EBUSY;
 +
 +      mutex_lock(&oprofile_perf_mutex);
 +      if (!oprofile_perf_enabled) {
 +              ret = 0;
 +              op_perf_start();
 +              oprofile_perf_enabled = 1;
 +      }
 +      mutex_unlock(&oprofile_perf_mutex);
 +      return ret;
 +}
 +
 +static void oprofile_perf_stop(void)
 +{
 +      mutex_lock(&oprofile_perf_mutex);
 +      if (oprofile_perf_enabled)
 +              op_perf_stop();
 +      oprofile_perf_enabled = 0;
 +      mutex_unlock(&oprofile_perf_mutex);
 +}
 +
 +#ifdef CONFIG_PM
 +static int oprofile_perf_suspend(struct platform_device *dev, pm_message_t state)
 +{
 +      mutex_lock(&oprofile_perf_mutex);
 +      if (oprofile_perf_enabled)
 +              op_perf_stop();
 +      mutex_unlock(&oprofile_perf_mutex);
 +      return 0;
 +}
 +
 +static int oprofile_perf_resume(struct platform_device *dev)
 +{
 +      mutex_lock(&oprofile_perf_mutex);
 +      if (oprofile_perf_enabled && op_perf_start())
 +              oprofile_perf_enabled = 0;
 +      mutex_unlock(&oprofile_perf_mutex);
 +      return 0;
 +}
 +
 +static struct platform_driver oprofile_driver = {
 +      .driver         = {
 +              .name           = "oprofile-perf",
 +      },
 +      .resume         = oprofile_perf_resume,
 +      .suspend        = oprofile_perf_suspend,
 +};
 +
 +static struct platform_device *oprofile_pdev;
 +
 +static int __init init_driverfs(void)
 +{
 +      int ret;
 +
 +      ret = platform_driver_register(&oprofile_driver);
 +      if (ret)
 +              return ret;
 +
 +      oprofile_pdev = platform_device_register_simple(
 +                              oprofile_driver.driver.name, 0, NULL, 0);
 +      if (IS_ERR(oprofile_pdev)) {
 +              ret = PTR_ERR(oprofile_pdev);
 +              platform_driver_unregister(&oprofile_driver);
 +      }
 +
 +      return ret;
 +}
 +
 +static void __exit exit_driverfs(void)
 +{
 +      platform_device_unregister(oprofile_pdev);
 +      platform_driver_unregister(&oprofile_driver);
 +}
 +#else
 +static int __init init_driverfs(void) { return 0; }
 +#define exit_driverfs() do { } while (0)
 +#endif /* CONFIG_PM */
 +
 +void oprofile_perf_exit(void)
 +{
 +      int cpu, id;
 +      struct perf_event *event;
 +
 +      for_each_possible_cpu(cpu) {
 +              for (id = 0; id < num_counters; ++id) {
 +                      event = perf_events[cpu][id];
 +                      if (event)
 +                              perf_event_release_kernel(event);
 +              }
 +
 +              kfree(perf_events[cpu]);
 +      }
 +
 +      kfree(counter_config);
 +      exit_driverfs();
 +}
 +
 +int __init oprofile_perf_init(struct oprofile_operations *ops)
 +{
 +      int cpu, ret = 0;
 +
 +      ret = init_driverfs();
 +      if (ret)
 +              return ret;
 +
 +      memset(&perf_events, 0, sizeof(perf_events));
 +
 +      num_counters = perf_num_counters();
 +      if (num_counters <= 0) {
 +              pr_info("oprofile: no performance counters\n");
 +              ret = -ENODEV;
 +              goto out;
 +      }
 +
 +      counter_config = kcalloc(num_counters,
 +                      sizeof(struct op_counter_config), GFP_KERNEL);
 +
 +      if (!counter_config) {
 +              pr_info("oprofile: failed to allocate %d "
 +                              "counters\n", num_counters);
 +              ret = -ENOMEM;
 +              num_counters = 0;
 +              goto out;
 +      }
 +
 +      for_each_possible_cpu(cpu) {
 +              perf_events[cpu] = kcalloc(num_counters,
 +                              sizeof(struct perf_event *), GFP_KERNEL);
 +              if (!perf_events[cpu]) {
 +                      pr_info("oprofile: failed to allocate %d perf events "
 +                                      "for cpu %d\n", num_counters, cpu);
 +                      ret = -ENOMEM;
 +                      goto out;
 +              }
 +      }
 +
 +      ops->create_files       = oprofile_perf_create_files;
 +      ops->setup              = oprofile_perf_setup;
 +      ops->start              = oprofile_perf_start;
 +      ops->stop               = oprofile_perf_stop;
 +      ops->shutdown           = oprofile_perf_stop;
 +      ops->cpu_type           = op_name_from_perf_id();
 +
 +      if (!ops->cpu_type)
 +              ret = -ENODEV;
 +      else
 +              pr_info("oprofile: using %s\n", ops->cpu_type);
 +
 +out:
 +      if (ret)
 +              oprofile_perf_exit();
 +
 +      return ret;
 +}
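
A sketch of how an architecture is expected to hook this shared backend up (illustrative only; it mirrors what arch/arm/oprofile/common.c is reduced to by this series, and assumes the arch supplies op_name_from_perf_id() plus the perf_num_counters()/perf_pmu_name() exports added above):

/* Hypothetical arch glue: */
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
        return oprofile_perf_init(ops);
}

void oprofile_arch_exit(void)
{
        oprofile_perf_exit();
}
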
@@@ -529,7 -529,6 +529,6 @@@ struct hw_perf_event 
                        int             last_cpu;
                };
                struct { /* software */
-                       s64             remaining;
                        struct hrtimer  hrtimer;
                };
  #ifdef CONFIG_HAVE_HW_BREAKPOINT
                };
  #endif
        };
+       int                             state;
        local64_t                       prev_count;
        u64                             sample_period;
        u64                             last_period;
  #endif
  };
  
+ /*
+  * hw_perf_event::state flags
+  */
+ #define PERF_HES_STOPPED      0x01 /* the counter is stopped */
+ #define PERF_HES_UPTODATE     0x02 /* event->count up-to-date */
+ #define PERF_HES_ARCH         0x04
  struct perf_event;
  
  /*
   * struct pmu - generic performance monitoring unit
   */
  struct pmu {
-       int (*enable)                   (struct perf_event *event);
-       void (*disable)                 (struct perf_event *event);
-       int (*start)                    (struct perf_event *event);
-       void (*stop)                    (struct perf_event *event);
-       void (*read)                    (struct perf_event *event);
-       void (*unthrottle)              (struct perf_event *event);
+       struct list_head                entry;
+       int * __percpu                  pmu_disable_count;
+       struct perf_cpu_context * __percpu pmu_cpu_context;
+       int                             task_ctx_nr;
  
        /*
-        * Group events scheduling is treated as a transaction, add group
-        * events as a whole and perform one schedulability test. If the test
-        * fails, roll back the whole group
+        * Fully disable/enable this PMU, can be used to protect from the PMI
+        * as well as for lazy/batch writing of the MSRs.
         */
+       void (*pmu_enable)              (struct pmu *pmu); /* optional */
+       void (*pmu_disable)             (struct pmu *pmu); /* optional */
  
        /*
-        * Start the transaction, after this ->enable() doesn't need
-        * to do schedulability tests.
+        * Try and initialize the event for this PMU.
+        * Should return -ENOENT when the @event doesn't match this PMU.
         */
-       void (*start_txn)       (const struct pmu *pmu);
+       int (*event_init)               (struct perf_event *event);
+ #define PERF_EF_START 0x01            /* start the counter when adding    */
+ #define PERF_EF_RELOAD        0x02            /* reload the counter when starting */
+ #define PERF_EF_UPDATE        0x04            /* update the counter when stopping */
        /*
-        * If ->start_txn() disabled the ->enable() schedulability test
+        * Adds/Removes a counter to/from the PMU, can be done inside
+        * a transaction, see the ->*_txn() methods.
+        */
+       int  (*add)                     (struct perf_event *event, int flags);
+       void (*del)                     (struct perf_event *event, int flags);
+       /*
+        * Starts/Stops a counter present on the PMU. The PMI handler
+        * should stop the counter when perf_event_overflow() returns
+        * !0. ->start() will be used to continue.
+        */
+       void (*start)                   (struct perf_event *event, int flags);
+       void (*stop)                    (struct perf_event *event, int flags);
+       /*
+        * Updates the counter value of the event.
+        */
+       void (*read)                    (struct perf_event *event);
+       /*
+        * Group events scheduling is treated as a transaction, add
+        * group events as a whole and perform one schedulability test.
+        * If the test fails, roll back the whole group
+        *
+        * Start the transaction, after this ->add() doesn't need to
+        * do schedulability tests.
+        */
+       void (*start_txn)       (struct pmu *pmu); /* optional */
+       /*
+        * If ->start_txn() disabled the ->add() schedulability test
         * then ->commit_txn() is required to perform one. On success
         * the transaction is closed. On error the transaction is kept
         * open until ->cancel_txn() is called.
         */
-       int  (*commit_txn)      (const struct pmu *pmu);
+       int  (*commit_txn)      (struct pmu *pmu); /* optional */
        /*
-        * Will cancel the transaction, assumes ->disable() is called for
-        * each successfull ->enable() during the transaction.
+        * Will cancel the transaction, assumes ->del() is called
+        * for each successful ->add() during the transaction.
         */
-       void (*cancel_txn)      (const struct pmu *pmu);
+       void (*cancel_txn)      (struct pmu *pmu); /* optional */
  };
  
  /**
@@@ -669,7 -710,7 +710,7 @@@ struct perf_event 
        int                             nr_siblings;
        int                             group_flags;
        struct perf_event               *group_leader;
-       const struct pmu                *pmu;
+       struct pmu                      *pmu;
  
        enum perf_event_active_state    state;
        unsigned int                    attach_state;
  #endif /* CONFIG_PERF_EVENTS */
  };
  
+ enum perf_event_context_type {
+       task_context,
+       cpu_context,
+ };
  /**
   * struct perf_event_context - event context structure
   *
   * Used as a container for task events and CPU events as well:
   */
  struct perf_event_context {
+       enum perf_event_context_type    type;
+       struct pmu                      *pmu;
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
        struct rcu_head                 rcu_head;
  };
  
+ /*
+  * Number of contexts where an event can trigger:
+  *    task, softirq, hardirq, nmi.
+  */
+ #define PERF_NR_CONTEXTS      4
  /**
   * struct perf_event_cpu_context - per cpu event context structure
   */
@@@ -815,18 -869,9 +869,9 @@@ struct perf_cpu_context 
        struct perf_event_context       ctx;
        struct perf_event_context       *task_ctx;
        int                             active_oncpu;
-       int                             max_pertask;
        int                             exclusive;
-       struct swevent_hlist            *swevent_hlist;
-       struct mutex                    hlist_mutex;
-       int                             hlist_refcount;
-       /*
-        * Recursion avoidance:
-        *
-        * task, softirq, irq, nmi context
-        */
-       int                             recursion[4];
+       struct list_head                rotation_list;
+       int                             jiffies_interval;
  };
  
  struct perf_output_handle {
  
  #ifdef CONFIG_PERF_EVENTS
  
- /*
-  * Set by architecture code:
-  */
- extern int perf_max_events;
- extern const struct pmu *hw_perf_event_init(struct perf_event *event);
+ extern int perf_pmu_register(struct pmu *pmu);
+ extern void perf_pmu_unregister(struct pmu *pmu);
  
 +extern int perf_num_counters(void);
 +extern const char *perf_pmu_name(void);
  extern void perf_event_task_sched_in(struct task_struct *task);
  extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
  extern int perf_event_init_task(struct task_struct *child);
  extern void perf_event_exit_task(struct task_struct *child);
  extern void perf_event_free_task(struct task_struct *task);
+ extern void perf_event_delayed_put(struct task_struct *task);
  extern void set_perf_event_pending(void);
  extern void perf_event_do_pending(void);
  extern void perf_event_print_debug(void);
- extern void __perf_disable(void);
- extern bool __perf_enable(void);
- extern void perf_disable(void);
- extern void perf_enable(void);
+ extern void perf_pmu_disable(struct pmu *pmu);
+ extern void perf_pmu_enable(struct pmu *pmu);
  extern int perf_event_task_disable(void);
  extern int perf_event_task_enable(void);
  extern void perf_event_update_userpage(struct perf_event *event);
@@@ -871,7 -908,7 +910,7 @@@ extern int perf_event_release_kernel(st
  extern struct perf_event *
  perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                int cpu,
-                               pid_t pid,
+                               struct task_struct *task,
                                perf_overflow_handler_t callback);
  extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);
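
Note that the in-kernel counter API now takes a target task instead of a pid; per-CPU counters pass NULL, as the oprofile backend above does. A hedged usage sketch (the helper name and sample period are made up):

static struct perf_event *create_cycle_counter(int cpu,
                                               perf_overflow_handler_t handler)
{
        struct perf_event_attr attr = {
                .type          = PERF_TYPE_HARDWARE,
                .config        = PERF_COUNT_HW_CPU_CYCLES,
                .size          = sizeof(attr),
                .sample_period = 1000000,
        };

        /* cpu-bound counter: task is NULL now, where the old API took pid == -1 */
        return perf_event_create_kernel_counter(&attr, cpu, NULL, handler);
}
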
@@@ -922,14 -959,7 +961,7 @@@ extern int perf_event_overflow(struct p
   */
  static inline int is_software_event(struct perf_event *event)
  {
-       switch (event->attr.type) {
-       case PERF_TYPE_SOFTWARE:
-       case PERF_TYPE_TRACEPOINT:
-       /* for now the breakpoint stuff also works as software event */
-       case PERF_TYPE_BREAKPOINT:
-               return 1;
-       }
-       return 0;
+       return event->pmu->task_ctx_nr == perf_sw_context;
  }
  
  extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
@@@ -978,7 -1008,21 +1010,21 @@@ extern int perf_unregister_guest_info_c
  extern void perf_event_comm(struct task_struct *tsk);
  extern void perf_event_fork(struct task_struct *tsk);
  
- extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+ /* Callchains */
+ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
+ extern void perf_callchain_user(struct perf_callchain_entry *entry,
+                               struct pt_regs *regs);
+ extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
+                                 struct pt_regs *regs);
+ static inline void
+ perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+ {
+       if (entry->nr < PERF_MAX_STACK_DEPTH)
+               entry->ip[entry->nr++] = ip;
+ }
  
  extern int sysctl_perf_event_paranoid;
  extern int sysctl_perf_event_mlock;
@@@ -1021,21 -1065,19 +1067,19 @@@ extern int perf_swevent_get_recursion_c
  extern void perf_swevent_put_recursion_context(int rctx);
  extern void perf_event_enable(struct perf_event *event);
  extern void perf_event_disable(struct perf_event *event);
+ extern void perf_event_task_tick(void);
  #else
  static inline void
  perf_event_task_sched_in(struct task_struct *task)                    { }
  static inline void
  perf_event_task_sched_out(struct task_struct *task,
                            struct task_struct *next)                   { }
  static inline int perf_event_init_task(struct task_struct *child)     { return 0; }
  static inline void perf_event_exit_task(struct task_struct *child)    { }
  static inline void perf_event_free_task(struct task_struct *task)     { }
+ static inline void perf_event_delayed_put(struct task_struct *task)   { }
  static inline void perf_event_do_pending(void)                                { }
  static inline void perf_event_print_debug(void)                               { }
- static inline void perf_disable(void)                                 { }
- static inline void perf_enable(void)                                  { }
  static inline int perf_event_task_disable(void)                               { return -EINVAL; }
  static inline int perf_event_task_enable(void)                                { return -EINVAL; }
  
@@@ -1058,6 -1100,7 +1102,7 @@@ static inline int  perf_swevent_get_rec
  static inline void perf_swevent_put_recursion_context(int rctx)               { }
  static inline void perf_event_enable(struct perf_event *event)                { }
  static inline void perf_event_disable(struct perf_event *event)               { }
+ static inline void perf_event_task_tick(void)                         { }
  #endif
  
  #define perf_output_put(handle, x) \
diff --combined kernel/perf_event.c
  #include <linux/kernel_stat.h>
  #include <linux/perf_event.h>
  #include <linux/ftrace_event.h>
  
  #include <asm/irq_regs.h>
  
- /*
-  * Each CPU has a list of per CPU events:
-  */
- static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
- int perf_max_events __read_mostly = 1;
- static int perf_reserved_percpu __read_mostly;
- static int perf_overcommit __read_mostly = 1;
  static atomic_t nr_events __read_mostly;
  static atomic_t nr_mmap_events __read_mostly;
  static atomic_t nr_comm_events __read_mostly;
  static atomic_t nr_task_events __read_mostly;
  
+ static LIST_HEAD(pmus);
+ static DEFINE_MUTEX(pmus_lock);
+ static struct srcu_struct pmus_srcu;
  /*
   * perf event paranoia level:
   *  -1 - not paranoid at all
@@@ -67,41 -61,38 +61,43 @@@ int sysctl_perf_event_sample_rate __rea
  
  static atomic64_t perf_event_id;
  
- /*
-  * Lock for (sysadmin-configurable) event reservations:
-  */
- static DEFINE_SPINLOCK(perf_resource_lock);
- /*
-  * Architecture provided APIs - weak aliases:
-  */
- extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
- {
-       return NULL;
- }
- void __weak hw_perf_disable(void)             { barrier(); }
- void __weak hw_perf_enable(void)              { barrier(); }
  void __weak perf_event_print_debug(void)      { }
  
 +extern __weak const char *perf_pmu_name(void)
 +{
 +      return "pmu";
 +}
 +
- static DEFINE_PER_CPU(int, perf_disable_count);
+ void perf_pmu_disable(struct pmu *pmu)
+ {
+       int *count = this_cpu_ptr(pmu->pmu_disable_count);
+       if (!(*count)++)
+               pmu->pmu_disable(pmu);
+ }
  
- void perf_disable(void)
+ void perf_pmu_enable(struct pmu *pmu)
  {
-       if (!__get_cpu_var(perf_disable_count)++)
-               hw_perf_disable();
+       int *count = this_cpu_ptr(pmu->pmu_disable_count);
+       if (!--(*count))
+               pmu->pmu_enable(pmu);
  }
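
perf_pmu_disable()/perf_pmu_enable() replace the old global perf_disable()/perf_enable(); the per-cpu count lets them nest per PMU. An illustrative use, loosely modelled on perf_adjust_period() further down (the helper itself is hypothetical):

static void resync_event_period(struct perf_event *event, u64 period)
{
        perf_pmu_disable(event->pmu);           /* keep this PMU quiet while we poke it */
        event->pmu->stop(event, PERF_EF_UPDATE);
        event->hw.sample_period = period;
        event->pmu->start(event, PERF_EF_RELOAD);
        perf_pmu_enable(event->pmu);
}
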
  
- void perf_enable(void)
+ static DEFINE_PER_CPU(struct list_head, rotation_list);
+ /*
+  * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
+  * because they're strictly cpu affine and rotate_start is called with IRQs
+  * disabled, while rotate_context is called from IRQ context.
+  */
+ static void perf_pmu_rotate_start(struct pmu *pmu)
  {
-       if (!--__get_cpu_var(perf_disable_count))
-               hw_perf_enable();
+       struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+       struct list_head *head = &__get_cpu_var(rotation_list);
+       WARN_ON(!irqs_disabled());
+       if (list_empty(&cpuctx->rotation_list))
+               list_add(&cpuctx->rotation_list, head);
  }
  
  static void get_ctx(struct perf_event_context *ctx)
@@@ -156,13 -147,13 +152,13 @@@ static u64 primary_event_id(struct perf
   * the context could get moved to another task.
   */
  static struct perf_event_context *
- perf_lock_task_context(struct task_struct *task, unsigned long *flags)
+ perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
  {
        struct perf_event_context *ctx;
  
        rcu_read_lock();
-  retry:
-       ctx = rcu_dereference(task->perf_event_ctxp);
+ retry:
+       ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
        if (ctx) {
                /*
                 * If this context is a clone of another, it might
                 * can't get swapped on us any more.
                 */
                raw_spin_lock_irqsave(&ctx->lock, *flags);
-               if (ctx != rcu_dereference(task->perf_event_ctxp)) {
+               if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
                        raw_spin_unlock_irqrestore(&ctx->lock, *flags);
                        goto retry;
                }
   * can't get swapped to another task.  This also increments its
   * reference count so that the context can't get freed.
   */
- static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
+ static struct perf_event_context *
+ perf_pin_task_context(struct task_struct *task, int ctxn)
  {
        struct perf_event_context *ctx;
        unsigned long flags;
  
-       ctx = perf_lock_task_context(task, &flags);
+       ctx = perf_lock_task_context(task, ctxn, &flags);
        if (ctx) {
                ++ctx->pin_count;
                raw_spin_unlock_irqrestore(&ctx->lock, flags);
@@@ -307,6 -299,8 +304,8 @@@ list_add_event(struct perf_event *event
        }
  
        list_add_rcu(&event->event_entry, &ctx->event_list);
+       if (!ctx->nr_events)
+               perf_pmu_rotate_start(ctx->pmu);
        ctx->nr_events++;
        if (event->attr.inherit_stat)
                ctx->nr_stat++;
@@@ -441,7 -435,7 +440,7 @@@ event_sched_out(struct perf_event *even
                event->state = PERF_EVENT_STATE_OFF;
        }
        event->tstamp_stopped = ctx->time;
-       event->pmu->disable(event);
+       event->pmu->del(event, 0);
        event->oncpu = -1;
  
        if (!is_software_event(event))
@@@ -471,6 -465,12 +470,12 @@@ group_sched_out(struct perf_event *grou
                cpuctx->exclusive = 0;
  }
  
+ static inline struct perf_cpu_context *
+ __get_cpu_context(struct perf_event_context *ctx)
+ {
+       return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
+ }
  /*
   * Cross CPU call to remove a performance event
   *
   */
  static void __perf_event_remove_from_context(void *info)
  {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  
        /*
         * If this is a task context, we need to check whether it is
                return;
  
        raw_spin_lock(&ctx->lock);
-       /*
-        * Protect the list operation against NMI by disabling the
-        * events on a global level.
-        */
-       perf_disable();
  
        event_sched_out(event, cpuctx, ctx);
  
        list_del_event(event, ctx);
  
-       if (!ctx->task) {
-               /*
-                * Allow more per task events with respect to the
-                * reservation:
-                */
-               cpuctx->max_pertask =
-                       min(perf_max_events - ctx->nr_events,
-                           perf_max_events - perf_reserved_percpu);
-       }
-       perf_enable();
        raw_spin_unlock(&ctx->lock);
  }
  
@@@ -577,8 -561,8 +566,8 @@@ retry
  static void __perf_event_disable(void *info)
  {
        struct perf_event *event = info;
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_event_context *ctx = event->ctx;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  
        /*
         * If this is a per-task event, need to check whether this
@@@ -633,7 -617,7 +622,7 @@@ void perf_event_disable(struct perf_eve
                return;
        }
  
-  retry:
+ retry:
        task_oncpu_function_call(task, __perf_event_disable, event);
  
        raw_spin_lock_irq(&ctx->lock);
@@@ -672,7 -656,7 +661,7 @@@ event_sched_in(struct perf_event *event
         */
        smp_wmb();
  
-       if (event->pmu->enable(event)) {
+       if (event->pmu->add(event, PERF_EF_START)) {
                event->state = PERF_EVENT_STATE_INACTIVE;
                event->oncpu = -1;
                return -EAGAIN;
@@@ -696,22 -680,15 +685,15 @@@ group_sched_in(struct perf_event *group
               struct perf_event_context *ctx)
  {
        struct perf_event *event, *partial_group = NULL;
-       const struct pmu *pmu = group_event->pmu;
-       bool txn = false;
+       struct pmu *pmu = group_event->pmu;
  
        if (group_event->state == PERF_EVENT_STATE_OFF)
                return 0;
  
-       /* Check if group transaction availabe */
-       if (pmu->start_txn)
-               txn = true;
-       if (txn)
-               pmu->start_txn(pmu);
+       pmu->start_txn(pmu);
  
        if (event_sched_in(group_event, cpuctx, ctx)) {
-               if (txn)
-                       pmu->cancel_txn(pmu);
+               pmu->cancel_txn(pmu);
                return -EAGAIN;
        }
  
                }
        }
  
-       if (!txn || !pmu->commit_txn(pmu))
+       if (!pmu->commit_txn(pmu))
                return 0;
  
  group_error:
        }
        event_sched_out(group_event, cpuctx, ctx);
  
-       if (txn)
-               pmu->cancel_txn(pmu);
+       pmu->cancel_txn(pmu);
  
        return -EAGAIN;
  }
@@@ -794,10 -770,10 +775,10 @@@ static void add_event_to_ctx(struct per
   */
  static void __perf_install_in_context(void *info)
  {
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
        struct perf_event *leader = event->group_leader;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
        int err;
  
        /*
        ctx->is_active = 1;
        update_context_time(ctx);
  
-       /*
-        * Protect the list operation against NMI by disabling the
-        * events on a global level. NOP for non NMI based events.
-        */
-       perf_disable();
        add_event_to_ctx(event, ctx);
  
        if (event->cpu != -1 && event->cpu != smp_processor_id())
                }
        }
  
-       if (!err && !ctx->task && cpuctx->max_pertask)
-               cpuctx->max_pertask--;
-  unlock:
-       perf_enable();
+ unlock:
        raw_spin_unlock(&ctx->lock);
  }
  
@@@ -888,6 -853,8 +858,8 @@@ perf_install_in_context(struct perf_eve
  {
        struct task_struct *task = ctx->task;
  
+       event->ctx = ctx;
        if (!task) {
                /*
                 * Per cpu events are installed via an smp call and
@@@ -936,10 -903,12 +908,12 @@@ static void __perf_event_mark_enabled(s
  
        event->state = PERF_EVENT_STATE_INACTIVE;
        event->tstamp_enabled = ctx->time - event->total_time_enabled;
-       list_for_each_entry(sub, &event->sibling_list, group_entry)
-               if (sub->state >= PERF_EVENT_STATE_INACTIVE)
+       list_for_each_entry(sub, &event->sibling_list, group_entry) {
+               if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
                        sub->tstamp_enabled =
                                ctx->time - sub->total_time_enabled;
+               }
+       }
  }
  
  /*
  static void __perf_event_enable(void *info)
  {
        struct perf_event *event = info;
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_event_context *ctx = event->ctx;
        struct perf_event *leader = event->group_leader;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
        int err;
  
        /*
        if (!group_can_go_on(event, cpuctx, 1)) {
                err = -EEXIST;
        } else {
-               perf_disable();
                if (event == leader)
                        err = group_sched_in(event, cpuctx, ctx);
                else
                        err = event_sched_in(event, cpuctx, ctx);
-               perf_enable();
        }
  
        if (err) {
                }
        }
  
-  unlock:
+ unlock:
        raw_spin_unlock(&ctx->lock);
  }
  
@@@ -1046,7 -1013,7 +1018,7 @@@ void perf_event_enable(struct perf_even
        if (event->state == PERF_EVENT_STATE_ERROR)
                event->state = PERF_EVENT_STATE_OFF;
  
-  retry:
+ retry:
        raw_spin_unlock_irq(&ctx->lock);
        task_oncpu_function_call(task, __perf_event_enable, event);
  
        if (event->state == PERF_EVENT_STATE_OFF)
                __perf_event_mark_enabled(event, ctx);
  
-  out:
+ out:
        raw_spin_unlock_irq(&ctx->lock);
  }
  
@@@ -1097,26 -1064,26 +1069,26 @@@ static void ctx_sched_out(struct perf_e
        struct perf_event *event;
  
        raw_spin_lock(&ctx->lock);
+       perf_pmu_disable(ctx->pmu);
        ctx->is_active = 0;
        if (likely(!ctx->nr_events))
                goto out;
        update_context_time(ctx);
  
-       perf_disable();
        if (!ctx->nr_active)
-               goto out_enable;
+               goto out;
  
-       if (event_type & EVENT_PINNED)
+       if (event_type & EVENT_PINNED) {
                list_for_each_entry(event, &ctx->pinned_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
+       }
  
-       if (event_type & EVENT_FLEXIBLE)
+       if (event_type & EVENT_FLEXIBLE) {
                list_for_each_entry(event, &ctx->flexible_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
-  out_enable:
-       perf_enable();
-  out:
+       }
+ out:
+       perf_pmu_enable(ctx->pmu);
        raw_spin_unlock(&ctx->lock);
  }
  
@@@ -1214,34 -1181,25 +1186,25 @@@ static void perf_event_sync_stat(struc
        }
  }
  
- /*
-  * Called from scheduler to remove the events of the current task,
-  * with interrupts disabled.
-  *
-  * We stop each event and update the event value in event->count.
-  *
-  * This does not protect us against NMI, but disable()
-  * sets the disabled bit in the control field of event _before_
-  * accessing the event control register. If a NMI hits, then it will
-  * not restart the event.
-  */
- void perf_event_task_sched_out(struct task_struct *task,
-                                struct task_struct *next)
+ void perf_event_context_sched_out(struct task_struct *task, int ctxn,
+                                 struct task_struct *next)
  {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       struct perf_event_context *ctx = task->perf_event_ctxp;
+       struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
        struct perf_event_context *next_ctx;
        struct perf_event_context *parent;
+       struct perf_cpu_context *cpuctx;
        int do_switch = 1;
  
-       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+       if (likely(!ctx))
+               return;
  
-       if (likely(!ctx || !cpuctx->task_ctx))
+       cpuctx = __get_cpu_context(ctx);
+       if (!cpuctx->task_ctx)
                return;
  
        rcu_read_lock();
        parent = rcu_dereference(ctx->parent_ctx);
-       next_ctx = next->perf_event_ctxp;
+       next_ctx = next->perf_event_ctxp[ctxn];
        if (parent && next_ctx &&
            rcu_dereference(next_ctx->parent_ctx) == parent) {
                /*
                         * XXX do we need a memory barrier of sorts
                         * wrt to rcu_dereference() of perf_event_ctxp
                         */
-                       task->perf_event_ctxp = next_ctx;
-                       next->perf_event_ctxp = ctx;
+                       task->perf_event_ctxp[ctxn] = next_ctx;
+                       next->perf_event_ctxp[ctxn] = ctx;
                        ctx->task = next;
                        next_ctx->task = task;
                        do_switch = 0;
        }
  }
  
+ #define for_each_task_context_nr(ctxn)                                        \
+       for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
+ /*
+  * Called from scheduler to remove the events of the current task,
+  * with interrupts disabled.
+  *
+  * We stop each event and update the event value in event->count.
+  *
+  * This does not protect us against NMI, but disable()
+  * sets the disabled bit in the control field of event _before_
+  * accessing the event control register. If a NMI hits, then it will
+  * not restart the event.
+  */
+ void perf_event_task_sched_out(struct task_struct *task,
+                              struct task_struct *next)
+ {
+       int ctxn;
+       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+       for_each_task_context_nr(ctxn)
+               perf_event_context_sched_out(task, ctxn, next);
+ }
  static void task_ctx_sched_out(struct perf_event_context *ctx,
                               enum event_type_t event_type)
  {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  
        if (!cpuctx->task_ctx)
                return;
@@@ -1355,9 -1338,10 +1343,10 @@@ ctx_flexible_sched_in(struct perf_event
                if (event->cpu != -1 && event->cpu != smp_processor_id())
                        continue;
  
-               if (group_can_go_on(event, cpuctx, can_add_hw))
+               if (group_can_go_on(event, cpuctx, can_add_hw)) {
                        if (group_sched_in(event, cpuctx, ctx))
                                can_add_hw = 0;
+               }
        }
  }
  
@@@ -1373,8 -1357,6 +1362,6 @@@ ctx_sched_in(struct perf_event_context 
  
        ctx->timestamp = perf_clock();
  
-       perf_disable();
        /*
         * First go through the list and put on any pinned groups
         * in order to give them the best chance of going on.
        if (event_type & EVENT_FLEXIBLE)
                ctx_flexible_sched_in(ctx, cpuctx);
  
-       perf_enable();
-  out:
+ out:
        raw_spin_unlock(&ctx->lock);
  }
  
@@@ -1399,43 -1380,28 +1385,28 @@@ static void cpu_ctx_sched_in(struct per
        ctx_sched_in(ctx, cpuctx, event_type);
  }
  
- static void task_ctx_sched_in(struct task_struct *task,
+ static void task_ctx_sched_in(struct perf_event_context *ctx,
                              enum event_type_t event_type)
  {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       struct perf_event_context *ctx = task->perf_event_ctxp;
+       struct perf_cpu_context *cpuctx;
  
-       if (likely(!ctx))
-               return;
+       cpuctx = __get_cpu_context(ctx);
        if (cpuctx->task_ctx == ctx)
                return;
        ctx_sched_in(ctx, cpuctx, event_type);
        cpuctx->task_ctx = ctx;
  }
- /*
-  * Called from scheduler to add the events of the current task
-  * with interrupts disabled.
-  *
-  * We restore the event value and then enable it.
-  *
-  * This does not protect us against NMI, but enable()
-  * sets the enabled bit in the control field of event _before_
-  * accessing the event control register. If a NMI hits, then it will
-  * keep the event running.
-  */
- void perf_event_task_sched_in(struct task_struct *task)
- {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       struct perf_event_context *ctx = task->perf_event_ctxp;
  
-       if (likely(!ctx))
-               return;
+ void perf_event_context_sched_in(struct perf_event_context *ctx)
+ {
+       struct perf_cpu_context *cpuctx;
  
+       cpuctx = __get_cpu_context(ctx);
        if (cpuctx->task_ctx == ctx)
                return;
  
-       perf_disable();
+       perf_pmu_disable(ctx->pmu);
        /*
         * We want to keep the following priority order:
         * cpu pinned (that don't need to move), task pinned,
  
        cpuctx->task_ctx = ctx;
  
-       perf_enable();
+       /*
+        * Since these rotations are per-cpu, we need to ensure the
+        * cpu-context we got scheduled on is actually rotating.
+        */
+       perf_pmu_rotate_start(ctx->pmu);
+       perf_pmu_enable(ctx->pmu);
+ }
+ /*
+  * Called from scheduler to add the events of the current task
+  * with interrupts disabled.
+  *
+  * We restore the event value and then enable it.
+  *
+  * This does not protect us against NMI, but enable()
+  * sets the enabled bit in the control field of event _before_
+  * accessing the event control register. If an NMI hits, then it will
+  * keep the event running.
+  */
+ void perf_event_task_sched_in(struct task_struct *task)
+ {
+       struct perf_event_context *ctx;
+       int ctxn;
+       for_each_task_context_nr(ctxn) {
+               ctx = task->perf_event_ctxp[ctxn];
+               if (likely(!ctx))
+                       continue;
+               perf_event_context_sched_in(ctx);
+       }
  }
  
  #define MAX_INTERRUPTS (~0ULL)
@@@ -1529,22 -1525,6 +1530,6 @@@ do {                                   
        return div64_u64(dividend, divisor);
  }
  
- static void perf_event_stop(struct perf_event *event)
- {
-       if (!event->pmu->stop)
-               return event->pmu->disable(event);
-       return event->pmu->stop(event);
- }
- static int perf_event_start(struct perf_event *event)
- {
-       if (!event->pmu->start)
-               return event->pmu->enable(event);
-       return event->pmu->start(event);
- }
  static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
  {
        struct hw_perf_event *hwc = &event->hw;
        hwc->sample_period = sample_period;
  
        if (local64_read(&hwc->period_left) > 8*sample_period) {
-               perf_disable();
-               perf_event_stop(event);
+               event->pmu->stop(event, PERF_EF_UPDATE);
                local64_set(&hwc->period_left, 0);
-               perf_event_start(event);
-               perf_enable();
+               event->pmu->start(event, PERF_EF_RELOAD);
        }
  }
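
The pmu->stop(event, PERF_EF_UPDATE) / pmu->start(event, PERF_EF_RELOAD) pair above relies on the per-event scheduling state introduced by this series (PERF_HES_STOPPED / PERF_HES_UPTODATE plus the add/del/start/stop callbacks). The following is only a rough user-space sketch of that protocol; the TOY_* names and flag values are invented stand-ins, not kernel definitions.

/*
 * User-space sketch of the start/stop flag protocol and the
 * stopped/up-to-date bookkeeping used above.
 */
#include <stdio.h>

#define TOY_EF_RELOAD	0x02	/* reprogram the period when starting */
#define TOY_EF_UPDATE	0x04	/* read the counter when stopping     */

#define TOY_HES_STOPPED		0x01	/* counter is not on the PMU   */
#define TOY_HES_UPTODATE	0x02	/* event->count is current     */

struct toy_event {
	int state;
	long long count;
	long long hw;		/* pretend hardware counter value */
};

static void toy_start(struct toy_event *e, int flags)
{
	if (flags & TOY_EF_RELOAD)	/* caller promises count is fresh */
		e->hw = 0;
	e->state = 0;			/* running again */
}

static void toy_stop(struct toy_event *e, int flags)
{
	if (e->state & TOY_HES_STOPPED)
		return;
	e->state |= TOY_HES_STOPPED;
	if (flags & TOY_EF_UPDATE) {	/* fold hardware value into count */
		e->count += e->hw;
		e->hw = 0;
		e->state |= TOY_HES_UPTODATE;
	}
}

int main(void)
{
	struct toy_event e = { TOY_HES_STOPPED | TOY_HES_UPTODATE, 0, 0 };

	toy_start(&e, TOY_EF_RELOAD);
	e.hw = 1234;				/* hardware ticks away */
	toy_stop(&e, TOY_EF_UPDATE);		/* like the code above */
	toy_start(&e, TOY_EF_RELOAD);
	printf("count=%lld state=%#x\n", e.count, e.state);
	return 0;
}
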
  
- static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
+ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
  {
        struct perf_event *event;
        struct hw_perf_event *hwc;
                 */
                if (interrupts == MAX_INTERRUPTS) {
                        perf_log_throttle(event, 1);
-                       perf_disable();
-                       event->pmu->unthrottle(event);
-                       perf_enable();
+                       event->pmu->start(event, 0);
                }
  
                if (!event->attr.freq || !event->attr.sample_freq)
                        continue;
  
-               perf_disable();
                event->pmu->read(event);
                now = local64_read(&event->count);
                delta = now - hwc->freq_count_stamp;
                hwc->freq_count_stamp = now;
  
                if (delta > 0)
-                       perf_adjust_period(event, TICK_NSEC, delta);
-               perf_enable();
+                       perf_adjust_period(event, period, delta);
        }
        raw_spin_unlock(&ctx->lock);
  }
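
The perf_adjust_period() call above, now driven by the period argument passed in from the tick, recomputes the sampling period so that the count of events observed over nsec nanoseconds lands on the requested sample_freq; roughly new_period = count * NSEC_PER_SEC / (nsec * sample_freq), with the kernel's fls64-based scaling only there to keep the 64-bit math from overflowing. A minimal user-space sketch of the arithmetic (invented toy_* name, GCC/Clang __int128 used instead of the kernel's scaling):

/*
 * Sketch of the period recomputation:
 *
 *                  count * NSEC_PER_SEC
 *     new_period = --------------------
 *                    nsec * sample_freq
 */
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static unsigned long long toy_calculate_period(unsigned long long count,
					       unsigned long long nsec,
					       unsigned long long sample_freq)
{
	unsigned __int128 dividend = (unsigned __int128)count * NSEC_PER_SEC;
	unsigned long long divisor = nsec * sample_freq;

	if (!divisor)
		return 0;
	return (unsigned long long)(dividend / divisor);
}

int main(void)
{
	/* 2,000,000 events in 1ms with a 4000 Hz target -> sample every 500000 */
	printf("period=%llu\n",
	       toy_calculate_period(2000000, 1000000, 4000));
	return 0;
}
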
@@@ -1631,32 -1605,38 +1610,38 @@@ static void rotate_ctx(struct perf_even
        raw_spin_unlock(&ctx->lock);
  }
  
- void perf_event_task_tick(struct task_struct *curr)
+ /*
+  * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
+  * because they're strictly cpu affine and rotate_start is called with IRQs
+  * disabled, while rotate_context is called from IRQ context.
+  */
+ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
  {
-       struct perf_cpu_context *cpuctx;
-       struct perf_event_context *ctx;
-       int rotate = 0;
-       if (!atomic_read(&nr_events))
-               return;
+       u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
+       struct perf_event_context *ctx = NULL;
+       int rotate = 0, remove = 1;
  
-       cpuctx = &__get_cpu_var(perf_cpu_context);
-       if (cpuctx->ctx.nr_events &&
-           cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
-               rotate = 1;
+       if (cpuctx->ctx.nr_events) {
+               remove = 0;
+               if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
+                       rotate = 1;
+       }
  
-       ctx = curr->perf_event_ctxp;
-       if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
-               rotate = 1;
+       ctx = cpuctx->task_ctx;
+       if (ctx && ctx->nr_events) {
+               remove = 0;
+               if (ctx->nr_events != ctx->nr_active)
+                       rotate = 1;
+       }
  
-       perf_ctx_adjust_freq(&cpuctx->ctx);
+       perf_pmu_disable(cpuctx->ctx.pmu);
+       perf_ctx_adjust_freq(&cpuctx->ctx, interval);
        if (ctx)
-               perf_ctx_adjust_freq(ctx);
+               perf_ctx_adjust_freq(ctx, interval);
  
        if (!rotate)
-               return;
+               goto done;
  
-       perf_disable();
        cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
        if (ctx)
                task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
  
        cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
        if (ctx)
-               task_ctx_sched_in(curr, EVENT_FLEXIBLE);
-       perf_enable();
+               task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
+ done:
+       if (remove)
+               list_del_init(&cpuctx->rotation_list);
+       perf_pmu_enable(cpuctx->ctx.pmu);
+ }
+ void perf_event_task_tick(void)
+ {
+       struct list_head *head = &__get_cpu_var(rotation_list);
+       struct perf_cpu_context *cpuctx, *tmp;
+       WARN_ON(!irqs_disabled());
+       list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
+               if (cpuctx->jiffies_interval == 1 ||
+                               !(jiffies % cpuctx->jiffies_interval))
+                       perf_rotate_context(cpuctx);
+       }
  }
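
Each pmu's cpu context now carries a jiffies_interval and sits on a per-cpu rotation_list; the tick only rotates a context every jiffies_interval ticks, and perf_rotate_context() drops idle contexts from the list. A small user-space sketch of that throttling decision follows; the toy_* names are invented and "jiffies" is just a plain tick counter here.

/*
 * Sketch of the per-context rotation throttling done in
 * perf_event_task_tick()/perf_rotate_context() above.
 */
#include <stdio.h>

struct toy_cpuctx {
	unsigned long jiffies_interval;	/* rotate every N ticks         */
	int nr_events;			/* events attached              */
	int nr_active;			/* events currently on the PMU  */
	int on_rotation_list;
};

static void toy_tick(struct toy_cpuctx *ctx, unsigned long jiffies)
{
	int rotate, remove;

	if (ctx->jiffies_interval != 1 && (jiffies % ctx->jiffies_interval))
		return;			/* not this context's turn yet */

	remove = !ctx->nr_events;	/* nothing here: drop from the list */
	rotate = ctx->nr_events && ctx->nr_events != ctx->nr_active;

	if (rotate)
		printf("tick %lu: rotate (over-committed)\n", jiffies);
	if (remove) {
		ctx->on_rotation_list = 0;
		printf("tick %lu: removed idle context\n", jiffies);
	}
}

int main(void)
{
	struct toy_cpuctx ctx = { 4, 8, 6, 1 };	/* rotate every 4 ticks */
	unsigned long jiffies;

	for (jiffies = 1; jiffies <= 8; jiffies++)
		toy_tick(&ctx, jiffies);
	return 0;
}
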
  
  static int event_enable_on_exec(struct perf_event *event,
   * Enable all of a task's events that have been marked enable-on-exec.
   * This expects task == current.
   */
- static void perf_event_enable_on_exec(struct task_struct *task)
+ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
  {
-       struct perf_event_context *ctx;
        struct perf_event *event;
        unsigned long flags;
        int enabled = 0;
        int ret;
  
        local_irq_save(flags);
        if (!ctx || !ctx->nr_events)
                goto out;
  
-       __perf_event_task_sched_out(ctx);
+       task_ctx_sched_out(ctx, EVENT_ALL);
  
        raw_spin_lock(&ctx->lock);
  
  
        raw_spin_unlock(&ctx->lock);
  
-       perf_event_task_sched_in(task);
-  out:
+       perf_event_context_sched_in(ctx);
+ out:
        local_irq_restore(flags);
  }
  
   */
  static void __perf_event_read(void *info)
  {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  
        /*
         * If this is a task context, we need to check whether it is
@@@ -1787,11 -1784,219 +1789,219 @@@ static u64 perf_event_read(struct perf_
  }
  
  /*
-  * Initialize the perf_event context in a task_struct:
+  * Callchain support
   */
+ struct callchain_cpus_entries {
+       struct rcu_head                 rcu_head;
+       struct perf_callchain_entry     *cpu_entries[0];
+ };
+ static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
+ static atomic_t nr_callchain_events;
+ static DEFINE_MUTEX(callchain_mutex);
+ struct callchain_cpus_entries *callchain_cpus_entries;
+ __weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+                                 struct pt_regs *regs)
+ {
+ }
+ __weak void perf_callchain_user(struct perf_callchain_entry *entry,
+                               struct pt_regs *regs)
+ {
+ }
+ static void release_callchain_buffers_rcu(struct rcu_head *head)
+ {
+       struct callchain_cpus_entries *entries;
+       int cpu;
+       entries = container_of(head, struct callchain_cpus_entries, rcu_head);
+       for_each_possible_cpu(cpu)
+               kfree(entries->cpu_entries[cpu]);
+       kfree(entries);
+ }
+ static void release_callchain_buffers(void)
+ {
+       struct callchain_cpus_entries *entries;
+       entries = callchain_cpus_entries;
+       rcu_assign_pointer(callchain_cpus_entries, NULL);
+       call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
+ }
+ static int alloc_callchain_buffers(void)
+ {
+       int cpu;
+       int size;
+       struct callchain_cpus_entries *entries;
+       /*
+        * We can't use the percpu allocation API for data that can be
+        * accessed from NMI. Use a temporary manual per cpu allocation
+        * until that gets sorted out.
+        */
+       size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
+               num_possible_cpus();
+       entries = kzalloc(size, GFP_KERNEL);
+       if (!entries)
+               return -ENOMEM;
+       size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
+       for_each_possible_cpu(cpu) {
+               entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
+                                                        cpu_to_node(cpu));
+               if (!entries->cpu_entries[cpu])
+                       goto fail;
+       }
+       rcu_assign_pointer(callchain_cpus_entries, entries);
+       return 0;
+ fail:
+       for_each_possible_cpu(cpu)
+               kfree(entries->cpu_entries[cpu]);
+       kfree(entries);
+       return -ENOMEM;
+ }
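
Because these buffers are touched from NMI context, the code above allocates one entry array per possible CPU by hand with kmalloc_node() rather than through the percpu allocator, and unwinds everything on partial failure. A user-space analogue of that allocate-all-or-roll-back pattern, with plain malloc() standing in for kmalloc_node() and invented toy_* names:

/*
 * User-space analogue of alloc_callchain_buffers(): one buffer per
 * "cpu", with full rollback if any allocation fails.  Nothing here is
 * NMI-safe, of course.
 */
#include <stdlib.h>
#include <stdio.h>

struct toy_entries {
	int nr_cpus;
	void *cpu_entries[];	/* flexible array, like the kernel struct */
};

static struct toy_entries *toy_alloc(int nr_cpus, size_t per_cpu_size)
{
	struct toy_entries *e;
	int cpu;

	e = calloc(1, sizeof(*e) + sizeof(void *) * nr_cpus);
	if (!e)
		return NULL;
	e->nr_cpus = nr_cpus;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		e->cpu_entries[cpu] = malloc(per_cpu_size);
		if (!e->cpu_entries[cpu])
			goto fail;
	}
	return e;

fail:					/* roll back everything we got */
	for (cpu = 0; cpu < nr_cpus; cpu++)
		free(e->cpu_entries[cpu]);
	free(e);
	return NULL;
}

int main(void)
{
	struct toy_entries *e = toy_alloc(4, 4096);

	printf("%s\n", e ? "allocated" : "failed");
	if (e) {
		for (int cpu = 0; cpu < e->nr_cpus; cpu++)
			free(e->cpu_entries[cpu]);
		free(e);
	}
	return 0;
}
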
+ static int get_callchain_buffers(void)
+ {
+       int err = 0;
+       int count;
+       mutex_lock(&callchain_mutex);
+       count = atomic_inc_return(&nr_callchain_events);
+       if (WARN_ON_ONCE(count < 1)) {
+               err = -EINVAL;
+               goto exit;
+       }
+       if (count > 1) {
+               /* If the allocation failed, give up */
+               if (!callchain_cpus_entries)
+                       err = -ENOMEM;
+               goto exit;
+       }
+       err = alloc_callchain_buffers();
+       if (err)
+               release_callchain_buffers();
+ exit:
+       mutex_unlock(&callchain_mutex);
+       return err;
+ }
+ static void put_callchain_buffers(void)
+ {
+       if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
+               release_callchain_buffers();
+               mutex_unlock(&callchain_mutex);
+       }
+ }
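
get_callchain_buffers()/put_callchain_buffers() implement a "first user allocates, last user frees" rule under callchain_mutex. A simplified user-space version using a pthread mutex is sketched below; the kernel's atomic_dec_and_mutex_lock() avoids taking the mutex on every put, which this sketch does not bother with, and the toy_* names are invented.

/*
 * Simplified get/put refcounting: the first getter allocates the shared
 * buffers, the last putter frees them.
 */
#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

static pthread_mutex_t toy_mutex = PTHREAD_MUTEX_INITIALIZER;
static int toy_users;
static void *toy_buffers;

static int toy_get(void)
{
	int err = 0;

	pthread_mutex_lock(&toy_mutex);
	if (toy_users++ == 0) {			/* first user: allocate */
		toy_buffers = malloc(4096);
		if (!toy_buffers) {
			toy_users--;
			err = -1;
		}
	}
	pthread_mutex_unlock(&toy_mutex);
	return err;
}

static void toy_put(void)
{
	pthread_mutex_lock(&toy_mutex);
	if (--toy_users == 0) {			/* last user: free */
		free(toy_buffers);
		toy_buffers = NULL;
	}
	pthread_mutex_unlock(&toy_mutex);
}

int main(void)
{
	if (toy_get() == 0) {
		printf("buffers live\n");
		toy_put();
	}
	printf("users=%d\n", toy_users);
	return 0;
}
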
+ static int get_recursion_context(int *recursion)
+ {
+       int rctx;
+       if (in_nmi())
+               rctx = 3;
+       else if (in_irq())
+               rctx = 2;
+       else if (in_softirq())
+               rctx = 1;
+       else
+               rctx = 0;
+       if (recursion[rctx])
+               return -1;
+       recursion[rctx]++;
+       barrier();
+       return rctx;
+ }
+ static inline void put_recursion_context(int *recursion, int rctx)
+ {
+       barrier();
+       recursion[rctx]--;
+ }
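
get_recursion_context()/put_recursion_context() give every execution level (task, softirq, hardirq, NMI) its own recursion flag, so a software event fired while another one is already being handled at the same level is simply dropped. A user-space sketch of the guard; the real flags are per-CPU, while here a single global array and an explicitly passed "level" stand in for that, and all toy_* names are invented.

/*
 * Sketch of the per-context recursion guard above.
 */
#include <stdio.h>

enum { TOY_CTX_TASK, TOY_CTX_SOFTIRQ, TOY_CTX_IRQ, TOY_CTX_NMI, TOY_NR_CONTEXTS };

static int toy_recursion[TOY_NR_CONTEXTS];

static int toy_get_recursion_context(int level)
{
	if (toy_recursion[level])
		return -1;		/* already inside: refuse to recurse */
	toy_recursion[level]++;
	__asm__ __volatile__("" ::: "memory");	/* compiler barrier, like barrier() */
	return level;
}

static void toy_put_recursion_context(int rctx)
{
	__asm__ __volatile__("" ::: "memory");
	toy_recursion[rctx]--;
}

int main(void)
{
	int rctx = toy_get_recursion_context(TOY_CTX_TASK);

	printf("first attempt:  %d\n", rctx);			/* 0  */
	printf("nested attempt: %d\n",
	       toy_get_recursion_context(TOY_CTX_TASK));	/* -1 */
	toy_put_recursion_context(rctx);
	return 0;
}
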
+ static struct perf_callchain_entry *get_callchain_entry(int *rctx)
+ {
+       int cpu;
+       struct callchain_cpus_entries *entries;
+       *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
+       if (*rctx == -1)
+               return NULL;
+       entries = rcu_dereference(callchain_cpus_entries);
+       if (!entries)
+               return NULL;
+       cpu = smp_processor_id();
+       return &entries->cpu_entries[cpu][*rctx];
+ }
  static void
- __perf_event_init_context(struct perf_event_context *ctx,
-                           struct task_struct *task)
+ put_callchain_entry(int rctx)
+ {
+       put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
+ }
+ static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+ {
+       int rctx;
+       struct perf_callchain_entry *entry;
+       entry = get_callchain_entry(&rctx);
+       if (rctx == -1)
+               return NULL;
+       if (!entry)
+               goto exit_put;
+       entry->nr = 0;
+       if (!user_mode(regs)) {
+               perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+               perf_callchain_kernel(entry, regs);
+               if (current->mm)
+                       regs = task_pt_regs(current);
+               else
+                       regs = NULL;
+       }
+       if (regs) {
+               perf_callchain_store(entry, PERF_CONTEXT_USER);
+               perf_callchain_user(entry, regs);
+       }
+ exit_put:
+       put_callchain_entry(rctx);
+       return entry;
+ }
+ /*
+  * Initialize the perf_event context in a task_struct:
+  */
+ static void __perf_event_init_context(struct perf_event_context *ctx)
  {
        raw_spin_lock_init(&ctx->lock);
        mutex_init(&ctx->mutex);
        INIT_LIST_HEAD(&ctx->flexible_groups);
        INIT_LIST_HEAD(&ctx->event_list);
        atomic_set(&ctx->refcount, 1);
-       ctx->task = task;
  }
  
- static struct perf_event_context *find_get_context(pid_t pid, int cpu)
+ static struct perf_event_context *
+ alloc_perf_context(struct pmu *pmu, struct task_struct *task)
  {
        struct perf_event_context *ctx;
-       struct perf_cpu_context *cpuctx;
-       struct task_struct *task;
-       unsigned long flags;
-       int err;
-       if (pid == -1 && cpu != -1) {
-               /* Must be root to operate on a CPU event: */
-               if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
-                       return ERR_PTR(-EACCES);
  
-               if (cpu < 0 || cpu >= nr_cpumask_bits)
-                       return ERR_PTR(-EINVAL);
+       ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
+       if (!ctx)
+               return NULL;
  
-               /*
-                * We could be clever and allow to attach a event to an
-                * offline CPU and activate it when the CPU comes up, but
-                * that's for later.
-                */
-               if (!cpu_online(cpu))
-                       return ERR_PTR(-ENODEV);
+       __perf_event_init_context(ctx);
+       if (task) {
+               ctx->task = task;
+               get_task_struct(task);
+       }
+       ctx->pmu = pmu;
  
-               cpuctx = &per_cpu(perf_cpu_context, cpu);
-               ctx = &cpuctx->ctx;
-               get_ctx(ctx);
+       return ctx;
+ }
  
-               return ctx;
-       }
+ static struct task_struct *
+ find_lively_task_by_vpid(pid_t vpid)
+ {
+       struct task_struct *task;
+       int err;
  
        rcu_read_lock();
-       if (!pid)
+       if (!vpid)
                task = current;
        else
-               task = find_task_by_vpid(pid);
+               task = find_task_by_vpid(vpid);
        if (task)
                get_task_struct(task);
        rcu_read_unlock();
        if (!ptrace_may_access(task, PTRACE_MODE_READ))
                goto errout;
  
-  retry:
-       ctx = perf_lock_task_context(task, &flags);
+       return task;
+ errout:
+       put_task_struct(task);
+       return ERR_PTR(err);
+ }
+ static struct perf_event_context *
+ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
+ {
+       struct perf_event_context *ctx;
+       struct perf_cpu_context *cpuctx;
+       unsigned long flags;
+       int ctxn, err;
+       if (!task && cpu != -1) {
+               /* Must be root to operate on a CPU event: */
+               if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+                       return ERR_PTR(-EACCES);
+               if (cpu < 0 || cpu >= nr_cpumask_bits)
+                       return ERR_PTR(-EINVAL);
+               /*
+                * We could be clever and allow attaching an event to an
+                * offline CPU and activate it when the CPU comes up, but
+                * that's for later.
+                */
+               if (!cpu_online(cpu))
+                       return ERR_PTR(-ENODEV);
+               cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+               ctx = &cpuctx->ctx;
+               get_ctx(ctx);
+               return ctx;
+       }
+       err = -EINVAL;
+       ctxn = pmu->task_ctx_nr;
+       if (ctxn < 0)
+               goto errout;
+ retry:
+       ctx = perf_lock_task_context(task, ctxn, &flags);
        if (ctx) {
                unclone_ctx(ctx);
                raw_spin_unlock_irqrestore(&ctx->lock, flags);
        }
  
        if (!ctx) {
-               ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
+               ctx = alloc_perf_context(pmu, task);
                err = -ENOMEM;
                if (!ctx)
                        goto errout;
-               __perf_event_init_context(ctx, task);
                get_ctx(ctx);
-               if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
+               if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) {
                        /*
                         * We raced with some other task; use
                         * the context they set.
                         */
+                       put_task_struct(task);
                        kfree(ctx);
                        goto retry;
                }
-               get_task_struct(task);
        }
  
        put_task_struct(task);
        return ctx;
  
-  errout:
+ errout:
        put_task_struct(task);
        return ERR_PTR(err);
  }
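
The interesting part of find_get_context() is the lock-free installation of a freshly allocated context: cmpxchg() publishes it only if the task still has none for this ctxn, and a lost race frees the local copy and retries with the winner's context. A C11-atomics sketch of that install-or-retry idiom in user space; the toy_* names are invented and the sketch ignores refcounting and unclone handling.

/*
 * C11 user-space sketch of the cmpxchg() install-or-retry idiom:
 * allocate optimistically, publish with a single compare-and-swap,
 * and back off if another thread won the race.
 */
#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>

struct toy_ctx { int dummy; };

struct toy_task {
	_Atomic(struct toy_ctx *) ctx;
};

static struct toy_ctx *toy_find_get_context(struct toy_task *task)
{
	struct toy_ctx *ctx, *expected;

retry:
	ctx = atomic_load(&task->ctx);
	if (ctx)
		return ctx;			/* someone already set it up */

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		return NULL;

	expected = NULL;
	if (!atomic_compare_exchange_strong(&task->ctx, &expected, ctx)) {
		/* We raced with another thread; use the context they set. */
		free(ctx);
		goto retry;
	}
	return ctx;
}

int main(void)
{
	struct toy_task task = { NULL };
	struct toy_ctx *a = toy_find_get_context(&task);
	struct toy_ctx *b = toy_find_get_context(&task);

	printf("same context: %s\n", a == b ? "yes" : "no");
	free(a);
	return 0;
}
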
@@@ -1918,6 -2160,8 +2165,8 @@@ static void free_event(struct perf_even
                        atomic_dec(&nr_comm_events);
                if (event->attr.task)
                        atomic_dec(&nr_task_events);
+               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+                       put_callchain_buffers();
        }
  
        if (event->buffer) {
        if (event->destroy)
                event->destroy(event);
  
-       put_ctx(event->ctx);
+       if (event->ctx)
+               put_ctx(event->ctx);
        call_rcu(&event->rcu_head, free_event_rcu);
  }
  
@@@ -2349,6 -2595,9 +2600,9 @@@ int perf_event_task_disable(void
  
  static int perf_event_index(struct perf_event *event)
  {
+       if (event->hw.state & PERF_HES_STOPPED)
+               return 0;
        if (event->state != PERF_EVENT_STATE_ACTIVE)
                return 0;
  
@@@ -2961,16 -3210,6 +3215,6 @@@ void perf_event_do_pending(void
  }
  
  /*
-  * Callchain support -- arch specific
-  */
- __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
- {
-       return NULL;
- }
- /*
   * We assume there is only KVM supporting the callbacks.
   * Later on, we might change it to a list if there is
   * another virtualization implementation supporting the callbacks.
@@@ -3076,7 -3315,7 +3320,7 @@@ again
        if (handle->wakeup != local_read(&buffer->wakeup))
                perf_output_wakeup(handle);
  
-  out:
+ out:
        preempt_enable();
  }
  
@@@ -3464,14 -3703,20 +3708,20 @@@ static void perf_event_output(struct pe
        struct perf_output_handle handle;
        struct perf_event_header header;
  
+       /* protect the callchain buffers */
+       rcu_read_lock();
        perf_prepare_sample(&header, data, event, regs);
  
        if (perf_output_begin(&handle, event, header.size, nmi, 1))
-               return;
+               goto exit;
  
        perf_output_sample(&handle, &header, data, event);
  
        perf_output_end(&handle);
+ exit:
+       rcu_read_unlock();
  }
  
  /*
@@@ -3585,16 -3830,27 +3835,27 @@@ static void perf_event_task_ctx(struct 
  static void perf_event_task_event(struct perf_task_event *task_event)
  {
        struct perf_cpu_context *cpuctx;
-       struct perf_event_context *ctx = task_event->task_ctx;
+       struct perf_event_context *ctx;
+       struct pmu *pmu;
+       int ctxn;
  
        rcu_read_lock();
-       cpuctx = &get_cpu_var(perf_cpu_context);
-       perf_event_task_ctx(&cpuctx->ctx, task_event);
-       if (!ctx)
-               ctx = rcu_dereference(current->perf_event_ctxp);
-       if (ctx)
-               perf_event_task_ctx(ctx, task_event);
-       put_cpu_var(perf_cpu_context);
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               perf_event_task_ctx(&cpuctx->ctx, task_event);
+               ctx = task_event->task_ctx;
+               if (!ctx) {
+                       ctxn = pmu->task_ctx_nr;
+                       if (ctxn < 0)
+                               goto next;
+                       ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+               }
+               if (ctx)
+                       perf_event_task_ctx(ctx, task_event);
+ next:
+               put_cpu_ptr(pmu->pmu_cpu_context);
+       }
        rcu_read_unlock();
  }
  
@@@ -3699,8 -3955,10 +3960,10 @@@ static void perf_event_comm_event(struc
  {
        struct perf_cpu_context *cpuctx;
        struct perf_event_context *ctx;
-       unsigned int size;
        char comm[TASK_COMM_LEN];
+       unsigned int size;
+       struct pmu *pmu;
+       int ctxn;
  
        memset(comm, 0, sizeof(comm));
        strlcpy(comm, comm_event->task->comm, sizeof(comm));
        comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
  
        rcu_read_lock();
-       cpuctx = &get_cpu_var(perf_cpu_context);
-       perf_event_comm_ctx(&cpuctx->ctx, comm_event);
-       ctx = rcu_dereference(current->perf_event_ctxp);
-       if (ctx)
-               perf_event_comm_ctx(ctx, comm_event);
-       put_cpu_var(perf_cpu_context);
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               perf_event_comm_ctx(&cpuctx->ctx, comm_event);
+               ctxn = pmu->task_ctx_nr;
+               if (ctxn < 0)
+                       goto next;
+               ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+               if (ctx)
+                       perf_event_comm_ctx(ctx, comm_event);
+ next:
+               put_cpu_ptr(pmu->pmu_cpu_context);
+       }
        rcu_read_unlock();
  }
  
  void perf_event_comm(struct task_struct *task)
  {
        struct perf_comm_event comm_event;
+       struct perf_event_context *ctx;
+       int ctxn;
+       for_each_task_context_nr(ctxn) {
+               ctx = task->perf_event_ctxp[ctxn];
+               if (!ctx)
+                       continue;
  
-       if (task->perf_event_ctxp)
-               perf_event_enable_on_exec(task);
+               perf_event_enable_on_exec(ctx);
+       }
  
        if (!atomic_read(&nr_comm_events))
                return;
@@@ -3828,6 -4101,8 +4106,8 @@@ static void perf_event_mmap_event(struc
        char tmp[16];
        char *buf = NULL;
        const char *name;
+       struct pmu *pmu;
+       int ctxn;
  
        memset(tmp, 0, sizeof(tmp));
  
@@@ -3880,12 -4155,23 +4160,23 @@@ got_name
        mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
  
        rcu_read_lock();
-       cpuctx = &get_cpu_var(perf_cpu_context);
-       perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, vma->vm_flags & VM_EXEC);
-       ctx = rcu_dereference(current->perf_event_ctxp);
-       if (ctx)
-               perf_event_mmap_ctx(ctx, mmap_event, vma->vm_flags & VM_EXEC);
-       put_cpu_var(perf_cpu_context);
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
+                                       vma->vm_flags & VM_EXEC);
+               ctxn = pmu->task_ctx_nr;
+               if (ctxn < 0)
+                       goto next;
+               ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+               if (ctx) {
+                       perf_event_mmap_ctx(ctx, mmap_event,
+                                       vma->vm_flags & VM_EXEC);
+               }
+ next:
+               put_cpu_ptr(pmu->pmu_cpu_context);
+       }
        rcu_read_unlock();
  
        kfree(buf);
@@@ -3967,8 -4253,6 +4258,6 @@@ static int __perf_event_overflow(struc
        struct hw_perf_event *hwc = &event->hw;
        int ret = 0;
  
-       throttle = (throttle && event->pmu->unthrottle != NULL);
        if (!throttle) {
                hwc->interrupts++;
        } else {
@@@ -4036,6 -4320,17 +4325,17 @@@ int perf_event_overflow(struct perf_eve
   * Generic software event infrastructure
   */
  
+ struct swevent_htable {
+       struct swevent_hlist            *swevent_hlist;
+       struct mutex                    hlist_mutex;
+       int                             hlist_refcount;
+       /* Recursion avoidance in each contexts */
+       int                             recursion[PERF_NR_CONTEXTS];
+ };
+ static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
  /*
   * We directly increment event->count and keep a second value in
   * event->hw.period_left to count intervals. This period event
@@@ -4093,7 -4388,7 +4393,7 @@@ static void perf_swevent_overflow(struc
        }
  }
  
- static void perf_swevent_add(struct perf_event *event, u64 nr,
+ static void perf_swevent_event(struct perf_event *event, u64 nr,
                               int nmi, struct perf_sample_data *data,
                               struct pt_regs *regs)
  {
  static int perf_exclude_event(struct perf_event *event,
                              struct pt_regs *regs)
  {
+       if (event->hw.state & PERF_HES_STOPPED)
+               return 0;
        if (regs) {
                if (event->attr.exclude_user && user_mode(regs))
                        return 1;
@@@ -4165,11 -4463,11 +4468,11 @@@ __find_swevent_head(struct swevent_hlis
  
  /* For the read side: events when they trigger */
  static inline struct hlist_head *
- find_swevent_head_rcu(struct perf_cpu_context *ctx, u64 type, u32 event_id)
+ find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
  {
        struct swevent_hlist *hlist;
  
-       hlist = rcu_dereference(ctx->swevent_hlist);
+       hlist = rcu_dereference(swhash->swevent_hlist);
        if (!hlist)
                return NULL;
  
  
  /* For the event head insertion and removal in the hlist */
  static inline struct hlist_head *
- find_swevent_head(struct perf_cpu_context *ctx, struct perf_event *event)
+ find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
  {
        struct swevent_hlist *hlist;
        u32 event_id = event->attr.config;
         * and release. Which makes the protected version suitable here.
         * The context lock guarantees that.
         */
-       hlist = rcu_dereference_protected(ctx->swevent_hlist,
+       hlist = rcu_dereference_protected(swhash->swevent_hlist,
                                          lockdep_is_held(&event->ctx->lock));
        if (!hlist)
                return NULL;
@@@ -4202,23 -4500,19 +4505,19 @@@ static void do_perf_sw_event(enum perf_
                                    struct perf_sample_data *data,
                                    struct pt_regs *regs)
  {
-       struct perf_cpu_context *cpuctx;
+       struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
        struct perf_event *event;
        struct hlist_node *node;
        struct hlist_head *head;
  
-       cpuctx = &__get_cpu_var(perf_cpu_context);
        rcu_read_lock();
-       head = find_swevent_head_rcu(cpuctx, type, event_id);
+       head = find_swevent_head_rcu(swhash, type, event_id);
        if (!head)
                goto end;
  
        hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
                if (perf_swevent_match(event, type, event_id, data, regs))
-                       perf_swevent_add(event, nr, nmi, data, regs);
+                       perf_swevent_event(event, nr, nmi, data, regs);
        }
  end:
        rcu_read_unlock();
  
  int perf_swevent_get_recursion_context(void)
  {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       int rctx;
+       struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
  
-       if (in_nmi())
-               rctx = 3;
-       else if (in_irq())
-               rctx = 2;
-       else if (in_softirq())
-               rctx = 1;
-       else
-               rctx = 0;
-       if (cpuctx->recursion[rctx])
-               return -1;
-       cpuctx->recursion[rctx]++;
-       barrier();
-       return rctx;
+       return get_recursion_context(swhash->recursion);
  }
  EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
  
  void inline perf_swevent_put_recursion_context(int rctx)
  {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       barrier();
-       cpuctx->recursion[rctx]--;
+       struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+       put_recursion_context(swhash->recursion, rctx);
  }
  
  void __perf_sw_event(u32 event_id, u64 nr, int nmi,
@@@ -4278,20 -4556,20 +4561,20 @@@ static void perf_swevent_read(struct pe
  {
  }
  
- static int perf_swevent_enable(struct perf_event *event)
+ static int perf_swevent_add(struct perf_event *event, int flags)
  {
+       struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
        struct hw_perf_event *hwc = &event->hw;
-       struct perf_cpu_context *cpuctx;
        struct hlist_head *head;
  
-       cpuctx = &__get_cpu_var(perf_cpu_context);
        if (hwc->sample_period) {
                hwc->last_period = hwc->sample_period;
                perf_swevent_set_period(event);
        }
  
-       head = find_swevent_head(cpuctx, event);
+       hwc->state = !(flags & PERF_EF_START);
+       head = find_swevent_head(swhash, event);
        if (WARN_ON_ONCE(!head))
                return -EINVAL;
  
        return 0;
  }
  
- static void perf_swevent_disable(struct perf_event *event)
+ static void perf_swevent_del(struct perf_event *event, int flags)
  {
        hlist_del_rcu(&event->hlist_entry);
  }
  
- static void perf_swevent_void(struct perf_event *event)
+ static void perf_swevent_start(struct perf_event *event, int flags)
  {
+       event->hw.state = 0;
  }
  
- static int perf_swevent_int(struct perf_event *event)
+ static void perf_swevent_stop(struct perf_event *event, int flags)
  {
-       return 0;
- }
- static const struct pmu perf_ops_generic = {
-       .enable         = perf_swevent_enable,
-       .disable        = perf_swevent_disable,
-       .start          = perf_swevent_int,
-       .stop           = perf_swevent_void,
-       .read           = perf_swevent_read,
-       .unthrottle     = perf_swevent_void, /* hwc->interrupts already reset */
- };
- /*
-  * hrtimer based swevent callback
-  */
- static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
- {
-       enum hrtimer_restart ret = HRTIMER_RESTART;
-       struct perf_sample_data data;
-       struct pt_regs *regs;
-       struct perf_event *event;
-       u64 period;
-       event = container_of(hrtimer, struct perf_event, hw.hrtimer);
-       event->pmu->read(event);
-       perf_sample_data_init(&data, 0);
-       data.period = event->hw.last_period;
-       regs = get_irq_regs();
-       if (regs && !perf_exclude_event(event, regs)) {
-               if (!(event->attr.exclude_idle && current->pid == 0))
-                       if (perf_event_overflow(event, 0, &data, regs))
-                               ret = HRTIMER_NORESTART;
-       }
-       period = max_t(u64, 10000, event->hw.sample_period);
-       hrtimer_forward_now(hrtimer, ns_to_ktime(period));
-       return ret;
+       event->hw.state = PERF_HES_STOPPED;
  }
  
- static void perf_swevent_start_hrtimer(struct perf_event *event)
- {
-       struct hw_perf_event *hwc = &event->hw;
-       hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-       hwc->hrtimer.function = perf_swevent_hrtimer;
-       if (hwc->sample_period) {
-               u64 period;
-               if (hwc->remaining) {
-                       if (hwc->remaining < 0)
-                               period = 10000;
-                       else
-                               period = hwc->remaining;
-                       hwc->remaining = 0;
-               } else {
-                       period = max_t(u64, 10000, hwc->sample_period);
-               }
-               __hrtimer_start_range_ns(&hwc->hrtimer,
-                               ns_to_ktime(period), 0,
-                               HRTIMER_MODE_REL, 0);
-       }
- }
- static void perf_swevent_cancel_hrtimer(struct perf_event *event)
- {
-       struct hw_perf_event *hwc = &event->hw;
-       if (hwc->sample_period) {
-               ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
-               hwc->remaining = ktime_to_ns(remaining);
-               hrtimer_cancel(&hwc->hrtimer);
-       }
- }
- /*
-  * Software event: cpu wall time clock
-  */
- static void cpu_clock_perf_event_update(struct perf_event *event)
- {
-       int cpu = raw_smp_processor_id();
-       s64 prev;
-       u64 now;
-       now = cpu_clock(cpu);
-       prev = local64_xchg(&event->hw.prev_count, now);
-       local64_add(now - prev, &event->count);
- }
- static int cpu_clock_perf_event_enable(struct perf_event *event)
- {
-       struct hw_perf_event *hwc = &event->hw;
-       int cpu = raw_smp_processor_id();
-       local64_set(&hwc->prev_count, cpu_clock(cpu));
-       perf_swevent_start_hrtimer(event);
-       return 0;
- }
- static void cpu_clock_perf_event_disable(struct perf_event *event)
- {
-       perf_swevent_cancel_hrtimer(event);
-       cpu_clock_perf_event_update(event);
- }
- static void cpu_clock_perf_event_read(struct perf_event *event)
- {
-       cpu_clock_perf_event_update(event);
- }
- static const struct pmu perf_ops_cpu_clock = {
-       .enable         = cpu_clock_perf_event_enable,
-       .disable        = cpu_clock_perf_event_disable,
-       .read           = cpu_clock_perf_event_read,
- };
- /*
-  * Software event: task time clock
-  */
- static void task_clock_perf_event_update(struct perf_event *event, u64 now)
- {
-       u64 prev;
-       s64 delta;
-       prev = local64_xchg(&event->hw.prev_count, now);
-       delta = now - prev;
-       local64_add(delta, &event->count);
- }
- static int task_clock_perf_event_enable(struct perf_event *event)
- {
-       struct hw_perf_event *hwc = &event->hw;
-       u64 now;
-       now = event->ctx->time;
-       local64_set(&hwc->prev_count, now);
-       perf_swevent_start_hrtimer(event);
-       return 0;
- }
- static void task_clock_perf_event_disable(struct perf_event *event)
- {
-       perf_swevent_cancel_hrtimer(event);
-       task_clock_perf_event_update(event, event->ctx->time);
- }
- static void task_clock_perf_event_read(struct perf_event *event)
- {
-       u64 time;
-       if (!in_nmi()) {
-               update_context_time(event->ctx);
-               time = event->ctx->time;
-       } else {
-               u64 now = perf_clock();
-               u64 delta = now - event->ctx->timestamp;
-               time = event->ctx->time + delta;
-       }
-       task_clock_perf_event_update(event, time);
- }
- static const struct pmu perf_ops_task_clock = {
-       .enable         = task_clock_perf_event_enable,
-       .disable        = task_clock_perf_event_disable,
-       .read           = task_clock_perf_event_read,
- };
  /* Deref the hlist from the update side */
  static inline struct swevent_hlist *
- swevent_hlist_deref(struct perf_cpu_context *cpuctx)
+ swevent_hlist_deref(struct swevent_htable *swhash)
  {
-       return rcu_dereference_protected(cpuctx->swevent_hlist,
-                                        lockdep_is_held(&cpuctx->hlist_mutex));
+       return rcu_dereference_protected(swhash->swevent_hlist,
+                                        lockdep_is_held(&swhash->hlist_mutex));
  }
  
  static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
        kfree(hlist);
  }
  
- static void swevent_hlist_release(struct perf_cpu_context *cpuctx)
+ static void swevent_hlist_release(struct swevent_htable *swhash)
  {
-       struct swevent_hlist *hlist = swevent_hlist_deref(cpuctx);
+       struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
  
        if (!hlist)
                return;
  
-       rcu_assign_pointer(cpuctx->swevent_hlist, NULL);
+       rcu_assign_pointer(swhash->swevent_hlist, NULL);
        call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
  }
  
  static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
  {
-       struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+       struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
  
-       mutex_lock(&cpuctx->hlist_mutex);
+       mutex_lock(&swhash->hlist_mutex);
  
-       if (!--cpuctx->hlist_refcount)
-               swevent_hlist_release(cpuctx);
+       if (!--swhash->hlist_refcount)
+               swevent_hlist_release(swhash);
  
-       mutex_unlock(&cpuctx->hlist_mutex);
+       mutex_unlock(&swhash->hlist_mutex);
  }
  
  static void swevent_hlist_put(struct perf_event *event)
  
  static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
  {
-       struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+       struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
        int err = 0;
  
-       mutex_lock(&cpuctx->hlist_mutex);
+       mutex_lock(&swhash->hlist_mutex);
  
-       if (!swevent_hlist_deref(cpuctx) && cpu_online(cpu)) {
+       if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
                struct swevent_hlist *hlist;
  
                hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
                        err = -ENOMEM;
                        goto exit;
                }
-               rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
+               rcu_assign_pointer(swhash->swevent_hlist, hlist);
        }
-       cpuctx->hlist_refcount++;
-  exit:
-       mutex_unlock(&cpuctx->hlist_mutex);
+       swhash->hlist_refcount++;
+ exit:
+       mutex_unlock(&swhash->hlist_mutex);
  
        return err;
  }
@@@ -4585,7 -4688,7 +4693,7 @@@ static int swevent_hlist_get(struct per
        put_online_cpus();
  
        return 0;
-  fail:
+ fail:
        for_each_possible_cpu(cpu) {
                if (cpu == failed_cpu)
                        break;
        return err;
  }
  
- #ifdef CONFIG_EVENT_TRACING
+ atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
+ static void sw_perf_event_destroy(struct perf_event *event)
+ {
+       u64 event_id = event->attr.config;
+       WARN_ON(event->parent);
+       atomic_dec(&perf_swevent_enabled[event_id]);
+       swevent_hlist_put(event);
+ }
+ static int perf_swevent_init(struct perf_event *event)
+ {
+       int event_id = event->attr.config;
+       if (event->attr.type != PERF_TYPE_SOFTWARE)
+               return -ENOENT;
+       switch (event_id) {
+       case PERF_COUNT_SW_CPU_CLOCK:
+       case PERF_COUNT_SW_TASK_CLOCK:
+               return -ENOENT;
+       default:
+               break;
+       }
+       if (event_id >= PERF_COUNT_SW_MAX)
+               return -ENOENT;
+       if (!event->parent) {
+               int err;
+               err = swevent_hlist_get(event);
+               if (err)
+                       return err;
+               atomic_inc(&perf_swevent_enabled[event_id]);
+               event->destroy = sw_perf_event_destroy;
+       }
+       return 0;
+ }
+ static struct pmu perf_swevent = {
+       .task_ctx_nr    = perf_sw_context,
  
- static const struct pmu perf_ops_tracepoint = {
-       .enable         = perf_trace_enable,
-       .disable        = perf_trace_disable,
-       .start          = perf_swevent_int,
-       .stop           = perf_swevent_void,
+       .event_init     = perf_swevent_init,
+       .add            = perf_swevent_add,
+       .del            = perf_swevent_del,
+       .start          = perf_swevent_start,
+       .stop           = perf_swevent_stop,
        .read           = perf_swevent_read,
-       .unthrottle     = perf_swevent_void,
  };
  
+ #ifdef CONFIG_EVENT_TRACING
  static int perf_tp_filter_match(struct perf_event *event,
                                struct perf_sample_data *data)
  {
@@@ -4650,7 -4800,7 +4805,7 @@@ void perf_tp_event(u64 addr, u64 count
  
        hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
                if (perf_tp_event_match(event, &data, regs))
-                       perf_swevent_add(event, count, 1, &data, regs);
+                       perf_swevent_event(event, count, 1, &data, regs);
        }
  
        perf_swevent_put_recursion_context(rctx);
@@@ -4662,10 -4812,13 +4817,13 @@@ static void tp_perf_event_destroy(struc
        perf_trace_destroy(event);
  }
  
- static const struct pmu *tp_perf_event_init(struct perf_event *event)
+ static int perf_tp_event_init(struct perf_event *event)
  {
        int err;
  
+       if (event->attr.type != PERF_TYPE_TRACEPOINT)
+               return -ENOENT;
        /*
         * Raw tracepoint data is a severe data leak, only allow root to
         * have these.
        if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
                        perf_paranoid_tracepoint_raw() &&
                        !capable(CAP_SYS_ADMIN))
-               return ERR_PTR(-EPERM);
+               return -EPERM;
  
        err = perf_trace_init(event);
        if (err)
-               return NULL;
+               return err;
  
        event->destroy = tp_perf_event_destroy;
  
-       return &perf_ops_tracepoint;
+       return 0;
+ }
+ static struct pmu perf_tracepoint = {
+       .task_ctx_nr    = perf_sw_context,
+       .event_init     = perf_tp_event_init,
+       .add            = perf_trace_add,
+       .del            = perf_trace_del,
+       .start          = perf_swevent_start,
+       .stop           = perf_swevent_stop,
+       .read           = perf_swevent_read,
+ };
+ static inline void perf_tp_register(void)
+ {
+       perf_pmu_register(&perf_tracepoint);
  }
  
  static int perf_event_set_filter(struct perf_event *event, void __user *arg)
@@@ -4709,9 -4878,8 +4883,8 @@@ static void perf_event_free_filter(stru
  
  #else
  
- static const struct pmu *tp_perf_event_init(struct perf_event *event)
+ static inline void perf_tp_register(void)
  {
-       return NULL;
  }
  
  static int perf_event_set_filter(struct perf_event *event, void __user *arg)
@@@ -4726,105 -4894,389 +4899,389 @@@ static void perf_event_free_filter(stru
  #endif /* CONFIG_EVENT_TRACING */
  
  #ifdef CONFIG_HAVE_HW_BREAKPOINT
- static void bp_perf_event_destroy(struct perf_event *event)
+ void perf_bp_event(struct perf_event *bp, void *data)
  {
-       release_bp_slot(event);
+       struct perf_sample_data sample;
+       struct pt_regs *regs = data;
+       perf_sample_data_init(&sample, bp->attr.bp_addr);
+       if (!bp->hw.state && !perf_exclude_event(bp, regs))
+               perf_swevent_event(bp, 1, 1, &sample, regs);
  }
+ #endif
  
- static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+ /*
+  * hrtimer based swevent callback
+  */
+ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
  {
-       int err;
+       enum hrtimer_restart ret = HRTIMER_RESTART;
+       struct perf_sample_data data;
+       struct pt_regs *regs;
+       struct perf_event *event;
+       u64 period;
  
-       err = register_perf_hw_breakpoint(bp);
-       if (err)
-               return ERR_PTR(err);
+       event = container_of(hrtimer, struct perf_event, hw.hrtimer);
+       event->pmu->read(event);
  
-       bp->destroy = bp_perf_event_destroy;
+       perf_sample_data_init(&data, 0);
+       data.period = event->hw.last_period;
+       regs = get_irq_regs();
+       if (regs && !perf_exclude_event(event, regs)) {
+               if (!(event->attr.exclude_idle && current->pid == 0))
+                       if (perf_event_overflow(event, 0, &data, regs))
+                               ret = HRTIMER_NORESTART;
+       }
  
-       return &perf_ops_bp;
+       period = max_t(u64, 10000, event->hw.sample_period);
+       hrtimer_forward_now(hrtimer, ns_to_ktime(period));
+       return ret;
  }
  
- void perf_bp_event(struct perf_event *bp, void *data)
+ static void perf_swevent_start_hrtimer(struct perf_event *event)
  {
-       struct perf_sample_data sample;
-       struct pt_regs *regs = data;
+       struct hw_perf_event *hwc = &event->hw;
  
-       perf_sample_data_init(&sample, bp->attr.bp_addr);
+       hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       hwc->hrtimer.function = perf_swevent_hrtimer;
+       if (hwc->sample_period) {
+               s64 period = local64_read(&hwc->period_left);
+               if (period) {
+                       if (period < 0)
+                               period = 10000;
  
-       if (!perf_exclude_event(bp, regs))
-               perf_swevent_add(bp, 1, 1, &sample, regs);
+                       local64_set(&hwc->period_left, 0);
+               } else {
+                       period = max_t(u64, 10000, hwc->sample_period);
+               }
+               __hrtimer_start_range_ns(&hwc->hrtimer,
+                               ns_to_ktime(period), 0,
+                               HRTIMER_MODE_REL_PINNED, 0);
+       }
  }
- #else
- static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+ static void perf_swevent_cancel_hrtimer(struct perf_event *event)
  {
-       return NULL;
+       struct hw_perf_event *hwc = &event->hw;
+       if (hwc->sample_period) {
+               ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
+               local64_set(&hwc->period_left, ktime_to_ns(remaining));
+               hrtimer_cancel(&hwc->hrtimer);
+       }
  }
  
- void perf_bp_event(struct perf_event *bp, void *regs)
+ /*
+  * Software event: cpu wall time clock
+  */
+ static void cpu_clock_event_update(struct perf_event *event)
  {
+       s64 prev;
+       u64 now;
+       now = local_clock();
+       prev = local64_xchg(&event->hw.prev_count, now);
+       local64_add(now - prev, &event->count);
  }
- #endif
  
- atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
+ static void cpu_clock_event_start(struct perf_event *event, int flags)
+ {
+       local64_set(&event->hw.prev_count, local_clock());
+       perf_swevent_start_hrtimer(event);
+ }
  
- static void sw_perf_event_destroy(struct perf_event *event)
+ static void cpu_clock_event_stop(struct perf_event *event, int flags)
  {
-       u64 event_id = event->attr.config;
+       perf_swevent_cancel_hrtimer(event);
+       cpu_clock_event_update(event);
+ }
  
-       WARN_ON(event->parent);
+ static int cpu_clock_event_add(struct perf_event *event, int flags)
+ {
+       if (flags & PERF_EF_START)
+               cpu_clock_event_start(event, flags);
  
-       atomic_dec(&perf_swevent_enabled[event_id]);
-       swevent_hlist_put(event);
+       return 0;
  }
  
- static const struct pmu *sw_perf_event_init(struct perf_event *event)
+ static void cpu_clock_event_del(struct perf_event *event, int flags)
  {
-       const struct pmu *pmu = NULL;
-       u64 event_id = event->attr.config;
+       cpu_clock_event_stop(event, flags);
+ }
+ static void cpu_clock_event_read(struct perf_event *event)
+ {
+       cpu_clock_event_update(event);
+ }
+ static int cpu_clock_event_init(struct perf_event *event)
+ {
+       if (event->attr.type != PERF_TYPE_SOFTWARE)
+               return -ENOENT;
+       if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
+               return -ENOENT;
  
+       return 0;
+ }
+ static struct pmu perf_cpu_clock = {
+       .task_ctx_nr    = perf_sw_context,
+       .event_init     = cpu_clock_event_init,
+       .add            = cpu_clock_event_add,
+       .del            = cpu_clock_event_del,
+       .start          = cpu_clock_event_start,
+       .stop           = cpu_clock_event_stop,
+       .read           = cpu_clock_event_read,
+ };
+ /*
+  * Software event: task time clock
+  */
+ static void task_clock_event_update(struct perf_event *event, u64 now)
+ {
+       u64 prev;
+       s64 delta;
+       prev = local64_xchg(&event->hw.prev_count, now);
+       delta = now - prev;
+       local64_add(delta, &event->count);
+ }
+ static void task_clock_event_start(struct perf_event *event, int flags)
+ {
+       local64_set(&event->hw.prev_count, event->ctx->time);
+       perf_swevent_start_hrtimer(event);
+ }
+ static void task_clock_event_stop(struct perf_event *event, int flags)
+ {
+       perf_swevent_cancel_hrtimer(event);
+       task_clock_event_update(event, event->ctx->time);
+ }
+ static int task_clock_event_add(struct perf_event *event, int flags)
+ {
+       if (flags & PERF_EF_START)
+               task_clock_event_start(event, flags);
+       return 0;
+ }
+ static void task_clock_event_del(struct perf_event *event, int flags)
+ {
+       task_clock_event_stop(event, PERF_EF_UPDATE);
+ }
+ static void task_clock_event_read(struct perf_event *event)
+ {
+       u64 time;
+       if (!in_nmi()) {
+               update_context_time(event->ctx);
+               time = event->ctx->time;
+       } else {
+               u64 now = perf_clock();
+               u64 delta = now - event->ctx->timestamp;
+               time = event->ctx->time + delta;
+       }
+       task_clock_event_update(event, time);
+ }
+ static int task_clock_event_init(struct perf_event *event)
+ {
+       if (event->attr.type != PERF_TYPE_SOFTWARE)
+               return -ENOENT;
+       if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
+               return -ENOENT;
+       return 0;
+ }
+ static struct pmu perf_task_clock = {
+       .task_ctx_nr    = perf_sw_context,
+       .event_init     = task_clock_event_init,
+       .add            = task_clock_event_add,
+       .del            = task_clock_event_del,
+       .start          = task_clock_event_start,
+       .stop           = task_clock_event_stop,
+       .read           = task_clock_event_read,
+ };
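
Both software clocks keep a prev_count timestamp and fold the delta into event->count on every update; cpu-clock reads local_clock() and is sampled by an hrtimer, task-clock uses the context's accumulated time instead. A user-space sketch of just the accumulation step, with clock_gettime(CLOCK_MONOTONIC) standing in for local_clock(), no hrtimer-driven sampling, and invented toy_* names:

/*
 * User-space sketch of the clock-event accounting above: remember the
 * last timestamp in prev_count, exchange it for "now" on each update,
 * and add the difference to the event count.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct toy_clock_event {
	uint64_t prev_count;	/* last timestamp we accounted up to */
	uint64_t count;		/* accumulated nanoseconds           */
};

static uint64_t toy_now(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void toy_clock_event_start(struct toy_clock_event *e)
{
	e->prev_count = toy_now();
}

static void toy_clock_event_update(struct toy_clock_event *e)
{
	uint64_t now = toy_now();

	e->count += now - e->prev_count;	/* local64_xchg() in the kernel */
	e->prev_count = now;
}

int main(void)
{
	struct toy_clock_event e = { 0, 0 };
	volatile unsigned long spin;

	toy_clock_event_start(&e);
	for (spin = 0; spin < 10000000UL; spin++)
		;				/* burn a little time */
	toy_clock_event_update(&e);		/* like *_event_stop()/read() */
	printf("counted %llu ns\n", (unsigned long long)e.count);
	return 0;
}
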
+ static void perf_pmu_nop_void(struct pmu *pmu)
+ {
+ }
+ static int perf_pmu_nop_int(struct pmu *pmu)
+ {
+       return 0;
+ }
+ static void perf_pmu_start_txn(struct pmu *pmu)
+ {
+       perf_pmu_disable(pmu);
+ }
+ static int perf_pmu_commit_txn(struct pmu *pmu)
+ {
+       perf_pmu_enable(pmu);
+       return 0;
+ }
+ static void perf_pmu_cancel_txn(struct pmu *pmu)
+ {
+       perf_pmu_enable(pmu);
+ }
+ /*
+  * Ensures all contexts with the same task_ctx_nr have the same
+  * pmu_cpu_context too.
+  */
+ static void *find_pmu_context(int ctxn)
+ {
+       struct pmu *pmu;
+       if (ctxn < 0)
+               return NULL;
+       list_for_each_entry(pmu, &pmus, entry) {
+               if (pmu->task_ctx_nr == ctxn)
+                       return pmu->pmu_cpu_context;
+       }
+       return NULL;
+ }
+ static void free_pmu_context(void * __percpu cpu_context)
+ {
+       struct pmu *pmu;
+       mutex_lock(&pmus_lock);
        /*
-        * Software events (currently) can't in general distinguish
-        * between user, kernel and hypervisor events.
-        * However, context switches and cpu migrations are considered
-        * to be kernel events, and page faults are never hypervisor
-        * events.
+        * Like a real lame refcount.
         */
-       switch (event_id) {
-       case PERF_COUNT_SW_CPU_CLOCK:
-               pmu = &perf_ops_cpu_clock;
+       list_for_each_entry(pmu, &pmus, entry) {
+               if (pmu->pmu_cpu_context == cpu_context)
+                       goto out;
+       }
  
-               break;
-       case PERF_COUNT_SW_TASK_CLOCK:
-               /*
-                * If the user instantiates this as a per-cpu event,
-                * use the cpu_clock event instead.
-                */
-               if (event->ctx->task)
-                       pmu = &perf_ops_task_clock;
-               else
-                       pmu = &perf_ops_cpu_clock;
+       free_percpu(cpu_context);
+ out:
+       mutex_unlock(&pmus_lock);
+ }
  
-               break;
-       case PERF_COUNT_SW_PAGE_FAULTS:
-       case PERF_COUNT_SW_PAGE_FAULTS_MIN:
-       case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
-       case PERF_COUNT_SW_CONTEXT_SWITCHES:
-       case PERF_COUNT_SW_CPU_MIGRATIONS:
-       case PERF_COUNT_SW_ALIGNMENT_FAULTS:
-       case PERF_COUNT_SW_EMULATION_FAULTS:
-               if (!event->parent) {
-                       int err;
-                       err = swevent_hlist_get(event);
-                       if (err)
-                               return ERR_PTR(err);
+ int perf_pmu_register(struct pmu *pmu)
+ {
+       int cpu, ret;
  
-                       atomic_inc(&perf_swevent_enabled[event_id]);
-                       event->destroy = sw_perf_event_destroy;
+       mutex_lock(&pmus_lock);
+       ret = -ENOMEM;
+       pmu->pmu_disable_count = alloc_percpu(int);
+       if (!pmu->pmu_disable_count)
+               goto unlock;
+       pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
+       if (pmu->pmu_cpu_context)
+               goto got_cpu_context;
+       pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
+       if (!pmu->pmu_cpu_context)
+               goto free_pdc;
+       for_each_possible_cpu(cpu) {
+               struct perf_cpu_context *cpuctx;
+               cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+               __perf_event_init_context(&cpuctx->ctx);
+               cpuctx->ctx.type = cpu_context;
+               cpuctx->ctx.pmu = pmu;
+               cpuctx->jiffies_interval = 1;
+               INIT_LIST_HEAD(&cpuctx->rotation_list);
+       }
+ got_cpu_context:
+       if (!pmu->start_txn) {
+               if (pmu->pmu_enable) {
+                       /*
+                        * If we have pmu_enable/pmu_disable calls, install
+                        * transaction stubs that use that to try and batch
+                        * hardware accesses.
+                        */
+                       pmu->start_txn  = perf_pmu_start_txn;
+                       pmu->commit_txn = perf_pmu_commit_txn;
+                       pmu->cancel_txn = perf_pmu_cancel_txn;
+               } else {
+                       pmu->start_txn  = perf_pmu_nop_void;
+                       pmu->commit_txn = perf_pmu_nop_int;
+                       pmu->cancel_txn = perf_pmu_nop_void;
+               }
+       }
+       if (!pmu->pmu_enable) {
+               pmu->pmu_enable  = perf_pmu_nop_void;
+               pmu->pmu_disable = perf_pmu_nop_void;
+       }
+       list_add_rcu(&pmu->entry, &pmus);
+       ret = 0;
+ unlock:
+       mutex_unlock(&pmus_lock);
+       return ret;
+ free_pdc:
+       free_percpu(pmu->pmu_disable_count);
+       goto unlock;
+ }
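
perf_pmu_register() also papers over optional callbacks: a pmu that supplies pmu_enable/pmu_disable but no transaction hooks gets start/commit/cancel_txn stubs built on those, and missing pmu_enable/pmu_disable become no-ops. Below is a small user-space sketch of that "fill in defaults at registration" pattern; the toy_* structures and functions are invented and deliberately much simpler than the kernel ones.

/*
 * Sketch of the default-callback filling done in perf_pmu_register():
 * optional ops left NULL are replaced either with wrappers built on the
 * ops the driver did provide, or with no-op stubs.
 */
#include <stdio.h>

struct toy_pmu {
	const char *name;
	void (*pmu_enable)(struct toy_pmu *);
	void (*pmu_disable)(struct toy_pmu *);
	void (*start_txn)(struct toy_pmu *);
	int  (*commit_txn)(struct toy_pmu *);
};

static void toy_nop_void(struct toy_pmu *pmu) { (void)pmu; }
static int  toy_nop_int(struct toy_pmu *pmu)  { (void)pmu; return 0; }

/* Batch hardware accesses by bracketing them with disable/enable. */
static void toy_txn_start(struct toy_pmu *pmu)  { pmu->pmu_disable(pmu); }
static int  toy_txn_commit(struct toy_pmu *pmu) { pmu->pmu_enable(pmu); return 0; }

static void toy_pmu_register(struct toy_pmu *pmu)
{
	if (!pmu->start_txn) {
		if (pmu->pmu_enable) {		/* build txn on enable/disable */
			pmu->start_txn  = toy_txn_start;
			pmu->commit_txn = toy_txn_commit;
		} else {
			pmu->start_txn  = toy_nop_void;
			pmu->commit_txn = toy_nop_int;
		}
	}
	if (!pmu->pmu_enable) {
		pmu->pmu_enable  = toy_nop_void;
		pmu->pmu_disable = toy_nop_void;
	}
	printf("registered %s\n", pmu->name);
}

static void hw_enable(struct toy_pmu *pmu)  { printf("%s: enable\n", pmu->name); }
static void hw_disable(struct toy_pmu *pmu) { printf("%s: disable\n", pmu->name); }

int main(void)
{
	struct toy_pmu hw = { "hw", hw_enable, hw_disable, NULL, NULL };
	struct toy_pmu sw = { "sw", NULL, NULL, NULL, NULL };

	toy_pmu_register(&hw);
	toy_pmu_register(&sw);

	hw.start_txn(&hw);	/* -> hw: disable */
	hw.commit_txn(&hw);	/* -> hw: enable  */
	sw.start_txn(&sw);	/* no-op */
	return 0;
}
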
+ void perf_pmu_unregister(struct pmu *pmu)
+ {
+       mutex_lock(&pmus_lock);
+       list_del_rcu(&pmu->entry);
+       mutex_unlock(&pmus_lock);
+       /*
+        * We dereference the pmu list under both SRCU and regular RCU, so
+        * synchronize against both of those.
+        */
+       synchronize_srcu(&pmus_srcu);
+       synchronize_rcu();
+       free_percpu(pmu->pmu_disable_count);
+       free_pmu_context(pmu->pmu_cpu_context);
+ }
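+ 
+ /*
+  * Find a pmu willing to handle this event: -ENOENT from ->event_init()
+  * means the pmu does not handle this event type, so keep looking; any
+  * other error ends the search with that error.
+  */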
+ struct pmu *perf_init_event(struct perf_event *event)
+ {
+       struct pmu *pmu = NULL;
+       int idx;
+       idx = srcu_read_lock(&pmus_srcu);
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               int ret = pmu->event_init(event);
+               if (!ret)
+                       goto unlock;
+               if (ret != -ENOENT) {
+                       pmu = ERR_PTR(ret);
+                       goto unlock;
                }
-               pmu = &perf_ops_generic;
-               break;
        }
+       pmu = ERR_PTR(-ENOENT);
+ unlock:
+       srcu_read_unlock(&pmus_srcu, idx);
  
        return pmu;
  }
   * Allocate and initialize an event structure
   */
  static struct perf_event *
- perf_event_alloc(struct perf_event_attr *attr,
-                  int cpu,
-                  struct perf_event_context *ctx,
+ perf_event_alloc(struct perf_event_attr *attr, int cpu,
                   struct perf_event *group_leader,
                   struct perf_event *parent_event,
-                  perf_overflow_handler_t overflow_handler,
-                  gfp_t gfpflags)
+                  perf_overflow_handler_t overflow_handler)
  {
-       const struct pmu *pmu;
+       struct pmu *pmu;
        struct perf_event *event;
        struct hw_perf_event *hwc;
        long err;
  
-       event = kzalloc(sizeof(*event), gfpflags);
+       event = kzalloc(sizeof(*event), GFP_KERNEL);
        if (!event)
                return ERR_PTR(-ENOMEM);
  
        event->attr             = *attr;
        event->group_leader     = group_leader;
        event->pmu              = NULL;
-       event->ctx              = ctx;
        event->oncpu            = -1;
  
        event->parent           = parent_event;
        if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
                goto done;
  
-       switch (attr->type) {
-       case PERF_TYPE_RAW:
-       case PERF_TYPE_HARDWARE:
-       case PERF_TYPE_HW_CACHE:
-               pmu = hw_perf_event_init(event);
-               break;
+       pmu = perf_init_event(event);
  
-       case PERF_TYPE_SOFTWARE:
-               pmu = sw_perf_event_init(event);
-               break;
-       case PERF_TYPE_TRACEPOINT:
-               pmu = tp_perf_event_init(event);
-               break;
-       case PERF_TYPE_BREAKPOINT:
-               pmu = bp_perf_event_init(event);
-               break;
-       default:
-               break;
-       }
  done:
        err = 0;
        if (!pmu)
                        atomic_inc(&nr_comm_events);
                if (event->attr.task)
                        atomic_inc(&nr_task_events);
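+               /*
+                * Sampling callchains requires the callchain buffers to
+                * be set up.
+                */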
+               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
+                       err = get_callchain_buffers();
+                       if (err) {
+                               free_event(event);
+                               return ERR_PTR(err);
+                       }
+               }
        }
  
        return event;
@@@ -5099,12 -5533,16 +5538,16 @@@ SYSCALL_DEFINE5(perf_event_open
                struct perf_event_attr __user *, attr_uptr,
                pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
  {
-       struct perf_event *event, *group_leader = NULL, *output_event = NULL;
+       struct perf_event *group_leader = NULL, *output_event = NULL;
+       struct perf_event *event, *sibling;
        struct perf_event_attr attr;
        struct perf_event_context *ctx;
        struct file *event_file = NULL;
        struct file *group_file = NULL;
+       struct task_struct *task = NULL;
+       struct pmu *pmu;
        int event_fd;
+       int move_group = 0;
        int fput_needed = 0;
        int err;
  
        if (event_fd < 0)
                return event_fd;
  
-       /*
-        * Get the target context (task or percpu):
-        */
-       ctx = find_get_context(pid, cpu);
-       if (IS_ERR(ctx)) {
-               err = PTR_ERR(ctx);
-               goto err_fd;
-       }
        if (group_fd != -1) {
                group_leader = perf_fget_light(group_fd, &fput_needed);
                if (IS_ERR(group_leader)) {
                        err = PTR_ERR(group_leader);
-                       goto err_put_context;
+                       goto err_fd;
                }
                group_file = group_leader->filp;
                if (flags & PERF_FLAG_FD_OUTPUT)
                        group_leader = NULL;
        }
  
+       event = perf_event_alloc(&attr, cpu, group_leader, NULL, NULL);
+       if (IS_ERR(event)) {
+               err = PTR_ERR(event);
+               goto err_fd;
+       }
+       /*
+        * Special case software events and allow them to be part of
+        * any hardware group.
+        */
+       pmu = event->pmu;
+       if (group_leader &&
+           (is_software_event(event) != is_software_event(group_leader))) {
+               if (is_software_event(event)) {
+                       /*
+                        * If event and group_leader are not both software
+                        * events and event is, then group_leader is not.
+                        *
+                        * Allow the addition of software events to !software
+                        * groups; this is safe because software events never
+                        * fail to schedule.
+                        */
+                       pmu = group_leader->pmu;
+               } else if (is_software_event(group_leader) &&
+                          (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
+                       /*
+                        * In case the group is a pure software group, and we
+                        * try to add a hardware event, move the whole group to
+                        * the hardware context.
+                        */
+                       move_group = 1;
+               }
+       }
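+ 
+       /*
+        * For a per-task event (pid != -1), resolve the target task before
+        * looking up its context.
+        */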
+       if (pid != -1) {
+               task = find_lively_task_by_vpid(pid);
+               if (IS_ERR(task)) {
+                       err = PTR_ERR(task);
+                       goto err_group_fd;
+               }
+       }
+       /*
+        * Get the target context (task or percpu):
+        */
+       ctx = find_get_context(pmu, task, cpu);
+       if (IS_ERR(ctx)) {
+               err = PTR_ERR(ctx);
+               goto err_group_fd;
+       }
        /*
         * Look up the group leader (we will attach this event to it):
         */
                 * becoming part of another group-sibling):
                 */
                if (group_leader->group_leader != group_leader)
-                       goto err_put_context;
+                       goto err_context;
                /*
                 * Do not allow attaching to a group in a different
                 * task or CPU context:
                 */
-               if (group_leader->ctx != ctx)
-                       goto err_put_context;
+               if (move_group) {
+                       if (group_leader->ctx->type != ctx->type)
+                               goto err_context;
+               } else {
+                       if (group_leader->ctx != ctx)
+                               goto err_context;
+               }
                /*
                 * Only a group leader can be exclusive or pinned
                 */
                if (attr.exclusive || attr.pinned)
-                       goto err_put_context;
-       }
-       event = perf_event_alloc(&attr, cpu, ctx, group_leader,
-                                    NULL, NULL, GFP_KERNEL);
-       if (IS_ERR(event)) {
-               err = PTR_ERR(event);
-               goto err_put_context;
+                       goto err_context;
        }
  
        if (output_event) {
                err = perf_event_set_output(event, output_event);
                if (err)
-                       goto err_free_put_context;
+                       goto err_context;
        }
  
        event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
        if (IS_ERR(event_file)) {
                err = PTR_ERR(event_file);
-               goto err_free_put_context;
+               goto err_context;
+       }
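+ 
+       /*
+        * Moving a pure software group under a hardware event: detach the
+        * group leader and its siblings from their old context here; they
+        * are re-installed, together with the new event, in the new
+        * context below.
+        */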
+       if (move_group) {
+               struct perf_event_context *gctx = group_leader->ctx;
+               mutex_lock(&gctx->mutex);
+               perf_event_remove_from_context(group_leader);
+               list_for_each_entry(sibling, &group_leader->sibling_list,
+                                   group_entry) {
+                       perf_event_remove_from_context(sibling);
+                       put_ctx(gctx);
+               }
+               mutex_unlock(&gctx->mutex);
+               put_ctx(gctx);
        }
  
        event->filp = event_file;
        WARN_ON_ONCE(ctx->parent_ctx);
        mutex_lock(&ctx->mutex);
+       if (move_group) {
+               perf_install_in_context(ctx, group_leader, cpu);
+               get_ctx(ctx);
+               list_for_each_entry(sibling, &group_leader->sibling_list,
+                                   group_entry) {
+                       perf_install_in_context(ctx, sibling, cpu);
+                       get_ctx(ctx);
+               }
+       }
        perf_install_in_context(ctx, event, cpu);
        ++ctx->generation;
        mutex_unlock(&ctx->mutex);
        fd_install(event_fd, event_file);
        return event_fd;
  
- err_free_put_context:
-       free_event(event);
- err_put_context:
-       fput_light(group_file, fput_needed);
+ err_context:
        put_ctx(ctx);
+ err_group_fd:
+       fput_light(group_file, fput_needed);
+       free_event(event);
  err_fd:
        put_unused_fd(event_fd);
        return err;
   *
   * @attr: attributes of the counter to create
   * @cpu: cpu to which the counter is bound
-  * @pid: task to profile
+  * @task: task to profile (NULL for percpu)
   */
  struct perf_event *
  perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
-                                pid_t pid,
+                                struct task_struct *task,
                                 perf_overflow_handler_t overflow_handler)
  {
-       struct perf_event *event;
        struct perf_event_context *ctx;
+       struct perf_event *event;
        int err;
  
        /*
         * Get the target context (task or percpu):
         */
  
-       ctx = find_get_context(pid, cpu);
-       if (IS_ERR(ctx)) {
-               err = PTR_ERR(ctx);
-               goto err_exit;
-       }
-       event = perf_event_alloc(attr, cpu, ctx, NULL,
-                                NULL, overflow_handler, GFP_KERNEL);
+       event = perf_event_alloc(attr, cpu, NULL, NULL, overflow_handler);
        if (IS_ERR(event)) {
                err = PTR_ERR(event);
-               goto err_put_context;
+               goto err;
+       }
+       ctx = find_get_context(event->pmu, task, cpu);
+       if (IS_ERR(ctx)) {
+               err = PTR_ERR(ctx);
+               goto err_free;
        }
  
        event->filp = NULL;
  
        return event;
  
-  err_put_context:
-       put_ctx(ctx);
-  err_exit:
+ err_free:
+       free_event(event);
+ err:
        return ERR_PTR(err);
  }
  EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
  
- /*
-  * inherit a event from parent task to child task:
-  */
- static struct perf_event *
- inherit_event(struct perf_event *parent_event,
-             struct task_struct *parent,
-             struct perf_event_context *parent_ctx,
-             struct task_struct *child,
-             struct perf_event *group_leader,
-             struct perf_event_context *child_ctx)
- {
-       struct perf_event *child_event;
-       /*
-        * Instead of creating recursive hierarchies of events,
-        * we link inherited events back to the original parent,
-        * which has a filp for sure, which we use as the reference
-        * count:
-        */
-       if (parent_event->parent)
-               parent_event = parent_event->parent;
-       child_event = perf_event_alloc(&parent_event->attr,
-                                          parent_event->cpu, child_ctx,
-                                          group_leader, parent_event,
-                                          NULL, GFP_KERNEL);
-       if (IS_ERR(child_event))
-               return child_event;
-       get_ctx(child_ctx);
-       /*
-        * Make the child state follow the state of the parent event,
-        * not its attr.disabled bit.  We hold the parent's mutex,
-        * so we won't race with perf_event_{en, dis}able_family.
-        */
-       if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
-               child_event->state = PERF_EVENT_STATE_INACTIVE;
-       else
-               child_event->state = PERF_EVENT_STATE_OFF;
-       if (parent_event->attr.freq) {
-               u64 sample_period = parent_event->hw.sample_period;
-               struct hw_perf_event *hwc = &child_event->hw;
-               hwc->sample_period = sample_period;
-               hwc->last_period   = sample_period;
-               local64_set(&hwc->period_left, sample_period);
-       }
-       child_event->overflow_handler = parent_event->overflow_handler;
-       /*
-        * Link it up in the child's context:
-        */
-       add_event_to_ctx(child_event, child_ctx);
-       /*
-        * Get a reference to the parent filp - we will fput it
-        * when the child event exits. This is safe to do because
-        * we are in the parent and we know that the filp still
-        * exists and has a nonzero count:
-        */
-       atomic_long_inc(&parent_event->filp->f_count);
-       /*
-        * Link this into the parent event's child list
-        */
-       WARN_ON_ONCE(parent_event->ctx->parent_ctx);
-       mutex_lock(&parent_event->child_mutex);
-       list_add_tail(&child_event->child_list, &parent_event->child_list);
-       mutex_unlock(&parent_event->child_mutex);
-       return child_event;
- }
- static int inherit_group(struct perf_event *parent_event,
-             struct task_struct *parent,
-             struct perf_event_context *parent_ctx,
-             struct task_struct *child,
-             struct perf_event_context *child_ctx)
- {
-       struct perf_event *leader;
-       struct perf_event *sub;
-       struct perf_event *child_ctr;
-       leader = inherit_event(parent_event, parent, parent_ctx,
-                                child, NULL, child_ctx);
-       if (IS_ERR(leader))
-               return PTR_ERR(leader);
-       list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
-               child_ctr = inherit_event(sub, parent, parent_ctx,
-                                           child, leader, child_ctx);
-               if (IS_ERR(child_ctr))
-                       return PTR_ERR(child_ctr);
-       }
-       return 0;
- }
  static void sync_child_event(struct perf_event *child_event,
                               struct task_struct *child)
  {
@@@ -5439,16 -5844,13 +5849,13 @@@ __perf_event_exit_task(struct perf_even
        }
  }
  
- /*
-  * When a child task exits, feed back event values to parent events.
-  */
- void perf_event_exit_task(struct task_struct *child)
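+ /*
+  * Tear down one of the child's task contexts when it exits, feeding the
+  * event values back to the parent events.
+  */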
+ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
  {
        struct perf_event *child_event, *tmp;
        struct perf_event_context *child_ctx;
        unsigned long flags;
  
-       if (likely(!child->perf_event_ctxp)) {
+       if (likely(!child->perf_event_ctxp[ctxn])) {
                perf_event_task(child, NULL, 0);
                return;
        }
         * scheduled, so we are now safe from rescheduling changing
         * our context.
         */
-       child_ctx = child->perf_event_ctxp;
+       child_ctx = child->perf_event_ctxp[ctxn];
        __perf_event_task_sched_out(child_ctx);
  
        /*
         * incremented the context's refcount before we do put_ctx below.
         */
        raw_spin_lock(&child_ctx->lock);
-       child->perf_event_ctxp = NULL;
+       child->perf_event_ctxp[ctxn] = NULL;
        /*
         * If this context is a clone; unclone it so it can't get
         * swapped to another process while we're removing all
@@@ -5522,6 -5924,17 +5929,17 @@@ again
        put_ctx(child_ctx);
  }
  
+ /*
+  * When a child task exits, feed back event values to parent events.
+  */
+ void perf_event_exit_task(struct task_struct *child)
+ {
+       int ctxn;
+       for_each_task_context_nr(ctxn)
+               perf_event_exit_task_context(child, ctxn);
+ }
  static void perf_free_event(struct perf_event *event,
                            struct perf_event_context *ctx)
  {
  
  /*
   * free an unexposed, unused context as created by inheritance by
-  * init_task below, used by fork() in case of fail.
+  * perf_event_init_task below, used by fork() in case of failure.
   */
  void perf_event_free_task(struct task_struct *task)
  {
-       struct perf_event_context *ctx = task->perf_event_ctxp;
+       struct perf_event_context *ctx;
        struct perf_event *event, *tmp;
+       int ctxn;
  
-       if (!ctx)
-               return;
+       for_each_task_context_nr(ctxn) {
+               ctx = task->perf_event_ctxp[ctxn];
+               if (!ctx)
+                       continue;
  
-       mutex_lock(&ctx->mutex);
+               mutex_lock(&ctx->mutex);
  again:
-       list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
-               perf_free_event(event, ctx);
+               list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
+                               group_entry)
+                       perf_free_event(event, ctx);
  
-       list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
-                                group_entry)
-               perf_free_event(event, ctx);
+               list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
+                               group_entry)
+                       perf_free_event(event, ctx);
  
-       if (!list_empty(&ctx->pinned_groups) ||
-           !list_empty(&ctx->flexible_groups))
-               goto again;
+               if (!list_empty(&ctx->pinned_groups) ||
+                               !list_empty(&ctx->flexible_groups))
+                       goto again;
  
-       mutex_unlock(&ctx->mutex);
+               mutex_unlock(&ctx->mutex);
  
-       put_ctx(ctx);
+               put_ctx(ctx);
+       }
+ }
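+ 
+ /*
+  * Sanity check: by the time the task struct is finally released, all of
+  * its per-task perf contexts should already have been torn down.
+  */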
+ void perf_event_delayed_put(struct task_struct *task)
+ {
+       int ctxn;
+       for_each_task_context_nr(ctxn)
+               WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
+ }
+ /*
+  * inherit an event from parent task to child task:
+  */
+ static struct perf_event *
+ inherit_event(struct perf_event *parent_event,
+             struct task_struct *parent,
+             struct perf_event_context *parent_ctx,
+             struct task_struct *child,
+             struct perf_event *group_leader,
+             struct perf_event_context *child_ctx)
+ {
+       struct perf_event *child_event;
+       unsigned long flags;
+       /*
+        * Instead of creating recursive hierarchies of events,
+        * we link inherited events back to the original parent,
+        * which has a filp for sure, which we use as the reference
+        * count:
+        */
+       if (parent_event->parent)
+               parent_event = parent_event->parent;
+       child_event = perf_event_alloc(&parent_event->attr,
+                                          parent_event->cpu,
+                                          group_leader, parent_event,
+                                          NULL);
+       if (IS_ERR(child_event))
+               return child_event;
+       get_ctx(child_ctx);
+       /*
+        * Make the child state follow the state of the parent event,
+        * not its attr.disabled bit.  We hold the parent's mutex,
+        * so we won't race with perf_event_{en, dis}able_family.
+        */
+       if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
+               child_event->state = PERF_EVENT_STATE_INACTIVE;
+       else
+               child_event->state = PERF_EVENT_STATE_OFF;
+       if (parent_event->attr.freq) {
+               u64 sample_period = parent_event->hw.sample_period;
+               struct hw_perf_event *hwc = &child_event->hw;
+               hwc->sample_period = sample_period;
+               hwc->last_period   = sample_period;
+               local64_set(&hwc->period_left, sample_period);
+       }
+       child_event->ctx = child_ctx;
+       child_event->overflow_handler = parent_event->overflow_handler;
+       /*
+        * Link it up in the child's context:
+        */
+       raw_spin_lock_irqsave(&child_ctx->lock, flags);
+       add_event_to_ctx(child_event, child_ctx);
+       raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
+       /*
+        * Get a reference to the parent filp - we will fput it
+        * when the child event exits. This is safe to do because
+        * we are in the parent and we know that the filp still
+        * exists and has a nonzero count:
+        */
+       atomic_long_inc(&parent_event->filp->f_count);
+       /*
+        * Link this into the parent event's child list
+        */
+       WARN_ON_ONCE(parent_event->ctx->parent_ctx);
+       mutex_lock(&parent_event->child_mutex);
+       list_add_tail(&child_event->child_list, &parent_event->child_list);
+       mutex_unlock(&parent_event->child_mutex);
+       return child_event;
+ }
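+ 
+ /*
+  * Inherit a whole group: the leader first, then each sibling into the
+  * leader's new group in the child context.
+  */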
+ static int inherit_group(struct perf_event *parent_event,
+             struct task_struct *parent,
+             struct perf_event_context *parent_ctx,
+             struct task_struct *child,
+             struct perf_event_context *child_ctx)
+ {
+       struct perf_event *leader;
+       struct perf_event *sub;
+       struct perf_event *child_ctr;
+       leader = inherit_event(parent_event, parent, parent_ctx,
+                                child, NULL, child_ctx);
+       if (IS_ERR(leader))
+               return PTR_ERR(leader);
+       list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
+               child_ctr = inherit_event(sub, parent, parent_ctx,
+                                           child, leader, child_ctx);
+               if (IS_ERR(child_ctr))
+                       return PTR_ERR(child_ctr);
+       }
+       return 0;
  }
  
  static int
  inherit_task_group(struct perf_event *event, struct task_struct *parent,
                   struct perf_event_context *parent_ctx,
-                  struct task_struct *child,
+                  struct task_struct *child, int ctxn,
                   int *inherited_all)
  {
        int ret;
-       struct perf_event_context *child_ctx = child->perf_event_ctxp;
+       struct perf_event_context *child_ctx;
  
        if (!event->attr.inherit) {
                *inherited_all = 0;
                return 0;
        }
  
+       child_ctx = child->perf_event_ctxp[ctxn];
        if (!child_ctx) {
                /*
                 * This is executed from the parent task context, so
                 * child.
                 */
  
-               child_ctx = kzalloc(sizeof(struct perf_event_context),
-                                   GFP_KERNEL);
+               child_ctx = alloc_perf_context(event->pmu, child);
                if (!child_ctx)
                        return -ENOMEM;
  
-               __perf_event_init_context(child_ctx, child);
-               child->perf_event_ctxp = child_ctx;
-               get_task_struct(child);
+               child->perf_event_ctxp[ctxn] = child_ctx;
        }
  
        ret = inherit_group(event, parent, parent_ctx,
        return ret;
  }
  
  /*
   * Initialize the perf_event context in task_struct
   */
- int perf_event_init_task(struct task_struct *child)
+ int perf_event_init_context(struct task_struct *child, int ctxn)
  {
        struct perf_event_context *child_ctx, *parent_ctx;
        struct perf_event_context *cloned_ctx;
        int inherited_all = 1;
        int ret = 0;
  
-       child->perf_event_ctxp = NULL;
+       child->perf_event_ctxp[ctxn] = NULL;
  
        mutex_init(&child->perf_event_mutex);
        INIT_LIST_HEAD(&child->perf_event_list);
  
-       if (likely(!parent->perf_event_ctxp))
+       if (likely(!parent->perf_event_ctxp[ctxn]))
                return 0;
  
        /*
         * If the parent's context is a clone, pin it so it won't get
         * swapped under us.
         */
-       parent_ctx = perf_pin_task_context(parent);
+       parent_ctx = perf_pin_task_context(parent, ctxn);
  
        /*
         * No need to check if parent_ctx != NULL here; since we saw
         * the list, not manipulating it:
         */
        list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
-               ret = inherit_task_group(event, parent, parent_ctx, child,
-                                        &inherited_all);
+               ret = inherit_task_group(event, parent, parent_ctx,
+                                        child, ctxn, &inherited_all);
                if (ret)
                        break;
        }
  
        list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
-               ret = inherit_task_group(event, parent, parent_ctx, child,
-                                        &inherited_all);
+               ret = inherit_task_group(event, parent, parent_ctx,
+                                        child, ctxn, &inherited_all);
                if (ret)
                        break;
        }
  
-       child_ctx = child->perf_event_ctxp;
+       child_ctx = child->perf_event_ctxp[ctxn];
  
        if (child_ctx && inherited_all) {
                /*
        return ret;
  }
  
+ /*
+  * Initialize the perf_event context in task_struct
+  */
+ int perf_event_init_task(struct task_struct *child)
+ {
+       int ctxn, ret;
+       for_each_task_context_nr(ctxn) {
+               ret = perf_event_init_context(child, ctxn);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+ }
  static void __init perf_event_init_all_cpus(void)
  {
+       struct swevent_htable *swhash;
        int cpu;
-       struct perf_cpu_context *cpuctx;
  
        for_each_possible_cpu(cpu) {
-               cpuctx = &per_cpu(perf_cpu_context, cpu);
-               mutex_init(&cpuctx->hlist_mutex);
-               __perf_event_init_context(&cpuctx->ctx, NULL);
+               swhash = &per_cpu(swevent_htable, cpu);
+               mutex_init(&swhash->hlist_mutex);
+               INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
        }
  }
  
  static void __cpuinit perf_event_init_cpu(int cpu)
  {
-       struct perf_cpu_context *cpuctx;
-       cpuctx = &per_cpu(perf_cpu_context, cpu);
-       spin_lock(&perf_resource_lock);
-       cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
-       spin_unlock(&perf_resource_lock);
+       struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
  
-       mutex_lock(&cpuctx->hlist_mutex);
-       if (cpuctx->hlist_refcount > 0) {
+       mutex_lock(&swhash->hlist_mutex);
+       if (swhash->hlist_refcount > 0) {
                struct swevent_hlist *hlist;
  
-               hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
-               WARN_ON_ONCE(!hlist);
-               rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
+               hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
+               WARN_ON(!hlist);
+               rcu_assign_pointer(swhash->swevent_hlist, hlist);
        }
-       mutex_unlock(&cpuctx->hlist_mutex);
+       mutex_unlock(&swhash->hlist_mutex);
  }
  
  #ifdef CONFIG_HOTPLUG_CPU
- static void __perf_event_exit_cpu(void *info)
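+ /*
+  * Take this pmu's per-cpu context off the rotation list; expects to be
+  * called with interrupts disabled while the CPU's events are torn down.
+  */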
+ static void perf_pmu_rotate_stop(struct pmu *pmu)
  {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       struct perf_event_context *ctx = &cpuctx->ctx;
+       struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+       WARN_ON(!irqs_disabled());
+       list_del_init(&cpuctx->rotation_list);
+ }
+ static void __perf_event_exit_context(void *__info)
+ {
+       struct perf_event_context *ctx = __info;
        struct perf_event *event, *tmp;
  
+       perf_pmu_rotate_stop(ctx->pmu);
        list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
                __perf_event_remove_from_context(event);
        list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
                __perf_event_remove_from_context(event);
  }
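+ 
+ /*
+  * For each registered pmu, detach all events from its context on the
+  * CPU that is going offline.
+  */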
+ static void perf_event_exit_cpu_context(int cpu)
+ {
+       struct perf_event_context *ctx;
+       struct pmu *pmu;
+       int idx;
+       idx = srcu_read_lock(&pmus_srcu);
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
+               mutex_lock(&ctx->mutex);
+               smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
+               mutex_unlock(&ctx->mutex);
+       }
+       srcu_read_unlock(&pmus_srcu, idx);
+ }
  static void perf_event_exit_cpu(int cpu)
  {
-       struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
-       struct perf_event_context *ctx = &cpuctx->ctx;
+       struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
  
-       mutex_lock(&cpuctx->hlist_mutex);
-       swevent_hlist_release(cpuctx);
-       mutex_unlock(&cpuctx->hlist_mutex);
+       mutex_lock(&swhash->hlist_mutex);
+       swevent_hlist_release(swhash);
+       mutex_unlock(&swhash->hlist_mutex);
  
-       mutex_lock(&ctx->mutex);
-       smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
-       mutex_unlock(&ctx->mutex);
+       perf_event_exit_cpu_context(cpu);
  }
  #else
  static inline void perf_event_exit_cpu(int cpu) { }
@@@ -5785,118 -6346,13 +6351,13 @@@ perf_cpu_notify(struct notifier_block *
        return NOTIFY_OK;
  }
  
  void __init perf_event_init(void)
  {
        perf_event_init_all_cpus();
-       perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
-                       (void *)(long)smp_processor_id());
-       perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
-                       (void *)(long)smp_processor_id());
-       register_cpu_notifier(&perf_cpu_nb);
- }
- static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
-                                       struct sysdev_class_attribute *attr,
-                                       char *buf)
- {
-       return sprintf(buf, "%d\n", perf_reserved_percpu);
- }
- static ssize_t
- perf_set_reserve_percpu(struct sysdev_class *class,
-                       struct sysdev_class_attribute *attr,
-                       const char *buf,
-                       size_t count)
- {
-       struct perf_cpu_context *cpuctx;
-       unsigned long val;
-       int err, cpu, mpt;
-       err = strict_strtoul(buf, 10, &val);
-       if (err)
-               return err;
-       if (val > perf_max_events)
-               return -EINVAL;
-       spin_lock(&perf_resource_lock);
-       perf_reserved_percpu = val;
-       for_each_online_cpu(cpu) {
-               cpuctx = &per_cpu(perf_cpu_context, cpu);
-               raw_spin_lock_irq(&cpuctx->ctx.lock);
-               mpt = min(perf_max_events - cpuctx->ctx.nr_events,
-                         perf_max_events - perf_reserved_percpu);
-               cpuctx->max_pertask = mpt;
-               raw_spin_unlock_irq(&cpuctx->ctx.lock);
-       }
-       spin_unlock(&perf_resource_lock);
-       return count;
- }
- static ssize_t perf_show_overcommit(struct sysdev_class *class,
-                                   struct sysdev_class_attribute *attr,
-                                   char *buf)
- {
-       return sprintf(buf, "%d\n", perf_overcommit);
- }
- static ssize_t
- perf_set_overcommit(struct sysdev_class *class,
-                   struct sysdev_class_attribute *attr,
-                   const char *buf, size_t count)
- {
-       unsigned long val;
-       int err;
-       err = strict_strtoul(buf, 10, &val);
-       if (err)
-               return err;
-       if (val > 1)
-               return -EINVAL;
-       spin_lock(&perf_resource_lock);
-       perf_overcommit = val;
-       spin_unlock(&perf_resource_lock);
-       return count;
- }
- static SYSDEV_CLASS_ATTR(
-                               reserve_percpu,
-                               0644,
-                               perf_show_reserve_percpu,
-                               perf_set_reserve_percpu
-                       );
- static SYSDEV_CLASS_ATTR(
-                               overcommit,
-                               0644,
-                               perf_show_overcommit,
-                               perf_set_overcommit
-                       );
- static struct attribute *perfclass_attrs[] = {
-       &attr_reserve_percpu.attr,
-       &attr_overcommit.attr,
-       NULL
- };
- static struct attribute_group perfclass_attr_group = {
-       .attrs                  = perfclass_attrs,
-       .name                   = "perf_events",
- };
- static int __init perf_event_sysfs_init(void)
- {
-       return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
-                                 &perfclass_attr_group);
+       init_srcu_struct(&pmus_srcu);
+       perf_pmu_register(&perf_swevent);
+       perf_pmu_register(&perf_cpu_clock);
+       perf_pmu_register(&perf_task_clock);
+       perf_tp_register();
+       perf_cpu_notifier(perf_cpu_notify);
  }
- device_initcall(perf_event_sysfs_init);