Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
Linus Torvalds [Thu, 21 Oct 2010 19:54:49 +0000 (12:54 -0700)]
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (163 commits)
  tracing: Fix compile issue for trace_sched_wakeup.c
  [S390] hardirq: remove pointless header file includes
  [IA64] Move local_softirq_pending() definition
  perf, powerpc: Fix power_pmu_event_init to not use event->ctx
  ftrace: Remove recursion between recordmcount and scripts/mod/empty
  jump_label: Add COND_STMT(), reducer wrappery
  perf: Optimize sw events
  perf: Use jump_labels to optimize the scheduler hooks
  jump_label: Add atomic_t interface
  jump_label: Use more consistent naming
  perf, hw_breakpoint: Fix crash in hw_breakpoint creation
  perf: Find task before event alloc
  perf: Fix task refcount bugs
  perf: Fix group moving
  irq_work: Add generic hardirq context callbacks
  perf_events: Fix transaction recovery in group_sched_in()
  perf_events: Fix bogus AMD64 generic TLB events
  perf_events: Fix bogus context time tracking
  tracing: Remove parent recording in latency tracer graph options
  tracing: Use one prologue for the preempt irqs off tracer function tracers
  ...

Makefile
arch/arm/Kconfig
include/linux/sched.h
init/Kconfig
kernel/Makefile
kernel/perf_event.c
kernel/sched.c
kernel/trace/ring_buffer.c
kernel/watchdog.c
lib/Kconfig.debug

diff --combined Makefile
+++ b/Makefile
@@@ -1,8 -1,8 +1,8 @@@
  VERSION = 2
  PATCHLEVEL = 6
  SUBLEVEL = 36
 -EXTRAVERSION = -rc7
 -NAME = Sheep on Meth
 +EXTRAVERSION =
 +NAME = Flesh-Eating Bats with Fangs
  
  # *DOCUMENTATION*
  # To see a list of typical targets execute "make help"
@@@ -568,6 -568,12 +568,12 @@@ endi
  
  ifdef CONFIG_FUNCTION_TRACER
  KBUILD_CFLAGS += -pg
+ ifdef CONFIG_DYNAMIC_FTRACE
+       ifdef CONFIG_HAVE_C_RECORDMCOUNT
+               BUILD_C_RECORDMCOUNT := y
+               export BUILD_C_RECORDMCOUNT
+       endif
+ endif
  endif
  
  # We trigger additional mismatches with less inlining
@@@ -591,6 -597,11 +597,11 @@@ KBUILD_CFLAGS    += $(call cc-option,-fno-
  # conserve stack if available
  KBUILD_CFLAGS   += $(call cc-option,-fconserve-stack)
  
+ # check for 'asm goto'
+ ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+       KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+ endif
  # Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
  # But warn user when we do so
  warn-assign = \
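
The Makefile hunk above probes the compiler via scripts/gcc-goto.sh and defines
CC_HAVE_ASM_GOTO, the flag the jump_label changes in this merge build on. Below
is a minimal sketch of the 'asm goto' pattern that flag advertises, assuming GCC
4.5 or later; it is an illustration, not the kernel's jump_label code.

    /*
     * A branch whose fast path is a single patchable nop: a patching layer
     * could later rewrite the nop into a jump to do_trace to flip the branch.
     */
    #include <stdbool.h>
    #include <stdio.h>

    static bool static_branch_sketch(void)
    {
    #ifdef CC_HAVE_ASM_GOTO
            asm goto("nop" : : : : do_trace);
            return false;                   /* straight-line default */
    do_trace:
            return true;                    /* reached only once patched */
    #else
            return false;                   /* compiler without asm goto */
    #endif
    }

    int main(void)
    {
            printf("%s path\n", static_branch_sketch() ? "slow" : "fast");
            return 0;
    }

This pattern is what "perf: Use jump_labels to optimize the scheduler hooks"
relies on to make the perf scheduler hooks nearly free when no events exist.
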
diff --combined arch/arm/Kconfig
@@@ -23,6 -23,7 +23,7 @@@ config AR
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_LZO
        select HAVE_KERNEL_LZMA
+       select HAVE_IRQ_WORK
        select HAVE_PERF_EVENTS
        select PERF_USE_VMALLOC
        select HAVE_REGS_AND_STACK_ACCESS_API
@@@ -1101,20 -1102,6 +1102,20 @@@ config ARM_ERRATA_72078
          invalidated are not, resulting in an incoherency in the system page
          tables. The workaround changes the TLB flushing routines to invalidate
          entries regardless of the ASID.
 +
 +config ARM_ERRATA_743622
 +      bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption"
 +      depends on CPU_V7
 +      help
 +        This option enables the workaround for the 743622 Cortex-A9
 +        (r2p0..r2p2) erratum. Under very rare conditions, a faulty
 +        optimisation in the Cortex-A9 Store Buffer may lead to data
 +        corruption. This workaround sets a specific bit in the diagnostic
 +        register of the Cortex-A9 which disables the Store Buffer
 +        optimisation, preventing the defect from occurring. This has no
 +        visible impact on the overall performance or power consumption of the
 +        processor.
 +
  endmenu
  
  source "arch/arm/common/Kconfig"
diff --combined include/linux/sched.h
@@@ -1160,6 -1160,13 +1160,13 @@@ struct sched_rt_entity 
  
  struct rcu_node;
  
+ enum perf_event_task_context {
+       perf_invalid_context = -1,
+       perf_hw_context = 0,
+       perf_sw_context,
+       perf_nr_task_contexts,
+ };
  struct task_struct {
        volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
        void *stack;
        unsigned int policy;
        cpumask_t cpus_allowed;
  
 -#ifdef CONFIG_TREE_PREEMPT_RCU
 +#ifdef CONFIG_PREEMPT_RCU
        int rcu_read_lock_nesting;
        char rcu_read_unlock_special;
 -      struct rcu_node *rcu_blocked_node;
        struct list_head rcu_node_entry;
 +#endif /* #ifdef CONFIG_PREEMPT_RCU */
 +#ifdef CONFIG_TREE_PREEMPT_RCU
 +      struct rcu_node *rcu_blocked_node;
  #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
  
  #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
        struct list_head cpu_timers[3];
  
  /* process credentials */
 -      const struct cred *real_cred;   /* objective and real subjective task
 +      const struct cred __rcu *real_cred; /* objective and real subjective task
                                         * credentials (COW) */
 -      const struct cred *cred;        /* effective (overridable) subjective task
 +      const struct cred __rcu *cred;  /* effective (overridable) subjective task
                                         * credentials (COW) */
        struct mutex cred_guard_mutex;  /* guard against foreign influences on
                                         * credential calculations
  #endif
  #ifdef CONFIG_CGROUPS
        /* Control Group info protected by css_set_lock */
 -      struct css_set *cgroups;
 +      struct css_set __rcu *cgroups;
        /* cg_list protected by css_set_lock and tsk->alloc_lock */
        struct list_head cg_list;
  #endif
        struct futex_pi_state *pi_state_cache;
  #endif
  #ifdef CONFIG_PERF_EVENTS
-       struct perf_event_context *perf_event_ctxp;
+       struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
        struct mutex perf_event_mutex;
        struct list_head perf_event_list;
  #endif
@@@ -1742,7 -1747,7 +1749,7 @@@ extern void thread_group_times(struct t
  #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
  #define used_math() tsk_used_math(current)
  
 -#ifdef CONFIG_TREE_PREEMPT_RCU
 +#ifdef CONFIG_PREEMPT_RCU
  
  #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
  #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
@@@ -1751,9 -1756,7 +1758,9 @@@ static inline void rcu_copy_process(str
  {
        p->rcu_read_lock_nesting = 0;
        p->rcu_read_unlock_special = 0;
 +#ifdef CONFIG_TREE_PREEMPT_RCU
        p->rcu_blocked_node = NULL;
 +#endif
        INIT_LIST_HEAD(&p->rcu_node_entry);
  }
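
The sched.h hunk above turns the single task->perf_event_ctxp pointer into an
array indexed by the new enum perf_event_task_context, giving hardware and
software events separate per-task contexts, with perf_invalid_context (-1)
meaning a PMU has no per-task context at all. A small userspace model of that
lookup; the task/context structs and the helper are illustrative, not kernel
code.

    #include <stdio.h>
    #include <stddef.h>

    enum perf_event_task_context {
            perf_invalid_context = -1,      /* PMU has no per-task context */
            perf_hw_context = 0,
            perf_sw_context,
            perf_nr_task_contexts,
    };

    struct context { const char *name; };

    struct task {
            /* one slot per task-context class, as in the hunk above */
            struct context *ctxp[perf_nr_task_contexts];
    };

    static struct context *task_context(struct task *t, int ctxn)
    {
            if (ctxn < 0)                   /* perf_invalid_context */
                    return NULL;
            return t->ctxp[ctxn];
    }

    int main(void)
    {
            struct context hw = { "hw" }, sw = { "sw" };
            struct task t = { .ctxp = { [perf_hw_context] = &hw,
                                        [perf_sw_context] = &sw } };
            struct context *none = task_context(&t, perf_invalid_context);

            printf("%s\n", task_context(&t, perf_sw_context)->name); /* "sw" */
            printf("%s\n", none ? none->name : "no task context");
            return 0;
    }
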
  
diff --combined init/Kconfig
@@@ -21,6 -21,13 +21,13 @@@ config CONSTRUCTOR
        depends on !UML
        default y
  
+ config HAVE_IRQ_WORK
+       bool
+ config IRQ_WORK
+       bool
+       depends on HAVE_IRQ_WORK
  menu "General setup"
  
  config EXPERIMENTAL
@@@ -340,7 -347,6 +347,7 @@@ choic
  
  config TREE_RCU
        bool "Tree-based hierarchical RCU"
 +      depends on !PREEMPT && SMP
        help
          This option selects the RCU implementation that is
          designed for very large SMP system with hundreds or
          smaller systems.
  
  config TREE_PREEMPT_RCU
 -      bool "Preemptable tree-based hierarchical RCU"
 +      bool "Preemptible tree-based hierarchical RCU"
        depends on PREEMPT
        help
          This option selects the RCU implementation that is
@@@ -366,22 -372,8 +373,22 @@@ config TINY_RC
          is not required.  This option greatly reduces the
          memory footprint of RCU.
  
 +config TINY_PREEMPT_RCU
 +      bool "Preemptible UP-only small-memory-footprint RCU"
 +      depends on !SMP && PREEMPT
 +      help
 +        This option selects the RCU implementation that is designed
 +        for real-time UP systems.  This option greatly reduces the
 +        memory footprint of RCU.
 +
  endchoice
  
 +config PREEMPT_RCU
 +      def_bool ( TREE_PREEMPT_RCU || TINY_PREEMPT_RCU )
 +      help
 +        This option enables preemptible-RCU code that is common between
 +        the TREE_PREEMPT_RCU and TINY_PREEMPT_RCU implementations.
 +
  config RCU_TRACE
        bool "Enable tracing for RCU"
        depends on TREE_RCU || TREE_PREEMPT_RCU
@@@ -402,12 -394,9 +409,12 @@@ config RCU_FANOU
        help
          This option controls the fanout of hierarchical implementations
          of RCU, allowing RCU to work efficiently on machines with
 -        large numbers of CPUs.  This value must be at least the cube
 -        root of NR_CPUS, which allows NR_CPUS up to 32,768 for 32-bit
 -        systems and up to 262,144 for 64-bit systems.
 +        large numbers of CPUs.  This value must be at least the fourth
 +        root of NR_CPUS, which allows NR_CPUS to be insanely large.
 +        The default value of RCU_FANOUT should be used for production
 +        systems, but if you are stress-testing the RCU implementation
 +        itself, small RCU_FANOUT values allow you to test large-system
 +        code paths on small(er) systems.
  
          Select a specific number if testing RCU itself.
          Take the default if unsure.
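
The reworded RCU_FANOUT help above says the value must be at least the fourth
root of NR_CPUS. A worked example of what that bound allows, assuming the
four-level hierarchy the wording implies and the 64-bit default fanout of 64:

    #include <stdio.h>

    int main(void)
    {
            unsigned long fanout = 64;      /* assumed RCU_FANOUT default, 64-bit */
            unsigned long cpus = 1;

            for (int level = 0; level < 4; level++)
                    cpus *= fanout;
            printf("fanout 64, 4 levels: up to %lu CPUs\n", cpus); /* 16777216 */
            return 0;
    }
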
@@@ -1005,6 -994,7 +1012,7 @@@ config PERF_EVENT
        default y if (PROFILING || PERF_COUNTERS)
        depends on HAVE_PERF_EVENTS
        select ANON_INODES
+       select IRQ_WORK
        help
          Enable kernel support for various performance events provided
          by software and hardware.
diff --combined kernel/Makefile
@@@ -10,7 -10,7 +10,7 @@@ obj-y     = sched.o fork.o exec_domain.
            kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
            hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
            notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
-           async.o range.o
+           async.o range.o jump_label.o
  obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o
  obj-y += groups.o
  
@@@ -23,6 -23,7 +23,7 @@@ CFLAGS_REMOVE_rtmutex-debug.o = -p
  CFLAGS_REMOVE_cgroup-debug.o = -pg
  CFLAGS_REMOVE_sched_clock.o = -pg
  CFLAGS_REMOVE_perf_event.o = -pg
+ CFLAGS_REMOVE_irq_work.o = -pg
  endif
  
  obj-$(CONFIG_FREEZER) += freezer.o
@@@ -86,7 -87,6 +87,7 @@@ obj-$(CONFIG_TREE_RCU) += rcutree.
  obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o
  obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
  obj-$(CONFIG_TINY_RCU) += rcutiny.o
 +obj-$(CONFIG_TINY_PREEMPT_RCU) += rcutiny.o
  obj-$(CONFIG_RELAY) += relay.o
  obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
  obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
@@@ -101,6 -101,7 +102,7 @@@ obj-$(CONFIG_TRACING) += trace
  obj-$(CONFIG_X86_DS) += trace/
  obj-$(CONFIG_RING_BUFFER) += trace/
  obj-$(CONFIG_SMP) += sched_cpupri.o
+ obj-$(CONFIG_IRQ_WORK) += irq_work.o
  obj-$(CONFIG_PERF_EVENTS) += perf_event.o
  obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
  obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
diff --combined kernel/perf_event.c
  #include <linux/kernel_stat.h>
  #include <linux/perf_event.h>
  #include <linux/ftrace_event.h>
  
  #include <asm/irq_regs.h>
  
- /*
-  * Each CPU has a list of per CPU events:
-  */
- static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
- int perf_max_events __read_mostly = 1;
- static int perf_reserved_percpu __read_mostly;
- static int perf_overcommit __read_mostly = 1;
- static atomic_t nr_events __read_mostly;
+ atomic_t perf_task_events __read_mostly;
  static atomic_t nr_mmap_events __read_mostly;
  static atomic_t nr_comm_events __read_mostly;
  static atomic_t nr_task_events __read_mostly;
  
+ static LIST_HEAD(pmus);
+ static DEFINE_MUTEX(pmus_lock);
+ static struct srcu_struct pmus_srcu;
  /*
   * perf event paranoia level:
   *  -1 - not paranoid at all
@@@ -67,36 -61,43 +61,43 @@@ int sysctl_perf_event_sample_rate __rea
  
  static atomic64_t perf_event_id;
  
- /*
-  * Lock for (sysadmin-configurable) event reservations:
-  */
- static DEFINE_SPINLOCK(perf_resource_lock);
+ void __weak perf_event_print_debug(void)      { }
  
- /*
-  * Architecture provided APIs - weak aliases:
-  */
- extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
+ extern __weak const char *perf_pmu_name(void)
  {
-       return NULL;
+       return "pmu";
  }
  
- void __weak hw_perf_disable(void)             { barrier(); }
- void __weak hw_perf_enable(void)              { barrier(); }
- void __weak perf_event_print_debug(void)      { }
- static DEFINE_PER_CPU(int, perf_disable_count);
+ void perf_pmu_disable(struct pmu *pmu)
+ {
+       int *count = this_cpu_ptr(pmu->pmu_disable_count);
+       if (!(*count)++)
+               pmu->pmu_disable(pmu);
+ }
  
- void perf_disable(void)
+ void perf_pmu_enable(struct pmu *pmu)
  {
-       if (!__get_cpu_var(perf_disable_count)++)
-               hw_perf_disable();
+       int *count = this_cpu_ptr(pmu->pmu_disable_count);
+       if (!--(*count))
+               pmu->pmu_enable(pmu);
  }
  
- void perf_enable(void)
+ static DEFINE_PER_CPU(struct list_head, rotation_list);
+ /*
+  * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
+  * because they're strictly cpu affine and rotate_start is called with IRQs
+  * disabled, while rotate_context is called from IRQ context.
+  */
+ static void perf_pmu_rotate_start(struct pmu *pmu)
  {
-       if (!--__get_cpu_var(perf_disable_count))
-               hw_perf_enable();
+       struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+       struct list_head *head = &__get_cpu_var(rotation_list);
+       WARN_ON(!irqs_disabled());
+       if (list_empty(&cpuctx->rotation_list))
+               list_add(&cpuctx->rotation_list, head);
  }
  
  static void get_ctx(struct perf_event_context *ctx)
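
The hunk above replaces the global perf_disable()/perf_enable() pair with
per-PMU nesting counters, so only the outermost disable and the matching final
enable reach the hardware callbacks. A self-contained model of that refcounted
pattern; the struct and callbacks are stand-ins, not the kernel's struct pmu,
whose count is per-cpu.

    #include <stdio.h>

    struct pmu {
            int disable_count;
            void (*pmu_disable)(struct pmu *);
            void (*pmu_enable)(struct pmu *);
    };

    static void pmu_disable(struct pmu *pmu)
    {
            if (!pmu->disable_count++)      /* only the outermost caller */
                    pmu->pmu_disable(pmu);
    }

    static void pmu_enable(struct pmu *pmu)
    {
            if (!--pmu->disable_count)      /* only the last matching enable */
                    pmu->pmu_enable(pmu);
    }

    static void hw_off(struct pmu *p) { (void)p; puts("hardware disabled"); }
    static void hw_on(struct pmu *p)  { (void)p; puts("hardware enabled"); }

    int main(void)
    {
            struct pmu p = { 0, hw_off, hw_on };

            pmu_disable(&p);        /* "hardware disabled" */
            pmu_disable(&p);        /* nested: nothing */
            pmu_enable(&p);         /* nested: nothing */
            pmu_enable(&p);         /* "hardware enabled" */
            return 0;
    }
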
@@@ -151,13 -152,13 +152,13 @@@ static u64 primary_event_id(struct perf
   * the context could get moved to another task.
   */
  static struct perf_event_context *
- perf_lock_task_context(struct task_struct *task, unsigned long *flags)
+ perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
  {
        struct perf_event_context *ctx;
  
        rcu_read_lock();
-  retry:
-       ctx = rcu_dereference(task->perf_event_ctxp);
+ retry:
+       ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
        if (ctx) {
                /*
                 * If this context is a clone of another, it might
                 * can't get swapped on us any more.
                 */
                raw_spin_lock_irqsave(&ctx->lock, *flags);
-               if (ctx != rcu_dereference(task->perf_event_ctxp)) {
+               if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
                        raw_spin_unlock_irqrestore(&ctx->lock, *flags);
                        goto retry;
                }
   * can't get swapped to another task.  This also increments its
   * reference count so that the context can't get freed.
   */
- static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
+ static struct perf_event_context *
+ perf_pin_task_context(struct task_struct *task, int ctxn)
  {
        struct perf_event_context *ctx;
        unsigned long flags;
  
-       ctx = perf_lock_task_context(task, &flags);
+       ctx = perf_lock_task_context(task, ctxn, &flags);
        if (ctx) {
                ++ctx->pin_count;
                raw_spin_unlock_irqrestore(&ctx->lock, flags);
@@@ -302,6 -304,8 +304,8 @@@ list_add_event(struct perf_event *event
        }
  
        list_add_rcu(&event->event_entry, &ctx->event_list);
+       if (!ctx->nr_events)
+               perf_pmu_rotate_start(ctx->pmu);
        ctx->nr_events++;
        if (event->attr.inherit_stat)
                ctx->nr_stat++;
@@@ -311,7 -315,12 +315,12 @@@ static void perf_group_attach(struct pe
  {
        struct perf_event *group_leader = event->group_leader;
  
-       WARN_ON_ONCE(event->attach_state & PERF_ATTACH_GROUP);
+       /*
+        * We can have double attach due to group movement in perf_event_open.
+        */
+       if (event->attach_state & PERF_ATTACH_GROUP)
+               return;
        event->attach_state |= PERF_ATTACH_GROUP;
  
        if (group_leader == event)
@@@ -408,8 -417,8 +417,8 @@@ event_filter_match(struct perf_event *e
        return event->cpu == -1 || event->cpu == smp_processor_id();
  }
  
- static void
- event_sched_out(struct perf_event *event,
+ static int
+ __event_sched_out(struct perf_event *event,
                  struct perf_cpu_context *cpuctx,
                  struct perf_event_context *ctx)
  {
        }
  
        if (event->state != PERF_EVENT_STATE_ACTIVE)
-               return;
+               return 0;
  
        event->state = PERF_EVENT_STATE_INACTIVE;
        if (event->pending_disable) {
                event->pending_disable = 0;
                event->state = PERF_EVENT_STATE_OFF;
        }
-       event->tstamp_stopped = ctx->time;
-       event->pmu->disable(event);
+       event->pmu->del(event, 0);
        event->oncpu = -1;
  
        if (!is_software_event(event))
        ctx->nr_active--;
        if (event->attr.exclusive || !cpuctx->active_oncpu)
                cpuctx->exclusive = 0;
+       return 1;
+ }
+ static void
+ event_sched_out(struct perf_event *event,
+                 struct perf_cpu_context *cpuctx,
+                 struct perf_event_context *ctx)
+ {
+       int ret;
+       ret = __event_sched_out(event, cpuctx, ctx);
+       if (ret)
+               event->tstamp_stopped = ctx->time;
  }
  
  static void
@@@ -466,6 -487,12 +487,12 @@@ group_sched_out(struct perf_event *grou
                cpuctx->exclusive = 0;
  }
  
+ static inline struct perf_cpu_context *
+ __get_cpu_context(struct perf_event_context *ctx)
+ {
+       return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
+ }
  /*
   * Cross CPU call to remove a performance event
   *
   */
  static void __perf_event_remove_from_context(void *info)
  {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  
        /*
         * If this is a task context, we need to check whether it is
                return;
  
        raw_spin_lock(&ctx->lock);
-       /*
-        * Protect the list operation against NMI by disabling the
-        * events on a global level.
-        */
-       perf_disable();
  
        event_sched_out(event, cpuctx, ctx);
  
        list_del_event(event, ctx);
  
-       if (!ctx->task) {
-               /*
-                * Allow more per task events with respect to the
-                * reservation:
-                */
-               cpuctx->max_pertask =
-                       min(perf_max_events - ctx->nr_events,
-                           perf_max_events - perf_reserved_percpu);
-       }
-       perf_enable();
        raw_spin_unlock(&ctx->lock);
  }
  
@@@ -572,8 -583,8 +583,8 @@@ retry
  static void __perf_event_disable(void *info)
  {
        struct perf_event *event = info;
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_event_context *ctx = event->ctx;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  
        /*
         * If this is a per-task event, need to check whether this
@@@ -628,7 -639,7 +639,7 @@@ void perf_event_disable(struct perf_eve
                return;
        }
  
-  retry:
+ retry:
        task_oncpu_function_call(task, __perf_event_disable, event);
  
        raw_spin_lock_irq(&ctx->lock);
  }
  
  static int
- event_sched_in(struct perf_event *event,
+ __event_sched_in(struct perf_event *event,
                 struct perf_cpu_context *cpuctx,
                 struct perf_event_context *ctx)
  {
         */
        smp_wmb();
  
-       if (event->pmu->enable(event)) {
+       if (event->pmu->add(event, PERF_EF_START)) {
                event->state = PERF_EVENT_STATE_INACTIVE;
                event->oncpu = -1;
                return -EAGAIN;
        }
  
-       event->tstamp_running += ctx->time - event->tstamp_stopped;
        if (!is_software_event(event))
                cpuctx->active_oncpu++;
        ctx->nr_active++;
        return 0;
  }
  
+ static inline int
+ event_sched_in(struct perf_event *event,
+                struct perf_cpu_context *cpuctx,
+                struct perf_event_context *ctx)
+ {
+       int ret = __event_sched_in(event, cpuctx, ctx);
+       if (ret)
+               return ret;
+       event->tstamp_running += ctx->time - event->tstamp_stopped;
+       return 0;
+ }
+ static void
+ group_commit_event_sched_in(struct perf_event *group_event,
+              struct perf_cpu_context *cpuctx,
+              struct perf_event_context *ctx)
+ {
+       struct perf_event *event;
+       u64 now = ctx->time;
+       group_event->tstamp_running += now - group_event->tstamp_stopped;
+       /*
+        * Schedule in siblings as one group (if any):
+        */
+       list_for_each_entry(event, &group_event->sibling_list, group_entry) {
+               event->tstamp_running += now - event->tstamp_stopped;
+       }
+ }
  static int
  group_sched_in(struct perf_event *group_event,
               struct perf_cpu_context *cpuctx,
               struct perf_event_context *ctx)
  {
        struct perf_event *event, *partial_group = NULL;
-       const struct pmu *pmu = group_event->pmu;
-       bool txn = false;
+       struct pmu *pmu = group_event->pmu;
  
        if (group_event->state == PERF_EVENT_STATE_OFF)
                return 0;
  
-       /* Check if group transaction availabe */
-       if (pmu->start_txn)
-               txn = true;
+       pmu->start_txn(pmu);
  
-       if (txn)
-               pmu->start_txn(pmu);
-       if (event_sched_in(group_event, cpuctx, ctx)) {
-               if (txn)
-                       pmu->cancel_txn(pmu);
+       /*
+        * use __event_sched_in() to delay updating tstamp_running
+        * until the transaction is committed. In case of failure
+        * we will keep an unmodified tstamp_running which is a
+        * requirement to get correct timing information
+        */
+       if (__event_sched_in(group_event, cpuctx, ctx)) {
+               pmu->cancel_txn(pmu);
                return -EAGAIN;
        }
  
         * Schedule in siblings as one group (if any):
         */
        list_for_each_entry(event, &group_event->sibling_list, group_entry) {
-               if (event_sched_in(event, cpuctx, ctx)) {
+               if (__event_sched_in(event, cpuctx, ctx)) {
                        partial_group = event;
                        goto group_error;
                }
        }
  
-       if (!txn || !pmu->commit_txn(pmu))
+       if (!pmu->commit_txn(pmu)) {
+               /* commit tstamp_running */
+               group_commit_event_sched_in(group_event, cpuctx, ctx);
                return 0;
+       }
  group_error:
        /*
         * Groups can be scheduled in as one unit only, so undo any
         * partial group before returning:
+        *
+        * use __event_sched_out() to avoid updating tstamp_stopped
+        * because the event never actually ran
         */
        list_for_each_entry(event, &group_event->sibling_list, group_entry) {
                if (event == partial_group)
                        break;
-               event_sched_out(event, cpuctx, ctx);
+               __event_sched_out(event, cpuctx, ctx);
        }
-       event_sched_out(group_event, cpuctx, ctx);
+       __event_sched_out(group_event, cpuctx, ctx);
  
-       if (txn)
-               pmu->cancel_txn(pmu);
+       pmu->cancel_txn(pmu);
  
        return -EAGAIN;
  }
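
The group_sched_in() rework above makes the PMU transaction unconditional and
delays the tstamp_running update until the transaction commits, so a group that
fails to schedule leaves its timestamps untouched. A compact model of that
all-or-nothing pattern with rollback; the two-counter PMU and the event names
are hypothetical.

    #include <stdio.h>

    #define NR_SLOTS 2                      /* pretend the PMU has two counters */

    static int used_slots, txn_start;

    static void start_txn(void)  { txn_start = used_slots; }
    static void cancel_txn(void) { used_slots = txn_start; }  /* roll back */
    static int  commit_txn(void) { return 0; }                /* 0: group fits */

    static int event_add(const char *name)
    {
            if (used_slots == NR_SLOTS)
                    return -1;              /* would overcommit the PMU */
            used_slots++;
            printf("added %s\n", name);
            return 0;
    }

    static int group_sched_in(const char *events[], int n)
    {
            start_txn();
            for (int i = 0; i < n; i++) {
                    if (event_add(events[i])) {
                            cancel_txn();   /* undo the partial group */
                            return -1;
                    }
            }
            if (commit_txn()) {
                    cancel_txn();
                    return -1;
            }
            /* only now would tstamp_running be updated for the whole group */
            return 0;
    }

    int main(void)
    {
            const char *group[] = { "cycles", "instructions", "branches" };

            if (group_sched_in(group, 3))
                    puts("group rejected as a unit, slots rolled back");
            return 0;
    }
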
@@@ -789,10 -830,10 +830,10 @@@ static void add_event_to_ctx(struct per
   */
  static void __perf_install_in_context(void *info)
  {
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
        struct perf_event *leader = event->group_leader;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
        int err;
  
        /*
        ctx->is_active = 1;
        update_context_time(ctx);
  
-       /*
-        * Protect the list operation against NMI by disabling the
-        * events on a global level. NOP for non NMI based events.
-        */
-       perf_disable();
        add_event_to_ctx(event, ctx);
  
        if (event->cpu != -1 && event->cpu != smp_processor_id())
                }
        }
  
-       if (!err && !ctx->task && cpuctx->max_pertask)
-               cpuctx->max_pertask--;
-  unlock:
-       perf_enable();
+ unlock:
        raw_spin_unlock(&ctx->lock);
  }
  
@@@ -883,6 -913,8 +913,8 @@@ perf_install_in_context(struct perf_eve
  {
        struct task_struct *task = ctx->task;
  
+       event->ctx = ctx;
        if (!task) {
                /*
                 * Per cpu events are installed via an smp call and
@@@ -931,10 -963,12 +963,12 @@@ static void __perf_event_mark_enabled(s
  
        event->state = PERF_EVENT_STATE_INACTIVE;
        event->tstamp_enabled = ctx->time - event->total_time_enabled;
-       list_for_each_entry(sub, &event->sibling_list, group_entry)
-               if (sub->state >= PERF_EVENT_STATE_INACTIVE)
+       list_for_each_entry(sub, &event->sibling_list, group_entry) {
+               if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
                        sub->tstamp_enabled =
                                ctx->time - sub->total_time_enabled;
+               }
+       }
  }
  
  /*
  static void __perf_event_enable(void *info)
  {
        struct perf_event *event = info;
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_event_context *ctx = event->ctx;
        struct perf_event *leader = event->group_leader;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
        int err;
  
        /*
        if (!group_can_go_on(event, cpuctx, 1)) {
                err = -EEXIST;
        } else {
-               perf_disable();
                if (event == leader)
                        err = group_sched_in(event, cpuctx, ctx);
                else
                        err = event_sched_in(event, cpuctx, ctx);
-               perf_enable();
        }
  
        if (err) {
                }
        }
  
-  unlock:
+ unlock:
        raw_spin_unlock(&ctx->lock);
  }
  
@@@ -1041,7 -1073,7 +1073,7 @@@ void perf_event_enable(struct perf_even
        if (event->state == PERF_EVENT_STATE_ERROR)
                event->state = PERF_EVENT_STATE_OFF;
  
-  retry:
+ retry:
        raw_spin_unlock_irq(&ctx->lock);
        task_oncpu_function_call(task, __perf_event_enable, event);
  
        if (event->state == PERF_EVENT_STATE_OFF)
                __perf_event_mark_enabled(event, ctx);
  
-  out:
+ out:
        raw_spin_unlock_irq(&ctx->lock);
  }
  
@@@ -1092,26 -1124,26 +1124,26 @@@ static void ctx_sched_out(struct perf_e
        struct perf_event *event;
  
        raw_spin_lock(&ctx->lock);
+       perf_pmu_disable(ctx->pmu);
        ctx->is_active = 0;
        if (likely(!ctx->nr_events))
                goto out;
        update_context_time(ctx);
  
-       perf_disable();
        if (!ctx->nr_active)
-               goto out_enable;
+               goto out;
  
-       if (event_type & EVENT_PINNED)
+       if (event_type & EVENT_PINNED) {
                list_for_each_entry(event, &ctx->pinned_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
+       }
  
-       if (event_type & EVENT_FLEXIBLE)
+       if (event_type & EVENT_FLEXIBLE) {
                list_for_each_entry(event, &ctx->flexible_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
-  out_enable:
-       perf_enable();
-  out:
+       }
+ out:
+       perf_pmu_enable(ctx->pmu);
        raw_spin_unlock(&ctx->lock);
  }
  
@@@ -1209,34 -1241,25 +1241,25 @@@ static void perf_event_sync_stat(struc
        }
  }
  
- /*
-  * Called from scheduler to remove the events of the current task,
-  * with interrupts disabled.
-  *
-  * We stop each event and update the event value in event->count.
-  *
-  * This does not protect us against NMI, but disable()
-  * sets the disabled bit in the control field of event _before_
-  * accessing the event control register. If a NMI hits, then it will
-  * not restart the event.
-  */
- void perf_event_task_sched_out(struct task_struct *task,
-                                struct task_struct *next)
+ void perf_event_context_sched_out(struct task_struct *task, int ctxn,
+                                 struct task_struct *next)
  {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       struct perf_event_context *ctx = task->perf_event_ctxp;
+       struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
        struct perf_event_context *next_ctx;
        struct perf_event_context *parent;
+       struct perf_cpu_context *cpuctx;
        int do_switch = 1;
  
-       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+       if (likely(!ctx))
+               return;
  
-       if (likely(!ctx || !cpuctx->task_ctx))
+       cpuctx = __get_cpu_context(ctx);
+       if (!cpuctx->task_ctx)
                return;
  
        rcu_read_lock();
        parent = rcu_dereference(ctx->parent_ctx);
-       next_ctx = next->perf_event_ctxp;
+       next_ctx = next->perf_event_ctxp[ctxn];
        if (parent && next_ctx &&
            rcu_dereference(next_ctx->parent_ctx) == parent) {
                /*
                         * XXX do we need a memory barrier of sorts
                         * wrt to rcu_dereference() of perf_event_ctxp
                         */
-                       task->perf_event_ctxp = next_ctx;
-                       next->perf_event_ctxp = ctx;
+                       task->perf_event_ctxp[ctxn] = next_ctx;
+                       next->perf_event_ctxp[ctxn] = ctx;
                        ctx->task = next;
                        next_ctx->task = task;
                        do_switch = 0;
        }
  }
  
+ #define for_each_task_context_nr(ctxn)                                        \
+       for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
+ /*
+  * Called from scheduler to remove the events of the current task,
+  * with interrupts disabled.
+  *
+  * We stop each event and update the event value in event->count.
+  *
+  * This does not protect us against NMI, but disable()
+  * sets the disabled bit in the control field of event _before_
+  * accessing the event control register. If a NMI hits, then it will
+  * not restart the event.
+  */
+ void __perf_event_task_sched_out(struct task_struct *task,
+                                struct task_struct *next)
+ {
+       int ctxn;
+       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+       for_each_task_context_nr(ctxn)
+               perf_event_context_sched_out(task, ctxn, next);
+ }
  static void task_ctx_sched_out(struct perf_event_context *ctx,
                               enum event_type_t event_type)
  {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  
        if (!cpuctx->task_ctx)
                return;
  /*
   * Called with IRQs disabled
   */
- static void __perf_event_task_sched_out(struct perf_event_context *ctx)
- {
-       task_ctx_sched_out(ctx, EVENT_ALL);
- }
- /*
-  * Called with IRQs disabled
-  */
  static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
                              enum event_type_t event_type)
  {
@@@ -1350,9 -1390,10 +1390,10 @@@ ctx_flexible_sched_in(struct perf_event
                if (event->cpu != -1 && event->cpu != smp_processor_id())
                        continue;
  
-               if (group_can_go_on(event, cpuctx, can_add_hw))
+               if (group_can_go_on(event, cpuctx, can_add_hw)) {
                        if (group_sched_in(event, cpuctx, ctx))
                                can_add_hw = 0;
+               }
        }
  }
  
@@@ -1368,8 -1409,6 +1409,6 @@@ ctx_sched_in(struct perf_event_context 
  
        ctx->timestamp = perf_clock();
  
-       perf_disable();
        /*
         * First go through the list and put on any pinned groups
         * in order to give them the best chance of going on.
        if (event_type & EVENT_FLEXIBLE)
                ctx_flexible_sched_in(ctx, cpuctx);
  
-       perf_enable();
-  out:
+ out:
        raw_spin_unlock(&ctx->lock);
  }
  
@@@ -1394,43 -1432,28 +1432,28 @@@ static void cpu_ctx_sched_in(struct per
        ctx_sched_in(ctx, cpuctx, event_type);
  }
  
- static void task_ctx_sched_in(struct task_struct *task,
+ static void task_ctx_sched_in(struct perf_event_context *ctx,
                              enum event_type_t event_type)
  {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       struct perf_event_context *ctx = task->perf_event_ctxp;
+       struct perf_cpu_context *cpuctx;
  
-       if (likely(!ctx))
-               return;
+               cpuctx = __get_cpu_context(ctx);
        if (cpuctx->task_ctx == ctx)
                return;
        ctx_sched_in(ctx, cpuctx, event_type);
        cpuctx->task_ctx = ctx;
  }
- /*
-  * Called from scheduler to add the events of the current task
-  * with interrupts disabled.
-  *
-  * We restore the event value and then enable it.
-  *
-  * This does not protect us against NMI, but enable()
-  * sets the enabled bit in the control field of event _before_
-  * accessing the event control register. If a NMI hits, then it will
-  * keep the event running.
-  */
- void perf_event_task_sched_in(struct task_struct *task)
- {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       struct perf_event_context *ctx = task->perf_event_ctxp;
  
-       if (likely(!ctx))
-               return;
+ void perf_event_context_sched_in(struct perf_event_context *ctx)
+ {
+       struct perf_cpu_context *cpuctx;
  
+       cpuctx = __get_cpu_context(ctx);
        if (cpuctx->task_ctx == ctx)
                return;
  
-       perf_disable();
+       perf_pmu_disable(ctx->pmu);
        /*
         * We want to keep the following priority order:
         * cpu pinned (that don't need to move), task pinned,
  
        cpuctx->task_ctx = ctx;
  
-       perf_enable();
+       /*
+        * Since these rotations are per-cpu, we need to ensure the
+        * cpu-context we got scheduled on is actually rotating.
+        */
+       perf_pmu_rotate_start(ctx->pmu);
+       perf_pmu_enable(ctx->pmu);
+ }
+ /*
+  * Called from scheduler to add the events of the current task
+  * with interrupts disabled.
+  *
+  * We restore the event value and then enable it.
+  *
+  * This does not protect us against NMI, but enable()
+  * sets the enabled bit in the control field of event _before_
+  * accessing the event control register. If a NMI hits, then it will
+  * keep the event running.
+  */
+ void __perf_event_task_sched_in(struct task_struct *task)
+ {
+       struct perf_event_context *ctx;
+       int ctxn;
+       for_each_task_context_nr(ctxn) {
+               ctx = task->perf_event_ctxp[ctxn];
+               if (likely(!ctx))
+                       continue;
+               perf_event_context_sched_in(ctx);
+       }
  }
  
  #define MAX_INTERRUPTS (~0ULL)
@@@ -1524,22 -1577,6 +1577,6 @@@ do {                                   
        return div64_u64(dividend, divisor);
  }
  
- static void perf_event_stop(struct perf_event *event)
- {
-       if (!event->pmu->stop)
-               return event->pmu->disable(event);
-       return event->pmu->stop(event);
- }
- static int perf_event_start(struct perf_event *event)
- {
-       if (!event->pmu->start)
-               return event->pmu->enable(event);
-       return event->pmu->start(event);
- }
  static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
  {
        struct hw_perf_event *hwc = &event->hw;
        hwc->sample_period = sample_period;
  
        if (local64_read(&hwc->period_left) > 8*sample_period) {
-               perf_disable();
-               perf_event_stop(event);
+               event->pmu->stop(event, PERF_EF_UPDATE);
                local64_set(&hwc->period_left, 0);
-               perf_event_start(event);
-               perf_enable();
+               event->pmu->start(event, PERF_EF_RELOAD);
        }
  }
  
- static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
+ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
  {
        struct perf_event *event;
        struct hw_perf_event *hwc;
                 */
                if (interrupts == MAX_INTERRUPTS) {
                        perf_log_throttle(event, 1);
-                       perf_disable();
-                       event->pmu->unthrottle(event);
-                       perf_enable();
+                       event->pmu->start(event, 0);
                }
  
                if (!event->attr.freq || !event->attr.sample_freq)
                        continue;
  
-               perf_disable();
                event->pmu->read(event);
                now = local64_read(&event->count);
                delta = now - hwc->freq_count_stamp;
                hwc->freq_count_stamp = now;
  
                if (delta > 0)
-                       perf_adjust_period(event, TICK_NSEC, delta);
-               perf_enable();
+                       perf_adjust_period(event, period, delta);
        }
        raw_spin_unlock(&ctx->lock);
  }
@@@ -1626,32 -1657,38 +1657,38 @@@ static void rotate_ctx(struct perf_even
        raw_spin_unlock(&ctx->lock);
  }
  
- void perf_event_task_tick(struct task_struct *curr)
+ /*
+  * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
+  * because they're strictly cpu affine and rotate_start is called with IRQs
+  * disabled, while rotate_context is called from IRQ context.
+  */
+ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
  {
-       struct perf_cpu_context *cpuctx;
-       struct perf_event_context *ctx;
-       int rotate = 0;
-       if (!atomic_read(&nr_events))
-               return;
+       u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
+       struct perf_event_context *ctx = NULL;
+       int rotate = 0, remove = 1;
  
-       cpuctx = &__get_cpu_var(perf_cpu_context);
-       if (cpuctx->ctx.nr_events &&
-           cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
-               rotate = 1;
+       if (cpuctx->ctx.nr_events) {
+               remove = 0;
+               if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
+                       rotate = 1;
+       }
  
-       ctx = curr->perf_event_ctxp;
-       if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
-               rotate = 1;
+       ctx = cpuctx->task_ctx;
+       if (ctx && ctx->nr_events) {
+               remove = 0;
+               if (ctx->nr_events != ctx->nr_active)
+                       rotate = 1;
+       }
  
-       perf_ctx_adjust_freq(&cpuctx->ctx);
+       perf_pmu_disable(cpuctx->ctx.pmu);
+       perf_ctx_adjust_freq(&cpuctx->ctx, interval);
        if (ctx)
-               perf_ctx_adjust_freq(ctx);
+               perf_ctx_adjust_freq(ctx, interval);
  
        if (!rotate)
-               return;
+               goto done;
  
-       perf_disable();
        cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
        if (ctx)
                task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
  
        cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
        if (ctx)
-               task_ctx_sched_in(curr, EVENT_FLEXIBLE);
-       perf_enable();
+               task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
+ done:
+       if (remove)
+               list_del_init(&cpuctx->rotation_list);
+       perf_pmu_enable(cpuctx->ctx.pmu);
+ }
+ void perf_event_task_tick(void)
+ {
+       struct list_head *head = &__get_cpu_var(rotation_list);
+       struct perf_cpu_context *cpuctx, *tmp;
+       WARN_ON(!irqs_disabled());
+       list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
+               if (cpuctx->jiffies_interval == 1 ||
+                               !(jiffies % cpuctx->jiffies_interval))
+                       perf_rotate_context(cpuctx);
+       }
  }
  
  static int event_enable_on_exec(struct perf_event *event,
   * Enable all of a task's events that have been marked enable-on-exec.
   * This expects task == current.
   */
- static void perf_event_enable_on_exec(struct task_struct *task)
+ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
  {
-       struct perf_event_context *ctx;
        struct perf_event *event;
        unsigned long flags;
        int enabled = 0;
        int ret;
  
        local_irq_save(flags);
        if (!ctx || !ctx->nr_events)
                goto out;
  
-       __perf_event_task_sched_out(ctx);
+       task_ctx_sched_out(ctx, EVENT_ALL);
  
        raw_spin_lock(&ctx->lock);
  
  
        raw_spin_unlock(&ctx->lock);
  
-       perf_event_task_sched_in(task);
-  out:
+       perf_event_context_sched_in(ctx);
+ out:
        local_irq_restore(flags);
  }
  
   */
  static void __perf_event_read(void *info)
  {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  
        /*
         * If this is a task context, we need to check whether it is
@@@ -1773,7 -1827,13 +1827,13 @@@ static u64 perf_event_read(struct perf_
                unsigned long flags;
  
                raw_spin_lock_irqsave(&ctx->lock, flags);
-               update_context_time(ctx);
+               /*
+                * may read while context is not active
+                * (e.g., thread is blocked), in that case
+                * we cannot update context time
+                */
+               if (ctx->is_active)
+                       update_context_time(ctx);
                update_event_times(event);
                raw_spin_unlock_irqrestore(&ctx->lock, flags);
        }
  }
  
  /*
-  * Initialize the perf_event context in a task_struct:
+  * Callchain support
   */
+ struct callchain_cpus_entries {
+       struct rcu_head                 rcu_head;
+       struct perf_callchain_entry     *cpu_entries[0];
+ };
+ static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
+ static atomic_t nr_callchain_events;
+ static DEFINE_MUTEX(callchain_mutex);
+ struct callchain_cpus_entries *callchain_cpus_entries;
+ __weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+                                 struct pt_regs *regs)
+ {
+ }
+ __weak void perf_callchain_user(struct perf_callchain_entry *entry,
+                               struct pt_regs *regs)
+ {
+ }
+ static void release_callchain_buffers_rcu(struct rcu_head *head)
+ {
+       struct callchain_cpus_entries *entries;
+       int cpu;
+       entries = container_of(head, struct callchain_cpus_entries, rcu_head);
+       for_each_possible_cpu(cpu)
+               kfree(entries->cpu_entries[cpu]);
+       kfree(entries);
+ }
+ static void release_callchain_buffers(void)
+ {
+       struct callchain_cpus_entries *entries;
+       entries = callchain_cpus_entries;
+       rcu_assign_pointer(callchain_cpus_entries, NULL);
+       call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
+ }
+ static int alloc_callchain_buffers(void)
+ {
+       int cpu;
+       int size;
+       struct callchain_cpus_entries *entries;
+       /*
+        * We can't use the percpu allocation API for data that can be
+        * accessed from NMI. Use a temporary manual per cpu allocation
+        * until that gets sorted out.
+        */
+       size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
+               num_possible_cpus();
+       entries = kzalloc(size, GFP_KERNEL);
+       if (!entries)
+               return -ENOMEM;
+       size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
+       for_each_possible_cpu(cpu) {
+               entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
+                                                        cpu_to_node(cpu));
+               if (!entries->cpu_entries[cpu])
+                       goto fail;
+       }
+       rcu_assign_pointer(callchain_cpus_entries, entries);
+       return 0;
+ fail:
+       for_each_possible_cpu(cpu)
+               kfree(entries->cpu_entries[cpu]);
+       kfree(entries);
+       return -ENOMEM;
+ }
+ static int get_callchain_buffers(void)
+ {
+       int err = 0;
+       int count;
+       mutex_lock(&callchain_mutex);
+       count = atomic_inc_return(&nr_callchain_events);
+       if (WARN_ON_ONCE(count < 1)) {
+               err = -EINVAL;
+               goto exit;
+       }
+       if (count > 1) {
+               /* If the allocation failed, give up */
+               if (!callchain_cpus_entries)
+                       err = -ENOMEM;
+               goto exit;
+       }
+       err = alloc_callchain_buffers();
+       if (err)
+               release_callchain_buffers();
+ exit:
+       mutex_unlock(&callchain_mutex);
+       return err;
+ }
+ static void put_callchain_buffers(void)
+ {
+       if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
+               release_callchain_buffers();
+               mutex_unlock(&callchain_mutex);
+       }
+ }
+ static int get_recursion_context(int *recursion)
+ {
+       int rctx;
+       if (in_nmi())
+               rctx = 3;
+       else if (in_irq())
+               rctx = 2;
+       else if (in_softirq())
+               rctx = 1;
+       else
+               rctx = 0;
+       if (recursion[rctx])
+               return -1;
+       recursion[rctx]++;
+       barrier();
+       return rctx;
+ }
+ static inline void put_recursion_context(int *recursion, int rctx)
+ {
+       barrier();
+       recursion[rctx]--;
+ }
+ static struct perf_callchain_entry *get_callchain_entry(int *rctx)
+ {
+       int cpu;
+       struct callchain_cpus_entries *entries;
+       *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
+       if (*rctx == -1)
+               return NULL;
+       entries = rcu_dereference(callchain_cpus_entries);
+       if (!entries)
+               return NULL;
+       cpu = smp_processor_id();
+       return &entries->cpu_entries[cpu][*rctx];
+ }
  static void
- __perf_event_init_context(struct perf_event_context *ctx,
-                           struct task_struct *task)
+ put_callchain_entry(int rctx)
+ {
+       put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
+ }
+ static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+ {
+       int rctx;
+       struct perf_callchain_entry *entry;
+       entry = get_callchain_entry(&rctx);
+       if (rctx == -1)
+               return NULL;
+       if (!entry)
+               goto exit_put;
+       entry->nr = 0;
+       if (!user_mode(regs)) {
+               perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+               perf_callchain_kernel(entry, regs);
+               if (current->mm)
+                       regs = task_pt_regs(current);
+               else
+                       regs = NULL;
+       }
+       if (regs) {
+               perf_callchain_store(entry, PERF_CONTEXT_USER);
+               perf_callchain_user(entry, regs);
+       }
+ exit_put:
+       put_callchain_entry(rctx);
+       return entry;
+ }
+ /*
+  * Initialize the perf_event context in a task_struct:
+  */
+ static void __perf_event_init_context(struct perf_event_context *ctx)
  {
        raw_spin_lock_init(&ctx->lock);
        mutex_init(&ctx->mutex);
        INIT_LIST_HEAD(&ctx->flexible_groups);
        INIT_LIST_HEAD(&ctx->event_list);
        atomic_set(&ctx->refcount, 1);
-       ctx->task = task;
  }
  
- static struct perf_event_context *find_get_context(pid_t pid, int cpu)
+ static struct perf_event_context *
+ alloc_perf_context(struct pmu *pmu, struct task_struct *task)
  {
        struct perf_event_context *ctx;
-       struct perf_cpu_context *cpuctx;
-       struct task_struct *task;
-       unsigned long flags;
-       int err;
-       if (pid == -1 && cpu != -1) {
-               /* Must be root to operate on a CPU event: */
-               if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
-                       return ERR_PTR(-EACCES);
  
-               if (cpu < 0 || cpu >= nr_cpumask_bits)
-                       return ERR_PTR(-EINVAL);
+       ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
+       if (!ctx)
+               return NULL;
  
-               /*
-                * We could be clever and allow to attach a event to an
-                * offline CPU and activate it when the CPU comes up, but
-                * that's for later.
-                */
-               if (!cpu_online(cpu))
-                       return ERR_PTR(-ENODEV);
+       __perf_event_init_context(ctx);
+       if (task) {
+               ctx->task = task;
+               get_task_struct(task);
+       }
+       ctx->pmu = pmu;
  
-               cpuctx = &per_cpu(perf_cpu_context, cpu);
-               ctx = &cpuctx->ctx;
-               get_ctx(ctx);
+       return ctx;
+ }
  
-               return ctx;
-       }
+ static struct task_struct *
+ find_lively_task_by_vpid(pid_t vpid)
+ {
+       struct task_struct *task;
+       int err;
  
        rcu_read_lock();
-       if (!pid)
+       if (!vpid)
                task = current;
        else
-               task = find_task_by_vpid(pid);
+               task = find_task_by_vpid(vpid);
        if (task)
                get_task_struct(task);
        rcu_read_unlock();
        if (!ptrace_may_access(task, PTRACE_MODE_READ))
                goto errout;
  
-  retry:
-       ctx = perf_lock_task_context(task, &flags);
+       return task;
+ errout:
+       put_task_struct(task);
+       return ERR_PTR(err);
+ }
+ static struct perf_event_context *
+ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
+ {
+       struct perf_event_context *ctx;
+       struct perf_cpu_context *cpuctx;
+       unsigned long flags;
+       int ctxn, err;
+       if (!task && cpu != -1) {
+               /* Must be root to operate on a CPU event: */
+               if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+                       return ERR_PTR(-EACCES);
+               if (cpu < 0 || cpu >= nr_cpumask_bits)
+                       return ERR_PTR(-EINVAL);
+               /*
+                * We could be clever and allow to attach a event to an
+                * offline CPU and activate it when the CPU comes up, but
+                * that's for later.
+                */
+               if (!cpu_online(cpu))
+                       return ERR_PTR(-ENODEV);
+               cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+               ctx = &cpuctx->ctx;
+               get_ctx(ctx);
+               return ctx;
+       }
+       err = -EINVAL;
+       ctxn = pmu->task_ctx_nr;
+       if (ctxn < 0)
+               goto errout;
+ retry:
+       ctx = perf_lock_task_context(task, ctxn, &flags);
        if (ctx) {
                unclone_ctx(ctx);
                raw_spin_unlock_irqrestore(&ctx->lock, flags);
        }
  
        if (!ctx) {
-               ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
+               ctx = alloc_perf_context(pmu, task);
                err = -ENOMEM;
                if (!ctx)
                        goto errout;
-               __perf_event_init_context(ctx, task);
                get_ctx(ctx);
-               if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
+               if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) {
                        /*
                         * We raced with some other task; use
                         * the context they set.
                         */
+                       put_task_struct(task);
                        kfree(ctx);
                        goto retry;
                }
-               get_task_struct(task);
        }
  
-       put_task_struct(task);
        return ctx;
  
-  errout:
-       put_task_struct(task);
+ errout:
        return ERR_PTR(err);
  }
  
@@@ -1898,21 -2201,23 +2201,23 @@@ static void free_event_rcu(struct rcu_h
        kfree(event);
  }
  
- static void perf_pending_sync(struct perf_event *event);
  static void perf_buffer_put(struct perf_buffer *buffer);
  
  static void free_event(struct perf_event *event)
  {
-       perf_pending_sync(event);
+       irq_work_sync(&event->pending);
  
        if (!event->parent) {
-               atomic_dec(&nr_events);
+               if (event->attach_state & PERF_ATTACH_TASK)
+                       jump_label_dec(&perf_task_events);
                if (event->attr.mmap || event->attr.mmap_data)
                        atomic_dec(&nr_mmap_events);
                if (event->attr.comm)
                        atomic_dec(&nr_comm_events);
                if (event->attr.task)
                        atomic_dec(&nr_task_events);
+               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+                       put_callchain_buffers();
        }
  
        if (event->buffer) {
        if (event->destroy)
                event->destroy(event);
  
-       put_ctx(event->ctx);
+       if (event->ctx)
+               put_ctx(event->ctx);
        call_rcu(&event->rcu_head, free_event_rcu);
  }
  
@@@ -2202,13 -2509,15 +2509,13 @@@ static void perf_event_for_each(struct 
  static int perf_event_period(struct perf_event *event, u64 __user *arg)
  {
        struct perf_event_context *ctx = event->ctx;
 -      unsigned long size;
        int ret = 0;
        u64 value;
  
        if (!event->attr.sample_period)
                return -EINVAL;
  
 -      size = copy_from_user(&value, arg, sizeof(value));
 -      if (size != sizeof(value))
 +      if (copy_from_user(&value, arg, sizeof(value)))
                return -EFAULT;
  
        if (!value)
@@@ -2342,6 -2651,9 +2649,9 @@@ int perf_event_task_disable(void
  
  static int perf_event_index(struct perf_event *event)
  {
+       if (event->hw.state & PERF_HES_STOPPED)
+               return 0;
        if (event->state != PERF_EVENT_STATE_ACTIVE)
                return 0;
  
@@@ -2845,16 -3157,7 +3155,7 @@@ void perf_event_wakeup(struct perf_even
        }
  }
  
- /*
-  * Pending wakeups
-  *
-  * Handle the case where we need to wakeup up from NMI (or rq->lock) context.
-  *
-  * The NMI bit means we cannot possibly take locks. Therefore, maintain a
-  * single linked list and use cmpxchg() to add entries lockless.
-  */
- static void perf_pending_event(struct perf_pending_entry *entry)
+ static void perf_pending_event(struct irq_work *entry)
  {
        struct perf_event *event = container_of(entry,
                        struct perf_event, pending);
        }
  }
  
- #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
- static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
-       PENDING_TAIL,
- };
- static void perf_pending_queue(struct perf_pending_entry *entry,
-                              void (*func)(struct perf_pending_entry *))
- {
-       struct perf_pending_entry **head;
-       if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
-               return;
-       entry->func = func;
-       head = &get_cpu_var(perf_pending_head);
-       do {
-               entry->next = *head;
-       } while (cmpxchg(head, entry->next, entry) != entry->next);
-       set_perf_event_pending();
-       put_cpu_var(perf_pending_head);
- }
- static int __perf_pending_run(void)
- {
-       struct perf_pending_entry *list;
-       int nr = 0;
-       list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
-       while (list != PENDING_TAIL) {
-               void (*func)(struct perf_pending_entry *);
-               struct perf_pending_entry *entry = list;
-               list = list->next;
-               func = entry->func;
-               entry->next = NULL;
-               /*
-                * Ensure we observe the unqueue before we issue the wakeup,
-                * so that we won't be waiting forever.
-                * -- see perf_not_pending().
-                */
-               smp_wmb();
-               func(entry);
-               nr++;
-       }
-       return nr;
- }
- static inline int perf_not_pending(struct perf_event *event)
- {
-       /*
-        * If we flush on whatever cpu we run, there is a chance we don't
-        * need to wait.
-        */
-       get_cpu();
-       __perf_pending_run();
-       put_cpu();
-       /*
-        * Ensure we see the proper queue state before going to sleep
-        * so that we do not miss the wakeup. -- see perf_pending_handle()
-        */
-       smp_rmb();
-       return event->pending.next == NULL;
- }
- static void perf_pending_sync(struct perf_event *event)
- {
-       wait_event(event->waitq, perf_not_pending(event));
- }
- void perf_event_do_pending(void)
- {
-       __perf_pending_run();
- }
- /*
-  * Callchain support -- arch specific
-  */
- __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
- {
-       return NULL;
- }
  /*
   * We assume there is only KVM supporting the callbacks.
   * Later on, we might change it to a list if there is
@@@ -3012,8 -3222,7 +3220,7 @@@ static void perf_output_wakeup(struct p
  
        if (handle->nmi) {
                handle->event->pending_wakeup = 1;
-               perf_pending_queue(&handle->event->pending,
-                                  perf_pending_event);
+               irq_work_queue(&handle->event->pending);
        } else
                perf_event_wakeup(handle->event);
  }
@@@ -3069,7 -3278,7 +3276,7 @@@ again
        if (handle->wakeup != local_read(&buffer->wakeup))
                perf_output_wakeup(handle);
  
-  out:
+ out:
        preempt_enable();
  }
  
@@@ -3457,14 -3666,20 +3664,20 @@@ static void perf_event_output(struct pe
        struct perf_output_handle handle;
        struct perf_event_header header;
  
+       /* protect the callchain buffers */
+       rcu_read_lock();
        perf_prepare_sample(&header, data, event, regs);
  
        if (perf_output_begin(&handle, event, header.size, nmi, 1))
-               return;
+               goto exit;
  
        perf_output_sample(&handle, &header, data, event);
  
        perf_output_end(&handle);
+ exit:
+       rcu_read_unlock();
  }
  
  /*
@@@ -3578,16 -3793,27 +3791,27 @@@ static void perf_event_task_ctx(struct 
  static void perf_event_task_event(struct perf_task_event *task_event)
  {
        struct perf_cpu_context *cpuctx;
-       struct perf_event_context *ctx = task_event->task_ctx;
+       struct perf_event_context *ctx;
+       struct pmu *pmu;
+       int ctxn;
  
        rcu_read_lock();
-       cpuctx = &get_cpu_var(perf_cpu_context);
-       perf_event_task_ctx(&cpuctx->ctx, task_event);
-       if (!ctx)
-               ctx = rcu_dereference(current->perf_event_ctxp);
-       if (ctx)
-               perf_event_task_ctx(ctx, task_event);
-       put_cpu_var(perf_cpu_context);
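+       /* every pmu now carries its own per-cpu context, so walk them all */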
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               perf_event_task_ctx(&cpuctx->ctx, task_event);
+               ctx = task_event->task_ctx;
+               if (!ctx) {
+                       ctxn = pmu->task_ctx_nr;
+                       if (ctxn < 0)
+                               goto next;
+                       ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+               }
+               if (ctx)
+                       perf_event_task_ctx(ctx, task_event);
+ next:
+               put_cpu_ptr(pmu->pmu_cpu_context);
+       }
        rcu_read_unlock();
  }
  
@@@ -3692,8 -3918,10 +3916,10 @@@ static void perf_event_comm_event(struc
  {
        struct perf_cpu_context *cpuctx;
        struct perf_event_context *ctx;
-       unsigned int size;
        char comm[TASK_COMM_LEN];
+       unsigned int size;
+       struct pmu *pmu;
+       int ctxn;
  
        memset(comm, 0, sizeof(comm));
        strlcpy(comm, comm_event->task->comm, sizeof(comm));
        comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
  
        rcu_read_lock();
-       cpuctx = &get_cpu_var(perf_cpu_context);
-       perf_event_comm_ctx(&cpuctx->ctx, comm_event);
-       ctx = rcu_dereference(current->perf_event_ctxp);
-       if (ctx)
-               perf_event_comm_ctx(ctx, comm_event);
-       put_cpu_var(perf_cpu_context);
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               perf_event_comm_ctx(&cpuctx->ctx, comm_event);
+               ctxn = pmu->task_ctx_nr;
+               if (ctxn < 0)
+                       goto next;
+               ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+               if (ctx)
+                       perf_event_comm_ctx(ctx, comm_event);
+ next:
+               put_cpu_ptr(pmu->pmu_cpu_context);
+       }
        rcu_read_unlock();
  }
  
  void perf_event_comm(struct task_struct *task)
  {
        struct perf_comm_event comm_event;
+       struct perf_event_context *ctx;
+       int ctxn;
+       for_each_task_context_nr(ctxn) {
+               ctx = task->perf_event_ctxp[ctxn];
+               if (!ctx)
+                       continue;
  
-       if (task->perf_event_ctxp)
-               perf_event_enable_on_exec(task);
+               perf_event_enable_on_exec(ctx);
+       }
  
        if (!atomic_read(&nr_comm_events))
                return;
@@@ -3821,6 -4064,8 +4062,8 @@@ static void perf_event_mmap_event(struc
        char tmp[16];
        char *buf = NULL;
        const char *name;
+       struct pmu *pmu;
+       int ctxn;
  
        memset(tmp, 0, sizeof(tmp));
  
@@@ -3873,12 -4118,23 +4116,23 @@@ got_name
        mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
  
        rcu_read_lock();
-       cpuctx = &get_cpu_var(perf_cpu_context);
-       perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, vma->vm_flags & VM_EXEC);
-       ctx = rcu_dereference(current->perf_event_ctxp);
-       if (ctx)
-               perf_event_mmap_ctx(ctx, mmap_event, vma->vm_flags & VM_EXEC);
-       put_cpu_var(perf_cpu_context);
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
+                                       vma->vm_flags & VM_EXEC);
+               ctxn = pmu->task_ctx_nr;
+               if (ctxn < 0)
+                       goto next;
+               ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+               if (ctx) {
+                       perf_event_mmap_ctx(ctx, mmap_event,
+                                       vma->vm_flags & VM_EXEC);
+               }
+ next:
+               put_cpu_ptr(pmu->pmu_cpu_context);
+       }
        rcu_read_unlock();
  
        kfree(buf);
@@@ -3960,8 -4216,6 +4214,6 @@@ static int __perf_event_overflow(struc
        struct hw_perf_event *hwc = &event->hw;
        int ret = 0;
  
-       throttle = (throttle && event->pmu->unthrottle != NULL);
        if (!throttle) {
                hwc->interrupts++;
        } else {
                event->pending_kill = POLL_HUP;
                if (nmi) {
                        event->pending_disable = 1;
-                       perf_pending_queue(&event->pending,
-                                          perf_pending_event);
+                       irq_work_queue(&event->pending);
                } else
                        perf_event_disable(event);
        }
@@@ -4029,6 -4282,17 +4280,17 @@@ int perf_event_overflow(struct perf_eve
   * Generic software event infrastructure
   */
  
+ struct swevent_htable {
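+       /* RCU-protected hash of the software events active on this cpu */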
+       struct swevent_hlist            *swevent_hlist;
+       struct mutex                    hlist_mutex;
+       int                             hlist_refcount;
+       /* Recursion avoidance in each context */
+       int                             recursion[PERF_NR_CONTEXTS];
+ };
+ static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
  /*
   * We directly increment event->count and keep a second value in
   * event->hw.period_left to count intervals. This period event
@@@ -4086,7 -4350,7 +4348,7 @@@ static void perf_swevent_overflow(struc
        }
  }
  
- static void perf_swevent_add(struct perf_event *event, u64 nr,
+ static void perf_swevent_event(struct perf_event *event, u64 nr,
                               int nmi, struct perf_sample_data *data,
                               struct pt_regs *regs)
  {
  static int perf_exclude_event(struct perf_event *event,
                              struct pt_regs *regs)
  {
+       if (event->hw.state & PERF_HES_STOPPED)
+               return 0;
        if (regs) {
                if (event->attr.exclude_user && user_mode(regs))
                        return 1;
@@@ -4158,11 -4425,11 +4423,11 @@@ __find_swevent_head(struct swevent_hlis
  
  /* For the read side: events when they trigger */
  static inline struct hlist_head *
- find_swevent_head_rcu(struct perf_cpu_context *ctx, u64 type, u32 event_id)
+ find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
  {
        struct swevent_hlist *hlist;
  
-       hlist = rcu_dereference(ctx->swevent_hlist);
+       hlist = rcu_dereference(swhash->swevent_hlist);
        if (!hlist)
                return NULL;
  
  
  /* For the event head insertion and removal in the hlist */
  static inline struct hlist_head *
- find_swevent_head(struct perf_cpu_context *ctx, struct perf_event *event)
+ find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
  {
        struct swevent_hlist *hlist;
        u32 event_id = event->attr.config;
         * and release. Which makes the protected version suitable here.
         * The context lock guarantees that.
         */
-       hlist = rcu_dereference_protected(ctx->swevent_hlist,
+       hlist = rcu_dereference_protected(swhash->swevent_hlist,
                                          lockdep_is_held(&event->ctx->lock));
        if (!hlist)
                return NULL;
@@@ -4195,23 -4462,19 +4460,19 @@@ static void do_perf_sw_event(enum perf_
                                    struct perf_sample_data *data,
                                    struct pt_regs *regs)
  {
-       struct perf_cpu_context *cpuctx;
+       struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
        struct perf_event *event;
        struct hlist_node *node;
        struct hlist_head *head;
  
-       cpuctx = &__get_cpu_var(perf_cpu_context);
        rcu_read_lock();
-       head = find_swevent_head_rcu(cpuctx, type, event_id);
+       head = find_swevent_head_rcu(swhash, type, event_id);
        if (!head)
                goto end;
  
        hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
                if (perf_swevent_match(event, type, event_id, data, regs))
-                       perf_swevent_add(event, nr, nmi, data, regs);
+                       perf_swevent_event(event, nr, nmi, data, regs);
        }
  end:
        rcu_read_unlock();
  
  int perf_swevent_get_recursion_context(void)
  {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       int rctx;
-       if (in_nmi())
-               rctx = 3;
-       else if (in_irq())
-               rctx = 2;
-       else if (in_softirq())
-               rctx = 1;
-       else
-               rctx = 0;
-       if (cpuctx->recursion[rctx])
-               return -1;
+       struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
  
-       cpuctx->recursion[rctx]++;
-       barrier();
-       return rctx;
+       return get_recursion_context(swhash->recursion);
  }
  EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
  
  void inline perf_swevent_put_recursion_context(int rctx)
  {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       barrier();
-       cpuctx->recursion[rctx]--;
+       struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+       put_recursion_context(swhash->recursion, rctx);
  }
  
  void __perf_sw_event(u32 event_id, u64 nr, int nmi,
@@@ -4271,20 -4518,20 +4516,20 @@@ static void perf_swevent_read(struct pe
  {
  }
  
- static int perf_swevent_enable(struct perf_event *event)
+ static int perf_swevent_add(struct perf_event *event, int flags)
  {
+       struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
        struct hw_perf_event *hwc = &event->hw;
-       struct perf_cpu_context *cpuctx;
        struct hlist_head *head;
  
-       cpuctx = &__get_cpu_var(perf_cpu_context);
        if (hwc->sample_period) {
                hwc->last_period = hwc->sample_period;
                perf_swevent_set_period(event);
        }
  
-       head = find_swevent_head(cpuctx, event);
+       hwc->state = !(flags & PERF_EF_START);
+       head = find_swevent_head(swhash, event);
        if (WARN_ON_ONCE(!head))
                return -EINVAL;
  
        return 0;
  }
  
- static void perf_swevent_disable(struct perf_event *event)
+ static void perf_swevent_del(struct perf_event *event, int flags)
  {
        hlist_del_rcu(&event->hlist_entry);
  }
  
- static void perf_swevent_void(struct perf_event *event)
- {
- }
- static int perf_swevent_int(struct perf_event *event)
- {
-       return 0;
- }
- static const struct pmu perf_ops_generic = {
-       .enable         = perf_swevent_enable,
-       .disable        = perf_swevent_disable,
-       .start          = perf_swevent_int,
-       .stop           = perf_swevent_void,
-       .read           = perf_swevent_read,
-       .unthrottle     = perf_swevent_void, /* hwc->interrupts already reset */
- };
- /*
-  * hrtimer based swevent callback
-  */
- static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
+ static void perf_swevent_start(struct perf_event *event, int flags)
  {
-       enum hrtimer_restart ret = HRTIMER_RESTART;
-       struct perf_sample_data data;
-       struct pt_regs *regs;
-       struct perf_event *event;
-       u64 period;
-       event = container_of(hrtimer, struct perf_event, hw.hrtimer);
-       event->pmu->read(event);
-       perf_sample_data_init(&data, 0);
-       data.period = event->hw.last_period;
-       regs = get_irq_regs();
-       if (regs && !perf_exclude_event(event, regs)) {
-               if (!(event->attr.exclude_idle && current->pid == 0))
-                       if (perf_event_overflow(event, 0, &data, regs))
-                               ret = HRTIMER_NORESTART;
-       }
-       period = max_t(u64, 10000, event->hw.sample_period);
-       hrtimer_forward_now(hrtimer, ns_to_ktime(period));
-       return ret;
+       event->hw.state = 0;
  }
  
- static void perf_swevent_start_hrtimer(struct perf_event *event)
+ static void perf_swevent_stop(struct perf_event *event, int flags)
  {
-       struct hw_perf_event *hwc = &event->hw;
-       hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-       hwc->hrtimer.function = perf_swevent_hrtimer;
-       if (hwc->sample_period) {
-               u64 period;
-               if (hwc->remaining) {
-                       if (hwc->remaining < 0)
-                               period = 10000;
-                       else
-                               period = hwc->remaining;
-                       hwc->remaining = 0;
-               } else {
-                       period = max_t(u64, 10000, hwc->sample_period);
-               }
-               __hrtimer_start_range_ns(&hwc->hrtimer,
-                               ns_to_ktime(period), 0,
-                               HRTIMER_MODE_REL, 0);
-       }
- }
- static void perf_swevent_cancel_hrtimer(struct perf_event *event)
- {
-       struct hw_perf_event *hwc = &event->hw;
-       if (hwc->sample_period) {
-               ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
-               hwc->remaining = ktime_to_ns(remaining);
-               hrtimer_cancel(&hwc->hrtimer);
-       }
- }
- /*
-  * Software event: cpu wall time clock
-  */
- static void cpu_clock_perf_event_update(struct perf_event *event)
- {
-       int cpu = raw_smp_processor_id();
-       s64 prev;
-       u64 now;
-       now = cpu_clock(cpu);
-       prev = local64_xchg(&event->hw.prev_count, now);
-       local64_add(now - prev, &event->count);
- }
- static int cpu_clock_perf_event_enable(struct perf_event *event)
- {
-       struct hw_perf_event *hwc = &event->hw;
-       int cpu = raw_smp_processor_id();
-       local64_set(&hwc->prev_count, cpu_clock(cpu));
-       perf_swevent_start_hrtimer(event);
-       return 0;
- }
- static void cpu_clock_perf_event_disable(struct perf_event *event)
- {
-       perf_swevent_cancel_hrtimer(event);
-       cpu_clock_perf_event_update(event);
- }
- static void cpu_clock_perf_event_read(struct perf_event *event)
- {
-       cpu_clock_perf_event_update(event);
- }
- static const struct pmu perf_ops_cpu_clock = {
-       .enable         = cpu_clock_perf_event_enable,
-       .disable        = cpu_clock_perf_event_disable,
-       .read           = cpu_clock_perf_event_read,
- };
- /*
-  * Software event: task time clock
-  */
- static void task_clock_perf_event_update(struct perf_event *event, u64 now)
- {
-       u64 prev;
-       s64 delta;
-       prev = local64_xchg(&event->hw.prev_count, now);
-       delta = now - prev;
-       local64_add(delta, &event->count);
- }
- static int task_clock_perf_event_enable(struct perf_event *event)
- {
-       struct hw_perf_event *hwc = &event->hw;
-       u64 now;
-       now = event->ctx->time;
-       local64_set(&hwc->prev_count, now);
-       perf_swevent_start_hrtimer(event);
-       return 0;
- }
- static void task_clock_perf_event_disable(struct perf_event *event)
- {
-       perf_swevent_cancel_hrtimer(event);
-       task_clock_perf_event_update(event, event->ctx->time);
- }
- static void task_clock_perf_event_read(struct perf_event *event)
- {
-       u64 time;
-       if (!in_nmi()) {
-               update_context_time(event->ctx);
-               time = event->ctx->time;
-       } else {
-               u64 now = perf_clock();
-               u64 delta = now - event->ctx->timestamp;
-               time = event->ctx->time + delta;
-       }
-       task_clock_perf_event_update(event, time);
+       event->hw.state = PERF_HES_STOPPED;
  }
  
- static const struct pmu perf_ops_task_clock = {
-       .enable         = task_clock_perf_event_enable,
-       .disable        = task_clock_perf_event_disable,
-       .read           = task_clock_perf_event_read,
- };
  /* Deref the hlist from the update side */
  static inline struct swevent_hlist *
- swevent_hlist_deref(struct perf_cpu_context *cpuctx)
+ swevent_hlist_deref(struct swevent_htable *swhash)
  {
-       return rcu_dereference_protected(cpuctx->swevent_hlist,
-                                        lockdep_is_held(&cpuctx->hlist_mutex));
+       return rcu_dereference_protected(swhash->swevent_hlist,
+                                        lockdep_is_held(&swhash->hlist_mutex));
  }
  
  static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
        kfree(hlist);
  }
  
- static void swevent_hlist_release(struct perf_cpu_context *cpuctx)
+ static void swevent_hlist_release(struct swevent_htable *swhash)
  {
-       struct swevent_hlist *hlist = swevent_hlist_deref(cpuctx);
+       struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
  
        if (!hlist)
                return;
  
-       rcu_assign_pointer(cpuctx->swevent_hlist, NULL);
+       rcu_assign_pointer(swhash->swevent_hlist, NULL);
        call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
  }
  
  static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
  {
-       struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+       struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
  
-       mutex_lock(&cpuctx->hlist_mutex);
+       mutex_lock(&swhash->hlist_mutex);
  
-       if (!--cpuctx->hlist_refcount)
-               swevent_hlist_release(cpuctx);
+       if (!--swhash->hlist_refcount)
+               swevent_hlist_release(swhash);
  
-       mutex_unlock(&cpuctx->hlist_mutex);
+       mutex_unlock(&swhash->hlist_mutex);
  }
  
  static void swevent_hlist_put(struct perf_event *event)
  
  static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
  {
-       struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+       struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
        int err = 0;
  
-       mutex_lock(&cpuctx->hlist_mutex);
+       mutex_lock(&swhash->hlist_mutex);
  
-       if (!swevent_hlist_deref(cpuctx) && cpu_online(cpu)) {
+       if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
                struct swevent_hlist *hlist;
  
                hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
                        err = -ENOMEM;
                        goto exit;
                }
-               rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
+               rcu_assign_pointer(swhash->swevent_hlist, hlist);
        }
-       cpuctx->hlist_refcount++;
-  exit:
-       mutex_unlock(&cpuctx->hlist_mutex);
+       swhash->hlist_refcount++;
+ exit:
+       mutex_unlock(&swhash->hlist_mutex);
  
        return err;
  }
@@@ -4578,7 -4650,7 +4648,7 @@@ static int swevent_hlist_get(struct per
        put_online_cpus();
  
        return 0;
-  fail:
+ fail:
        for_each_possible_cpu(cpu) {
                if (cpu == failed_cpu)
                        break;
        return err;
  }
  
- #ifdef CONFIG_EVENT_TRACING
+ atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
+ static void sw_perf_event_destroy(struct perf_event *event)
+ {
+       u64 event_id = event->attr.config;
+       WARN_ON(event->parent);
+       jump_label_dec(&perf_swevent_enabled[event_id]);
+       swevent_hlist_put(event);
+ }
+ static int perf_swevent_init(struct perf_event *event)
+ {
+       int event_id = event->attr.config;
+       if (event->attr.type != PERF_TYPE_SOFTWARE)
+               return -ENOENT;
+       switch (event_id) {
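+       /* cpu-clock and task-clock are served by their own hrtimer-based pmus below */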
+       case PERF_COUNT_SW_CPU_CLOCK:
+       case PERF_COUNT_SW_TASK_CLOCK:
+               return -ENOENT;
  
- static const struct pmu perf_ops_tracepoint = {
-       .enable         = perf_trace_enable,
-       .disable        = perf_trace_disable,
-       .start          = perf_swevent_int,
-       .stop           = perf_swevent_void,
+       default:
+               break;
+       }
+       if (event_id > PERF_COUNT_SW_MAX)
+               return -ENOENT;
+       if (!event->parent) {
+               int err;
+               err = swevent_hlist_get(event);
+               if (err)
+                       return err;
+               jump_label_inc(&perf_swevent_enabled[event_id]);
+               event->destroy = sw_perf_event_destroy;
+       }
+       return 0;
+ }
+ static struct pmu perf_swevent = {
+       .task_ctx_nr    = perf_sw_context,
+       .event_init     = perf_swevent_init,
+       .add            = perf_swevent_add,
+       .del            = perf_swevent_del,
+       .start          = perf_swevent_start,
+       .stop           = perf_swevent_stop,
        .read           = perf_swevent_read,
-       .unthrottle     = perf_swevent_void,
  };
  
+ #ifdef CONFIG_EVENT_TRACING
  static int perf_tp_filter_match(struct perf_event *event,
                                struct perf_sample_data *data)
  {
@@@ -4643,7 -4762,7 +4760,7 @@@ void perf_tp_event(u64 addr, u64 count
  
        hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
                if (perf_tp_event_match(event, &data, regs))
-                       perf_swevent_add(event, count, 1, &data, regs);
+                       perf_swevent_event(event, count, 1, &data, regs);
        }
  
        perf_swevent_put_recursion_context(rctx);
@@@ -4655,10 -4774,13 +4772,13 @@@ static void tp_perf_event_destroy(struc
        perf_trace_destroy(event);
  }
  
- static const struct pmu *tp_perf_event_init(struct perf_event *event)
+ static int perf_tp_event_init(struct perf_event *event)
  {
        int err;
  
+       if (event->attr.type != PERF_TYPE_TRACEPOINT)
+               return -ENOENT;
        /*
         * Raw tracepoint data is a severe data leak, only allow root to
         * have these.
        if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
                        perf_paranoid_tracepoint_raw() &&
                        !capable(CAP_SYS_ADMIN))
-               return ERR_PTR(-EPERM);
+               return -EPERM;
  
        err = perf_trace_init(event);
        if (err)
-               return NULL;
+               return err;
  
        event->destroy = tp_perf_event_destroy;
  
-       return &perf_ops_tracepoint;
+       return 0;
+ }
+ static struct pmu perf_tracepoint = {
+       .task_ctx_nr    = perf_sw_context,
+       .event_init     = perf_tp_event_init,
+       .add            = perf_trace_add,
+       .del            = perf_trace_del,
+       .start          = perf_swevent_start,
+       .stop           = perf_swevent_stop,
+       .read           = perf_swevent_read,
+ };
+ static inline void perf_tp_register(void)
+ {
+       perf_pmu_register(&perf_tracepoint);
  }
  
  static int perf_event_set_filter(struct perf_event *event, void __user *arg)
@@@ -4702,9 -4840,8 +4838,8 @@@ static void perf_event_free_filter(stru
  
  #else
  
- static const struct pmu *tp_perf_event_init(struct perf_event *event)
+ static inline void perf_tp_register(void)
  {
-       return NULL;
  }
  
  static int perf_event_set_filter(struct perf_event *event, void __user *arg)
@@@ -4719,105 -4856,389 +4854,389 @@@ static void perf_event_free_filter(stru
  #endif /* CONFIG_EVENT_TRACING */
  
  #ifdef CONFIG_HAVE_HW_BREAKPOINT
- static void bp_perf_event_destroy(struct perf_event *event)
+ void perf_bp_event(struct perf_event *bp, void *data)
  {
-       release_bp_slot(event);
+       struct perf_sample_data sample;
+       struct pt_regs *regs = data;
+       perf_sample_data_init(&sample, bp->attr.bp_addr);
+       if (!bp->hw.state && !perf_exclude_event(bp, regs))
+               perf_swevent_event(bp, 1, 1, &sample, regs);
  }
+ #endif
  
- static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+ /*
+  * hrtimer based swevent callback
+  */
+ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
  {
-       int err;
+       enum hrtimer_restart ret = HRTIMER_RESTART;
+       struct perf_sample_data data;
+       struct pt_regs *regs;
+       struct perf_event *event;
+       u64 period;
  
-       err = register_perf_hw_breakpoint(bp);
-       if (err)
-               return ERR_PTR(err);
+       event = container_of(hrtimer, struct perf_event, hw.hrtimer);
+       event->pmu->read(event);
+       perf_sample_data_init(&data, 0);
+       data.period = event->hw.last_period;
+       regs = get_irq_regs();
+       if (regs && !perf_exclude_event(event, regs)) {
+               if (!(event->attr.exclude_idle && current->pid == 0))
+                       if (perf_event_overflow(event, 0, &data, regs))
+                               ret = HRTIMER_NORESTART;
+       }
+       period = max_t(u64, 10000, event->hw.sample_period);
+       hrtimer_forward_now(hrtimer, ns_to_ktime(period));
  
-       bp->destroy = bp_perf_event_destroy;
+       return ret;
+ }
  
-       return &perf_ops_bp;
+ static void perf_swevent_start_hrtimer(struct perf_event *event)
+ {
+       struct hw_perf_event *hwc = &event->hw;
+       hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       hwc->hrtimer.function = perf_swevent_hrtimer;
+       if (hwc->sample_period) {
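+               /* period_left holds whatever time was left over when the event was last stopped */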
+               s64 period = local64_read(&hwc->period_left);
+               if (period) {
+                       if (period < 0)
+                               period = 10000;
+                       local64_set(&hwc->period_left, 0);
+               } else {
+                       period = max_t(u64, 10000, hwc->sample_period);
+               }
+               __hrtimer_start_range_ns(&hwc->hrtimer,
+                               ns_to_ktime(period), 0,
+                               HRTIMER_MODE_REL_PINNED, 0);
+       }
  }
  
- void perf_bp_event(struct perf_event *bp, void *data)
+ static void perf_swevent_cancel_hrtimer(struct perf_event *event)
  {
-       struct perf_sample_data sample;
-       struct pt_regs *regs = data;
+       struct hw_perf_event *hwc = &event->hw;
  
-       perf_sample_data_init(&sample, bp->attr.bp_addr);
+       if (hwc->sample_period) {
+               ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
+               local64_set(&hwc->period_left, ktime_to_ns(remaining));
  
-       if (!perf_exclude_event(bp, regs))
-               perf_swevent_add(bp, 1, 1, &sample, regs);
+               hrtimer_cancel(&hwc->hrtimer);
+       }
  }
- #else
- static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+ /*
+  * Software event: cpu wall time clock
+  */
+ static void cpu_clock_event_update(struct perf_event *event)
  {
-       return NULL;
+       s64 prev;
+       u64 now;
+       now = local_clock();
+       prev = local64_xchg(&event->hw.prev_count, now);
+       local64_add(now - prev, &event->count);
  }
  
- void perf_bp_event(struct perf_event *bp, void *regs)
+ static void cpu_clock_event_start(struct perf_event *event, int flags)
  {
+       local64_set(&event->hw.prev_count, local_clock());
+       perf_swevent_start_hrtimer(event);
  }
- #endif
  
- atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
+ static void cpu_clock_event_stop(struct perf_event *event, int flags)
+ {
+       perf_swevent_cancel_hrtimer(event);
+       cpu_clock_event_update(event);
+ }
  
- static void sw_perf_event_destroy(struct perf_event *event)
+ static int cpu_clock_event_add(struct perf_event *event, int flags)
  {
-       u64 event_id = event->attr.config;
+       if (flags & PERF_EF_START)
+               cpu_clock_event_start(event, flags);
  
-       WARN_ON(event->parent);
+       return 0;
+ }
  
-       atomic_dec(&perf_swevent_enabled[event_id]);
-       swevent_hlist_put(event);
+ static void cpu_clock_event_del(struct perf_event *event, int flags)
+ {
+       cpu_clock_event_stop(event, flags);
  }
  
- static const struct pmu *sw_perf_event_init(struct perf_event *event)
+ static void cpu_clock_event_read(struct perf_event *event)
  {
-       const struct pmu *pmu = NULL;
-       u64 event_id = event->attr.config;
+       cpu_clock_event_update(event);
+ }
+ static int cpu_clock_event_init(struct perf_event *event)
+ {
+       if (event->attr.type != PERF_TYPE_SOFTWARE)
+               return -ENOENT;
+       if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
+               return -ENOENT;
+       return 0;
+ }
  
+ static struct pmu perf_cpu_clock = {
+       .task_ctx_nr    = perf_sw_context,
+       .event_init     = cpu_clock_event_init,
+       .add            = cpu_clock_event_add,
+       .del            = cpu_clock_event_del,
+       .start          = cpu_clock_event_start,
+       .stop           = cpu_clock_event_stop,
+       .read           = cpu_clock_event_read,
+ };
+ /*
+  * Software event: task time clock
+  */
+ static void task_clock_event_update(struct perf_event *event, u64 now)
+ {
+       u64 prev;
+       s64 delta;
+       prev = local64_xchg(&event->hw.prev_count, now);
+       delta = now - prev;
+       local64_add(delta, &event->count);
+ }
+ static void task_clock_event_start(struct perf_event *event, int flags)
+ {
+       local64_set(&event->hw.prev_count, event->ctx->time);
+       perf_swevent_start_hrtimer(event);
+ }
+ static void task_clock_event_stop(struct perf_event *event, int flags)
+ {
+       perf_swevent_cancel_hrtimer(event);
+       task_clock_event_update(event, event->ctx->time);
+ }
+ static int task_clock_event_add(struct perf_event *event, int flags)
+ {
+       if (flags & PERF_EF_START)
+               task_clock_event_start(event, flags);
+       return 0;
+ }
+ static void task_clock_event_del(struct perf_event *event, int flags)
+ {
+       task_clock_event_stop(event, PERF_EF_UPDATE);
+ }
+ static void task_clock_event_read(struct perf_event *event)
+ {
+       u64 time;
+       if (!in_nmi()) {
+               update_context_time(event->ctx);
+               time = event->ctx->time;
+       } else {
+               u64 now = perf_clock();
+               u64 delta = now - event->ctx->timestamp;
+               time = event->ctx->time + delta;
+       }
+       task_clock_event_update(event, time);
+ }
+ static int task_clock_event_init(struct perf_event *event)
+ {
+       if (event->attr.type != PERF_TYPE_SOFTWARE)
+               return -ENOENT;
+       if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
+               return -ENOENT;
+       return 0;
+ }
+ static struct pmu perf_task_clock = {
+       .task_ctx_nr    = perf_sw_context,
+       .event_init     = task_clock_event_init,
+       .add            = task_clock_event_add,
+       .del            = task_clock_event_del,
+       .start          = task_clock_event_start,
+       .stop           = task_clock_event_stop,
+       .read           = task_clock_event_read,
+ };
+ static void perf_pmu_nop_void(struct pmu *pmu)
+ {
+ }
+ static int perf_pmu_nop_int(struct pmu *pmu)
+ {
+       return 0;
+ }
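+ /* default transaction helpers: bracket the group with pmu_disable()/pmu_enable() */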
+ static void perf_pmu_start_txn(struct pmu *pmu)
+ {
+       perf_pmu_disable(pmu);
+ }
+ static int perf_pmu_commit_txn(struct pmu *pmu)
+ {
+       perf_pmu_enable(pmu);
+       return 0;
+ }
+ static void perf_pmu_cancel_txn(struct pmu *pmu)
+ {
+       perf_pmu_enable(pmu);
+ }
+ /*
+  * Ensures all contexts with the same task_ctx_nr have the same
+  * pmu_cpu_context too.
+  */
+ static void *find_pmu_context(int ctxn)
+ {
+       struct pmu *pmu;
+       if (ctxn < 0)
+               return NULL;
+       list_for_each_entry(pmu, &pmus, entry) {
+               if (pmu->task_ctx_nr == ctxn)
+                       return pmu->pmu_cpu_context;
+       }
+       return NULL;
+ }
+ static void free_pmu_context(void * __percpu cpu_context)
+ {
+       struct pmu *pmu;
+       mutex_lock(&pmus_lock);
        /*
-        * Software events (currently) can't in general distinguish
-        * between user, kernel and hypervisor events.
-        * However, context switches and cpu migrations are considered
-        * to be kernel events, and page faults are never hypervisor
-        * events.
+        * Like a real lame refcount.
         */
-       switch (event_id) {
-       case PERF_COUNT_SW_CPU_CLOCK:
-               pmu = &perf_ops_cpu_clock;
+       list_for_each_entry(pmu, &pmus, entry) {
+               if (pmu->pmu_cpu_context == cpu_context)
+                       goto out;
+       }
  
-               break;
-       case PERF_COUNT_SW_TASK_CLOCK:
-               /*
-                * If the user instantiates this as a per-cpu event,
-                * use the cpu_clock event instead.
-                */
-               if (event->ctx->task)
-                       pmu = &perf_ops_task_clock;
-               else
-                       pmu = &perf_ops_cpu_clock;
+       free_percpu(cpu_context);
+ out:
+       mutex_unlock(&pmus_lock);
+ }
  
-               break;
-       case PERF_COUNT_SW_PAGE_FAULTS:
-       case PERF_COUNT_SW_PAGE_FAULTS_MIN:
-       case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
-       case PERF_COUNT_SW_CONTEXT_SWITCHES:
-       case PERF_COUNT_SW_CPU_MIGRATIONS:
-       case PERF_COUNT_SW_ALIGNMENT_FAULTS:
-       case PERF_COUNT_SW_EMULATION_FAULTS:
-               if (!event->parent) {
-                       int err;
-                       err = swevent_hlist_get(event);
-                       if (err)
-                               return ERR_PTR(err);
+ int perf_pmu_register(struct pmu *pmu)
+ {
+       int cpu, ret;
+       mutex_lock(&pmus_lock);
+       ret = -ENOMEM;
+       pmu->pmu_disable_count = alloc_percpu(int);
+       if (!pmu->pmu_disable_count)
+               goto unlock;
  
-                       atomic_inc(&perf_swevent_enabled[event_id]);
-                       event->destroy = sw_perf_event_destroy;
+       pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
+       if (pmu->pmu_cpu_context)
+               goto got_cpu_context;
+       pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
+       if (!pmu->pmu_cpu_context)
+               goto free_pdc;
+       for_each_possible_cpu(cpu) {
+               struct perf_cpu_context *cpuctx;
+               cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+               __perf_event_init_context(&cpuctx->ctx);
+               cpuctx->ctx.type = cpu_context;
+               cpuctx->ctx.pmu = pmu;
+               cpuctx->jiffies_interval = 1;
+               INIT_LIST_HEAD(&cpuctx->rotation_list);
+       }
+ got_cpu_context:
+       if (!pmu->start_txn) {
+               if (pmu->pmu_enable) {
+                       /*
+                        * If we have pmu_enable/pmu_disable calls, install
+                        * transaction stubs that use that to try and batch
+                        * hardware accesses.
+                        */
+                       pmu->start_txn  = perf_pmu_start_txn;
+                       pmu->commit_txn = perf_pmu_commit_txn;
+                       pmu->cancel_txn = perf_pmu_cancel_txn;
+               } else {
+                       pmu->start_txn  = perf_pmu_nop_void;
+                       pmu->commit_txn = perf_pmu_nop_int;
+                       pmu->cancel_txn = perf_pmu_nop_void;
+               }
+       }
+       if (!pmu->pmu_enable) {
+               pmu->pmu_enable  = perf_pmu_nop_void;
+               pmu->pmu_disable = perf_pmu_nop_void;
+       }
+       list_add_rcu(&pmu->entry, &pmus);
+       ret = 0;
+ unlock:
+       mutex_unlock(&pmus_lock);
+       return ret;
+ free_pdc:
+       free_percpu(pmu->pmu_disable_count);
+       goto unlock;
+ }
+ void perf_pmu_unregister(struct pmu *pmu)
+ {
+       mutex_lock(&pmus_lock);
+       list_del_rcu(&pmu->entry);
+       mutex_unlock(&pmus_lock);
+       /*
+        * We dereference the pmu list under both SRCU and regular RCU, so
+        * synchronize against both of those.
+        */
+       synchronize_srcu(&pmus_srcu);
+       synchronize_rcu();
+       free_percpu(pmu->pmu_disable_count);
+       free_pmu_context(pmu->pmu_cpu_context);
+ }
+ struct pmu *perf_init_event(struct perf_event *event)
+ {
+       struct pmu *pmu = NULL;
+       int idx;
+       idx = srcu_read_lock(&pmus_srcu);
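+       /* let each registered pmu try to claim the event; -ENOENT means "not mine" */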
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               int ret = pmu->event_init(event);
+               if (!ret)
+                       goto unlock;
+               if (ret != -ENOENT) {
+                       pmu = ERR_PTR(ret);
+                       goto unlock;
                }
-               pmu = &perf_ops_generic;
-               break;
        }
+       pmu = ERR_PTR(-ENOENT);
+ unlock:
+       srcu_read_unlock(&pmus_srcu, idx);
  
        return pmu;
  }
   * Allocate and initialize an event structure
   */
  static struct perf_event *
- perf_event_alloc(struct perf_event_attr *attr,
-                  int cpu,
-                  struct perf_event_context *ctx,
-                  struct perf_event *group_leader,
-                  struct perf_event *parent_event,
-                  perf_overflow_handler_t overflow_handler,
-                  gfp_t gfpflags)
- {
-       const struct pmu *pmu;
+ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+                struct task_struct *task,
+                struct perf_event *group_leader,
+                struct perf_event *parent_event,
+                perf_overflow_handler_t overflow_handler)
+ {
+       struct pmu *pmu;
        struct perf_event *event;
        struct hw_perf_event *hwc;
        long err;
  
-       event = kzalloc(sizeof(*event), gfpflags);
+       event = kzalloc(sizeof(*event), GFP_KERNEL);
        if (!event)
                return ERR_PTR(-ENOMEM);
  
        INIT_LIST_HEAD(&event->event_entry);
        INIT_LIST_HEAD(&event->sibling_list);
        init_waitqueue_head(&event->waitq);
+       init_irq_work(&event->pending, perf_pending_event);
  
        mutex_init(&event->mmap_mutex);
  
        event->attr             = *attr;
        event->group_leader     = group_leader;
        event->pmu              = NULL;
-       event->ctx              = ctx;
        event->oncpu            = -1;
  
        event->parent           = parent_event;
  
        event->state            = PERF_EVENT_STATE_INACTIVE;
  
+       if (task) {
+               event->attach_state = PERF_ATTACH_TASK;
+ #ifdef CONFIG_HAVE_HW_BREAKPOINT
+               /*
+                * hw_breakpoint is a bit difficult here..
+                */
+               if (attr->type == PERF_TYPE_BREAKPOINT)
+                       event->hw.bp_target = task;
+ #endif
+       }
        if (!overflow_handler && parent_event)
                overflow_handler = parent_event->overflow_handler;
        
        if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
                goto done;
  
-       switch (attr->type) {
-       case PERF_TYPE_RAW:
-       case PERF_TYPE_HARDWARE:
-       case PERF_TYPE_HW_CACHE:
-               pmu = hw_perf_event_init(event);
-               break;
-       case PERF_TYPE_SOFTWARE:
-               pmu = sw_perf_event_init(event);
-               break;
-       case PERF_TYPE_TRACEPOINT:
-               pmu = tp_perf_event_init(event);
-               break;
+       pmu = perf_init_event(event);
  
-       case PERF_TYPE_BREAKPOINT:
-               pmu = bp_perf_event_init(event);
-               break;
-       default:
-               break;
-       }
  done:
        err = 0;
        if (!pmu)
        event->pmu = pmu;
  
        if (!event->parent) {
-               atomic_inc(&nr_events);
+               if (event->attach_state & PERF_ATTACH_TASK)
+                       jump_label_inc(&perf_task_events);
                if (event->attr.mmap || event->attr.mmap_data)
                        atomic_inc(&nr_mmap_events);
                if (event->attr.comm)
                        atomic_inc(&nr_comm_events);
                if (event->attr.task)
                        atomic_inc(&nr_task_events);
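+               /* callchain sampling needs the shared per-cpu callchain buffers */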
+               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
+                       err = get_callchain_buffers();
+                       if (err) {
+                               free_event(event);
+                               return ERR_PTR(err);
+                       }
+               }
        }
  
        return event;
@@@ -5092,12 -5509,16 +5507,16 @@@ SYSCALL_DEFINE5(perf_event_open
                struct perf_event_attr __user *, attr_uptr,
                pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
  {
-       struct perf_event *event, *group_leader = NULL, *output_event = NULL;
+       struct perf_event *group_leader = NULL, *output_event = NULL;
+       struct perf_event *event, *sibling;
        struct perf_event_attr attr;
        struct perf_event_context *ctx;
        struct file *event_file = NULL;
        struct file *group_file = NULL;
+       struct task_struct *task = NULL;
+       struct pmu *pmu;
        int event_fd;
+       int move_group = 0;
        int fput_needed = 0;
        int err;
  
        if (event_fd < 0)
                return event_fd;
  
-       /*
-        * Get the target context (task or percpu):
-        */
-       ctx = find_get_context(pid, cpu);
-       if (IS_ERR(ctx)) {
-               err = PTR_ERR(ctx);
-               goto err_fd;
-       }
        if (group_fd != -1) {
                group_leader = perf_fget_light(group_fd, &fput_needed);
                if (IS_ERR(group_leader)) {
                        err = PTR_ERR(group_leader);
-                       goto err_put_context;
+                       goto err_fd;
                }
                group_file = group_leader->filp;
                if (flags & PERF_FLAG_FD_OUTPUT)
                        group_leader = NULL;
        }
  
+       if (pid != -1) {
+               task = find_lively_task_by_vpid(pid);
+               if (IS_ERR(task)) {
+                       err = PTR_ERR(task);
+                       goto err_group_fd;
+               }
+       }
+       event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL);
+       if (IS_ERR(event)) {
+               err = PTR_ERR(event);
+               goto err_task;
+       }
+       /*
+        * Special case software events and allow them to be part of
+        * any hardware group.
+        */
+       pmu = event->pmu;
+       if (group_leader &&
+           (is_software_event(event) != is_software_event(group_leader))) {
+               if (is_software_event(event)) {
+                       /*
+                        * If event and group_leader are not both a software
+                        * event, and event is, then group leader is not.
+                        *
+                        * Allow the addition of software events to !software
+                        * groups, this is safe because software events never
+                        * fail to schedule.
+                        */
+                       pmu = group_leader->pmu;
+               } else if (is_software_event(group_leader) &&
+                          (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
+                       /*
+                        * In case the group is a pure software group, and we
+                        * try to add a hardware event, move the whole group to
+                        * the hardware context.
+                        */
+                       move_group = 1;
+               }
+       }
+       /*
+        * Get the target context (task or percpu):
+        */
+       ctx = find_get_context(pmu, task, cpu);
+       if (IS_ERR(ctx)) {
+               err = PTR_ERR(ctx);
+               goto err_alloc;
+       }
        /*
         * Look up the group leader (we will attach this event to it):
         */
                 * becoming part of another group-sibling):
                 */
                if (group_leader->group_leader != group_leader)
-                       goto err_put_context;
+                       goto err_context;
                /*
                 * Do not allow to attach to a group in a different
                 * task or CPU context:
                 */
-               if (group_leader->ctx != ctx)
-                       goto err_put_context;
+               if (move_group) {
+                       if (group_leader->ctx->type != ctx->type)
+                               goto err_context;
+               } else {
+                       if (group_leader->ctx != ctx)
+                               goto err_context;
+               }
                /*
                 * Only a group leader can be exclusive or pinned
                 */
                if (attr.exclusive || attr.pinned)
-                       goto err_put_context;
-       }
-       event = perf_event_alloc(&attr, cpu, ctx, group_leader,
-                                    NULL, NULL, GFP_KERNEL);
-       if (IS_ERR(event)) {
-               err = PTR_ERR(event);
-               goto err_put_context;
+                       goto err_context;
        }
  
        if (output_event) {
                err = perf_event_set_output(event, output_event);
                if (err)
-                       goto err_free_put_context;
+                       goto err_context;
        }
  
        event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
        if (IS_ERR(event_file)) {
                err = PTR_ERR(event_file);
-               goto err_free_put_context;
+               goto err_context;
+       }
+       if (move_group) {
+               struct perf_event_context *gctx = group_leader->ctx;
+               mutex_lock(&gctx->mutex);
+               perf_event_remove_from_context(group_leader);
+               list_for_each_entry(sibling, &group_leader->sibling_list,
+                                   group_entry) {
+                       perf_event_remove_from_context(sibling);
+                       put_ctx(gctx);
+               }
+               mutex_unlock(&gctx->mutex);
+               put_ctx(gctx);
        }
  
        event->filp = event_file;
        WARN_ON_ONCE(ctx->parent_ctx);
        mutex_lock(&ctx->mutex);
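+       /* re-attach the moved group leader and its siblings to the new context */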
+       if (move_group) {
+               perf_install_in_context(ctx, group_leader, cpu);
+               get_ctx(ctx);
+               list_for_each_entry(sibling, &group_leader->sibling_list,
+                                   group_entry) {
+                       perf_install_in_context(ctx, sibling, cpu);
+                       get_ctx(ctx);
+               }
+       }
        perf_install_in_context(ctx, event, cpu);
        ++ctx->generation;
        mutex_unlock(&ctx->mutex);
        fd_install(event_fd, event_file);
        return event_fd;
  
- err_free_put_context:
+ err_context:
+       put_ctx(ctx);
+ err_alloc:
        free_event(event);
- err_put_context:
+ err_task:
+       if (task)
+               put_task_struct(task);
+ err_group_fd:
        fput_light(group_file, fput_needed);
-       put_ctx(ctx);
  err_fd:
        put_unused_fd(event_fd);
        return err;
   *
   * @attr: attributes of the counter to create
   * @cpu: cpu in which the counter is bound
-  * @pid: task to profile
+  * @task: task to profile (NULL for percpu)
   */
  struct perf_event *
  perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
-                                pid_t pid,
+                                struct task_struct *task,
                                 perf_overflow_handler_t overflow_handler)
  {
-       struct perf_event *event;
        struct perf_event_context *ctx;
+       struct perf_event *event;
        int err;
  
        /*
         * Get the target context (task or percpu):
         */
  
-       ctx = find_get_context(pid, cpu);
-       if (IS_ERR(ctx)) {
-               err = PTR_ERR(ctx);
-               goto err_exit;
-       }
-       event = perf_event_alloc(attr, cpu, ctx, NULL,
-                                NULL, overflow_handler, GFP_KERNEL);
+       event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler);
        if (IS_ERR(event)) {
                err = PTR_ERR(event);
-               goto err_put_context;
+               goto err;
+       }
+       ctx = find_get_context(event->pmu, task, cpu);
+       if (IS_ERR(ctx)) {
+               err = PTR_ERR(ctx);
+               goto err_free;
        }
  
        event->filp = NULL;
  
        return event;
  
-  err_put_context:
-       put_ctx(ctx);
-  err_exit:
+ err_free:
+       free_event(event);
+ err:
        return ERR_PTR(err);
  }
  EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
  
- /*
-  * inherit a event from parent task to child task:
-  */
- static struct perf_event *
- inherit_event(struct perf_event *parent_event,
-             struct task_struct *parent,
-             struct perf_event_context *parent_ctx,
-             struct task_struct *child,
-             struct perf_event *group_leader,
-             struct perf_event_context *child_ctx)
- {
-       struct perf_event *child_event;
-       /*
-        * Instead of creating recursive hierarchies of events,
-        * we link inherited events back to the original parent,
-        * which has a filp for sure, which we use as the reference
-        * count:
-        */
-       if (parent_event->parent)
-               parent_event = parent_event->parent;
-       child_event = perf_event_alloc(&parent_event->attr,
-                                          parent_event->cpu, child_ctx,
-                                          group_leader, parent_event,
-                                          NULL, GFP_KERNEL);
-       if (IS_ERR(child_event))
-               return child_event;
-       get_ctx(child_ctx);
-       /*
-        * Make the child state follow the state of the parent event,
-        * not its attr.disabled bit.  We hold the parent's mutex,
-        * so we won't race with perf_event_{en, dis}able_family.
-        */
-       if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
-               child_event->state = PERF_EVENT_STATE_INACTIVE;
-       else
-               child_event->state = PERF_EVENT_STATE_OFF;
-       if (parent_event->attr.freq) {
-               u64 sample_period = parent_event->hw.sample_period;
-               struct hw_perf_event *hwc = &child_event->hw;
-               hwc->sample_period = sample_period;
-               hwc->last_period   = sample_period;
-               local64_set(&hwc->period_left, sample_period);
-       }
-       child_event->overflow_handler = parent_event->overflow_handler;
-       /*
-        * Link it up in the child's context:
-        */
-       add_event_to_ctx(child_event, child_ctx);
-       /*
-        * Get a reference to the parent filp - we will fput it
-        * when the child event exits. This is safe to do because
-        * we are in the parent and we know that the filp still
-        * exists and has a nonzero count:
-        */
-       atomic_long_inc(&parent_event->filp->f_count);
-       /*
-        * Link this into the parent event's child list
-        */
-       WARN_ON_ONCE(parent_event->ctx->parent_ctx);
-       mutex_lock(&parent_event->child_mutex);
-       list_add_tail(&child_event->child_list, &parent_event->child_list);
-       mutex_unlock(&parent_event->child_mutex);
-       return child_event;
- }
- static int inherit_group(struct perf_event *parent_event,
-             struct task_struct *parent,
-             struct perf_event_context *parent_ctx,
-             struct task_struct *child,
-             struct perf_event_context *child_ctx)
- {
-       struct perf_event *leader;
-       struct perf_event *sub;
-       struct perf_event *child_ctr;
-       leader = inherit_event(parent_event, parent, parent_ctx,
-                                child, NULL, child_ctx);
-       if (IS_ERR(leader))
-               return PTR_ERR(leader);
-       list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
-               child_ctr = inherit_event(sub, parent, parent_ctx,
-                                           child, leader, child_ctx);
-               if (IS_ERR(child_ctr))
-                       return PTR_ERR(child_ctr);
-       }
-       return 0;
- }
  static void sync_child_event(struct perf_event *child_event,
                               struct task_struct *child)
  {
@@@ -5432,16 -5824,13 +5822,13 @@@ __perf_event_exit_task(struct perf_even
        }
  }
  
- /*
-  * When a child task exits, feed back event values to parent events.
-  */
- void perf_event_exit_task(struct task_struct *child)
+ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
  {
        struct perf_event *child_event, *tmp;
        struct perf_event_context *child_ctx;
        unsigned long flags;
  
-       if (likely(!child->perf_event_ctxp)) {
+       if (likely(!child->perf_event_ctxp[ctxn])) {
                perf_event_task(child, NULL, 0);
                return;
        }
         * scheduled, so we are now safe from rescheduling changing
         * our context.
         */
-       child_ctx = child->perf_event_ctxp;
-       __perf_event_task_sched_out(child_ctx);
+       child_ctx = child->perf_event_ctxp[ctxn];
+       task_ctx_sched_out(child_ctx, EVENT_ALL);
  
        /*
         * Take the context lock here so that if find_get_context is
         * incremented the context's refcount before we do put_ctx below.
         */
        raw_spin_lock(&child_ctx->lock);
-       child->perf_event_ctxp = NULL;
+       child->perf_event_ctxp[ctxn] = NULL;
        /*
         * If this context is a clone; unclone it so it can't get
         * swapped to another process while we're removing all
@@@ -5515,6 -5904,17 +5902,17 @@@ again
        put_ctx(child_ctx);
  }
  
+ /*
+  * When a child task exits, feed back event values to parent events.
+  */
+ void perf_event_exit_task(struct task_struct *child)
+ {
+       int ctxn;
+       for_each_task_context_nr(ctxn)
+               perf_event_exit_task_context(child, ctxn);
+ }
  static void perf_free_event(struct perf_event *event,
                            struct perf_event_context *ctx)
  {
  
  /*
   * free an unexposed, unused context as created by inheritance by
-  * init_task below, used by fork() in case of fail.
+  * perf_event_init_task below, used by fork() in case of failure.
   */
  void perf_event_free_task(struct task_struct *task)
  {
-       struct perf_event_context *ctx = task->perf_event_ctxp;
+       struct perf_event_context *ctx;
        struct perf_event *event, *tmp;
+       int ctxn;
  
-       if (!ctx)
-               return;
+       for_each_task_context_nr(ctxn) {
+               ctx = task->perf_event_ctxp[ctxn];
+               if (!ctx)
+                       continue;
  
-       mutex_lock(&ctx->mutex);
+               mutex_lock(&ctx->mutex);
  again:
-       list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
-               perf_free_event(event, ctx);
+               list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
+                               group_entry)
+                       perf_free_event(event, ctx);
  
-       list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
-                                group_entry)
-               perf_free_event(event, ctx);
+               list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
+                               group_entry)
+                       perf_free_event(event, ctx);
  
-       if (!list_empty(&ctx->pinned_groups) ||
-           !list_empty(&ctx->flexible_groups))
-               goto again;
+               if (!list_empty(&ctx->pinned_groups) ||
+                               !list_empty(&ctx->flexible_groups))
+                       goto again;
  
-       mutex_unlock(&ctx->mutex);
+               mutex_unlock(&ctx->mutex);
  
-       put_ctx(ctx);
+               put_ctx(ctx);
+       }
+ }
+ void perf_event_delayed_put(struct task_struct *task)
+ {
+       int ctxn;
+       for_each_task_context_nr(ctxn)
+               WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
+ }
+ /*
+  * inherit an event from parent task to child task:
+  */
+ static struct perf_event *
+ inherit_event(struct perf_event *parent_event,
+             struct task_struct *parent,
+             struct perf_event_context *parent_ctx,
+             struct task_struct *child,
+             struct perf_event *group_leader,
+             struct perf_event_context *child_ctx)
+ {
+       struct perf_event *child_event;
+       unsigned long flags;
+       /*
+        * Instead of creating recursive hierarchies of events,
+        * we link inherited events back to the original parent,
+        * which has a filp for sure, which we use as the reference
+        * count:
+        */
+       if (parent_event->parent)
+               parent_event = parent_event->parent;
+       child_event = perf_event_alloc(&parent_event->attr,
+                                          parent_event->cpu,
+                                          child,
+                                          group_leader, parent_event,
+                                          NULL);
+       if (IS_ERR(child_event))
+               return child_event;
+       get_ctx(child_ctx);
+       /*
+        * Make the child state follow the state of the parent event,
+        * not its attr.disabled bit.  We hold the parent's mutex,
+        * so we won't race with perf_event_{en, dis}able_family.
+        */
+       if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
+               child_event->state = PERF_EVENT_STATE_INACTIVE;
+       else
+               child_event->state = PERF_EVENT_STATE_OFF;
+       if (parent_event->attr.freq) {
+               u64 sample_period = parent_event->hw.sample_period;
+               struct hw_perf_event *hwc = &child_event->hw;
+               hwc->sample_period = sample_period;
+               hwc->last_period   = sample_period;
+               local64_set(&hwc->period_left, sample_period);
+       }
+       child_event->ctx = child_ctx;
+       child_event->overflow_handler = parent_event->overflow_handler;
+       /*
+        * Link it up in the child's context:
+        */
+       raw_spin_lock_irqsave(&child_ctx->lock, flags);
+       add_event_to_ctx(child_event, child_ctx);
+       raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
+       /*
+        * Get a reference to the parent filp - we will fput it
+        * when the child event exits. This is safe to do because
+        * we are in the parent and we know that the filp still
+        * exists and has a nonzero count:
+        */
+       atomic_long_inc(&parent_event->filp->f_count);
+       /*
+        * Link this into the parent event's child list
+        */
+       WARN_ON_ONCE(parent_event->ctx->parent_ctx);
+       mutex_lock(&parent_event->child_mutex);
+       list_add_tail(&child_event->child_list, &parent_event->child_list);
+       mutex_unlock(&parent_event->child_mutex);
+       return child_event;
+ }
+ static int inherit_group(struct perf_event *parent_event,
+             struct task_struct *parent,
+             struct perf_event_context *parent_ctx,
+             struct task_struct *child,
+             struct perf_event_context *child_ctx)
+ {
+       struct perf_event *leader;
+       struct perf_event *sub;
+       struct perf_event *child_ctr;
+       leader = inherit_event(parent_event, parent, parent_ctx,
+                                child, NULL, child_ctx);
+       if (IS_ERR(leader))
+               return PTR_ERR(leader);
+       list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
+               child_ctr = inherit_event(sub, parent, parent_ctx,
+                                           child, leader, child_ctx);
+               if (IS_ERR(child_ctr))
+                       return PTR_ERR(child_ctr);
+       }
+       return 0;
  }
  
  static int
  inherit_task_group(struct perf_event *event, struct task_struct *parent,
                   struct perf_event_context *parent_ctx,
-                  struct task_struct *child,
+                  struct task_struct *child, int ctxn,
                   int *inherited_all)
  {
        int ret;
-       struct perf_event_context *child_ctx = child->perf_event_ctxp;
+       struct perf_event_context *child_ctx;
  
        if (!event->attr.inherit) {
                *inherited_all = 0;
                return 0;
        }
  
+       child_ctx = child->perf_event_ctxp[ctxn];
        if (!child_ctx) {
                /*
                 * This is executed from the parent task context, so
                 * child.
                 */
  
-               child_ctx = kzalloc(sizeof(struct perf_event_context),
-                                   GFP_KERNEL);
+               child_ctx = alloc_perf_context(event->pmu, child);
                if (!child_ctx)
                        return -ENOMEM;
  
-               __perf_event_init_context(child_ctx, child);
-               child->perf_event_ctxp = child_ctx;
-               get_task_struct(child);
+               child->perf_event_ctxp[ctxn] = child_ctx;
        }
  
        ret = inherit_group(event, parent, parent_ctx,
        return ret;
  }
  
  /*
   * Initialize the perf_event context in task_struct
   */
- int perf_event_init_task(struct task_struct *child)
+ int perf_event_init_context(struct task_struct *child, int ctxn)
  {
        struct perf_event_context *child_ctx, *parent_ctx;
        struct perf_event_context *cloned_ctx;
        int inherited_all = 1;
        int ret = 0;
  
-       child->perf_event_ctxp = NULL;
+       child->perf_event_ctxp[ctxn] = NULL;
  
        mutex_init(&child->perf_event_mutex);
        INIT_LIST_HEAD(&child->perf_event_list);
  
-       if (likely(!parent->perf_event_ctxp))
+       if (likely(!parent->perf_event_ctxp[ctxn]))
                return 0;
  
        /*
         * If the parent's context is a clone, pin it so it won't get
         * swapped under us.
         */
-       parent_ctx = perf_pin_task_context(parent);
+       parent_ctx = perf_pin_task_context(parent, ctxn);
  
        /*
         * No need to check if parent_ctx != NULL here; since we saw
         * the list, not manipulating it:
         */
        list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
-               ret = inherit_task_group(event, parent, parent_ctx, child,
-                                        &inherited_all);
+               ret = inherit_task_group(event, parent, parent_ctx,
+                                        child, ctxn, &inherited_all);
                if (ret)
                        break;
        }
  
        list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
-               ret = inherit_task_group(event, parent, parent_ctx, child,
-                                        &inherited_all);
+               ret = inherit_task_group(event, parent, parent_ctx,
+                                        child, ctxn, &inherited_all);
                if (ret)
                        break;
        }
  
-       child_ctx = child->perf_event_ctxp;
+       child_ctx = child->perf_event_ctxp[ctxn];
  
        if (child_ctx && inherited_all) {
                /*
        return ret;
  }
  
+ /*
+  * Initialize the perf_event context in task_struct
+  */
+ int perf_event_init_task(struct task_struct *child)
+ {
+       int ctxn, ret;
+       for_each_task_context_nr(ctxn) {
+               ret = perf_event_init_context(child, ctxn);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+ }
  static void __init perf_event_init_all_cpus(void)
  {
+       struct swevent_htable *swhash;
        int cpu;
-       struct perf_cpu_context *cpuctx;
  
        for_each_possible_cpu(cpu) {
-               cpuctx = &per_cpu(perf_cpu_context, cpu);
-               mutex_init(&cpuctx->hlist_mutex);
-               __perf_event_init_context(&cpuctx->ctx, NULL);
+               swhash = &per_cpu(swevent_htable, cpu);
+               mutex_init(&swhash->hlist_mutex);
+               INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
        }
  }
  
  static void __cpuinit perf_event_init_cpu(int cpu)
  {
-       struct perf_cpu_context *cpuctx;
-       cpuctx = &per_cpu(perf_cpu_context, cpu);
+       struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
  
-       spin_lock(&perf_resource_lock);
-       cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
-       spin_unlock(&perf_resource_lock);
-       mutex_lock(&cpuctx->hlist_mutex);
-       if (cpuctx->hlist_refcount > 0) {
+       mutex_lock(&swhash->hlist_mutex);
+       if (swhash->hlist_refcount > 0) {
                struct swevent_hlist *hlist;
  
-               hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
-               WARN_ON_ONCE(!hlist);
-               rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
+               hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
+               WARN_ON(!hlist);
+               rcu_assign_pointer(swhash->swevent_hlist, hlist);
        }
-       mutex_unlock(&cpuctx->hlist_mutex);
+       mutex_unlock(&swhash->hlist_mutex);
  }
  
  #ifdef CONFIG_HOTPLUG_CPU
- static void __perf_event_exit_cpu(void *info)
+ static void perf_pmu_rotate_stop(struct pmu *pmu)
  {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       struct perf_event_context *ctx = &cpuctx->ctx;
+       struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+       WARN_ON(!irqs_disabled());
+       list_del_init(&cpuctx->rotation_list);
+ }
+ static void __perf_event_exit_context(void *__info)
+ {
+       struct perf_event_context *ctx = __info;
        struct perf_event *event, *tmp;
  
+       perf_pmu_rotate_stop(ctx->pmu);
        list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
                __perf_event_remove_from_context(event);
        list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
                __perf_event_remove_from_context(event);
  }
+ static void perf_event_exit_cpu_context(int cpu)
+ {
+       struct perf_event_context *ctx;
+       struct pmu *pmu;
+       int idx;
+       idx = srcu_read_lock(&pmus_srcu);
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
+               mutex_lock(&ctx->mutex);
+               smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
+               mutex_unlock(&ctx->mutex);
+       }
+       srcu_read_unlock(&pmus_srcu, idx);
+ }
  static void perf_event_exit_cpu(int cpu)
  {
-       struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
-       struct perf_event_context *ctx = &cpuctx->ctx;
+       struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
  
-       mutex_lock(&cpuctx->hlist_mutex);
-       swevent_hlist_release(cpuctx);
-       mutex_unlock(&cpuctx->hlist_mutex);
+       mutex_lock(&swhash->hlist_mutex);
+       swevent_hlist_release(swhash);
+       mutex_unlock(&swhash->hlist_mutex);
  
-       mutex_lock(&ctx->mutex);
-       smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
-       mutex_unlock(&ctx->mutex);
+       perf_event_exit_cpu_context(cpu);
  }
  #else
  static inline void perf_event_exit_cpu(int cpu) { }
@@@ -5778,118 -6327,13 +6325,13 @@@ perf_cpu_notify(struct notifier_block *
        return NOTIFY_OK;
  }
  
  void __init perf_event_init(void)
  {
        perf_event_init_all_cpus();
-       perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
-                       (void *)(long)smp_processor_id());
-       perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
-                       (void *)(long)smp_processor_id());
-       register_cpu_notifier(&perf_cpu_nb);
- }
- static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
-                                       struct sysdev_class_attribute *attr,
-                                       char *buf)
- {
-       return sprintf(buf, "%d\n", perf_reserved_percpu);
- }
- static ssize_t
- perf_set_reserve_percpu(struct sysdev_class *class,
-                       struct sysdev_class_attribute *attr,
-                       const char *buf,
-                       size_t count)
- {
-       struct perf_cpu_context *cpuctx;
-       unsigned long val;
-       int err, cpu, mpt;
-       err = strict_strtoul(buf, 10, &val);
-       if (err)
-               return err;
-       if (val > perf_max_events)
-               return -EINVAL;
-       spin_lock(&perf_resource_lock);
-       perf_reserved_percpu = val;
-       for_each_online_cpu(cpu) {
-               cpuctx = &per_cpu(perf_cpu_context, cpu);
-               raw_spin_lock_irq(&cpuctx->ctx.lock);
-               mpt = min(perf_max_events - cpuctx->ctx.nr_events,
-                         perf_max_events - perf_reserved_percpu);
-               cpuctx->max_pertask = mpt;
-               raw_spin_unlock_irq(&cpuctx->ctx.lock);
-       }
-       spin_unlock(&perf_resource_lock);
-       return count;
- }
- static ssize_t perf_show_overcommit(struct sysdev_class *class,
-                                   struct sysdev_class_attribute *attr,
-                                   char *buf)
- {
-       return sprintf(buf, "%d\n", perf_overcommit);
- }
- static ssize_t
- perf_set_overcommit(struct sysdev_class *class,
-                   struct sysdev_class_attribute *attr,
-                   const char *buf, size_t count)
- {
-       unsigned long val;
-       int err;
-       err = strict_strtoul(buf, 10, &val);
-       if (err)
-               return err;
-       if (val > 1)
-               return -EINVAL;
-       spin_lock(&perf_resource_lock);
-       perf_overcommit = val;
-       spin_unlock(&perf_resource_lock);
-       return count;
- }
- static SYSDEV_CLASS_ATTR(
-                               reserve_percpu,
-                               0644,
-                               perf_show_reserve_percpu,
-                               perf_set_reserve_percpu
-                       );
- static SYSDEV_CLASS_ATTR(
-                               overcommit,
-                               0644,
-                               perf_show_overcommit,
-                               perf_set_overcommit
-                       );
- static struct attribute *perfclass_attrs[] = {
-       &attr_reserve_percpu.attr,
-       &attr_overcommit.attr,
-       NULL
- };
- static struct attribute_group perfclass_attr_group = {
-       .attrs                  = perfclass_attrs,
-       .name                   = "perf_events",
- };
- static int __init perf_event_sysfs_init(void)
- {
-       return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
-                                 &perfclass_attr_group);
+       init_srcu_struct(&pmus_srcu);
+       perf_pmu_register(&perf_swevent);
+       perf_pmu_register(&perf_cpu_clock);
+       perf_pmu_register(&perf_task_clock);
+       perf_tp_register();
+       perf_cpu_notifier(perf_cpu_notify);
  }
- device_initcall(perf_event_sysfs_init);
diff --combined kernel/sched.c
@@@ -3584,7 -3584,7 +3584,7 @@@ void scheduler_tick(void
        curr->sched_class->task_tick(rq, curr, 0);
        raw_spin_unlock(&rq->lock);
  
-       perf_event_task_tick(curr);
+       perf_event_task_tick();
  
  #ifdef CONFIG_SMP
        rq->idle_at_tick = idle_cpu(cpu);
@@@ -4645,7 -4645,7 +4645,7 @@@ recheck
        }
  
        if (user) {
 -              retval = security_task_setscheduler(p, policy, param);
 +              retval = security_task_setscheduler(p);
                if (retval)
                        return retval;
        }
@@@ -4887,7 -4887,7 +4887,7 @@@ long sched_setaffinity(pid_t pid, cons
        if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
                goto out_unlock;
  
 -      retval = security_task_setscheduler(p, 0, NULL);
 +      retval = security_task_setscheduler(p);
        if (retval)
                goto out_unlock;
  
@@@ -5337,19 -5337,7 +5337,19 @@@ void __cpuinit init_idle(struct task_st
        idle->se.exec_start = sched_clock();
  
        cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
 +      /*
 +       * We're having a chicken and egg problem, even though we are
 +       * holding rq->lock, the cpu isn't yet set to this cpu so the
 +       * lockdep check in task_group() will fail.
 +       *
 +       * Similar case to sched_fork(). / Alternatively we could
 +       * use task_rq_lock() here and obtain the other rq->lock.
 +       *
 +       * Silence PROVE_RCU
 +       */
 +      rcu_read_lock();
        __set_task_cpu(idle, cpu);
 +      rcu_read_unlock();
  
        rq->curr = rq->idle = idle;
  #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
diff --combined kernel/trace/ring_buffer.c
@@@ -405,7 -405,7 +405,7 @@@ static inline int test_time_stamp(u64 d
  #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
  
  /* Max number of timestamps that can fit on a page */
 -#define RB_TIMESTAMPS_PER_PAGE        (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
 +#define RB_TIMESTAMPS_PER_PAGE        (BUF_PAGE_SIZE / RB_LEN_TIME_EXTEND)
  
  int ring_buffer_print_page_header(struct trace_seq *s)
  {
@@@ -2606,6 -2606,19 +2606,19 @@@ void ring_buffer_record_enable_cpu(stru
  }
  EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
  
+ /*
+  * The total number of entries in the ring buffer is the running counter
+  * of entries entered into the ring buffer, minus the sum of
+  * the entries read from the ring buffer and the number of
+  * entries that were overwritten.
+  */
+ static inline unsigned long
+ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
+ {
+       return local_read(&cpu_buffer->entries) -
+               (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
+ }
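To make the bookkeeping in rb_num_of_entries() concrete, here is a rough, self-contained illustration in plain userspace C; the numbers are invented and the per-cpu buffer fields are flattened into locals:

#include <assert.h>

/* Standalone illustration of the rb_num_of_entries() arithmetic above:
 * readable entries = entries written - (entries overwritten + entries read).
 * All numbers are made up for the example. */
int main(void)
{
	unsigned long written = 1000;		/* local_read(&cpu_buffer->entries) */
	unsigned long overrun = 120;		/* local_read(&cpu_buffer->overrun) */
	unsigned long already_read = 300;	/* cpu_buffer->read */
	unsigned long readable;

	readable = written - (overrun + already_read);
	assert(readable == 580);

	return 0;
}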
  /**
   * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
   * @buffer: The ring buffer
  unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
  {
        struct ring_buffer_per_cpu *cpu_buffer;
-       unsigned long ret;
  
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
                return 0;
  
        cpu_buffer = buffer->buffers[cpu];
-       ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun))
-               - cpu_buffer->read;
  
-       return ret;
+       return rb_num_of_entries(cpu_buffer);
  }
  EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
  
@@@ -2684,8 -2694,7 +2694,7 @@@ unsigned long ring_buffer_entries(struc
        /* if you care about this being correct, lock the buffer */
        for_each_buffer_cpu(buffer, cpu) {
                cpu_buffer = buffer->buffers[cpu];
-               entries += (local_read(&cpu_buffer->entries) -
-                           local_read(&cpu_buffer->overrun)) - cpu_buffer->read;
+               entries += rb_num_of_entries(cpu_buffer);
        }
  
        return entries;
diff --combined kernel/watchdog.c
@@@ -43,7 -43,6 +43,6 @@@ static DEFINE_PER_CPU(unsigned long, hr
  static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
  #endif
  
- static int __read_mostly did_panic;
  static int __initdata no_watchdog;
  
  
@@@ -187,18 -186,6 +186,6 @@@ static int is_softlockup(unsigned long 
        return 0;
  }
  
- static int
- watchdog_panic(struct notifier_block *this, unsigned long event, void *ptr)
- {
-       did_panic = 1;
-       return NOTIFY_DONE;
- }
- static struct notifier_block panic_block = {
-       .notifier_call = watchdog_panic,
- };
  #ifdef CONFIG_HARDLOCKUP_DETECTOR
  static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
  };
  
  /* Callback function for perf event subsystem */
 -void watchdog_overflow_callback(struct perf_event *event, int nmi,
 +static void watchdog_overflow_callback(struct perf_event *event, int nmi,
                 struct perf_sample_data *data,
                 struct pt_regs *regs)
  {
@@@ -371,14 -358,14 +358,14 @@@ static int watchdog_nmi_enable(int cpu
        /* Try to register using hardware perf events */
        wd_attr = &wd_hw_attr;
        wd_attr->sample_period = hw_nmi_get_sample_period();
-       event = perf_event_create_kernel_counter(wd_attr, cpu, -1, watchdog_overflow_callback);
+       event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback);
        if (!IS_ERR(event)) {
                printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n");
                goto out_save;
        }
  
        printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event);
-       return -1;
+       return PTR_ERR(event);
  
        /* success path */
  out_save:
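On the error-path change above: perf_event_create_kernel_counter() hands back an ERR_PTR-encoded pointer on failure, so returning PTR_ERR(event) propagates the real errno rather than a flat -1. A minimal kernel-style sketch of that convention follows; "struct foo" and foo_create() are invented stand-ins, not real APIs:

#include <linux/err.h>

/*
 * Illustrative pattern only: "struct foo" and foo_create() are invented
 * stand-ins for an allocator that returns either a valid pointer or an
 * ERR_PTR-encoded errno, as perf_event_create_kernel_counter() does.
 */
struct foo;
struct foo *foo_create(void);

static int foo_setup(void)
{
	struct foo *f = foo_create();

	if (IS_ERR(f))
		return PTR_ERR(f);	/* hand the real errno to the caller */

	/* ... use f ... */
	return 0;
}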
@@@ -422,17 -409,19 +409,19 @@@ static int watchdog_prepare_cpu(int cpu
  static int watchdog_enable(int cpu)
  {
        struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
+       int err;
  
        /* enable the perf event */
-       if (watchdog_nmi_enable(cpu) != 0)
-               return -1;
+       err = watchdog_nmi_enable(cpu);
+       if (err)
+               return err;
  
        /* create the watchdog thread */
        if (!p) {
                p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu);
                if (IS_ERR(p)) {
                        printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
-                       return -1;
+                       return PTR_ERR(p);
                }
                kthread_bind(p, cpu);
                per_cpu(watchdog_touch_ts, cpu) = 0;
@@@ -484,6 -473,9 +473,9 @@@ static void watchdog_disable_all_cpus(v
  {
        int cpu;
  
+       if (no_watchdog)
+               return;
        for_each_online_cpu(cpu)
                watchdog_disable(cpu);
  
@@@ -526,17 -518,16 +518,16 @@@ static int __cpuini
  cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
  {
        int hotcpu = (unsigned long)hcpu;
+       int err = 0;
  
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
-               if (watchdog_prepare_cpu(hotcpu))
-                       return NOTIFY_BAD;
+               err = watchdog_prepare_cpu(hotcpu);
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
-               if (watchdog_enable(hotcpu))
-                       return NOTIFY_BAD;
+               err = watchdog_enable(hotcpu);
                break;
  #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
                break;
  #endif /* CONFIG_HOTPLUG_CPU */
        }
-       return NOTIFY_OK;
+       return notifier_from_errno(err);
  }
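The switch from NOTIFY_BAD to notifier_from_errno() above lets the CPU notifier carry a real errno back to its caller, which recovers it with notifier_to_errno(). A rough kernel-style sketch of that round trip; example_callback() and example_caller() are invented names:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

/*
 * Rough sketch only: example_callback() is an invented notifier-style
 * callback.  It encodes an errno with notifier_from_errno(); the caller
 * gets the original value back via notifier_to_errno().
 */
static int example_callback(int err)
{
	return notifier_from_errno(err);	/* 0 becomes NOTIFY_OK */
}

static void example_caller(void)
{
	int ret = example_callback(-ENOMEM);

	/* The errno survives the notifier return-value encoding. */
	WARN_ON(notifier_to_errno(ret) != -ENOMEM);
}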
  
  static struct notifier_block __cpuinitdata cpu_nfb = {
@@@ -565,13 -556,11 +556,11 @@@ static int __init spawn_watchdog_task(v
                return 0;
  
        err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
-       WARN_ON(err == NOTIFY_BAD);
+       WARN_ON(notifier_to_errno(err));
  
        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);
  
-       atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
        return 0;
  }
  early_initcall(spawn_watchdog_task);
diff --combined lib/Kconfig.debug
@@@ -482,6 -482,7 +482,7 @@@ config PROVE_LOCKIN
        select DEBUG_SPINLOCK
        select DEBUG_MUTEXES
        select DEBUG_LOCK_ALLOC
+       select TRACE_IRQFLAGS
        default n
        help
         This feature enables the kernel to prove that all locking
@@@ -539,23 -540,6 +540,23 @@@ config PROVE_RCU_REPEATEDL
         disabling, allowing multiple RCU-lockdep warnings to be printed
         on a single reboot.
  
 +       Say Y to allow multiple RCU-lockdep warnings per boot.
 +
 +       Say N if you are unsure.
 +
 +config SPARSE_RCU_POINTER
 +      bool "RCU debugging: sparse-based checks for pointer usage"
 +      default n
 +      help
 +       This feature enables the __rcu sparse annotation for
 +       RCU-protected pointers.  This annotation will cause sparse
 +       to flag any non-RCU use of annotated pointers.  This can be
 +       helpful when debugging RCU usage.  Please note that this feature
 +       is not intended to enforce code cleanliness; it is instead merely
 +       a debugging aid.
 +
 +       Say Y to make sparse flag questionable use of RCU-protected pointers.
 +
         Say N if you are unsure.
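As a rough illustration of what this option checks, the following kernel-style sketch shows an __rcu-annotated pointer accessed through the RCU helpers sparse expects; "struct gadget" and the other identifiers are invented, and the snippet is not meant to build on its own:

#include <linux/rcupdate.h>

/*
 * Invented example: "struct gadget" and these identifiers exist only to
 * show the __rcu annotation that CONFIG_SPARSE_RCU_POINTER makes sparse
 * enforce.
 */
struct gadget {
	int value;
};

static struct gadget __rcu *cur_gadget;	/* must go through RCU accessors */

static void gadget_publish(struct gadget *g)
{
	rcu_assign_pointer(cur_gadget, g);	/* writer side */
}

static int gadget_read_value(void)
{
	struct gadget *g;
	int val = -1;

	rcu_read_lock();
	g = rcu_dereference(cur_gadget);	/* sparse-clean access */
	if (g)
		val = g->value;
	rcu_read_unlock();

	/* A bare "g = cur_gadget;" would be flagged by sparse here. */
	return val;
}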
  
  config LOCKDEP
@@@ -596,11 -580,10 +597,10 @@@ config DEBUG_LOCKDE
          of more runtime overhead.
  
  config TRACE_IRQFLAGS
-       depends on DEBUG_KERNEL
        bool
-       default y
-       depends on TRACE_IRQFLAGS_SUPPORT
-       depends on PROVE_LOCKING
+       help
+         Enables hooks to interrupt enabling and disabling for
+         either tracing or lock debugging.
  
  config DEBUG_SPINLOCK_SLEEP
        bool "Spinlock debugging: sleep-inside-spinlock checking"
@@@ -849,30 -832,6 +849,30 @@@ config RCU_CPU_STALL_DETECTO
  
          Say Y if you are unsure.
  
 +config RCU_CPU_STALL_TIMEOUT
 +      int "RCU CPU stall timeout in seconds"
 +      depends on RCU_CPU_STALL_DETECTOR
 +      range 3 300
 +      default 60
 +      help
 +        If a given RCU grace period extends more than the specified
 +        number of seconds, a CPU stall warning is printed.  If the
 +        RCU grace period persists, additional CPU stall warnings are
 +        printed at more widely spaced intervals.
 +
 +config RCU_CPU_STALL_DETECTOR_RUNNABLE
 +      bool "RCU CPU stall checking starts automatically at boot"
 +      depends on RCU_CPU_STALL_DETECTOR
 +      default y
 +      help
 +        If set, start checking for RCU CPU stalls immediately on
 +        boot.  Otherwise, RCU CPU stall checking must be manually
 +        enabled.
 +
 +        Say Y if you are unsure.
 +
 +        Say N if you wish to suppress RCU CPU stall checking during boot.
 +
  config RCU_CPU_STALL_VERBOSE
        bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
        depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU