 * Performance counter core code
 *
 * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/perf_counter.h>
 * Each CPU has a list of per CPU counters:
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

 * Mutex for (sysadmin-configurable) counter reservations:
static DEFINE_MUTEX(perf_resource_mutex);

 * Architecture provided APIs - weak aliases:
int __weak hw_perf_counter_init(struct perf_counter *counter, u32 hw_event_type)

void __weak hw_perf_counter_enable(struct perf_counter *counter) { }
void __weak hw_perf_counter_disable(struct perf_counter *counter) { }
void __weak hw_perf_counter_read(struct perf_counter *counter) { }
void __weak hw_perf_disable_all(void) { }
void __weak hw_perf_enable_all(void) { }
void __weak hw_perf_counter_setup(void) { }
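/*
 * Note: these empty __weak stubs are the defaults the core falls back to
 * when an architecture has not provided its own hw_perf_* implementation;
 * an arch backend overrides them simply by defining strong symbols with
 * the same names.
 */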
#if BITS_PER_LONG == 64

 * Read the cached value in 'counter', safe against cross CPU / NMI
 * modifications. 64 bit version - no complications.
static inline u64 perf_read_counter_safe(struct perf_counter *counter)
return (u64) atomic64_read(&counter->count);

 * Read the cached value in 'counter', safe against cross CPU / NMI
 * modifications. 32 bit version.
static u64 perf_read_counter_safe(struct perf_counter *counter)
cnth = atomic_read(&counter->count32[1]);
cntl = atomic_read(&counter->count32[0]);
} while (cnth != atomic_read(&counter->count32[1]));
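/*
 * Note: the loop above re-reads the high word after fetching the low word;
 * if the high half changed in between, a 64-bit update raced with us and
 * the pair is read again, so the combined value below is consistent.
 */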
return cntl | ((u64) cnth) << 32;

 * Cross CPU call to remove a performance counter
 *
 * We disable the counter at the hardware level first. After that we
 * remove it from the context list.
static void __perf_remove_from_context(void *info)
struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
struct perf_counter *counter = info;
struct perf_counter_context *ctx = counter->ctx;

 * If this is a task context, we need to check whether it is
 * the current task context of this cpu. If not, it has been
 * scheduled out before the smp call arrived.
if (ctx->task && cpuctx->task_ctx != ctx)

spin_lock(&ctx->lock);
if (counter->active) {
hw_perf_counter_disable(counter);
cpuctx->active_oncpu--;
counter->task = NULL;

 * Protect the list operation against NMI by disabling the
 * counters on a global level. NOP for non-NMI based counters.
hw_perf_disable_all();
list_del_init(&counter->list);
hw_perf_enable_all();

 * Allow more per task counters with respect to the reservation:
cpuctx->max_pertask =
	min(perf_max_counters - ctx->nr_counters,
	    perf_max_counters - perf_reserved_percpu);
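/*
 * Note: after the removal above there is room again, so the per-task
 * budget is recomputed; the second min() term keeps perf_reserved_percpu
 * hardware counters set aside for per-CPU (system-wide) counters.
 */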
spin_unlock(&ctx->lock);

 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with counter->mutex held.
 *
 * CPU counters are removed with an smp call. For task counters we only
 * call when the task is on a CPU.
static void perf_remove_from_context(struct perf_counter *counter)
struct perf_counter_context *ctx = counter->ctx;
struct task_struct *task = ctx->task;

 * Per cpu counters are removed via an smp call and
 * the removal is always successful.
smp_call_function_single(counter->cpu,
			 __perf_remove_from_context,
task_oncpu_function_call(task, __perf_remove_from_context,

spin_lock_irq(&ctx->lock);
 * If the context is active we need to retry the smp call.
if (ctx->nr_active && !list_empty(&counter->list)) {
spin_unlock_irq(&ctx->lock);

 * The lock prevents this context from being scheduled in, so we
 * can remove the counter safely if the call above did not succeed.
if (!list_empty(&counter->list)) {
list_del_init(&counter->list);
counter->task = NULL;
spin_unlock_irq(&ctx->lock);
 * Cross CPU call to install and enable a performance counter
static void __perf_install_in_context(void *info)
struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
struct perf_counter *counter = info;
struct perf_counter_context *ctx = counter->ctx;
int cpu = smp_processor_id();

 * If this is a task context, we need to check whether it is
 * the current task context of this cpu. If not, it has been
 * scheduled out before the smp call arrived.
if (ctx->task && cpuctx->task_ctx != ctx)

spin_lock(&ctx->lock);

 * Protect the list operation against NMI by disabling the
 * counters on a global level. NOP for non-NMI based counters.
hw_perf_disable_all();
list_add_tail(&counter->list, &ctx->counters);
hw_perf_enable_all();

if (cpuctx->active_oncpu < perf_max_counters) {
hw_perf_counter_enable(counter);
counter->oncpu = cpu;
cpuctx->active_oncpu++;

if (!ctx->task && cpuctx->max_pertask)
cpuctx->max_pertask--;
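/*
 * Note: a counter installed into a per-CPU context (no owning task)
 * occupies a hardware slot permanently on this CPU, so the budget of
 * counters that per-task contexts may still schedule here shrinks by one.
 */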
spin_unlock(&ctx->lock);

 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use an smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
perf_install_in_context(struct perf_counter_context *ctx,
			struct perf_counter *counter,
struct task_struct *task = ctx->task;

 * Per cpu counters are installed via an smp call and
 * the install is always successful.
smp_call_function_single(cpu, __perf_install_in_context,

counter->task = task;
task_oncpu_function_call(task, __perf_install_in_context,

spin_lock_irq(&ctx->lock);
 * If the context is active and the counter has not been added
 * we need to retry the smp call.
if (ctx->nr_active && list_empty(&counter->list)) {
spin_unlock_irq(&ctx->lock);

 * The lock prevents this context from being scheduled in, so we
 * can add the counter safely if the call above did not succeed.
if (list_empty(&counter->list)) {
list_add_tail(&counter->list, &ctx->counters);
spin_unlock_irq(&ctx->lock);
 * Called from scheduler to remove the counters of the current task,
 * with interrupts disabled.
 *
 * We stop each counter and update the counter value in counter->count.
 *
 * This does not protect us against NMI, but hw_perf_counter_disable()
 * sets the disabled bit in the control field of counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * not restart the counter.
void perf_counter_task_sched_out(struct task_struct *task, int cpu)
struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
struct perf_counter_context *ctx = &task->perf_counter_ctx;
struct perf_counter *counter;

if (likely(!cpuctx->task_ctx))

spin_lock(&ctx->lock);
list_for_each_entry(counter, &ctx->counters, list) {
if (counter->active) {
hw_perf_counter_disable(counter);
cpuctx->active_oncpu--;
spin_unlock(&ctx->lock);
cpuctx->task_ctx = NULL;
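/*
 * Note: clearing cpuctx->task_ctx tells subsequent cross-CPU calls that
 * no task context is currently live on this CPU, so they bail out early
 * instead of touching a context that has been scheduled away.
 */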
 * Called from scheduler to add the counters of the current task,
 * with interrupts disabled.
 *
 * We restore the counter value and then enable it.
 *
 * This does not protect us against NMI, but hw_perf_counter_enable()
 * sets the enabled bit in the control field of counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * keep the counter running.
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
struct perf_counter_context *ctx = &task->perf_counter_ctx;
struct perf_counter *counter;

if (likely(!ctx->nr_counters))

spin_lock(&ctx->lock);
list_for_each_entry(counter, &ctx->counters, list) {
if (ctx->nr_active == cpuctx->max_pertask)
if (counter->cpu != -1 && counter->cpu != cpu)
hw_perf_counter_enable(counter);
counter->oncpu = cpu;
cpuctx->active_oncpu++;
spin_unlock(&ctx->lock);
cpuctx->task_ctx = ctx;
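/*
 * Note: publishing ctx as cpuctx->task_ctx marks this task's context as
 * the one currently live on this CPU, which is what the cross-CPU
 * install/remove/switch helpers check before acting.
 */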
void perf_counter_task_tick(struct task_struct *curr, int cpu)
struct perf_counter_context *ctx = &curr->perf_counter_ctx;
struct perf_counter *counter;

if (likely(!ctx->nr_counters))

perf_counter_task_sched_out(curr, cpu);

spin_lock(&ctx->lock);

 * Rotate the first entry last:
hw_perf_disable_all();
list_for_each_entry(counter, &ctx->counters, list) {
list_del(&counter->list);
list_add_tail(&counter->list, &ctx->counters);
hw_perf_enable_all();
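/*
 * Note: rotating the list on each tick round-robins the counters through
 * the limited hardware slots, so a context with more counters than can be
 * active at once still gets all of them scheduled over time.
 */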
spin_unlock(&ctx->lock);

perf_counter_task_sched_in(curr, cpu);

 * Initialize the perf_counter context in task_struct
void perf_counter_init_task(struct task_struct *task)
struct perf_counter_context *ctx = &task->perf_counter_ctx;

spin_lock_init(&ctx->lock);
INIT_LIST_HEAD(&ctx->counters);
ctx->nr_counters = 0;

 * Cross CPU call to read the hardware counter
static void __hw_perf_counter_read(void *info)
hw_perf_counter_read(info);

static u64 perf_read_counter(struct perf_counter *counter)
 * If counter is enabled and currently active on a CPU, update the
 * value in the counter structure:
if (counter->active) {
smp_call_function_single(counter->oncpu,
			 __hw_perf_counter_read, counter, 1);

return perf_read_counter_safe(counter);
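/*
 * Note on the data buffers used below: each counter carries two
 * perf_data buffers, counter->irqdata (filled from interrupt/NMI context)
 * and counter->usrdata (drained by read()). The switch helpers below swap
 * the two pointers so userspace can consume one buffer while new samples
 * keep landing in the other.
 */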
 * Cross CPU call to switch performance data pointers
static void __perf_switch_irq_data(void *info)
struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
struct perf_counter *counter = info;
struct perf_counter_context *ctx = counter->ctx;
struct perf_data *oldirqdata = counter->irqdata;

 * If this is a task context, we need to check whether it is
 * the current task context of this cpu. If not, it has been
 * scheduled out before the smp call arrived.
if (cpuctx->task_ctx != ctx)

spin_lock(&ctx->lock);

/* Change the pointer in an NMI-safe way */
atomic_long_set((atomic_long_t *)&counter->irqdata,
		(unsigned long) counter->usrdata);
counter->usrdata = oldirqdata;

spin_unlock(&ctx->lock);
static struct perf_data *perf_switch_irq_data(struct perf_counter *counter)
struct perf_counter_context *ctx = counter->ctx;
struct perf_data *oldirqdata = counter->irqdata;
struct task_struct *task = ctx->task;

smp_call_function_single(counter->cpu,
			 __perf_switch_irq_data,
return counter->usrdata;

spin_lock_irq(&ctx->lock);
if (!counter->active) {
counter->irqdata = counter->usrdata;
counter->usrdata = oldirqdata;
spin_unlock_irq(&ctx->lock);

spin_unlock_irq(&ctx->lock);
task_oncpu_function_call(task, __perf_switch_irq_data, counter);
/* Might have failed because the task was scheduled out */
if (counter->irqdata == oldirqdata)

return counter->usrdata;
static void put_context(struct perf_counter_context *ctx)
put_task_struct(ctx->task);

static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
struct perf_cpu_context *cpuctx;
struct perf_counter_context *ctx;
struct task_struct *task;

 * If cpu is not a wildcard then this is a percpu counter:
/* Must be root to operate on a CPU counter: */
if (!capable(CAP_SYS_ADMIN))
return ERR_PTR(-EACCES);

if (cpu < 0 || cpu > num_possible_cpus())
return ERR_PTR(-EINVAL);

 * We could be clever and allow attaching a counter to an
 * offline CPU and activate it when the CPU comes up, but
 * that is for later.
if (!cpu_isset(cpu, cpu_online_map))
return ERR_PTR(-ENODEV);

cpuctx = &per_cpu(perf_cpu_context, cpu);
WARN_ON_ONCE(ctx->task);

task = find_task_by_vpid(pid);
get_task_struct(task);
return ERR_PTR(-ESRCH);

ctx = &task->perf_counter_ctx;

/* Reuse ptrace permission checks for now. */
if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
return ERR_PTR(-EACCES);
 * Called when the last reference to the file is gone.
static int perf_release(struct inode *inode, struct file *file)
struct perf_counter *counter = file->private_data;
struct perf_counter_context *ctx = counter->ctx;

file->private_data = NULL;

mutex_lock(&counter->mutex);
perf_remove_from_context(counter);
mutex_unlock(&counter->mutex);

 * Read the performance counter - simple non-blocking version for now
perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
if (count != sizeof(cntval))

mutex_lock(&counter->mutex);
cntval = perf_read_counter(counter);
mutex_unlock(&counter->mutex);

return put_user(cntval, (u64 __user *) buf) ? -EFAULT : sizeof(cntval);
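/*
 * Note: perf_copy_usrdata() below drains whatever is pending in the
 * user-visible buffer into the read() buffer, copying at most
 * usrdata->len bytes and advancing rd_idx while shrinking len to match.
 */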
perf_copy_usrdata(struct perf_data *usrdata, char __user *buf, size_t count)
count = min(count, (size_t)usrdata->len);
if (copy_to_user(buf, usrdata->data + usrdata->rd_idx, count))

/* Adjust the counters */
usrdata->len -= count;
usrdata->rd_idx += count;
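/*
 * Note: perf_read_irq_data() below is the sampled-data read path: when
 * usrdata plus irqdata do not yet hold 'count' bytes, a blocking reader
 * sleeps on counter->waitq (waking on new data or a signal); the data is
 * then copied out of usrdata first, after which the irq buffer is
 * switched in and drained as well.
 */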
perf_read_irq_data(struct perf_counter *counter,
struct perf_data *irqdata, *usrdata;
DECLARE_WAITQUEUE(wait, current);

irqdata = counter->irqdata;
usrdata = counter->usrdata;

if (usrdata->len + irqdata->len >= count)

spin_lock_irq(&counter->waitq.lock);
__add_wait_queue(&counter->waitq, &wait);
set_current_state(TASK_INTERRUPTIBLE);
if (usrdata->len + irqdata->len >= count)
if (signal_pending(current))
spin_unlock_irq(&counter->waitq.lock);
spin_lock_irq(&counter->waitq.lock);
__remove_wait_queue(&counter->waitq, &wait);
__set_current_state(TASK_RUNNING);
spin_unlock_irq(&counter->waitq.lock);

if (usrdata->len + irqdata->len < count)

mutex_lock(&counter->mutex);

/* Drain pending data first: */
res = perf_copy_usrdata(usrdata, buf, count);
if (res < 0 || res == count)

/* Switch irq buffer: */
usrdata = perf_switch_irq_data(counter);
if (perf_copy_usrdata(usrdata, buf + res, count - res) < 0) {

mutex_unlock(&counter->mutex);
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
struct perf_counter *counter = file->private_data;

switch (counter->record_type) {
case PERF_RECORD_SIMPLE:
return perf_read_hw(counter, buf, count);

case PERF_RECORD_IRQ:
case PERF_RECORD_GROUP:
return perf_read_irq_data(counter, buf, count,
			  file->f_flags & O_NONBLOCK);
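/*
 * Note: perf_poll() below reports the counter fd as readable when either
 * data buffer holds bytes, so poll()/select() wake up as soon as there is
 * something for read() to consume.
 */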
static unsigned int perf_poll(struct file *file, poll_table *wait)
struct perf_counter *counter = file->private_data;
unsigned int events = 0;

poll_wait(file, &counter->waitq, wait);

spin_lock_irqsave(&counter->waitq.lock, flags);
if (counter->usrdata->len || counter->irqdata->len)
spin_unlock_irqrestore(&counter->waitq.lock, flags);

static const struct file_operations perf_fops = {
.release = perf_release,
 * Allocate and initialize a counter structure
static struct perf_counter *
perf_counter_alloc(u32 hw_event_period, int cpu, u32 record_type)
struct perf_counter *counter = kzalloc(sizeof(*counter), GFP_KERNEL);

mutex_init(&counter->mutex);
INIT_LIST_HEAD(&counter->list);
init_waitqueue_head(&counter->waitq);

counter->irqdata = &counter->data[0];
counter->usrdata = &counter->data[1];

counter->record_type = record_type;
counter->__irq_period = hw_event_period;
counter->wakeup_pending = 0;

 * sys_perf_counter_open - open a performance counter and associate it with a task
 * @hw_event_type: event type for monitoring/sampling...
sys_perf_counter_open(u32 hw_event_type,
struct perf_counter_context *ctx;
struct perf_counter *counter;

ctx = find_get_context(pid, cpu);

counter = perf_counter_alloc(hw_event_period, cpu, record_type);
goto err_put_context;

ret = hw_perf_counter_init(counter, hw_event_type);
goto err_free_put_context;

perf_install_in_context(ctx, counter, cpu);

ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
goto err_remove_free_put_context;
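/*
 * Note: as the label names suggest, the error paths below unwind in
 * reverse order of setup: detach the counter from its context, free the
 * counter, and finally drop the context reference taken above.
 */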
err_remove_free_put_context:
mutex_lock(&counter->mutex);
perf_remove_from_context(counter);
mutex_unlock(&counter->mutex);

err_free_put_context:

static void __cpuinit perf_init_cpu(int cpu)
struct perf_cpu_context *ctx;

ctx = &per_cpu(perf_cpu_context, cpu);
spin_lock_init(&ctx->ctx.lock);
INIT_LIST_HEAD(&ctx->ctx.counters);

mutex_lock(&perf_resource_mutex);
ctx->max_pertask = perf_max_counters - perf_reserved_percpu;
mutex_unlock(&perf_resource_mutex);

hw_perf_counter_setup();
#ifdef CONFIG_HOTPLUG_CPU
static void __perf_exit_cpu(void *info)
struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
struct perf_counter_context *ctx = &cpuctx->ctx;
struct perf_counter *counter, *tmp;

list_for_each_entry_safe(counter, tmp, &ctx->counters, list)
__perf_remove_from_context(counter);

static void perf_exit_cpu(int cpu)
smp_call_function_single(cpu, __perf_exit_cpu, NULL, 1);
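/*
 * Note: perf_exit_cpu() runs __perf_exit_cpu() on the departing CPU so
 * every counter still attached to its per-CPU context is detached before
 * the CPU goes away; the CPU_DOWN_PREPARE notifier cases below hook this
 * into hot-unplug, and without CONFIG_HOTPLUG_CPU it is a no-op stub.
 */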
static inline void perf_exit_cpu(int cpu) { }

perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
unsigned int cpu = (long)hcpu;

case CPU_UP_PREPARE_FROZEN:
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:

static struct notifier_block __cpuinitdata perf_cpu_nb = {
.notifier_call = perf_cpu_notify,

static int __init perf_counter_init(void)
perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
		(void *)(long)smp_processor_id());
register_cpu_notifier(&perf_cpu_nb);

early_initcall(perf_counter_init);
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
return sprintf(buf, "%d\n", perf_reserved_percpu);

perf_set_reserve_percpu(struct sysdev_class *class,
struct perf_cpu_context *cpuctx;

err = strict_strtoul(buf, 10, &val);
if (val > perf_max_counters)

mutex_lock(&perf_resource_mutex);
perf_reserved_percpu = val;
for_each_online_cpu(cpu) {
cpuctx = &per_cpu(perf_cpu_context, cpu);
spin_lock_irq(&cpuctx->ctx.lock);
mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
	  perf_max_counters - perf_reserved_percpu);
cpuctx->max_pertask = mpt;
spin_unlock_irq(&cpuctx->ctx.lock);
mutex_unlock(&perf_resource_mutex);
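/*
 * Note: writing the reserve_percpu sysfs attribute above re-evaluates
 * max_pertask on every online CPU, so the requested number of hardware
 * counters stays free for per-CPU (system-wide) counters from then on.
 */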
static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
return sprintf(buf, "%d\n", perf_overcommit);

perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
err = strict_strtoul(buf, 10, &val);

mutex_lock(&perf_resource_mutex);
perf_overcommit = val;
mutex_unlock(&perf_resource_mutex);

static SYSDEV_CLASS_ATTR(
		perf_show_reserve_percpu,
		perf_set_reserve_percpu

static SYSDEV_CLASS_ATTR(
		perf_show_overcommit,

static struct attribute *perfclass_attrs[] = {
&attr_reserve_percpu.attr,
&attr_overcommit.attr,

static struct attribute_group perfclass_attr_group = {
.attrs = perfclass_attrs,
.name = "perf_counters",

static int __init perf_counter_sysfs_init(void)
return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
			  &perfclass_attr_group);

device_initcall(perf_counter_sysfs_init);