hw-breakpoint: Attribute authorship of hw-breakpoint related files
[linux-2.6.git] kernel/hw_breakpoint.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
        unsigned int pinned;
        unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

/*
 * Report the maximum number of pinned breakpoints a task
 * can have on a given cpu
 */
static unsigned int max_task_bp_pinned(int cpu)
{
        int i;
        unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu);

        for (i = HBP_NUM - 1; i >= 0; i--) {
                if (tsk_pinned[i] > 0)
                        return i + 1;
        }

        return 0;
}
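
/*
 * Illustrative sketch: task_bp_pinned[n] counts how many tasks on this
 * cpu currently pin n + 1 breakpoints.  Assuming HBP_NUM == 4, with one
 * task pinning 3 breakpoints and two tasks pinning 1 each:
 *
 *      task_bp_pinned = { 2, 0, 1, 0 };
 *
 * max_task_bp_pinned() scans down from index 3, stops at index 2 and
 * returns 2 + 1 = 3: the busiest task on this cpu uses 3 registers.
 */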

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu >= 0) or across all of them (cpu = -1).
 */
static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
{
        if (cpu >= 0) {
                slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
                slots->pinned += max_task_bp_pinned(cpu);
                slots->flexible = per_cpu(nr_bp_flexible, cpu);

                return;
        }

        for_each_online_cpu(cpu) {
                unsigned int nr;

                nr = per_cpu(nr_cpu_bp_pinned, cpu);
                nr += max_task_bp_pinned(cpu);

                if (nr > slots->pinned)
                        slots->pinned = nr;

                nr = per_cpu(nr_bp_flexible, cpu);

                if (nr > slots->flexible)
                        slots->flexible = nr;
        }
}
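
/*
 * Illustrative sketch: in the cpu == -1 case each slot count is the
 * maximum over the online cpus, not the sum.  If cpu0 holds 2 pinned
 * breakpoints and cpu1 holds 3, a breakpoint attached to every cpu
 * must fit on the busiest one, so slots->pinned comes out as 3.
 */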

/*
 * Add/remove a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
{
        int count = 0;
        struct perf_event *bp;
        struct perf_event_context *ctx = tsk->perf_event_ctxp;
        unsigned int *task_bp_pinned;
        struct list_head *list;
        unsigned long flags;

        if (WARN_ONCE(!ctx, "No perf context for this task"))
                return;

        list = &ctx->event_list;

        spin_lock_irqsave(&ctx->lock, flags);

        /*
         * The current breakpoint counter is not included in the list
         * at the open() callback time
         */
        list_for_each_entry(bp, list, event_entry) {
                if (bp->attr.type == PERF_TYPE_BREAKPOINT)
                        count++;
        }

        spin_unlock_irqrestore(&ctx->lock, flags);

        if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
                return;

        task_bp_pinned = per_cpu(task_bp_pinned, cpu);
        if (enable) {
                task_bp_pinned[count]++;
                if (count > 0)
                        task_bp_pinned[count-1]--;
        } else {
                task_bp_pinned[count]--;
                if (count > 0)
                        task_bp_pinned[count-1]++;
        }
}
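
/*
 * Illustrative sketch: enabling a breakpoint for a task that already
 * lists two of them moves that task from the "two breakpoints" bucket
 * to the "three breakpoints" one:
 *
 *      count == 2;
 *      task_bp_pinned[2]++;    (one more task pinning 3 breakpoints)
 *      task_bp_pinned[1]--;    (one less task pinning 2 breakpoints)
 */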

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void toggle_bp_slot(struct perf_event *bp, bool enable)
{
        int cpu = bp->cpu;
        struct task_struct *tsk = bp->ctx->task;

        /* Pinned counter task profiling */
        if (tsk) {
                if (cpu >= 0) {
                        toggle_bp_task_slot(tsk, cpu, enable);
                        return;
                }

                for_each_online_cpu(cpu)
                        toggle_bp_task_slot(tsk, cpu, enable);
                return;
        }

        /* Pinned counter cpu profiling */
        if (enable)
                per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
        else
                per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per-task
 *          breakpoints (for this cpu) plus the number of per-cpu breakpoints
 *          (for this cpu) doesn't cover all the registers.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per-cpu
 *          bp for every cpu and we keep the max one. Same for the per-task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must
 *          keep at least one register (or they will never be scheduled).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(task_bp_pinned, *))) < HBP_NUM
 */
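
/*
 * Worked example, assuming HBP_NUM == 4 debug registers: say cpu0
 * carries 1 cpu-pinned breakpoint, its busiest task pins 2 more and 1
 * flexible counter is present.  fetch_bp_busy_slots() then reports
 * pinned = 1 + 2 = 3 and flexible = 1, so reserve_bp_slot() below
 * refuses a new pinned breakpoint: 3 + !!1 == 4 == HBP_NUM, and the
 * flexible counter must keep one register for itself.
 */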
int reserve_bp_slot(struct perf_event *bp)
{
        struct bp_busy_slots slots = {0};
        int ret = 0;

        mutex_lock(&nr_bp_mutex);

        fetch_bp_busy_slots(&slots, bp->cpu);

        /* Flexible counters need to keep at least one slot */
        if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
                ret = -ENOSPC;
                goto end;
        }

        toggle_bp_slot(bp, true);

end:
        mutex_unlock(&nr_bp_mutex);

        return ret;
}

void release_bp_slot(struct perf_event *bp)
{
        mutex_lock(&nr_bp_mutex);

        toggle_bp_slot(bp, false);

        mutex_unlock(&nr_bp_mutex);
}


int __register_perf_hw_breakpoint(struct perf_event *bp)
{
        int ret;

        ret = reserve_bp_slot(bp);
        if (ret)
                return ret;

        if (!bp->attr.disabled)
                ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);

        return ret;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
        bp->callback = perf_bp_event;

        return __register_perf_hw_breakpoint(bp);
}

/*
 * Register a breakpoint bound to a task and a given cpu.
 * If cpu is -1, the breakpoint is active for the task on every cpu.
 * If pid is -1, the breakpoint is active for every task on the given
 * cpu.
 */
static struct perf_event *
register_user_hw_breakpoint_cpu(unsigned long addr,
                                int len,
                                int type,
                                perf_callback_t triggered,
                                pid_t pid,
                                int cpu,
                                bool active)
{
        struct perf_event_attr *attr;
        struct perf_event *bp;

        attr = kzalloc(sizeof(*attr), GFP_KERNEL);
        if (!attr)
                return ERR_PTR(-ENOMEM);

        attr->type = PERF_TYPE_BREAKPOINT;
        attr->size = sizeof(*attr);
        attr->bp_addr = addr;
        attr->bp_len = len;
        attr->bp_type = type;
        /*
         * Such breakpoints are used by debuggers to trigger signals when
         * we hit the expected memory op. We can't miss such events, they
         * must be pinned.
         */
        attr->pinned = 1;

        if (!active)
                attr->disabled = 1;

        bp = perf_event_create_kernel_counter(attr, cpu, pid, triggered);
        kfree(attr);

        return bp;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @addr: the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 * @active: should we activate it while registering it
 */
struct perf_event *
register_user_hw_breakpoint(unsigned long addr,
                            int len,
                            int type,
                            perf_callback_t triggered,
                            struct task_struct *tsk,
                            bool active)
{
        return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
                                               tsk->pid, -1, active);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
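
/*
 * Usage sketch (not part of this file): watching 4-byte writes to a
 * variable inside a traced task.  "my_var_addr" and "my_trigger" are
 * placeholders supplied by the caller, the HW_BREAKPOINT_* constants
 * come from <linux/hw_breakpoint.h>, and the two-argument callback
 * signature assumes this tree's perf_callback_t:
 *
 *      static void my_trigger(struct perf_event *bp, void *data)
 *      {
 *              printk(KERN_INFO "%s: breakpoint hit\n", current->comm);
 *      }
 *
 *      struct perf_event *bp;
 *
 *      bp = register_user_hw_breakpoint(my_var_addr, HW_BREAKPOINT_LEN_4,
 *                                       HW_BREAKPOINT_W, my_trigger,
 *                                       tsk, true);
 *      if (IS_ERR(bp))
 *              return PTR_ERR(bp);
 */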

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @addr: the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 * @active: should we activate it while registering it
 */
struct perf_event *
modify_user_hw_breakpoint(struct perf_event *bp,
                          unsigned long addr,
                          int len,
                          int type,
                          perf_callback_t triggered,
                          struct task_struct *tsk,
                          bool active)
{
        /*
         * FIXME: do it without unregistering
         * - We don't want to lose our slot
         * - If the new bp is incorrect, don't lose the older one
         */
        unregister_hw_breakpoint(bp);

        return register_user_hw_breakpoint(addr, len, type, triggered,
                                           tsk, active);
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
        if (!bp)
                return;
        perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

static struct perf_event *
register_kernel_hw_breakpoint_cpu(unsigned long addr,
                                  int len,
                                  int type,
                                  perf_callback_t triggered,
                                  int cpu,
                                  bool active)
{
        return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
                                               -1, cpu, active);
}

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @addr: the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @active: should we activate it while registering it
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event **
register_wide_hw_breakpoint(unsigned long addr,
                            int len,
                            int type,
                            perf_callback_t triggered,
                            bool active)
{
        struct perf_event **cpu_events, **pevent, *bp;
        long err;
        int cpu;

        cpu_events = alloc_percpu(typeof(*cpu_events));
        if (!cpu_events)
                return ERR_PTR(-ENOMEM);

        for_each_possible_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                bp = register_kernel_hw_breakpoint_cpu(addr, len, type,
                                        triggered, cpu, active);

                *pevent = bp;

                if (IS_ERR(bp) || !bp) {
                        err = PTR_ERR(bp);
                        goto fail;
                }
        }

        return cpu_events;

fail:
        for_each_possible_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                if (IS_ERR(*pevent) || !*pevent)
                        break;
                unregister_hw_breakpoint(*pevent);
        }
        free_percpu(cpu_events);
        /* return the error if any */
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
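
/*
 * Usage sketch (not part of this file): a kernel-wide watchpoint on a
 * kernel variable, in the spirit of the ksym tracer.  "some_var" and
 * "my_trigger" are placeholders, reusing the callback sketched above:
 *
 *      struct perf_event **wp;
 *
 *      wp = register_wide_hw_breakpoint((unsigned long)&some_var,
 *                                       HW_BREAKPOINT_LEN_4,
 *                                       HW_BREAKPOINT_W, my_trigger,
 *                                       true);
 *      if (IS_ERR(wp))
 *              return PTR_ERR(wp);
 *      ...
 *      unregister_wide_hw_breakpoint(wp);
 */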

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event **cpu_events)
{
        int cpu;
        struct perf_event **pevent;

        for_each_possible_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                unregister_hw_breakpoint(*pevent);
        }
        free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
        .notifier_call = hw_breakpoint_exceptions_notify,
        /* we need to be notified first */
        .priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
        return register_die_notifier(&hw_breakpoint_exceptions_nb);
}
core_initcall(init_hw_breakpoint);


struct pmu perf_ops_bp = {
        .enable         = arch_install_hw_breakpoint,
        .disable        = arch_uninstall_hw_breakpoint,
        .read           = hw_breakpoint_pmu_read,
        .unthrottle     = hw_breakpoint_pmu_unthrottle
};