/*
 * builtin-test.c
 *
 * Builtin regression testing command: an ever-growing number of sanity tests
 */
#include "builtin.h"

#include "util/cache.h"
#include "util/debug.h"
#include "util/debugfs.h"
#include "util/evlist.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/symbol.h"
#include "util/thread_map.h"
#include "../../include/linux/hw_breakpoint.h"

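/*
 * Page size of the running system, set in __cmd_test(). Used below to bound
 * the acceptable end-address skew between vmlinux and kallsyms symbols.
 */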
static long page_size;

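/*
 * Symbol filter passed to machine__load_vmlinux_path(): marks each symbol it
 * is offered as visited, using the per-symbol private area allocated via
 * symbol_conf.priv_size in cmd_test().
 */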
static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym)
{
        bool *visited = symbol__priv(sym);
        *visited = true;
        return 0;
}

static int test__vmlinux_matches_kallsyms(void)
{
        int err = -1;
        struct rb_node *nd;
        struct symbol *sym;
        struct map *kallsyms_map, *vmlinux_map;
        struct machine kallsyms, vmlinux;
        enum map_type type = MAP__FUNCTION;
        struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };

        /*
         * Step 1:
         *
         * Init the machines that will hold the kernel and modules, obtained
         * both from vmlinux + .ko files and from /proc/kallsyms split by module.
         */
        machine__init(&kallsyms, "", HOST_KERNEL_ID);
        machine__init(&vmlinux, "", HOST_KERNEL_ID);

        /*
         * Step 2:
         *
         * Create the kernel maps for kallsyms and the DSO where we will then
         * load /proc/kallsyms. Also create the modules maps from /proc/modules
         * and find the .ko files that match them in /lib/modules/`uname -r`/.
         */
        if (machine__create_kernel_maps(&kallsyms) < 0) {
                pr_debug("machine__create_kernel_maps ");
                return -1;
        }

        /*
         * Step 3:
         *
         * Load and split /proc/kallsyms into multiple maps, one per module.
         */
        if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
                pr_debug("machine__load_kallsyms ");
                goto out;
        }


        /*
         * Step 4:
         *
         * kallsyms will be internally on-demand sorted by name so that we can
         * find the reference relocation symbol, i.e. the symbol we will use
         * to see if the running kernel was relocated, by checking whether it
         * has the same value in the vmlinux file we load.
         */
        kallsyms_map = machine__kernel_map(&kallsyms, type);

        sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
        if (sym == NULL) {
                pr_debug("map__find_symbol_by_name ");
                goto out;
        }

        ref_reloc_sym.addr = sym->start;

        /*
         * Step 5:
         *
         * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
         */
        if (machine__create_kernel_maps(&vmlinux) < 0) {
                pr_debug("machine__create_kernel_maps ");
                goto out;
        }

        vmlinux_map = machine__kernel_map(&vmlinux, type);
        map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;

        /*
         * Step 6:
         *
         * Locate a vmlinux file in the vmlinux path that has a buildid that
         * matches the one of the running kernel.
         *
         * While doing that, look for the ref reloc symbol; if we find it we'll
         * have its ref_reloc_symbol.unrelocated_addr, and then
         * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
         * to fix up the symbols.
         */
        if (machine__load_vmlinux_path(&vmlinux, type,
                                       vmlinux_matches_kallsyms_filter) <= 0) {
                pr_debug("machine__load_vmlinux_path ");
                goto out;
        }

        err = 0;
        /*
         * Step 7:
         *
         * Now look at the symbols in the vmlinux DSO and check if we find all
         * of them in the kallsyms DSO. For the ones that are in both, check
         * their names and end addresses too.
         */
        for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
                struct symbol *pair, *first_pair;
                bool backwards = true;

                sym  = rb_entry(nd, struct symbol, rb_node);

                if (sym->start == sym->end)
                        continue;

                first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
                pair = first_pair;

                if (pair && pair->start == sym->start) {
next_pair:
                        if (strcmp(sym->name, pair->name) == 0) {
                                /*
                                 * kallsyms doesn't have symbol ends, so we
                                 * set each end to the next symbol's start - 1.
                                 * In some cases this is off by up to a page:
                                 * trace_kmalloc, when this code was being
                                 * developed, was one such example, 2106 bytes
                                 * off the real size. More than a page and we
                                 * _really_ have a problem.
                                 */
                                s64 skew = sym->end - pair->end;
                                if (llabs(skew) < page_size)
                                        continue;

                                pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
                                         sym->start, sym->name, sym->end, pair->end);
                        } else {
                                struct rb_node *nnd;
detour:
                                nnd = backwards ? rb_prev(&pair->rb_node) :
                                                  rb_next(&pair->rb_node);
                                if (nnd) {
                                        struct symbol *next = rb_entry(nnd, struct symbol, rb_node);

                                        if (next->start == sym->start) {
                                                pair = next;
                                                goto next_pair;
                                        }
                                }

                                if (backwards) {
                                        backwards = false;
                                        pair = first_pair;
                                        goto detour;
                                }

                                pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
                                         sym->start, sym->name, pair->name);
                        }
                } else
                        pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);

                err = -1;
        }

        if (!verbose)
                goto out;

        pr_info("Maps only in vmlinux:\n");

        for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
                /*
                 * For the kernel map, kallsyms' DSO name is always
                 * "[kernel.kallsyms]", while vmlinux's is the path of the
                 * vmlinux file being used, so use the short name, which is
                 * less descriptive but the same in both cases ("[kernel]").
                 */
                pair = map_groups__find_by_name(&kallsyms.kmaps, type,
                                                (pos->dso->kernel ?
                                                        pos->dso->short_name :
                                                        pos->dso->name));
                if (pair)
                        pair->priv = 1;
                else
                        map__fprintf(pos, stderr);
        }

        pr_info("Maps in vmlinux with a different name in kallsyms:\n");

        for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node), *pair;

                pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
                if (pair == NULL || pair->priv)
                        continue;

                if (pair->start == pos->start) {
                        pair->priv = 1;
                        pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
                                pos->start, pos->end, pos->pgoff, pos->dso->name);
                        if (pos->pgoff != pair->pgoff || pos->end != pair->end)
                                pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
                                        pair->start, pair->end, pair->pgoff);
                        pr_info(" %s\n", pair->dso->name);
                }
        }


        pr_info("Maps only in kallsyms:\n");

        for (nd = rb_first(&kallsyms.kmaps.maps[type]);
             nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node);

                if (!pos->priv)
                        map__fprintf(pos, stderr);
        }
out:
        return err;
}

#include "util/cpumap.h"
#include "util/evsel.h"
#include <sys/types.h>

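/*
 * Read the numeric id of a syscall tracepoint from debugfs, i.e. from
 * <tracing_events_path>/syscalls/<evname>/id. Returns the id, or -1 on any
 * failure.
 */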
static int trace_event__id(const char *evname)
{
        char *filename;
        int err = -1, fd;

        if (asprintf(&filename,
                     "%s/syscalls/%s/id",
                     tracing_events_path, evname) < 0)
                return -1;

        fd = open(filename, O_RDONLY);
        if (fd >= 0) {
                char id[16];
                ssize_t n = read(fd, id, sizeof(id) - 1);
                if (n > 0) {
                        id[n] = '\0'; /* ensure atoi() sees a terminated string */
                        err = atoi(id);
                }
                close(fd);
        }

        free(filename);
        return err;
}

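/*
 * Sanity check for a single tracepoint counter: count our own open(2) calls
 * via the syscalls:sys_enter_open tracepoint, on this thread only, and verify
 * that the counter matches the number of calls we actually made.
 */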
static int test__open_syscall_event(void)
{
        int err = -1, fd;
        struct thread_map *threads;
        struct perf_evsel *evsel;
        struct perf_event_attr attr;
        unsigned int nr_open_calls = 111, i;
        int id = trace_event__id("sys_enter_open");

        if (id < 0) {
                pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
                return -1;
        }

        threads = thread_map__new(-1, getpid());
        if (threads == NULL) {
                pr_debug("thread_map__new\n");
                return -1;
        }

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.config = id;
        evsel = perf_evsel__new(&attr, 0);
        if (evsel == NULL) {
                pr_debug("perf_evsel__new\n");
                goto out_thread_map_delete;
        }

        if (perf_evsel__open_per_thread(evsel, threads, false, NULL) < 0) {
                pr_debug("failed to open counter: %s, "
                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                         strerror(errno));
                goto out_evsel_delete;
        }

        for (i = 0; i < nr_open_calls; ++i) {
                fd = open("/etc/passwd", O_RDONLY);
                close(fd);
        }

        if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
                pr_debug("perf_evsel__read_on_cpu\n");
                goto out_close_fd;
        }

        if (evsel->counts->cpu[0].val != nr_open_calls) {
                pr_debug("perf_evsel__read_on_cpu: expected to intercept %u calls, got %" PRIu64 "\n",
                         nr_open_calls, evsel->counts->cpu[0].val);
                goto out_close_fd;
        }

        err = 0;
out_close_fd:
        perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
        perf_evsel__delete(evsel);
out_thread_map_delete:
        thread_map__delete(threads);
        return err;
}

#include <sched.h>

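/*
 * Same idea as test__open_syscall_event(), but open the counter on every
 * online CPU, pin ourselves to each CPU in turn to issue a distinct number of
 * open(2) calls there, and then verify each per-CPU count separately.
 */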
static int test__open_syscall_event_on_all_cpus(void)
{
        int err = -1, fd, cpu;
        struct thread_map *threads;
        struct cpu_map *cpus;
        struct perf_evsel *evsel;
        struct perf_event_attr attr;
        unsigned int nr_open_calls = 111, i;
        cpu_set_t cpu_set;
        int id = trace_event__id("sys_enter_open");

        if (id < 0) {
                pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
                return -1;
        }

        threads = thread_map__new(-1, getpid());
        if (threads == NULL) {
                pr_debug("thread_map__new\n");
                return -1;
        }

        cpus = cpu_map__new(NULL);
        if (cpus == NULL) {
                pr_debug("cpu_map__new\n");
                goto out_thread_map_delete;
        }

        CPU_ZERO(&cpu_set);

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.config = id;
        evsel = perf_evsel__new(&attr, 0);
        if (evsel == NULL) {
                pr_debug("perf_evsel__new\n");
                goto out_thread_map_delete;
        }

        if (perf_evsel__open(evsel, cpus, threads, false, NULL) < 0) {
                pr_debug("failed to open counter: %s, "
                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                         strerror(errno));
                goto out_evsel_delete;
        }

        for (cpu = 0; cpu < cpus->nr; ++cpu) {
                unsigned int ncalls = nr_open_calls + cpu;
                /*
                 * XXX eventually lift this restriction in a way that
                 * keeps perf building on older glibc installations
                 * without CPU_ALLOC. 1024 cpus in 2010 still seems
                 * a reasonable upper limit tho :-)
                 */
                if (cpus->map[cpu] >= CPU_SETSIZE) {
                        pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
                        continue;
                }

                CPU_SET(cpus->map[cpu], &cpu_set);
                if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
                        pr_debug("sched_setaffinity() failed on CPU %d: %s ",
                                 cpus->map[cpu],
                                 strerror(errno));
                        goto out_close_fd;
                }
                for (i = 0; i < ncalls; ++i) {
                        fd = open("/etc/passwd", O_RDONLY);
                        close(fd);
                }
                CPU_CLR(cpus->map[cpu], &cpu_set);
        }

        /*
         * Here we need to explicitly preallocate the counts: auto allocation
         * would size them for just one CPU, since we start reading with CPU 0.
         */
        if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
                pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
                goto out_close_fd;
        }

        err = 0;

        for (cpu = 0; cpu < cpus->nr; ++cpu) {
                unsigned int expected;

                if (cpus->map[cpu] >= CPU_SETSIZE)
                        continue;

                if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
                        pr_debug("perf_evsel__read_on_cpu\n");
                        err = -1;
                        break;
                }

                expected = nr_open_calls + cpu;
                if (evsel->counts->cpu[cpu].val != expected) {
                        pr_debug("perf_evsel__read_on_cpu: expected to intercept %u calls on cpu %d, got %" PRIu64 "\n",
                                 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
                        err = -1;
                }
        }

out_close_fd:
        perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
        perf_evsel__delete(evsel);
out_thread_map_delete:
        thread_map__delete(threads);
        return err;
}

/*
 * This test generates random numbers of calls to some getpid-style syscalls,
 * then establishes an mmap for a group of events that are created to monitor
 * those syscalls.
 *
 * It receives the events via the mmap and uses the PERF_SAMPLE_ID generated
 * sample.id field to map each event back to its respective perf_evsel
 * instance.
 *
 * Then it checks if the number of syscalls reported as perf events by
 * the kernel corresponds to the number of syscalls made.
 */
static int test__basic_mmap(void)
{
        int err = -1;
        union perf_event *event;
        struct thread_map *threads;
        struct cpu_map *cpus;
        struct perf_evlist *evlist;
        struct perf_event_attr attr = {
                .type           = PERF_TYPE_TRACEPOINT,
                .read_format    = PERF_FORMAT_ID,
                .sample_type    = PERF_SAMPLE_ID,
                .watermark      = 0,
        };
        cpu_set_t cpu_set;
        const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
                                        "getpgid", };
        pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
                                      (void *)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
        int ids[nsyscalls];
        unsigned int nr_events[nsyscalls],
                     expected_nr_events[nsyscalls], i, j;
        struct perf_evsel *evsels[nsyscalls], *evsel;
        int sample_size = __perf_evsel__sample_size(attr.sample_type);

        for (i = 0; i < nsyscalls; ++i) {
                char name[64];

                snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
                ids[i] = trace_event__id(name);
                if (ids[i] < 0) {
                        pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
                        return -1;
                }
                nr_events[i] = 0;
                expected_nr_events[i] = random() % 257;
        }

        threads = thread_map__new(-1, getpid());
        if (threads == NULL) {
                pr_debug("thread_map__new\n");
                return -1;
        }

        cpus = cpu_map__new(NULL);
        if (cpus == NULL) {
                pr_debug("cpu_map__new\n");
                goto out_free_threads;
        }

        CPU_ZERO(&cpu_set);
        CPU_SET(cpus->map[0], &cpu_set);
        if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
                pr_debug("sched_setaffinity() failed on CPU %d: %s ",
                         cpus->map[0], strerror(errno));
                goto out_free_cpus;
        }

        evlist = perf_evlist__new(cpus, threads);
        if (evlist == NULL) {
                pr_debug("perf_evlist__new\n");
                goto out_free_cpus;
        }

        /* anonymous union fields, can't be initialized above */
        attr.wakeup_events = 1;
        attr.sample_period = 1;

        for (i = 0; i < nsyscalls; ++i) {
                attr.config = ids[i];
                evsels[i] = perf_evsel__new(&attr, i);
                if (evsels[i] == NULL) {
                        pr_debug("perf_evsel__new\n");
                        goto out_free_evlist;
                }

                perf_evlist__add(evlist, evsels[i]);

                if (perf_evsel__open(evsels[i], cpus, threads, false, NULL) < 0) {
                        pr_debug("failed to open counter: %s, "
                                 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                                 strerror(errno));
                        goto out_close_fd;
                }
        }

        if (perf_evlist__mmap(evlist, 128, true) < 0) {
                pr_debug("failed to mmap events: %d (%s)\n", errno,
                         strerror(errno));
                goto out_close_fd;
        }

        for (i = 0; i < nsyscalls; ++i)
                for (j = 0; j < expected_nr_events[i]; ++j) {
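                        /*
                         * Assign and touch the result, presumably just to
                         * keep the compiler from warning about an ignored
                         * return value.
                         */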
                        int foo = syscalls[i]();
                        ++foo;
                }

        while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
                struct perf_sample sample;

                if (event->header.type != PERF_RECORD_SAMPLE) {
                        pr_debug("unexpected %s event\n",
                                 perf_event__name(event->header.type));
                        goto out_munmap;
                }

                err = perf_event__parse_sample(event, attr.sample_type, sample_size,
                                               false, &sample, false);
                if (err) {
                        pr_err("Can't parse sample, err = %d\n", err);
                        goto out_munmap;
                }

                evsel = perf_evlist__id2evsel(evlist, sample.id);
                if (evsel == NULL) {
                        pr_debug("event with id %" PRIu64
                                 " doesn't map to an evsel\n", sample.id);
                        goto out_munmap;
                }
                nr_events[evsel->idx]++;
        }

        list_for_each_entry(evsel, &evlist->entries, node) {
                if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
                        pr_debug("expected %u %s events, got %u\n",
                                 expected_nr_events[evsel->idx],
                                 event_name(evsel), nr_events[evsel->idx]);
                        goto out_munmap;
                }
        }

        err = 0;
out_munmap:
        perf_evlist__munmap(evlist);
out_close_fd:
        for (i = 0; i < nsyscalls; ++i)
                perf_evsel__close_fd(evsels[i], 1, threads->nr);
out_free_evlist:
        perf_evlist__delete(evlist);
out_free_cpus:
        cpu_map__delete(cpus);
out_free_threads:
        thread_map__delete(threads);
        return err;
#undef nsyscalls
}

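/*
 * Fail the current test function with a debug message pointing at the exact
 * file and line where the condition does not hold.
 */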
#define TEST_ASSERT_VAL(text, cond) \
do { \
        if (!(cond)) { \
                pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
                return -1; \
        } \
} while (0)

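/*
 * Each test__checkevent_*() function below validates the perf_event_attr
 * that parse_events() produced for one event specification syntax; the
 * specifications themselves are listed in test__events[] further down.
 */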
static int test__checkevent_tracepoint(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
        TEST_ASSERT_VAL("wrong sample_type",
                (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) ==
                evsel->attr.sample_type);
        TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
        return 0;
}

static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1);

        list_for_each_entry(evsel, &evlist->entries, node) {
                TEST_ASSERT_VAL("wrong type",
                        PERF_TYPE_TRACEPOINT == evsel->attr.type);
                TEST_ASSERT_VAL("wrong sample_type",
                        (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU)
                        == evsel->attr.sample_type);
                TEST_ASSERT_VAL("wrong sample_period",
                        1 == evsel->attr.sample_period);
        }
        return 0;
}

static int test__checkevent_raw(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
        return 0;
}

static int test__checkevent_numeric(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
        return 0;
}

static int test__checkevent_symbolic_name(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config",
                        PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config);
        return 0;
}

static int test__checkevent_symbolic_alias(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config",
                        PERF_COUNT_SW_PAGE_FAULTS == evsel->attr.config);
        return 0;
}

static int test__checkevent_genhw(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config", (1 << 16) == evsel->attr.config);
        return 0;
}

static int test__checkevent_breakpoint(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
        TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) ==
                                         evsel->attr.bp_type);
        TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_4 ==
                                        evsel->attr.bp_len);
        return 0;
}

static int test__checkevent_breakpoint_x(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
        TEST_ASSERT_VAL("wrong bp_type",
                        HW_BREAKPOINT_X == evsel->attr.bp_type);
        TEST_ASSERT_VAL("wrong bp_len", sizeof(long) == evsel->attr.bp_len);
        return 0;
}

static int test__checkevent_breakpoint_r(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type",
                        PERF_TYPE_BREAKPOINT == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
        TEST_ASSERT_VAL("wrong bp_type",
                        HW_BREAKPOINT_R == evsel->attr.bp_type);
        TEST_ASSERT_VAL("wrong bp_len",
                        HW_BREAKPOINT_LEN_4 == evsel->attr.bp_len);
        return 0;
}

static int test__checkevent_breakpoint_w(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type",
                        PERF_TYPE_BREAKPOINT == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
        TEST_ASSERT_VAL("wrong bp_type",
                        HW_BREAKPOINT_W == evsel->attr.bp_type);
        TEST_ASSERT_VAL("wrong bp_len",
                        HW_BREAKPOINT_LEN_4 == evsel->attr.bp_len);
        return 0;
}

static struct test__event_st {
        const char *name;
        __u32 type;
        int (*check)(struct perf_evlist *evlist);
} test__events[] = {
        {
                .name  = "syscalls:sys_enter_open",
                .check = test__checkevent_tracepoint,
        },
        {
                .name  = "syscalls:*",
                .check = test__checkevent_tracepoint_multi,
        },
        {
                .name  = "r1",
                .check = test__checkevent_raw,
        },
        {
                .name  = "1:1",
                .check = test__checkevent_numeric,
        },
        {
                .name  = "instructions",
                .check = test__checkevent_symbolic_name,
        },
        {
                .name  = "faults",
                .check = test__checkevent_symbolic_alias,
        },
        {
                .name  = "L1-dcache-load-miss",
                .check = test__checkevent_genhw,
        },
        {
                .name  = "mem:0",
                .check = test__checkevent_breakpoint,
        },
        {
                .name  = "mem:0:x",
                .check = test__checkevent_breakpoint_x,
        },
        {
                .name  = "mem:0:r",
                .check = test__checkevent_breakpoint_r,
        },
        {
                .name  = "mem:0:w",
                .check = test__checkevent_breakpoint_w,
        },
};

#define TEST__EVENTS_CNT ARRAY_SIZE(test__events)

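/*
 * Parse each event specification in test__events[] into a fresh evlist and
 * run the corresponding check on the resulting perf_event_attr.
 */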
static int test__parse_events(void)
{
        struct perf_evlist *evlist;
        u_int i;
        int ret = 0;

        for (i = 0; i < TEST__EVENTS_CNT; i++) {
                struct test__event_st *e = &test__events[i];

                evlist = perf_evlist__new(NULL, NULL);
                if (evlist == NULL)
                        break;

                ret = parse_events(evlist, e->name, 0);
                if (ret) {
                        pr_debug("failed to parse event '%s', err %d\n",
                                 e->name, ret);
                        perf_evlist__delete(evlist);
                        break;
                }

                ret = e->check(evlist);
                perf_evlist__delete(evlist);
                if (ret)
                        break;
        }

        return ret;
}

static struct test {
        const char *desc;
        int (*func)(void);
} tests[] = {
        {
                .desc = "vmlinux symtab matches kallsyms",
                .func = test__vmlinux_matches_kallsyms,
        },
        {
                .desc = "detect open syscall event",
                .func = test__open_syscall_event,
        },
        {
                .desc = "detect open syscall event on all cpus",
                .func = test__open_syscall_event_on_all_cpus,
        },
        {
                .desc = "read samples using the mmap interface",
                .func = test__basic_mmap,
        },
        {
                .desc = "parse events tests",
                .func = test__parse_events,
        },
        {
                .func = NULL,
        },
};

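/*
 * Run every test in tests[] in order, printing a one-line Ok/FAILED! verdict
 * per test; the pr_debug() start/end markers appear in verbose mode.
 */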
static int __cmd_test(void)
{
        int i = 0;

        page_size = sysconf(_SC_PAGE_SIZE);

        while (tests[i].func) {
                int err;
                pr_info("%2d: %s:", i + 1, tests[i].desc);
                pr_debug("\n--- start ---\n");
                err = tests[i].func();
                pr_debug("---- end ----\n%s:", tests[i].desc);
                pr_info(" %s\n", err ? "FAILED!" : "Ok");
                ++i;
        }

        return 0;
}

static const char * const test_usage[] = {
        "perf test [<options>]",
        NULL,
};

static const struct option test_options[] = {
        OPT_INTEGER('v', "verbose", &verbose,
                    "be more verbose (show symbol address, etc)"),
        OPT_END()
};

int cmd_test(int argc, const char **argv, const char *prefix __used)
{
        argc = parse_options(argc, argv, test_options, test_usage, 0);
        if (argc)
                usage_with_options(test_usage, test_options);

        symbol_conf.priv_size = sizeof(int);
        symbol_conf.sort_by_name = true;
        symbol_conf.try_vmlinux_path = true;

        if (symbol__init() < 0)
                return -1;

        setup_pager();

        return __cmd_test();
}