perf evlist: Use cpu_map__nr() helper
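For context, cpu_map__nr() is a small helper in tools/perf/util/cpumap.h that falls back to a single CPU when no cpu_map has been set up; roughly (a sketch based on the cpumap.h of this era, see that header for the exact definition):

    static inline int cpu_map__nr(const struct cpu_map *map)
    {
            return map ? map->nr : 1;
    }

That NULL fallback is why the out_err path at the bottom of this diff can drop its open-coded "evlist->cpus ? evlist->cpus->nr : 1" ternary.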
[linux-3.10.git] / tools/perf/util/evlist.c
index 705293489e3c319c0b13ec3f5f32a3c1fd1fe5ed..a482547495b64bb11c6df8fa77668eb84e7c0de7 100644
@@ -7,7 +7,7 @@
  * Released under the GPL v2. (and only v2, not any later version)
  */
 #include "util.h"
-#include "debugfs.h"
+#include <lk/debugfs.h>
 #include <poll.h>
 #include "cpumap.h"
 #include "thread_map.h"
@@ -38,21 +38,26 @@ void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
        evlist->workload.pid = -1;
 }
 
-struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
-                                    struct thread_map *threads)
+struct perf_evlist *perf_evlist__new(void)
 {
        struct perf_evlist *evlist = zalloc(sizeof(*evlist));
 
        if (evlist != NULL)
-               perf_evlist__init(evlist, cpus, threads);
+               perf_evlist__init(evlist, NULL, NULL);
 
        return evlist;
 }
 
-void perf_evlist__config_attrs(struct perf_evlist *evlist,
-                              struct perf_record_opts *opts)
+void perf_evlist__config(struct perf_evlist *evlist,
+                       struct perf_record_opts *opts)
 {
        struct perf_evsel *evsel;
+       /*
+        * Set the evsel leader links before we configure attributes,
+        * since some might depend on this info.
+        */
+       if (opts->group)
+               perf_evlist__set_leader(evlist);
 
        if (evlist->cpus->map[0] < 0)
                opts->no_inherit = true;
@@ -61,7 +66,7 @@ void perf_evlist__config_attrs(struct perf_evlist *evlist,
                perf_evsel__config(evsel, opts);
 
                if (evlist->nr_entries > 1)
-                       evsel->attr.sample_type |= PERF_SAMPLE_ID;
+                       perf_evsel__set_sample_id(evsel);
        }
 }
 
@@ -111,18 +116,21 @@ void __perf_evlist__set_leader(struct list_head *list)
        struct perf_evsel *evsel, *leader;
 
        leader = list_entry(list->next, struct perf_evsel, node);
-       leader->leader = NULL;
+       evsel = list_entry(list->prev, struct perf_evsel, node);
+
+       leader->nr_members = evsel->idx - leader->idx + 1;
 
        list_for_each_entry(evsel, list, node) {
-               if (evsel != leader)
-                       evsel->leader = leader;
+               evsel->leader = leader;
        }
 }
 
 void perf_evlist__set_leader(struct perf_evlist *evlist)
 {
-       if (evlist->nr_entries)
+       if (evlist->nr_entries) {
+               evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
                __perf_evlist__set_leader(&evlist->entries);
+       }
 }
 
 int perf_evlist__add_default(struct perf_evlist *evlist)
@@ -220,9 +228,9 @@ void perf_evlist__disable(struct perf_evlist *evlist)
        int cpu, thread;
        struct perf_evsel *pos;
 
-       for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+       for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
-                       if (perf_evsel__is_group_member(pos))
+                       if (!perf_evsel__is_group_leader(pos))
                                continue;
                        for (thread = 0; thread < evlist->threads->nr; thread++)
                                ioctl(FD(pos, cpu, thread),
@@ -238,7 +246,7 @@ void perf_evlist__enable(struct perf_evlist *evlist)
 
        for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
-                       if (perf_evsel__is_group_member(pos))
+                       if (!perf_evsel__is_group_leader(pos))
                                continue;
                        for (thread = 0; thread < evlist->threads->nr; thread++)
                                ioctl(FD(pos, cpu, thread),
@@ -366,7 +374,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
                if ((old & md->mask) + size != ((old + size) & md->mask)) {
                        unsigned int offset = old;
                        unsigned int len = min(sizeof(*event), size), cpy;
-                       void *dst = &evlist->event_copy;
+                       void *dst = &md->event_copy;
 
                        do {
                                cpy = min(md->mask + 1 - (offset & md->mask), len);
@@ -376,7 +384,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
                                len -= cpy;
                        } while (len);
 
-                       event = &evlist->event_copy;
+                       event = &md->event_copy;
                }
 
                old += size;
@@ -435,7 +443,7 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
        struct perf_evsel *evsel;
        int cpu, thread;
 
-       for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+       for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
                int output = -1;
 
                for (thread = 0; thread < evlist->threads->nr; thread++) {
@@ -462,7 +470,7 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
        return 0;
 
 out_unmap:
-       for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+       for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
                if (evlist->mmap[cpu].base != NULL) {
                        munmap(evlist->mmap[cpu].base, evlist->mmap_len);
                        evlist->mmap[cpu].base = NULL;
@@ -717,7 +725,7 @@ int perf_evlist__open(struct perf_evlist *evlist)
 
        return 0;
 out_err:
-       ncpus = evlist->cpus ? evlist->cpus->nr : 1;
+       ncpus = cpu_map__nr(evlist->cpus);
        nthreads = evlist->threads ? evlist->threads->nr : 1;
 
        list_for_each_entry_reverse(evsel, &evlist->entries, node)
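
Note on the perf_evsel__is_group_member() -> !perf_evsel__is_group_leader() flips in perf_evlist__disable()/perf_evlist__enable(): once __perf_evlist__set_leader() points every evsel's ->leader at the group leader (the leader included), leadership becomes a simple pointer comparison; roughly (per the tools/perf/util/evsel.h of this era, the exact definition may differ):

    static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel)
    {
            return evsel->leader == evsel;
    }

With that convention an ungrouped evsel is its own leader, so "if (!perf_evsel__is_group_leader(pos)) continue;" still selects the same set of events as the old is_group_member() check.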