/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"
#include "util.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

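/*
 * Example (a minimal sketch, error handling elided; "pid" stands in for
 * whatever thread the caller wants to count): build an evlist measuring
 * CPU cycles on all online cpus for one thread:
 *
 *	struct perf_evlist *evlist;
 *
 *	evlist = perf_evlist__new(cpu_map__new(NULL),
 *				  thread_map__new(pid, -1));
 *	if (evlist != NULL && perf_evlist__add_default(evlist) == 0)
 *		... open the counters, then perf_evlist__mmap() ...
 */
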
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;
	return NULL;
}

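/*
 * Example (sketch): demultiplex a mmap'ed record back to its evsel,
 * given the PERF_SAMPLE_ID value already extracted from the record:
 *
 *	struct perf_evsel *evsel = perf_evlist__id2evsel(evlist, sample_id);
 *	if (evsel == NULL)
 *		... unknown id, stale or not one of ours ...
 */
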
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}

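/*
 * Example (sketch): drain one of the maps. In non-overwrite mode the
 * tail is written back for us, so the loop below is all a consumer
 * needs; the returned pointer is only valid until the next call:
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL)
 *		... process event ...
 */
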
void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}

int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = evlist->cpus->nr;
	if (evlist->cpus->map[0] == -1)
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED)
		return -1;

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_read does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	int mask = pages * page_size - 1;
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;
	}

	if (evlist->cpus->map[0] == -1)
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

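/*
 * Example (sketch): map 128 pages per cpu in non-overwrite mode, then
 * block until one of the counters has data; the mmap calls filled in
 * evlist->pollfd via perf_evlist__add_pollfd():
 *
 *	if (perf_evlist__mmap(evlist, 128, false) == 0) {
 *		poll(evlist->pollfd, evlist->nr_fds, -1);
 *		... perf_evlist__mmap_read() each of evlist->nr_mmaps maps ...
 *	}
 */
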
int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
			     pid_t target_tid, const char *cpu_list)
{
	evlist->threads = thread_map__new(target_pid, target_tid);

	if (evlist->threads == NULL)
		return -1;

	if (cpu_list == NULL && target_tid != -1)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus	= NULL;
	evlist->threads = NULL;
}

int perf_evlist__set_filters(struct perf_evlist *evlist)
{
	const struct thread_map *threads = evlist->threads;
	const struct cpu_map *cpus = evlist->cpus;
	struct perf_evsel *evsel;
	char *filter;
	int thread;
	int cpu;
	int err;
	int fd;

	list_for_each_entry(evsel, &evlist->entries, node) {
		filter = evsel->filter;
		if (!filter)
			continue;
		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				fd = FD(evsel, cpu, thread);
				err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
				if (err)
					return err;
			}
		}
	}

	return 0;
}

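/*
 * Example (sketch): attach a tracepoint filter before the counters are
 * enabled. The filter string here is illustrative; evsel->filter is
 * normally filled in from the command line:
 *
 *	evsel->filter = strdup("common_pid != 0");
 *	if (perf_evlist__set_filters(evlist) != 0)
 *		... the kernel rejected the filter ...
 */
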
bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_type;
}

bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_id_all;
}