b498eecbe8507ca59f943c7739345e787ab1c099
[linux-2.6.git] / tools / perf / util / evlist.c
1 #include <poll.h>
2 #include "evlist.h"
3 #include "evsel.h"
4 #include "util.h"
5
6 #include <linux/bitops.h>
7 #include <linux/hash.h>
8
9 void perf_evlist__init(struct perf_evlist *evlist)
10 {
11         int i;
12
13         for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
14                 INIT_HLIST_HEAD(&evlist->heads[i]);
15         INIT_LIST_HEAD(&evlist->entries);
16 }
17
18 struct perf_evlist *perf_evlist__new(void)
19 {
20         struct perf_evlist *evlist = zalloc(sizeof(*evlist));
21
22         if (evlist != NULL)
23                 perf_evlist__init(evlist);
24
25         return evlist;
26 }
27
28 static void perf_evlist__purge(struct perf_evlist *evlist)
29 {
30         struct perf_evsel *pos, *n;
31
32         list_for_each_entry_safe(pos, n, &evlist->entries, node) {
33                 list_del_init(&pos->node);
34                 perf_evsel__delete(pos);
35         }
36
37         evlist->nr_entries = 0;
38 }
39
40 void perf_evlist__exit(struct perf_evlist *evlist)
41 {
42         free(evlist->mmap);
43         free(evlist->pollfd);
44         evlist->mmap = NULL;
45         evlist->pollfd = NULL;
46 }
47
/*
 * Destroy an evlist created with perf_evlist__new(): delete every evsel
 * on it, free the auxiliary arrays, then free the evlist itself.
 */
void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
54
55 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
56 {
57         list_add_tail(&entry->node, &evlist->entries);
58         ++evlist->nr_entries;
59 }
60
61 int perf_evlist__add_default(struct perf_evlist *evlist)
62 {
63         struct perf_event_attr attr = {
64                 .type = PERF_TYPE_HARDWARE,
65                 .config = PERF_COUNT_HW_CPU_CYCLES,
66         };
67         struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
68
69         if (evsel == NULL)
70                 return -ENOMEM;
71
72         perf_evlist__add(evlist, evsel);
73         return 0;
74 }
75
76 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist, int ncpus, int nthreads)
77 {
78         int nfds = ncpus * nthreads * evlist->nr_entries;
79         evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
80         return evlist->pollfd != NULL ? 0 : -ENOMEM;
81 }
82
83 void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
84 {
85         fcntl(fd, F_SETFL, O_NONBLOCK);
86         evlist->pollfd[evlist->nr_fds].fd = fd;
87         evlist->pollfd[evlist->nr_fds].events = POLLIN;
88         evlist->nr_fds++;
89 }
90
/*
 * Map a sample id back to the evsel that produced it.
 *
 * Returns the matching evsel, or NULL when the id is not in the hash.
 */
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	/* Fast path: a single event means every sample is its. */
	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	/* Walk the bucket the id hashes into. */
	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;
	return NULL;
}
109
/*
 * Pull the next event out of the mmap ring buffer for @cpu, or return
 * NULL when the buffer is empty.
 *
 * The returned pointer is only valid until the next call: it points
 * either directly into the mmap'ed data area or at evlist->event_copy
 * when the record wraps around the end of the ring.
 */
event_t *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[cpu];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	/* Data area begins one page in, past the control page. */
	unsigned char *data = md->base + page_size;
	event_t *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (event_t *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			/*
			 * Copy at most sizeof(*event) bytes: that is the
			 * capacity of event_copy, the staging buffer the
			 * wrapped record is reassembled into.
			 */
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			/* Copy in up to two chunks, wrapping at the mask. */
			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	/* Remember where we got to for the next call. */
	md->prev = old;

	/* In non-overwrite mode, tell the kernel the data was consumed. */
	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}