/*
 * map_groups__init - put every per-type map rb-tree and removed-maps
 * list of @self into a known-empty initial state.
 */
10 void map_groups__init(struct map_groups *self)
13 for (i = 0; i < MAP__NR_TYPES; ++i) {
14 self->maps[i] = RB_ROOT;
15 INIT_LIST_HEAD(&self->removed_maps[i]);
/*
 * thread__new - allocate a zeroed struct thread for @pid.
 *
 * Initializes the thread's map groups and gives the comm a placeholder
 * of ":<pid>" (bounded to 32 bytes by snprintf) until thread__set_comm()
 * installs the real name.
 *
 * NOTE(review): the malloc(32) result is passed straight to snprintf;
 * the NULL check is not visible in this view - confirm it exists on the
 * elided lines.
 */
19 static struct thread *thread__new(pid_t pid)
21 struct thread *self = zalloc(sizeof(*self));
24 map_groups__init(&self->mg);
26 self->comm = malloc(32);
28 snprintf(self->comm, 32, ":%d", self->pid);
/*
 * map_groups__flush - empty every live map rb-tree in @self.
 *
 * Maps are not freed: each one is erased from its tree and parked on
 * the per-type removed_maps list, because other objects may still hold
 * references to it (see the comment in the loop body).
 */
34 static void map_groups__flush(struct map_groups *self)
38 for (type = 0; type < MAP__NR_TYPES; type++) {
39 struct rb_root *root = &self->maps[type];
40 struct rb_node *next = rb_first(root);
43 struct map *pos = rb_entry(next, struct map, rb_node);
/* grab the successor before rb_erase() invalidates pos's links */
44 next = rb_next(&pos->rb_node);
45 rb_erase(&pos->rb_node, root);
47 * We may have references to this map, for
48 * instance in some hist_entry instances, so
49 * just move them to a separate list.
51 list_add_tail(&pos->node, &self->removed_maps[pos->type]);
/*
 * thread__set_comm - replace @self's command name with a copy of @comm.
 *
 * Return: 0 on success, -ENOMEM when strdup() fails.  comm_set is
 * marked and the thread's maps are flushed (presumably only on the
 * success path - the surrounding branch lines are elided here; confirm).
 */
56 int thread__set_comm(struct thread *self, const char *comm)
62 self->comm = strdup(comm);
63 err = self->comm == NULL ? -ENOMEM : 0;
65 self->comm_set = true;
66 map_groups__flush(&self->mg);
/*
 * thread__comm_len - return the length of @self's comm, computing it
 * lazily with strlen() and caching the result in self->comm_len.
 */
71 int thread__comm_len(struct thread *self)
73 if (!self->comm_len) {
76 self->comm_len = strlen(self->comm);
79 return self->comm_len;
/*
 * __map_groups__fprintf_maps - print every live map of @type in @self
 * to @fp, including each map's dso, under a map-type heading.
 *
 * Return: total number of characters written.
 */
82 static size_t __map_groups__fprintf_maps(struct map_groups *self,
83 enum map_type type, FILE *fp)
85 size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
88 for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
89 struct map *pos = rb_entry(nd, struct map, rb_node);
90 printed += fprintf(fp, "Map:");
91 printed += map__fprintf(pos, fp);
93 printed += dso__fprintf(pos->dso, type, fp);
94 printed += fprintf(fp, "--\n");
/* Print the live maps of every type in @self; returns chars written. */
101 size_t map_groups__fprintf_maps(struct map_groups *self, FILE *fp)
103 size_t printed = 0, i;
104 for (i = 0; i < MAP__NR_TYPES; ++i)
105 printed += __map_groups__fprintf_maps(self, i, fp);
/*
 * __map_groups__fprintf_removed_maps - like __map_groups__fprintf_maps
 * but walks the removed_maps list for @type instead of the live rb-tree.
 *
 * Return: total number of characters written.
 */
109 static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
110 enum map_type type, FILE *fp)
115 list_for_each_entry(pos, &self->removed_maps[type], node) {
116 printed += fprintf(fp, "Map:");
117 printed += map__fprintf(pos, fp);
119 printed += dso__fprintf(pos->dso, type, fp);
120 printed += fprintf(fp, "--\n");
/* Print the removed maps of every type in @self; returns chars written. */
126 static size_t map_groups__fprintf_removed_maps(struct map_groups *self, FILE *fp)
128 size_t printed = 0, i;
129 for (i = 0; i < MAP__NR_TYPES; ++i)
130 printed += __map_groups__fprintf_removed_maps(self, i, fp);
/* Dump both the live maps and the removed maps of @self to @fp. */
134 static size_t map_groups__fprintf(struct map_groups *self, FILE *fp)
136 size_t printed = map_groups__fprintf_maps(self, fp);
137 printed += fprintf(fp, "Removed maps:\n");
138 return printed + map_groups__fprintf_removed_maps(self, fp);
/* Print the thread's "pid comm" header followed by all of its maps. */
141 static size_t thread__fprintf(struct thread *self, FILE *fp)
143 return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
144 map_groups__fprintf(&self->mg, fp);
/*
 * perf_session__findnew - look up the thread for @pid in the session's
 * rb-tree of threads, creating and inserting a new one when absent.
 *
 * A one-entry last_match cache short-circuits the common case of many
 * consecutive lookups for the same pid.
 */
147 struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
149 struct rb_node **p = &self->threads.rb_node;
150 struct rb_node *parent = NULL;
154 * Front-end cache - PID lookups come in blocks,
155 * so most of the time we dont have to look up
158 if (self->last_match && self->last_match->pid == pid)
159 return self->last_match;
163 th = rb_entry(parent, struct thread, rb_node);
165 if (th->pid == pid) {
166 self->last_match = th;
176 th = thread__new(pid);
178 rb_link_node(&th->rb_node, parent, p);
179 rb_insert_color(&th->rb_node, &self->threads);
180 self->last_match = th;
/*
 * map_groups__remove_overlappings - evict every existing map of
 * map->type that overlaps @map.
 *
 * Overlapping maps are reported to stderr, erased from the rb-tree and
 * parked on the removed_maps list rather than freed, since other
 * objects (e.g. hist_entry instances) may still reference them.
 */
186 static void map_groups__remove_overlappings(struct map_groups *self,
189 struct rb_root *root = &self->maps[map->type];
190 struct rb_node *next = rb_first(root);
193 struct map *pos = rb_entry(next, struct map, rb_node);
/* grab the successor before rb_erase() invalidates pos's links */
194 next = rb_next(&pos->rb_node);
196 if (!map__overlap(pos, map))
200 fputs("overlapping maps:\n", stderr);
201 map__fprintf(map, stderr);
202 map__fprintf(pos, stderr);
205 rb_erase(&pos->rb_node, root);
207 * We may have references to this map, for instance in some
208 * hist_entry instances, so just move them to a separate
211 list_add_tail(&pos->node, &self->removed_maps[map->type]);
/*
 * maps__insert - insert @map into the rb-tree @maps, keyed on
 * map->start.  Descent comparisons are on elided lines; confirm
 * the ordering there.
 */
215 void maps__insert(struct rb_root *maps, struct map *map)
217 struct rb_node **p = &maps->rb_node;
218 struct rb_node *parent = NULL;
219 const u64 ip = map->start;
224 m = rb_entry(parent, struct map, rb_node);
231 rb_link_node(&map->rb_node, parent, p);
232 rb_insert_color(&map->rb_node, maps);
/*
 * maps__find - binary-search @maps for the map whose address range
 * contains @ip (the "ip > m->end" branch implies the end bound is
 * inclusive).  Returns NULL when nothing matches - confirm on the
 * elided return path.
 */
235 struct map *maps__find(struct rb_root *maps, u64 ip)
237 struct rb_node **p = &maps->rb_node;
238 struct rb_node *parent = NULL;
243 m = rb_entry(parent, struct map, rb_node);
246 else if (ip > m->end)
/*
 * thread__insert_map - add @map to the thread's map groups, first
 * evicting any existing maps it overlaps.
 */
255 void thread__insert_map(struct thread *self, struct map *map)
257 map_groups__remove_overlappings(&self->mg, map);
258 map_groups__insert(&self->mg, map);
262 * XXX This should not really _copy_ the maps, but refcount them.
/*
 * map_groups__clone - duplicate every map of @type from @parent into
 * @self via map__clone().  Callers treat a negative return as failure
 * (error handling lines elided in this view).
 */
264 static int map_groups__clone(struct map_groups *self,
265 struct map_groups *parent, enum map_type type)
268 for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
269 struct map *map = rb_entry(nd, struct map, rb_node);
270 struct map *new = map__clone(map);
273 map_groups__insert(self, new);
/*
 * thread__fork - initialize @self as a child of @parent: duplicate the
 * parent's comm when one was set, then clone every map group type.
 * A failed map_groups__clone() (negative return) aborts the loop;
 * the exact return values are on elided lines - confirm.
 */
278 int thread__fork(struct thread *self, struct thread *parent)
282 if (parent->comm_set) {
285 self->comm = strdup(parent->comm);
288 self->comm_set = true;
291 for (i = 0; i < MAP__NR_TYPES; ++i)
292 if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
/*
 * perf_session__fprintf - print every thread in the session's rb-tree
 * to @fp, accumulating the number of characters written.
 */
297 size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
302 for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) {
303 struct thread *pos = rb_entry(nd, struct thread, rb_node);
305 ret += thread__fprintf(pos, fp);
/*
 * map_groups__find_symbol - resolve @addr (of address space @type) to
 * a symbol: find the containing map, translate addr with the map's
 * map_ip() hook, then search that map's symbols (honoring @filter).
 * The map == NULL case is handled on elided lines - confirm.
 */
311 struct symbol *map_groups__find_symbol(struct map_groups *self,
312 enum map_type type, u64 addr,
313 symbol_filter_t filter)
315 struct map *map = map_groups__find(self, type, addr);
318 return map__find_symbol(map, map->map_ip(map, addr), filter);