Diffstat (limited to 'tools/perf/util/symbol.c')
-rw-r--r--	tools/perf/util/symbol.c	160
1 file changed, 81 insertions(+), 79 deletions(-)
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index a8f80e427674..db9667aacb88 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -242,28 +242,24 @@ void symbols__fixup_end(struct rb_root_cached *symbols)
 void map_groups__fixup_end(struct map_groups *mg)
 {
 	struct maps *maps = &mg->maps;
-	struct map *next, *curr;
+	struct map *prev = NULL, *curr;
 
 	down_write(&maps->lock);
 
-	curr = maps__first(maps);
-	if (curr == NULL)
-		goto out_unlock;
+	maps__for_each_entry(maps, curr) {
+		if (prev != NULL && !prev->end)
+			prev->end = curr->start;
 
-	for (next = map__next(curr); next; next = map__next(curr)) {
-		if (!curr->end)
-			curr->end = next->start;
-		curr = next;
+		prev = curr;
 	}
 
 	/*
 	 * We still haven't the actual symbols, so guess the
 	 * last map final address.
 	 */
-	if (!curr->end)
+	if (curr && !curr->end)
 		curr->end = ~0ULL;
 
-out_unlock:
 	up_write(&maps->lock);
 }
 
@@ -1053,11 +1049,6 @@ out_delete_from:
 	return ret;
 }
 
-struct map *map_groups__first(struct map_groups *mg)
-{
-	return maps__first(&mg->maps);
-}
-
 static int do_validate_kcore_modules(const char *filename,
 				  struct map_groups *kmaps)
 {
@@ -1069,13 +1060,10 @@ static int do_validate_kcore_modules(const char *filename,
 	if (err)
 		return err;
 
-	old_map = map_groups__first(kmaps);
-	while (old_map) {
-		struct map *next = map_groups__next(old_map);
+	map_groups__for_each_entry(kmaps, old_map) {
 		struct module_info *mi;
 
 		if (!__map__is_kmodule(old_map)) {
-			old_map = next;
 			continue;
 		}
 
@@ -1085,8 +1073,6 @@ static int do_validate_kcore_modules(const char *filename,
 			err = -EINVAL;
 			goto out;
 		}
-
-		old_map = next;
 	}
 out:
 	delete_modules(&modules);
@@ -1189,9 +1175,7 @@ int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map)
 	struct map *old_map;
 	LIST_HEAD(merged);
 
-	for (old_map = map_groups__first(kmaps); old_map;
-	     old_map = map_groups__next(old_map)) {
-
+	map_groups__for_each_entry(kmaps, old_map) {
 		/* no overload with this one */
 		if (new_map->end < old_map->start ||
 		    new_map->start >= old_map->end)
@@ -1264,7 +1248,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
 {
 	struct map_groups *kmaps = map__kmaps(map);
 	struct kcore_mapfn_data md;
-	struct map *old_map, *new_map, *replacement_map = NULL;
+	struct map *old_map, *new_map, *replacement_map = NULL, *next;
 	struct machine *machine;
 	bool is_64_bit;
 	int err, fd;
@@ -1311,10 +1295,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
 	}
 
 	/* Remove old maps */
-	old_map = map_groups__first(kmaps);
-	while (old_map) {
-		struct map *next = map_groups__next(old_map);
-
+	map_groups__for_each_entry_safe(kmaps, old_map, next) {
 		/*
 		 * We need to preserve eBPF maps even if they are
 		 * covered by kcore, because we need to access
@@ -1322,7 +1303,6 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
 		 */
 		if (old_map != map && !__map__is_bpf_prog(old_map))
 			map_groups__remove(kmaps, old_map);
-		old_map = next;
 	}
 	machine->trampolines_mapped = false;
 
@@ -1550,7 +1530,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
 	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
 		/*
 		 * kernel modules know their symtab type - it's set when
-		 * creating a module dso in machine__findnew_module_map().
+		 * creating a module dso in machine__addnew_module_map().
 		 */
 		return kmod && dso->symtab_type == type;
 
@@ -1608,7 +1588,7 @@ int dso__load(struct dso *dso, struct map *map)
 	char *name;
 	int ret = -1;
 	u_int i;
-	struct machine *machine;
+	struct machine *machine = NULL;
 	char *root_dir = (char *) "";
 	int ss_pos = 0;
 	struct symsrc ss_[2];
@@ -1637,17 +1617,13 @@ int dso__load(struct dso *dso, struct map *map)
 		goto out;
 	}
 
-	if (map->groups && map->groups->machine)
-		machine = map->groups->machine;
-	else
-		machine = NULL;
-
 	if (dso->kernel) {
 		if (dso->kernel == DSO_TYPE_KERNEL)
 			ret = dso__load_kernel_sym(dso, map);
 		else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
 			ret = dso__load_guest_kernel_sym(dso, map);
 
+		machine = map__kmaps(map)->machine;
 		if (machine__is(machine, "x86_64"))
 			machine__map_x86_64_entry_trampolines(machine, dso);
 		goto out;
@@ -1784,28 +1760,82 @@ out:
 	return ret;
 }
 
+static int map__strcmp(const void *a, const void *b)
+{
+	const struct map *ma = *(const struct map **)a, *mb = *(const struct map **)b;
+	return strcmp(ma->dso->short_name, mb->dso->short_name);
+}
+
+static int map__strcmp_name(const void *name, const void *b)
+{
+	const struct map *map = *(const struct map **)b;
+	return strcmp(name, map->dso->short_name);
+}
+
+void __map_groups__sort_by_name(struct map_groups *mg)
+{
+	qsort(mg->maps_by_name, mg->nr_maps, sizeof(struct map *), map__strcmp);
+}
+
+static int map__groups__sort_by_name_from_rbtree(struct map_groups *mg)
+{
+	struct map *map;
+	struct map **maps_by_name = realloc(mg->maps_by_name, mg->nr_maps * sizeof(map));
+	int i = 0;
+
+	if (maps_by_name == NULL)
+		return -1;
+
+	mg->maps_by_name = maps_by_name;
+	mg->nr_maps_allocated = mg->nr_maps;
+
+	maps__for_each_entry(&mg->maps, map)
+		maps_by_name[i++] = map;
+
+	__map_groups__sort_by_name(mg);
+	return 0;
+}
+
+static struct map *__map_groups__find_by_name(struct map_groups *mg, const char *name)
+{
+	struct map **mapp;
+
+	if (mg->maps_by_name == NULL &&
+	    map__groups__sort_by_name_from_rbtree(mg))
+		return NULL;
+
+	mapp = bsearch(name, mg->maps_by_name, mg->nr_maps, sizeof(*mapp), map__strcmp_name);
+	if (mapp)
+		return *mapp;
+	return NULL;
+}
+
 struct map *map_groups__find_by_name(struct map_groups *mg, const char *name)
 {
 	struct maps *maps = &mg->maps;
 	struct map *map;
-	struct rb_node *node;
 
 	down_read(&maps->lock);
 
-	for (node = maps->names.rb_node; node; ) {
-		int rc;
-
-		map = rb_entry(node, struct map, rb_node_name);
-
-		rc = strcmp(map->dso->short_name, name);
-		if (rc < 0)
-			node = node->rb_left;
-		else if (rc > 0)
-			node = node->rb_right;
-		else
+	if (mg->last_search_by_name && strcmp(mg->last_search_by_name->dso->short_name, name) == 0) {
+		map = mg->last_search_by_name;
+		goto out_unlock;
+	}
+	/*
+	 * If we have mg->maps_by_name, then the name isn't in the rbtree,
+	 * as mg->maps_by_name mirrors the rbtree when lookups by name are
+	 * made.
+	 */
+	map = __map_groups__find_by_name(mg, name);
+	if (map || mg->maps_by_name != NULL)
+		goto out_unlock;
+
+	/* Fallback to traversing the rbtree... */
+	maps__for_each_entry(maps, map)
+		if (strcmp(map->dso->short_name, name) == 0) {
+			mg->last_search_by_name = map;
 			goto out_unlock;
-	}
+		}
 
 	map = NULL;
 
@@ -2047,15 +2077,9 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
 {
 	int err;
 	const char *kallsyms_filename = NULL;
-	struct machine *machine;
+	struct machine *machine = map__kmaps(map)->machine;
 	char path[PATH_MAX];
 
-	if (!map->groups) {
-		pr_debug("Guest kernel map hasn't the point to groups\n");
-		return -1;
-	}
-	machine = map->groups->machine;
-
 	if (machine__is_default_guest(machine)) {
 		/*
 		 * if the user specified a vmlinux filename, use it and only
@@ -2371,25 +2395,3 @@ struct mem_info *mem_info__new(void)
 		refcount_set(&mi->refcnt, 1);
 	return mi;
 }
-
-struct block_info *block_info__get(struct block_info *bi)
-{
-	if (bi)
-		refcount_inc(&bi->refcnt);
-	return bi;
-}
-
-void block_info__put(struct block_info *bi)
-{
-	if (bi && refcount_dec_and_test(&bi->refcnt))
-		free(bi);
-}
-
-struct block_info *block_info__new(void)
-{
-	struct block_info *bi = zalloc(sizeof(*bi));
-
-	if (bi)
-		refcount_set(&bi->refcnt, 1);
-	return bi;
-}
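Most of the churn in the diff above replaces open-coded maps__first()/map__next() walks with map_groups__for_each_entry()/_safe() iterators. The following is a minimal standalone sketch of that pattern, not the perf code itself: struct node, list__first()/node__next() and the two macros are simplified stand-ins for perf's rb-tree-backed struct maps and its iterators.

/*
 * Sketch of a for_each_entry()-style iterator hiding the first()/next()
 * bookkeeping that the old open-coded while loops carried around.
 * All names here are illustrative, not from the perf tree.
 */
#include <stdio.h>

struct node {
	const char *name;
	struct node *next;
};

static struct node *list__first(struct node *head)
{
	return head;
}

static struct node *node__next(struct node *n)
{
	return n ? n->next : NULL;
}

/* Plain traversal, analogous in spirit to maps__for_each_entry(). */
#define list__for_each_entry(head, pos) \
	for (pos = list__first(head); pos; pos = node__next(pos))

/*
 * "Safe" traversal: 'tmp' remembers the next element so the body may
 * unlink or free 'pos', analogous to map_groups__for_each_entry_safe().
 */
#define list__for_each_entry_safe(head, pos, tmp) \
	for (pos = list__first(head), tmp = node__next(pos); pos; \
	     pos = tmp, tmp = node__next(tmp))

int main(void)
{
	struct node c = { "libc.so", NULL };
	struct node b = { "[ext4]", &c };
	struct node a = { "vmlinux", &b };
	struct node *pos, *tmp;
	int n = 0;

	list__for_each_entry(&a, pos)
		printf("%s\n", pos->name);

	/* The safe variant would tolerate removal of 'pos' inside the body. */
	list__for_each_entry_safe(&a, pos, tmp)
		n++;
	printf("%d nodes\n", n);
	return 0;
}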
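The other half of the patch speeds up map_groups__find_by_name(): a maps_by_name array is built lazily, sorted once with qsort() (__map_groups__sort_by_name()), searched with bsearch() via map__strcmp_name(), and fronted by a last_search_by_name cache. Below is a minimal self-contained sketch of that lookup strategy under simplified, made-up types (struct entry/struct table stand in for struct map/struct map_groups); it illustrates the technique, not the perf implementation.

/*
 * Sort-once, then bsearch() by name, with a one-element cache for the
 * previous hit. Types and names are simplified stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry {
	const char *name;
};

struct table {
	struct entry **by_name;		/* array sorted by name */
	size_t nr;
	struct entry *last_search;	/* cache of the previous hit */
};

static int entry__strcmp(const void *a, const void *b)
{
	const struct entry *ea = *(const struct entry **)a;
	const struct entry *eb = *(const struct entry **)b;
	return strcmp(ea->name, eb->name);
}

static int entry__strcmp_name(const void *name, const void *b)
{
	const struct entry *e = *(const struct entry **)b;
	return strcmp(name, e->name);
}

static struct entry *table__find_by_name(struct table *t, const char *name)
{
	struct entry **slot;

	/* Repeated lookups for the same name hit the cache first. */
	if (t->last_search && !strcmp(t->last_search->name, name))
		return t->last_search;

	slot = bsearch(name, t->by_name, t->nr, sizeof(*slot), entry__strcmp_name);
	if (slot) {
		t->last_search = *slot;
		return *slot;
	}
	return NULL;
}

int main(void)
{
	static struct entry e[] = { { "vmlinux" }, { "[ext4]" }, { "libc.so" } };
	struct entry *by_name[] = { &e[0], &e[1], &e[2] };
	struct table t = { .by_name = by_name, .nr = 3 };
	struct entry *found;

	/* Sort once up front, then every lookup is O(log n). */
	qsort(t.by_name, t.nr, sizeof(*t.by_name), entry__strcmp);

	found = table__find_by_name(&t, "libc.so");
	printf("%s\n", found ? found->name : "(not found)");
	return 0;
}

Sorting once trades an O(n log n) build for O(log n) lookups, and the one-element cache makes back-to-back lookups of the same name O(1), which matches the intent of the last_search_by_name field added by the patch.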
