Diffstat (limited to 'tools/perf/tests')
50 files changed, 3106 insertions, 1394 deletions
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c index 56fba08a3037..61186d0d1cfa 100644 --- a/tools/perf/tests/attr.c +++ b/tools/perf/tests/attr.c @@ -34,7 +34,7 @@  #include "event.h"  #include "util.h"  #include "tests.h" -#include "pmu.h" +#include "pmus.h"  #define ENV "PERF_TEST_ATTR" @@ -185,8 +185,15 @@ static int test__attr(struct test_suite *test __maybe_unused, int subtest __mayb  	char path_dir[PATH_MAX];  	char *exec_path; -	if (perf_pmu__has_hybrid()) +	if (perf_pmus__num_core_pmus() > 1) { +		/* +		 * TODO: Attribute tests hard code the PMU type. If there are >1 +		 * core PMU then each PMU will have a different type whic +		 * requires additional support. +		 */ +		pr_debug("Skip test on hybrid systems");  		return TEST_SKIP; +	}  	/* First try development tree tests. */  	if (!lstat("./tests", &st)) diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index b89d69afcef0..aa44fdc84763 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -88,15 +88,13 @@ static struct test_suite *generic_tests[] = {  	&suite__bpf,  	&suite__thread_map_synthesize,  	&suite__thread_map_remove, -	&suite__cpu_map_synthesize, +	&suite__cpu_map,  	&suite__synthesize_stat_config,  	&suite__synthesize_stat,  	&suite__synthesize_stat_round,  	&suite__event_update,  	&suite__event_times,  	&suite__backward_ring_buffer, -	&suite__cpu_map_print, -	&suite__cpu_map_merge,  	&suite__sdt_event,  	&suite__is_printable_array,  	&suite__bitmap_print, diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c index efe026a35010..ed3815163d1b 100644 --- a/tools/perf/tests/code-reading.c +++ b/tools/perf/tests/code-reading.c @@ -241,6 +241,7 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,  	pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr); +	addr_location__init(&al);  	if (!thread__find_map(thread, cpumode, addr, &al) || !map__dso(al.map)) {  		if (cpumode == PERF_RECORD_MISC_HYPERVISOR) {  			pr_debug("Hypervisor address can not be resolved - skipping\n"); @@ -269,7 +270,7 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,  		len = map__end(al.map) - addr;  	/* Read the object code using perf */ -	ret_len = dso__data_read_offset(dso, maps__machine(thread->maps), +	ret_len = dso__data_read_offset(dso, maps__machine(thread__maps(thread)),  					al.addr, buf1, len);  	if (ret_len != len) {  		pr_debug("dso__data_read_offset failed\n"); @@ -366,7 +367,7 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,  	}  	pr_debug("Bytes read match those read by objdump\n");  out: -	map__put(al.map); +	addr_location__exit(&al);  	return err;  } @@ -720,7 +721,6 @@ out_err:  	evlist__delete(evlist);  	perf_cpu_map__put(cpus);  	perf_thread_map__put(threads); -	machine__delete_threads(machine);  	machine__delete(machine);  	return err; diff --git a/tools/perf/tests/cpumap.c b/tools/perf/tests/cpumap.c index b1a924314e09..7730fc2ab40b 100644 --- a/tools/perf/tests/cpumap.c +++ b/tools/perf/tests/cpumap.c @@ -171,6 +171,92 @@ static int test__cpu_map_merge(struct test_suite *test __maybe_unused, int subte  	return 0;  } -DEFINE_SUITE("Synthesize cpu map", cpu_map_synthesize); -DEFINE_SUITE("Print cpu map", cpu_map_print); -DEFINE_SUITE("Merge cpu map", cpu_map_merge); +static int __test__cpu_map_intersect(const char *lhs, const char *rhs, int nr, const char *expected) +{ +	struct perf_cpu_map *a = perf_cpu_map__new(lhs); +	struct perf_cpu_map *b = perf_cpu_map__new(rhs); +	
struct perf_cpu_map *c = perf_cpu_map__intersect(a, b); +	char buf[100]; + +	TEST_ASSERT_EQUAL("failed to intersect map: bad nr", perf_cpu_map__nr(c), nr); +	cpu_map__snprint(c, buf, sizeof(buf)); +	TEST_ASSERT_VAL("failed to intersect map: bad result", !strcmp(buf, expected)); +	perf_cpu_map__put(a); +	perf_cpu_map__put(b); +	perf_cpu_map__put(c); +	return 0; +} + +static int test__cpu_map_intersect(struct test_suite *test __maybe_unused, +				   int subtest __maybe_unused) +{ +	int ret; + +	ret = __test__cpu_map_intersect("4,2,1", "4,5,7", 1, "4"); +	if (ret) +		return ret; +	ret = __test__cpu_map_intersect("1-8", "6-9", 3, "6-8"); +	if (ret) +		return ret; +	ret = __test__cpu_map_intersect("1-8,12-20", "6-9,15", 4, "6-8,15"); +	if (ret) +		return ret; +	ret = __test__cpu_map_intersect("4,2,1", "1", 1, "1"); +	if (ret) +		return ret; +	ret = __test__cpu_map_intersect("1", "4,2,1", 1, "1"); +	if (ret) +		return ret; +	ret = __test__cpu_map_intersect("1", "1", 1, "1"); +	return ret; +} + +static int test__cpu_map_equal(struct test_suite *test __maybe_unused, int subtest __maybe_unused) +{ +	struct perf_cpu_map *any = perf_cpu_map__dummy_new(); +	struct perf_cpu_map *one = perf_cpu_map__new("1"); +	struct perf_cpu_map *two = perf_cpu_map__new("2"); +	struct perf_cpu_map *empty = perf_cpu_map__intersect(one, two); +	struct perf_cpu_map *pair = perf_cpu_map__new("1-2"); +	struct perf_cpu_map *tmp; +	struct perf_cpu_map *maps[] = {empty, any, one, two, pair}; + +	for (size_t i = 0; i < ARRAY_SIZE(maps); i++) { +		/* Maps equal themself. */ +		TEST_ASSERT_VAL("equal", perf_cpu_map__equal(maps[i], maps[i])); +		for (size_t j = 0; j < ARRAY_SIZE(maps); j++) { +			/* Maps dont't equal each other. */ +			if (i == j) +				continue; +			TEST_ASSERT_VAL("not equal", !perf_cpu_map__equal(maps[i], maps[j])); +		} +	} + +	/* Maps equal made maps. 
*/ +	tmp = perf_cpu_map__merge(perf_cpu_map__get(one), two); +	TEST_ASSERT_VAL("pair", perf_cpu_map__equal(pair, tmp)); +	perf_cpu_map__put(tmp); + +	tmp = perf_cpu_map__intersect(pair, one); +	TEST_ASSERT_VAL("one", perf_cpu_map__equal(one, tmp)); +	perf_cpu_map__put(tmp); + +	for (size_t i = 0; i < ARRAY_SIZE(maps); i++) +		perf_cpu_map__put(maps[i]); + +	return TEST_OK; +} + +static struct test_case tests__cpu_map[] = { +	TEST_CASE("Synthesize cpu map", cpu_map_synthesize), +	TEST_CASE("Print cpu map", cpu_map_print), +	TEST_CASE("Merge cpu map", cpu_map_merge), +	TEST_CASE("Intersect cpu map", cpu_map_intersect), +	TEST_CASE("Equal cpu map", cpu_map_equal), +	{	.name = NULL, } +}; + +struct test_suite suite__cpu_map = { +	.desc = "CPU map", +	.test_cases = tests__cpu_map, +}; diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c index ee983b677a6a..d01aa931fe81 100644 --- a/tools/perf/tests/dwarf-unwind.c +++ b/tools/perf/tests/dwarf-unwind.c @@ -235,7 +235,6 @@ noinline int test__dwarf_unwind(struct test_suite *test __maybe_unused,  	thread__put(thread);   out: -	machine__delete_threads(machine);  	machine__delete(machine);  	return err;  } diff --git a/tools/perf/tests/event_groups.c b/tools/perf/tests/event_groups.c index 029442b4e9c6..ccd9d8b2903f 100644 --- a/tools/perf/tests/event_groups.c +++ b/tools/perf/tests/event_groups.c @@ -50,13 +50,10 @@ static int event_open(int type, unsigned long config, int group_fd)  static int setup_uncore_event(void)  { -	struct perf_pmu *pmu; +	struct perf_pmu *pmu = NULL;  	int i, fd; -	if (list_empty(&pmus)) -		perf_pmu__scan(NULL); - -	perf_pmus__for_each_pmu(pmu) { +	while ((pmu = perf_pmus__scan(pmu)) != NULL) {  		for (i = 0; i < NR_UNCORE_PMUS; i++) {  			if (!strcmp(uncore_pmus[i].name, pmu->name)) {  				pr_debug("Using %s for uncore pmu event\n", pmu->name); diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c index e94fed901992..15ff86f9da0b 100644 --- a/tools/perf/tests/evsel-roundtrip-name.c +++ b/tools/perf/tests/evsel-roundtrip-name.c @@ -4,114 +4,93 @@  #include "parse-events.h"  #include "tests.h"  #include "debug.h" -#include "pmu.h" -#include "pmu-hybrid.h" -#include <errno.h>  #include <linux/kernel.h>  static int perf_evsel__roundtrip_cache_name_test(void)  { -	char name[128]; -	int type, op, err = 0, ret = 0, i, idx; -	struct evsel *evsel; -	struct evlist *evlist = evlist__new(); +	int ret = TEST_OK; -        if (evlist == NULL) -                return -ENOMEM; - -	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { -		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { +	for (int type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { +		for (int op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {  			/* skip invalid cache type */  			if (!evsel__is_cache_op_valid(type, op))  				continue; -			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { -				__evsel__hw_cache_type_op_res_name(type, op, i, name, sizeof(name)); -				err = parse_event(evlist, name); -				if (err) -					ret = err; -			} -		} -	} - -	idx = 0; -	evsel = evlist__first(evlist); +			for (int res = 0; res < PERF_COUNT_HW_CACHE_RESULT_MAX; res++) { +				char name[128]; +				struct evlist *evlist = evlist__new(); +				struct evsel *evsel; +				int err; -	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { -		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { -			/* skip invalid cache type */ -			if (!evsel__is_cache_op_valid(type, op)) -				continue; +				if (evlist == NULL) { +					
pr_debug("Failed to alloc evlist"); +					return TEST_FAIL; +				} +				__evsel__hw_cache_type_op_res_name(type, op, res, +								name, sizeof(name)); -			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { -				__evsel__hw_cache_type_op_res_name(type, op, i, name, sizeof(name)); -				if (evsel->core.idx != idx) +				err = parse_event(evlist, name); +				if (err) { +					pr_debug("Failure to parse cache event '%s' possibly as PMUs don't support it", +						name); +					evlist__delete(evlist);  					continue; - -				++idx; - -				if (strcmp(evsel__name(evsel), name)) { -					pr_debug("%s != %s\n", evsel__name(evsel), name); -					ret = -1;  				} - -				evsel = evsel__next(evsel); +				evlist__for_each_entry(evlist, evsel) { +					if (strcmp(evsel__name(evsel), name)) { +						pr_debug("%s != %s\n", evsel__name(evsel), name); +						ret = TEST_FAIL; +					} +				} +				evlist__delete(evlist);  			}  		}  	} - -	evlist__delete(evlist);  	return ret;  } -static int __perf_evsel__name_array_test(const char *const names[], int nr_names, -					 int distance) +static int perf_evsel__name_array_test(const char *const names[], int nr_names)  { -	int i, err; -	struct evsel *evsel; -	struct evlist *evlist = evlist__new(); +	int ret = TEST_OK; -        if (evlist == NULL) -                return -ENOMEM; +	for (int i = 0; i < nr_names; ++i) { +		struct evlist *evlist = evlist__new(); +		struct evsel *evsel; +		int err; -	for (i = 0; i < nr_names; ++i) { +		if (evlist == NULL) { +			pr_debug("Failed to alloc evlist"); +			return TEST_FAIL; +		}  		err = parse_event(evlist, names[i]);  		if (err) {  			pr_debug("failed to parse event '%s', err %d\n",  				 names[i], err); -			goto out_delete_evlist; +			evlist__delete(evlist); +			ret = TEST_FAIL; +			continue;  		} -	} - -	err = 0; -	evlist__for_each_entry(evlist, evsel) { -		if (strcmp(evsel__name(evsel), names[evsel->core.idx / distance])) { -			--err; -			pr_debug("%s != %s\n", evsel__name(evsel), names[evsel->core.idx / distance]); +		evlist__for_each_entry(evlist, evsel) { +			if (strcmp(evsel__name(evsel), names[i])) { +				pr_debug("%s != %s\n", evsel__name(evsel), names[i]); +				ret = TEST_FAIL; +			}  		} +		evlist__delete(evlist);  	} - -out_delete_evlist: -	evlist__delete(evlist); -	return err; +	return ret;  } -#define perf_evsel__name_array_test(names, distance) \ -	__perf_evsel__name_array_test(names, ARRAY_SIZE(names), distance) -  static int test__perf_evsel__roundtrip_name_test(struct test_suite *test __maybe_unused,  						 int subtest __maybe_unused)  { -	int err = 0, ret = 0; - -	if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom")) -		return perf_evsel__name_array_test(evsel__hw_names, 2); +	int err = 0, ret = TEST_OK; -	err = perf_evsel__name_array_test(evsel__hw_names, 1); +	err = perf_evsel__name_array_test(evsel__hw_names, PERF_COUNT_HW_MAX);  	if (err)  		ret = err; -	err = __perf_evsel__name_array_test(evsel__sw_names, PERF_COUNT_SW_DUMMY + 1, 1); +	err = perf_evsel__name_array_test(evsel__sw_names, PERF_COUNT_SW_DUMMY + 1);  	if (err)  		ret = err; diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c index 733ead151c63..3d01eb5e2512 100644 --- a/tools/perf/tests/expr.c +++ b/tools/perf/tests/expr.c @@ -185,6 +185,46 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u  			NULL, ctx) == 0);  	TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 0); +	/* The expression is a constant 0.0 without needing to evaluate EVENT1. 
*/ +	expr__ctx_clear(ctx); +	TEST_ASSERT_VAL("find ids", +			expr__find_ids("0 & EVENT1 > 0", NULL, ctx) == 0); +	TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 0); +	expr__ctx_clear(ctx); +	TEST_ASSERT_VAL("find ids", +			expr__find_ids("EVENT1 > 0 & 0", NULL, ctx) == 0); +	TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 0); +	expr__ctx_clear(ctx); +	TEST_ASSERT_VAL("find ids", +			expr__find_ids("1 & EVENT1 > 0", NULL, ctx) == 0); +	TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 1); +	TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "EVENT1", &val_ptr)); +	expr__ctx_clear(ctx); +	TEST_ASSERT_VAL("find ids", +			expr__find_ids("EVENT1 > 0 & 1", NULL, ctx) == 0); +	TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 1); +	TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "EVENT1", &val_ptr)); + +	/* The expression is a constant 1.0 without needing to evaluate EVENT1. */ +	expr__ctx_clear(ctx); +	TEST_ASSERT_VAL("find ids", +			expr__find_ids("1 | EVENT1 > 0", NULL, ctx) == 0); +	TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 0); +	expr__ctx_clear(ctx); +	TEST_ASSERT_VAL("find ids", +			expr__find_ids("EVENT1 > 0 | 1", NULL, ctx) == 0); +	TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 0); +	expr__ctx_clear(ctx); +	TEST_ASSERT_VAL("find ids", +			expr__find_ids("0 | EVENT1 > 0", NULL, ctx) == 0); +	TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 1); +	TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "EVENT1", &val_ptr)); +	expr__ctx_clear(ctx); +	TEST_ASSERT_VAL("find ids", +			expr__find_ids("EVENT1 > 0 | 0", NULL, ctx) == 0); +	TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 1); +	TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "EVENT1", &val_ptr)); +  	/* Test toplogy constants appear well ordered. */  	expr__ctx_clear(ctx);  	TEST_ASSERT_VAL("#num_cpus", expr__parse(&num_cpus, ctx, "#num_cpus") == 0); diff --git a/tools/perf/tests/hists_common.c b/tools/perf/tests/hists_common.c index 745ab18d17db..d08add0f4da6 100644 --- a/tools/perf/tests/hists_common.c +++ b/tools/perf/tests/hists_common.c @@ -211,7 +211,7 @@ void print_hists_out(struct hists *hists)  			struct dso *dso = map__dso(he->ms.map);  			pr_info("%2d: entry: %8s:%5d [%-8s] %20s: period = %"PRIu64"/%"PRIu64"\n", -				i, thread__comm_str(he->thread), he->thread->tid, +				i, thread__comm_str(he->thread), thread__tid(he->thread),  				dso->short_name,  				he->ms.sym->name, he->stat.period,  				he->stat_acc ? 
he->stat_acc->period : 0); diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c index 8c0e3f334747..71dacb0fec4d 100644 --- a/tools/perf/tests/hists_cumulate.c +++ b/tools/perf/tests/hists_cumulate.c @@ -8,8 +8,8 @@  #include "util/evsel.h"  #include "util/evlist.h"  #include "util/machine.h" -#include "util/thread.h"  #include "util/parse-events.h" +#include "util/thread.h"  #include "tests/tests.h"  #include "tests/hists_common.h"  #include <linux/kernel.h> @@ -84,6 +84,7 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)  	struct perf_sample sample = { .period = 1000, };  	size_t i; +	addr_location__init(&al);  	for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {  		struct hist_entry_iter iter = {  			.evsel = evsel, @@ -107,20 +108,22 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)  		if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,  					 NULL) < 0) { -			addr_location__put(&al);  			goto out;  		} -		fake_samples[i].thread = al.thread; +		thread__put(fake_samples[i].thread); +		fake_samples[i].thread = thread__get(al.thread);  		map__put(fake_samples[i].map); -		fake_samples[i].map = al.map; +		fake_samples[i].map = map__get(al.map);  		fake_samples[i].sym = al.sym;  	} +	addr_location__exit(&al);  	return TEST_OK;  out:  	pr_debug("Not enough memory for adding a hist entry\n"); +	addr_location__exit(&al);  	return TEST_FAIL;  } @@ -152,8 +155,10 @@ static void put_fake_samples(void)  {  	size_t i; -	for (i = 0; i < ARRAY_SIZE(fake_samples); i++) -		map__put(fake_samples[i].map); +	for (i = 0; i < ARRAY_SIZE(fake_samples); i++) { +		map__zput(fake_samples[i].map); +		thread__zput(fake_samples[i].thread); +	}  }  typedef int (*test_fn_t)(struct evsel *, struct machine *); @@ -162,7 +167,6 @@ typedef int (*test_fn_t)(struct evsel *, struct machine *);  #define DSO(he)   (map__dso(he->ms.map)->short_name)  #define SYM(he)   (he->ms.sym->name)  #define CPU(he)   (he->cpu) -#define PID(he)   (he->thread->tid)  #define DEPTH(he) (he->callchain->max_depth)  #define CDSO(cl)  (map__dso(cl->ms.map)->short_name)  #define CSYM(cl)  (cl->ms.sym->name) diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c index 98eff5935a1c..4b2e4f2fbe48 100644 --- a/tools/perf/tests/hists_filter.c +++ b/tools/perf/tests/hists_filter.c @@ -8,6 +8,7 @@  #include "util/evlist.h"  #include "util/machine.h"  #include "util/parse-events.h" +#include "util/thread.h"  #include "tests/tests.h"  #include "tests/hists_common.h"  #include <linux/kernel.h> @@ -53,6 +54,7 @@ static int add_hist_entries(struct evlist *evlist,  	struct perf_sample sample = { .period = 100, };  	size_t i; +	addr_location__init(&al);  	/*  	 * each evsel will have 10 samples but the 4th sample  	 * (perf [perf] main) will be collapsed to an existing entry @@ -84,21 +86,22 @@ static int add_hist_entries(struct evlist *evlist,  			al.socket = fake_samples[i].socket;  			if (hist_entry_iter__add(&iter, &al,  						 sysctl_perf_event_max_stack, NULL) < 0) { -				addr_location__put(&al);  				goto out;  			} -			fake_samples[i].thread = al.thread; +			thread__put(fake_samples[i].thread); +			fake_samples[i].thread = thread__get(al.thread);  			map__put(fake_samples[i].map); -			fake_samples[i].map = al.map; +			fake_samples[i].map = map__get(al.map);  			fake_samples[i].sym = al.sym;  		}  	} - +	addr_location__exit(&al);  	return 0;  out:  	pr_debug("Not enough memory for adding a hist entry\n"); +	addr_location__exit(&al);  	
return TEST_FAIL;  } diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c index 141e2972e34f..2d19657ab5e0 100644 --- a/tools/perf/tests/hists_link.c +++ b/tools/perf/tests/hists_link.c @@ -8,6 +8,7 @@  #include "machine.h"  #include "map.h"  #include "parse-events.h" +#include "thread.h"  #include "hists_common.h"  #include "util/mmap.h"  #include <errno.h> @@ -70,6 +71,7 @@ static int add_hist_entries(struct evlist *evlist, struct machine *machine)  	struct perf_sample sample = { .period = 1, .weight = 1, };  	size_t i = 0, k; +	addr_location__init(&al);  	/*  	 * each evsel will have 10 samples - 5 common and 5 distinct.  	 * However the second evsel also has a collapsed entry for @@ -90,13 +92,13 @@ static int add_hist_entries(struct evlist *evlist, struct machine *machine)  			he = hists__add_entry(hists, &al, NULL,  					      NULL, NULL, NULL, &sample, true);  			if (he == NULL) { -				addr_location__put(&al);  				goto out;  			} -			fake_common_samples[k].thread = al.thread; +			thread__put(fake_common_samples[k].thread); +			fake_common_samples[k].thread = thread__get(al.thread);  			map__put(fake_common_samples[k].map); -			fake_common_samples[k].map = al.map; +			fake_common_samples[k].map = map__get(al.map);  			fake_common_samples[k].sym = al.sym;  		} @@ -110,20 +112,22 @@ static int add_hist_entries(struct evlist *evlist, struct machine *machine)  			he = hists__add_entry(hists, &al, NULL,  					      NULL, NULL, NULL, &sample, true);  			if (he == NULL) { -				addr_location__put(&al);  				goto out;  			} -			fake_samples[i][k].thread = al.thread; -			fake_samples[i][k].map = al.map; +			thread__put(fake_samples[i][k].thread); +			fake_samples[i][k].thread = thread__get(al.thread); +			map__put(fake_samples[i][k].map); +			fake_samples[i][k].map = map__get(al.map);  			fake_samples[i][k].sym = al.sym;  		}  		i++;  	} +	addr_location__exit(&al);  	return 0; -  out: +	addr_location__exit(&al);  	pr_debug("Not enough memory for adding a hist entry\n");  	return -1;  } @@ -144,7 +148,7 @@ static int find_sample(struct sample *samples, size_t nr_samples,  		       struct thread *t, struct map *m, struct symbol *s)  {  	while (nr_samples--) { -		if (samples->thread == t && +		if (RC_CHK_ACCESS(samples->thread) == RC_CHK_ACCESS(t) &&  		    RC_CHK_ACCESS(samples->map) == RC_CHK_ACCESS(m) &&  		    samples->sym == s)  			return 1; diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c index cebd5226bb12..ba1cccf57049 100644 --- a/tools/perf/tests/hists_output.c +++ b/tools/perf/tests/hists_output.c @@ -54,6 +54,7 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)  	struct perf_sample sample = { .period = 100, };  	size_t i; +	addr_location__init(&al);  	for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {  		struct hist_entry_iter iter = {  			.evsel = evsel, @@ -73,20 +74,21 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)  		if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,  					 NULL) < 0) { -			addr_location__put(&al);  			goto out;  		}  		fake_samples[i].thread = al.thread;  		map__put(fake_samples[i].map); -		fake_samples[i].map = al.map; +		fake_samples[i].map = map__get(al.map);  		fake_samples[i].sym = al.sym;  	} +	addr_location__exit(&al);  	return TEST_OK;  out:  	pr_debug("Not enough memory for adding a hist entry\n"); +	addr_location__exit(&al);  	return TEST_FAIL;  } @@ -118,8 +120,10 @@ static void put_fake_samples(void)  {  	size_t i; -	for (i = 0; i 
< ARRAY_SIZE(fake_samples); i++) +	for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {  		map__put(fake_samples[i].map); +		fake_samples[i].map = NULL; +	}  }  typedef int (*test_fn_t)(struct evsel *, struct machine *); @@ -128,7 +132,7 @@ typedef int (*test_fn_t)(struct evsel *, struct machine *);  #define DSO(he)   (map__dso(he->ms.map)->short_name)  #define SYM(he)   (he->ms.sym->name)  #define CPU(he)   (he->cpu) -#define PID(he)   (he->thread->tid) +#define PID(he)   (thread__tid(he->thread))  /* default sort keys (no field) */  static int test1(struct evsel *evsel, struct machine *machine) diff --git a/tools/perf/tests/make b/tools/perf/tests/make index 8dd3f8090352..885cd321d67b 100644 --- a/tools/perf/tests/make +++ b/tools/perf/tests/make @@ -69,6 +69,7 @@ make_clean_all      := clean all  make_python_perf_so := $(python_perf_so)  make_debug          := DEBUG=1  make_nondistro      := BUILD_NONDISTRO=1 +make_extra_tests    := EXTRA_TESTS=1  make_no_libperl     := NO_LIBPERL=1  make_no_libpython   := NO_LIBPYTHON=1  make_no_scripts     := NO_LIBPYTHON=1 NO_LIBPERL=1 diff --git a/tools/perf/tests/maps.c b/tools/perf/tests/maps.c index 8c0eb5cf8bb5..5bb1123a91a7 100644 --- a/tools/perf/tests/maps.c +++ b/tools/perf/tests/maps.c @@ -140,7 +140,7 @@ static int test__maps__merge_in(struct test_suite *t __maybe_unused, int subtest  	ret = check_maps(merged3, ARRAY_SIZE(merged3), maps);  	TEST_ASSERT_VAL("merge check failed", !ret); -	maps__delete(maps); +	maps__zput(maps);  	return TEST_OK;  } diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c index 898eda55b7a8..ddd1da9a4ba9 100644 --- a/tools/perf/tests/mmap-thread-lookup.c +++ b/tools/perf/tests/mmap-thread-lookup.c @@ -187,6 +187,7 @@ static int mmap_events(synth_cb synth)  		struct addr_location al;  		struct thread *thread; +		addr_location__init(&al);  		thread = machine__findnew_thread(machine, getpid(), td->tid);  		pr_debug("looking for map %p\n", td->map); @@ -199,14 +200,14 @@ static int mmap_events(synth_cb synth)  		if (!al.map) {  			pr_debug("failed, couldn't find map\n");  			err = -1; +			addr_location__exit(&al);  			break;  		}  		pr_debug("map %p, addr %" PRIx64 "\n", al.map, map__start(al.map)); -		map__put(al.map); +		addr_location__exit(&al);  	} -	machine__delete_threads(machine);  	machine__delete(machine);  	return err;  } diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index 8068cfd89b84..133218e51ab4 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -6,7 +6,7 @@  #include "tests.h"  #include "debug.h"  #include "pmu.h" -#include "pmu-hybrid.h" +#include "pmus.h"  #include <dirent.h>  #include <errno.h>  #include "fncache.h" @@ -20,6 +20,26 @@  #define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \  			     PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD) +static bool test_config(const struct evsel *evsel, __u64 expected_config) +{ +	__u32 type = evsel->core.attr.type; +	__u64 config = evsel->core.attr.config; + +	if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE) { +		/* +		 * HARDWARE and HW_CACHE events encode the PMU's extended type +		 * in the top 32-bits. Mask in order to ignore. 
+		 */ +		config &= PERF_HW_EVENT_MASK; +	} +	return config == expected_config; +} + +static bool test_perf_config(const struct perf_evsel *evsel, __u64 expected_config) +{ +	return (evsel->attr.config & PERF_HW_EVENT_MASK) == expected_config; +} +  #ifdef HAVE_LIBTRACEEVENT  #if defined(__s390x__) @@ -82,11 +102,27 @@ static int test__checkevent_tracepoint_multi(struct evlist *evlist)  static int test__checkevent_raw(struct evlist *evlist)  { -	struct evsel *evsel = evlist__first(evlist); +	struct perf_evsel *evsel; +	bool raw_type_match = false; -	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config); +	TEST_ASSERT_VAL("wrong number of entries", 0 != evlist->core.nr_entries); + +	perf_evlist__for_each_evsel(&evlist->core, evsel) { +		struct perf_pmu *pmu = NULL; +		bool type_matched = false; + +		TEST_ASSERT_VAL("wrong config", test_perf_config(evsel, 0x1a)); +		while ((pmu = perf_pmus__scan(pmu)) != NULL) { +			if (pmu->type == evsel->attr.type) { +				TEST_ASSERT_VAL("PMU type expected once", !type_matched); +				type_matched = true; +				if (pmu->type == PERF_TYPE_RAW) +					raw_type_match = true; +			} +		} +		TEST_ASSERT_VAL("No PMU found for type", type_matched); +	} +	TEST_ASSERT_VAL("Raw PMU not matched", raw_type_match);  	return TEST_OK;  } @@ -96,39 +132,41 @@ static int test__checkevent_numeric(struct evlist *evlist)  	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);  	TEST_ASSERT_VAL("wrong type", 1 == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 1 == evsel->core.attr.config); +	TEST_ASSERT_VAL("wrong config", test_config(evsel, 1));  	return TEST_OK;  }  static int test__checkevent_symbolic_name(struct evlist *evlist)  { -	struct evsel *evsel = evlist__first(evlist); +	struct perf_evsel *evsel; -	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_INSTRUCTIONS == evsel->core.attr.config); +	TEST_ASSERT_VAL("wrong number of entries", 0 != evlist->core.nr_entries); + +	perf_evlist__for_each_evsel(&evlist->core, evsel) { +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); +		TEST_ASSERT_VAL("wrong config", +				test_perf_config(evsel, PERF_COUNT_HW_INSTRUCTIONS)); +	}  	return TEST_OK;  }  static int test__checkevent_symbolic_name_config(struct evlist *evlist)  { -	struct evsel *evsel = evlist__first(evlist); +	struct perf_evsel *evsel; -	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); -	/* -	 * The period value gets configured within evlist__config, -	 * while this test executes only parse events method. 
-	 */ -	TEST_ASSERT_VAL("wrong period", -			0 == evsel->core.attr.sample_period); -	TEST_ASSERT_VAL("wrong config1", -			0 == evsel->core.attr.config1); -	TEST_ASSERT_VAL("wrong config2", -			1 == evsel->core.attr.config2); +	TEST_ASSERT_VAL("wrong number of entries", 0 != evlist->core.nr_entries); + +	perf_evlist__for_each_evsel(&evlist->core, evsel) { +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); +		TEST_ASSERT_VAL("wrong config", test_perf_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); +		/* +		 * The period value gets configured within evlist__config, +		 * while this test executes only parse events method. +		 */ +		TEST_ASSERT_VAL("wrong period", 0 == evsel->attr.sample_period); +		TEST_ASSERT_VAL("wrong config1", 0 == evsel->attr.config1); +		TEST_ASSERT_VAL("wrong config2", 1 == evsel->attr.config2); +	}  	return TEST_OK;  } @@ -138,18 +176,20 @@ static int test__checkevent_symbolic_alias(struct evlist *evlist)  	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);  	TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_SW_PAGE_FAULTS == evsel->core.attr.config); +	TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_SW_PAGE_FAULTS));  	return TEST_OK;  }  static int test__checkevent_genhw(struct evlist *evlist)  { -	struct evsel *evsel = evlist__first(evlist); +	struct perf_evsel *evsel; -	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", (1 << 16) == evsel->core.attr.config); +	TEST_ASSERT_VAL("wrong number of entries", 0 != evlist->core.nr_entries); + +	perf_evlist__for_each_entry(&evlist->core, evsel) { +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->attr.type); +		TEST_ASSERT_VAL("wrong config", test_perf_config(evsel, 1 << 16)); +	}  	return TEST_OK;  } @@ -159,7 +199,7 @@ static int test__checkevent_breakpoint(struct evlist *evlist)  	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);  	TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 0 == evsel->core.attr.config); +	TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));  	TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) ==  					 evsel->core.attr.bp_type);  	TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_4 == @@ -173,7 +213,7 @@ static int test__checkevent_breakpoint_x(struct evlist *evlist)  	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);  	TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 0 == evsel->core.attr.config); +	TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));  	TEST_ASSERT_VAL("wrong bp_type",  			HW_BREAKPOINT_X == evsel->core.attr.bp_type);  	TEST_ASSERT_VAL("wrong bp_len", sizeof(long) == evsel->core.attr.bp_len); @@ -187,7 +227,7 @@ static int test__checkevent_breakpoint_r(struct evlist *evlist)  	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);  	TEST_ASSERT_VAL("wrong type",  			PERF_TYPE_BREAKPOINT == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 0 == evsel->core.attr.config); +	TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));  	TEST_ASSERT_VAL("wrong bp_type",  			HW_BREAKPOINT_R == evsel->core.attr.bp_type);  	TEST_ASSERT_VAL("wrong bp_len", @@ -202,7 +242,7 @@ static int 
test__checkevent_breakpoint_w(struct evlist *evlist)  	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);  	TEST_ASSERT_VAL("wrong type",  			PERF_TYPE_BREAKPOINT == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 0 == evsel->core.attr.config); +	TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));  	TEST_ASSERT_VAL("wrong bp_type",  			HW_BREAKPOINT_W == evsel->core.attr.bp_type);  	TEST_ASSERT_VAL("wrong bp_len", @@ -217,7 +257,7 @@ static int test__checkevent_breakpoint_rw(struct evlist *evlist)  	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);  	TEST_ASSERT_VAL("wrong type",  			PERF_TYPE_BREAKPOINT == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 0 == evsel->core.attr.config); +	TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));  	TEST_ASSERT_VAL("wrong bp_type",  		(HW_BREAKPOINT_R|HW_BREAKPOINT_W) == evsel->core.attr.bp_type);  	TEST_ASSERT_VAL("wrong bp_len", @@ -241,17 +281,15 @@ static int test__checkevent_tracepoint_modifier(struct evlist *evlist)  static int  test__checkevent_tracepoint_multi_modifier(struct evlist *evlist)  { -	struct evsel *evsel; +	struct perf_evsel *evsel;  	TEST_ASSERT_VAL("wrong number of entries", evlist->core.nr_entries > 1); -	evlist__for_each_entry(evlist, evsel) { -		TEST_ASSERT_VAL("wrong exclude_user", -				!evsel->core.attr.exclude_user); -		TEST_ASSERT_VAL("wrong exclude_kernel", -				evsel->core.attr.exclude_kernel); -		TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); -		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +	perf_evlist__for_each_entry(&evlist->core, evsel) { +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);  	}  	return test__checkevent_tracepoint_multi(evlist); @@ -260,57 +298,65 @@ test__checkevent_tracepoint_multi_modifier(struct evlist *evlist)  static int test__checkevent_raw_modifier(struct evlist *evlist)  { -	struct evsel *evsel = evlist__first(evlist); - -	TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip); +	struct perf_evsel *evsel; +	perf_evlist__for_each_entry(&evlist->core, evsel) { +		TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); +		TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); +	}  	return test__checkevent_raw(evlist);  }  static int test__checkevent_numeric_modifier(struct evlist *evlist)  { -	struct evsel *evsel = evlist__first(evlist); - -	TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip); +	struct perf_evsel *evsel; +	perf_evlist__for_each_entry(&evlist->core, evsel) { +		TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", 
!evsel->attr.exclude_hv); +		TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); +	}  	return test__checkevent_numeric(evlist);  }  static int test__checkevent_symbolic_name_modifier(struct evlist *evlist)  { -	struct evsel *evsel = evlist__first(evlist); +	struct perf_evsel *evsel; -	TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +	TEST_ASSERT_VAL("wrong number of entries", +			evlist->core.nr_entries == perf_pmus__num_core_pmus()); +	perf_evlist__for_each_entry(&evlist->core, evsel) { +		TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); +	}  	return test__checkevent_symbolic_name(evlist);  }  static int test__checkevent_exclude_host_modifier(struct evlist *evlist)  { -	struct evsel *evsel = evlist__first(evlist); - -	TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host); +	struct perf_evsel *evsel; +	perf_evlist__for_each_entry(&evlist->core, evsel) { +		TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); +	}  	return test__checkevent_symbolic_name(evlist);  }  static int test__checkevent_exclude_guest_modifier(struct evlist *evlist)  { -	struct evsel *evsel = evlist__first(evlist); - -	TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); +	struct perf_evsel *evsel; +	perf_evlist__for_each_entry(&evlist->core, evsel) { +		TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); +	}  	return test__checkevent_symbolic_name(evlist);  } @@ -328,13 +374,14 @@ static int test__checkevent_symbolic_alias_modifier(struct evlist *evlist)  static int test__checkevent_genhw_modifier(struct evlist *evlist)  { -	struct evsel *evsel = evlist__first(evlist); - -	TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip); +	struct perf_evsel *evsel; +	perf_evlist__for_each_entry(&evlist->core, evsel) { +		TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); +		TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); +	}  	return test__checkevent_genhw(evlist);  } @@ -439,6 +486,93 @@ static int test__checkevent_breakpoint_rw_modifier(struct evlist *evlist)  	return test__checkevent_breakpoint_rw(evlist);  } +static int test__checkevent_breakpoint_modifier_name(struct evlist *evlist) +{ +	struct evsel *evsel = evlist__first(evlist); + +	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); +	TEST_ASSERT_VAL("wrong exclude_hv", 
evsel->core.attr.exclude_hv); +	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +	TEST_ASSERT_VAL("wrong name", +			!strcmp(evsel__name(evsel), "breakpoint")); + +	return test__checkevent_breakpoint(evlist); +} + +static int test__checkevent_breakpoint_x_modifier_name(struct evlist *evlist) +{ +	struct evsel *evsel = evlist__first(evlist); + +	TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); +	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); +	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); +	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +	TEST_ASSERT_VAL("wrong name", +			!strcmp(evsel__name(evsel), "breakpoint")); + +	return test__checkevent_breakpoint_x(evlist); +} + +static int test__checkevent_breakpoint_r_modifier_name(struct evlist *evlist) +{ +	struct evsel *evsel = evlist__first(evlist); + +	TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); +	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); +	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); +	TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip); +	TEST_ASSERT_VAL("wrong name", +			!strcmp(evsel__name(evsel), "breakpoint")); + +	return test__checkevent_breakpoint_r(evlist); +} + +static int test__checkevent_breakpoint_w_modifier_name(struct evlist *evlist) +{ +	struct evsel *evsel = evlist__first(evlist); + +	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); +	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); +	TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip); +	TEST_ASSERT_VAL("wrong name", +			!strcmp(evsel__name(evsel), "breakpoint")); + +	return test__checkevent_breakpoint_w(evlist); +} + +static int test__checkevent_breakpoint_rw_modifier_name(struct evlist *evlist) +{ +	struct evsel *evsel = evlist__first(evlist); + +	TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); +	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); +	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); +	TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip); +	TEST_ASSERT_VAL("wrong name", +			!strcmp(evsel__name(evsel), "breakpoint")); + +	return test__checkevent_breakpoint_rw(evlist); +} + +static int test__checkevent_breakpoint_2_events(struct evlist *evlist) +{ +	struct evsel *evsel = evlist__first(evlist); + +	TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); + +	TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type); +	TEST_ASSERT_VAL("wrong name", !strcmp(evsel__name(evsel), "breakpoint1")); + +	evsel = evsel__next(evsel); + +	TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type); +	TEST_ASSERT_VAL("wrong name", !strcmp(evsel__name(evsel), "breakpoint2")); + +	return TEST_OK; +} +  static int test__checkevent_pmu(struct evlist *evlist)  { @@ -446,7 +580,7 @@ static int test__checkevent_pmu(struct evlist *evlist)  	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);  	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config",    10 == evsel->core.attr.config); +	TEST_ASSERT_VAL("wrong config",    test_config(evsel, 10));  	TEST_ASSERT_VAL("wrong config1",    1 == evsel->core.attr.config1);  	TEST_ASSERT_VAL("wrong config2",    3 == 
evsel->core.attr.config2);  	TEST_ASSERT_VAL("wrong config3",    0 == evsel->core.attr.config3); @@ -464,21 +598,23 @@ static int test__checkevent_list(struct evlist *evlist)  {  	struct evsel *evsel = evlist__first(evlist); -	TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->core.nr_entries); +	TEST_ASSERT_VAL("wrong number of entries", 3 <= evlist->core.nr_entries);  	/* r1 */ -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 1 == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong config1", 0 == evsel->core.attr.config1); -	TEST_ASSERT_VAL("wrong config2", 0 == evsel->core.attr.config2); -	TEST_ASSERT_VAL("wrong config3", 0 == evsel->core.attr.config3); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +	TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT != evsel->core.attr.type); +	while (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { +		TEST_ASSERT_VAL("wrong config", test_config(evsel, 1)); +		TEST_ASSERT_VAL("wrong config1", 0 == evsel->core.attr.config1); +		TEST_ASSERT_VAL("wrong config2", 0 == evsel->core.attr.config2); +		TEST_ASSERT_VAL("wrong config3", 0 == evsel->core.attr.config3); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		evsel = evsel__next(evsel); +	}  	/* syscalls:sys_enter_openat:k */ -	evsel = evsel__next(evsel);  	TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->core.attr.type);  	TEST_ASSERT_VAL("wrong sample_type",  		PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type); @@ -491,7 +627,7 @@ static int test__checkevent_list(struct evlist *evlist)  	/* 1:1:hp */  	evsel = evsel__next(evsel);  	TEST_ASSERT_VAL("wrong type", 1 == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 1 == evsel->core.attr.config); +	TEST_ASSERT_VAL("wrong config", test_config(evsel, 1));  	TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);  	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);  	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); @@ -508,14 +644,14 @@ static int test__checkevent_pmu_name(struct evlist *evlist)  	/* cpu/config=1,name=krava/u */  	TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);  	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config",  1 == evsel->core.attr.config); +	TEST_ASSERT_VAL("wrong config", test_config(evsel, 1));  	TEST_ASSERT_VAL("wrong name", !strcmp(evsel__name(evsel), "krava"));  	/* cpu/config=2/u" */  	evsel = evsel__next(evsel);  	TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);  	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config",  2 == evsel->core.attr.config); +	TEST_ASSERT_VAL("wrong config", test_config(evsel, 2));  	TEST_ASSERT_VAL("wrong name",  			!strcmp(evsel__name(evsel), "cpu/config=2/u")); @@ -529,7 +665,7 @@ static int test__checkevent_pmu_partial_time_callgraph(struct evlist *evlist)  	/* cpu/config=1,call-graph=fp,time,period=100000/ */  	
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);  	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config",  1 == evsel->core.attr.config); +	TEST_ASSERT_VAL("wrong config", test_config(evsel, 1));  	/*  	 * The period, time and callgraph value gets configured within evlist__config,  	 * while this test executes only parse events method. @@ -541,7 +677,7 @@ static int test__checkevent_pmu_partial_time_callgraph(struct evlist *evlist)  	/* cpu/config=2,call-graph=no,time=0,period=2000/ */  	evsel = evsel__next(evsel);  	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config",  2 == evsel->core.attr.config); +	TEST_ASSERT_VAL("wrong config", test_config(evsel, 2));  	/*  	 * The period, time and callgraph value gets configured within evlist__config,  	 * while this test executes only parse events method. @@ -558,7 +694,8 @@ static int test__checkevent_pmu_events(struct evlist *evlist)  	struct evsel *evsel = evlist__first(evlist);  	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); +	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type || +				      strcmp(evsel->pmu_name, "cpu"));  	TEST_ASSERT_VAL("wrong exclude_user",  			!evsel->core.attr.exclude_user);  	TEST_ASSERT_VAL("wrong exclude_kernel", @@ -574,23 +711,28 @@ static int test__checkevent_pmu_events(struct evlist *evlist)  static int test__checkevent_pmu_events_mix(struct evlist *evlist)  { -	struct evsel *evsel = evlist__first(evlist); - -	/* pmu-event:u */ -	TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong exclude_user", -			!evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", -			evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned); -	TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive); +	struct evsel *evsel = NULL; +	/* +	 * The wild card event will be opened at least once, but it may be +	 * opened on each core PMU. +	 */ +	TEST_ASSERT_VAL("wrong number of entries", evlist->core.nr_entries >= 2); +	for (int i = 0; i < evlist->core.nr_entries - 1; i++) { +		evsel = (i == 0 ? 
evlist__first(evlist) : evsel__next(evsel)); +		/* pmu-event:u */ +		TEST_ASSERT_VAL("wrong exclude_user", +				!evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", +				evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned); +		TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive); +	}  	/* cpu/pmu-event/u*/  	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); +	TEST_ASSERT_VAL("wrong type", evsel__find_pmu(evsel)->is_core);  	TEST_ASSERT_VAL("wrong exclude_user",  			!evsel->core.attr.exclude_user);  	TEST_ASSERT_VAL("wrong exclude_kernel", @@ -661,11 +803,11 @@ static int test__checkterms_simple(struct list_head *terms)  	 */  	term = list_entry(term->list.next, struct parse_events_term, list);  	TEST_ASSERT_VAL("wrong type term", -			term->type_term == PARSE_EVENTS__TERM_TYPE_USER); +			term->type_term == PARSE_EVENTS__TERM_TYPE_RAW);  	TEST_ASSERT_VAL("wrong type val", -			term->type_val == PARSE_EVENTS__TERM_TYPE_NUM); -	TEST_ASSERT_VAL("wrong val", term->val.num == 1); -	TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "read")); +			term->type_val == PARSE_EVENTS__TERM_TYPE_STR); +	TEST_ASSERT_VAL("wrong val", !strcmp(term->val.str, "read")); +	TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "raw"));  	/*  	 * r0xead @@ -675,11 +817,11 @@ static int test__checkterms_simple(struct list_head *terms)  	 */  	term = list_entry(term->list.next, struct parse_events_term, list);  	TEST_ASSERT_VAL("wrong type term", -			term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG); +			term->type_term == PARSE_EVENTS__TERM_TYPE_RAW);  	TEST_ASSERT_VAL("wrong type val", -			term->type_val == PARSE_EVENTS__TERM_TYPE_NUM); -	TEST_ASSERT_VAL("wrong val", term->val.num == 0xead); -	TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config")); +			term->type_val == PARSE_EVENTS__TERM_TYPE_STR); +	TEST_ASSERT_VAL("wrong val", !strcmp(term->val.str, "r0xead")); +	TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "raw"));  	return TEST_OK;  } @@ -687,189 +829,207 @@ static int test__group1(struct evlist *evlist)  {  	struct evsel *evsel, *leader; -	TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong number of groups", 1 == evlist__nr_groups(evlist)); - -	/* instructions:k */ -	evsel = leader = evlist__first(evlist); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_INSTRUCTIONS == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); -	TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0); -	TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); +	TEST_ASSERT_VAL("wrong number of 
entries", +			evlist->core.nr_entries == (perf_pmus__num_core_pmus() * 2)); +	TEST_ASSERT_VAL("wrong number of groups", +			evlist__nr_groups(evlist) == perf_pmus__num_core_pmus()); -	/* cycles:upp */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); -	/* use of precise requires exclude_guest */ -	TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 2); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); -	TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); +	for (int i = 0; i < perf_pmus__num_core_pmus(); i++) { +		/* instructions:k */ +		evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel)); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS)); +		TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); +		TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2); +		TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0); +		TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); +		/* cycles:upp */ +		evsel = evsel__next(evsel); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); +		/* use of precise requires exclude_guest */ +		TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 2); +		TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); +		TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); +		TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); +	}  	return TEST_OK;  }  static int test__group2(struct evlist *evlist)  { -	struct evsel *evsel, *leader; +	struct evsel *evsel, *leader = NULL; -	TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->core.nr_entries); +	TEST_ASSERT_VAL("wrong number of entries", +			evlist->core.nr_entries == (2 * perf_pmus__num_core_pmus() + 1)); +	/* +	 * TODO: Currently the software event won't be grouped with the hardware +	 * event except for 1 PMU. 
+	 */  	TEST_ASSERT_VAL("wrong number of groups", 1 == evlist__nr_groups(evlist)); -	/* faults + :ku modifier */ -	evsel = leader = evlist__first(evlist); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_SW_PAGE_FAULTS == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); -	TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0); -	TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); - -	/* cache-references + :u modifier */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CACHE_REFERENCES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); -	TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); - -	/* cycles:k */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); -	TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); - +	evlist__for_each_entry(evlist, evsel) { +		if (evsel->core.attr.type == PERF_TYPE_SOFTWARE) { +			/* faults + :ku modifier */ +			leader = evsel; +			TEST_ASSERT_VAL("wrong config", +					test_config(evsel, PERF_COUNT_SW_PAGE_FAULTS)); +			TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +			TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); +			TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); +			TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); +			TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); +			TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +			TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); +			TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2); +			TEST_ASSERT_VAL("wrong group_idx", 
evsel__group_idx(evsel) == 0); +			TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); +			continue; +		} +		if (evsel->core.attr.type == PERF_TYPE_HARDWARE && +		    test_config(evsel, PERF_COUNT_HW_CACHE_REFERENCES)) { +			/* cache-references + :u modifier */ +			TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +			TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); +			TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); +			TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); +			TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); +			TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +			if (evsel__has_leader(evsel, leader)) +				TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); +			TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); +			continue; +		} +		/* cycles:k */ +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); +		TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); +		TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); +	}  	return TEST_OK;  }  #ifdef HAVE_LIBTRACEEVENT  static int test__group3(struct evlist *evlist __maybe_unused)  { -	struct evsel *evsel, *leader; +	struct evsel *evsel, *group1_leader = NULL, *group2_leader = NULL; -	TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->core.nr_entries); +	TEST_ASSERT_VAL("wrong number of entries", +			evlist->core.nr_entries == (3 * perf_pmus__num_core_pmus() + 2)); +	/* +	 * Currently the software event won't be grouped with the hardware event +	 * except for 1 PMU. This means there are always just 2 groups +	 * regardless of the number of core PMUs. 
+	 */  	TEST_ASSERT_VAL("wrong number of groups", 2 == evlist__nr_groups(evlist)); -	/* group1 syscalls:sys_enter_openat:H */ -	evsel = leader = evlist__first(evlist); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong sample_type", -		PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type); -	TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->core.attr.sample_period); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); -	TEST_ASSERT_VAL("wrong group name", -		!strcmp(leader->group_name, "group1")); -	TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0); -	TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); - -	/* group1 cycles:kppp */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); -	/* use of precise requires exclude_guest */ -	TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 3); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	TEST_ASSERT_VAL("wrong group name", !evsel->group_name); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); -	TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); - -	/* group2 cycles + G modifier */ -	evsel = leader = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); -	TEST_ASSERT_VAL("wrong group name", -		!strcmp(leader->group_name, "group2")); -	TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0); -	TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); - -	/* group2 1:3 + G modifier */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", 1 == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 3 == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); -	TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); - -	/* instructions:u */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_INSTRUCTIONS == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); -	TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); - +	evlist__for_each_entry(evlist, evsel) { +		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) { +			/* group1 syscalls:sys_enter_openat:H */ +			group1_leader = evsel; +			TEST_ASSERT_VAL("wrong sample_type", +					evsel->core.attr.sample_type == PERF_TP_SAMPLE_TYPE); +			TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->core.attr.sample_period); +			TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +			TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); +			TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); +			TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); +			TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); +			TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +			TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); +			TEST_ASSERT_VAL("wrong group name", !strcmp(evsel->group_name, "group1")); +			TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2); +			TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0); +			TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); +			continue; +		} +		if (evsel->core.attr.type == PERF_TYPE_HARDWARE && +		    test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)) { +			if (evsel->core.attr.exclude_user) { +				/* group1 cycles:kppp */ +				TEST_ASSERT_VAL("wrong exclude_user", +						evsel->core.attr.exclude_user); +				TEST_ASSERT_VAL("wrong exclude_kernel", +						!evsel->core.attr.exclude_kernel); +				TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); +				/* use of precise requires exclude_guest */ +				TEST_ASSERT_VAL("wrong exclude guest", +						evsel->core.attr.exclude_guest); +				TEST_ASSERT_VAL("wrong exclude host", +						!evsel->core.attr.exclude_host); +				TEST_ASSERT_VAL("wrong precise_ip", +						evsel->core.attr.precise_ip == 3); +				if (evsel__has_leader(evsel, group1_leader)) { +					TEST_ASSERT_VAL("wrong group name", !evsel->group_name); +					TEST_ASSERT_VAL("wrong group_idx", +							evsel__group_idx(evsel) == 1); +				} +				TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); +			} else { +				/* group2 cycles + G modifier */ +				group2_leader = evsel; +				TEST_ASSERT_VAL("wrong exclude_kernel", +		
				!evsel->core.attr.exclude_kernel); +				TEST_ASSERT_VAL("wrong exclude_hv", +						!evsel->core.attr.exclude_hv); +				TEST_ASSERT_VAL("wrong exclude guest", +						!evsel->core.attr.exclude_guest); +				TEST_ASSERT_VAL("wrong exclude host", +						evsel->core.attr.exclude_host); +				TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +				TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); +				if (evsel->core.nr_members == 2) { +					TEST_ASSERT_VAL("wrong group_idx", +							evsel__group_idx(evsel) == 0); +				} +				TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); +			} +			continue; +		} +		if (evsel->core.attr.type == 1) { +			/* group2 1:3 + G modifier */ +			TEST_ASSERT_VAL("wrong config", test_config(evsel, 3)); +			TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +			TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); +			TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); +			TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); +			TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host); +			TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +			if (evsel__has_leader(evsel, group2_leader)) +				TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); +			TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); +			continue; +		} +		/* instructions:u */ +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); +		TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); +	}  	return TEST_OK;  }  #endif @@ -878,425 +1038,435 @@ static int test__group4(struct evlist *evlist __maybe_unused)  {  	struct evsel *evsel, *leader; -	TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong number of groups", 1 == evlist__nr_groups(evlist)); - -	/* cycles:u + p */ -	evsel = leader = evlist__first(evlist); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); -	/* use of precise requires exclude_guest */ -	TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 1); -	TEST_ASSERT_VAL("wrong group name", !evsel->group_name); -	TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); -	TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0); -	TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); +	TEST_ASSERT_VAL("wrong 
number of entries", +			evlist->core.nr_entries == (perf_pmus__num_core_pmus() * 2)); +	TEST_ASSERT_VAL("wrong number of groups", +			perf_pmus__num_core_pmus() == evlist__nr_groups(evlist)); -	/* instructions:kp + p */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_INSTRUCTIONS == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); -	/* use of precise requires exclude_guest */ -	TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 2); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); -	TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); +	for (int i = 0; i < perf_pmus__num_core_pmus(); i++) { +		/* cycles:u + p */ +		evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel)); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); +		/* use of precise requires exclude_guest */ +		TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 1); +		TEST_ASSERT_VAL("wrong group name", !evsel->group_name); +		TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); +		TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2); +		TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0); +		TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); +		/* instructions:kp + p */ +		evsel = evsel__next(evsel); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS)); +		TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); +		/* use of precise requires exclude_guest */ +		TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 2); +		TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); +		TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); +		TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); +	}  	return TEST_OK;  }  static int test__group5(struct evlist *evlist __maybe_unused)  { -	struct evsel *evsel, *leader; - -	TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong number of groups", 2 == evlist__nr_groups(evlist)); - -	/* cycles + G */ -	evsel = leader = evlist__first(evlist); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == 
evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong group name", !evsel->group_name); -	TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); -	TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0); -	TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); +	struct evsel *evsel = NULL, *leader; -	/* instructions + G */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_INSTRUCTIONS == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); -	TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); +	TEST_ASSERT_VAL("wrong number of entries", +			evlist->core.nr_entries == (5 * perf_pmus__num_core_pmus())); +	TEST_ASSERT_VAL("wrong number of groups", +			evlist__nr_groups(evlist) == (2 * perf_pmus__num_core_pmus())); -	/* cycles:G */ -	evsel = leader = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong group name", !evsel->group_name); -	TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); -	TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0); -	TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); - -	/* instructions:G */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_INSTRUCTIONS == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", 
evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); +	for (int i = 0; i < perf_pmus__num_core_pmus(); i++) { +		/* cycles + G */ +		evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel)); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong group name", !evsel->group_name); +		TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); +		TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2); +		TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0); +		TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); -	/* cycles */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); +		/* instructions + G */ +		evsel = evsel__next(evsel); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); +		TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); +		TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); +	} +	for (int i = 0; i < perf_pmus__num_core_pmus(); i++) { +		/* cycles:G */ +		evsel = leader = evsel__next(evsel); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host); +	
	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong group name", !evsel->group_name); +		TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); +		TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2); +		TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0); +		TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); +		/* instructions:G */ +		evsel = evsel__next(evsel); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); +		TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); +	} +	for (int i = 0; i < perf_pmus__num_core_pmus(); i++) { +		/* cycles */ +		evsel = evsel__next(evsel); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); +	}  	return TEST_OK;  }  static int test__group_gh1(struct evlist *evlist)  { -	struct evsel *evsel, *leader; +	struct evsel *evsel = NULL, *leader; -	TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong number of groups", 1 == evlist__nr_groups(evlist)); +	TEST_ASSERT_VAL("wrong number of entries", +			evlist->core.nr_entries == (2 * perf_pmus__num_core_pmus())); +	TEST_ASSERT_VAL("wrong number of groups", +			evlist__nr_groups(evlist) == perf_pmus__num_core_pmus()); -	/* cycles + :H group modifier */ -	evsel = leader = evlist__first(evlist); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong group name", !evsel->group_name); -	TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); -	TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0); - -	/* cache-misses:G + :H group modifier */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", 
PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CACHE_MISSES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); +	for (int i = 0; i < perf_pmus__num_core_pmus(); i++) { +		/* cycles + :H group modifier */ +		evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel)); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong group name", !evsel->group_name); +		TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); +		TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2); +		TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0); +		/* cache-misses:G + :H group modifier */ +		evsel = evsel__next(evsel); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); +		TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); +	}  	return TEST_OK;  }  static int test__group_gh2(struct evlist *evlist)  { -	struct evsel *evsel, *leader; +	struct evsel *evsel = NULL, *leader; -	TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong number of groups", 1 == evlist__nr_groups(evlist)); - -	/* cycles + :G group modifier */ -	evsel = leader = evlist__first(evlist); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", 
!evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong group name", !evsel->group_name); -	TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); -	TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0); +	TEST_ASSERT_VAL("wrong number of entries", +			evlist->core.nr_entries == (2 * perf_pmus__num_core_pmus())); +	TEST_ASSERT_VAL("wrong number of groups", +			evlist__nr_groups(evlist) == perf_pmus__num_core_pmus()); -	/* cache-misses:H + :G group modifier */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CACHE_MISSES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); +	for (int i = 0; i < perf_pmus__num_core_pmus(); i++) { +		/* cycles + :G group modifier */ +		evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel)); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong group name", !evsel->group_name); +		TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); +		TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2); +		TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0); +		/* cache-misses:H + :G group modifier */ +		evsel = evsel__next(evsel); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); +		TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); +	}  	return TEST_OK;  }  static int test__group_gh3(struct evlist *evlist)  { -	struct evsel *evsel, *leader; - -	TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong number of groups", 1 == evlist__nr_groups(evlist)); +	struct evsel *evsel = NULL, *leader; -	/* cycles:G + :u group modifier */ -	evsel = leader = 
evlist__first(evlist); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong group name", !evsel->group_name); -	TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); -	TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0); +	TEST_ASSERT_VAL("wrong number of entries", +			evlist->core.nr_entries == (2 * perf_pmus__num_core_pmus())); +	TEST_ASSERT_VAL("wrong number of groups", +			evlist__nr_groups(evlist) == perf_pmus__num_core_pmus()); -	/* cache-misses:H + :u group modifier */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CACHE_MISSES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); +	for (int i = 0; i < perf_pmus__num_core_pmus(); i++) { +		/* cycles:G + :u group modifier */ +		evsel = leader = (i == 0 ? 
evlist__first(evlist) : evsel__next(evsel)); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong group name", !evsel->group_name); +		TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); +		TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2); +		TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0); +		/* cache-misses:H + :u group modifier */ +		evsel = evsel__next(evsel); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); +		TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); +	}  	return TEST_OK;  }  static int test__group_gh4(struct evlist *evlist)  { -	struct evsel *evsel, *leader; - -	TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong number of groups", 1 == evlist__nr_groups(evlist)); +	struct evsel *evsel = NULL, *leader; -	/* cycles:G + :uG group modifier */ -	evsel = leader = evlist__first(evlist); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong group name", !evsel->group_name); -	TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); -	TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0); +	TEST_ASSERT_VAL("wrong number of entries", +			evlist->core.nr_entries == (2 * perf_pmus__num_core_pmus())); +	TEST_ASSERT_VAL("wrong number of groups", +			evlist__nr_groups(evlist) == perf_pmus__num_core_pmus()); -	/* cache-misses:H + :uG group modifier */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CACHE_MISSES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong 
exclude_kernel", evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); +	for (int i = 0; i < perf_pmus__num_core_pmus(); i++) { +		/* cycles:G + :uG group modifier */ +		evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel)); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong group name", !evsel->group_name); +		TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); +		TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2); +		TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0); +		/* cache-misses:H + :uG group modifier */ +		evsel = evsel__next(evsel); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); +		TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); +	}  	return TEST_OK;  }  static int test__leader_sample1(struct evlist *evlist)  { -	struct evsel *evsel, *leader; +	struct evsel *evsel = NULL, *leader; -	TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->core.nr_entries); +	TEST_ASSERT_VAL("wrong number of entries", +			evlist->core.nr_entries == (3 * perf_pmus__num_core_pmus())); -	/* cycles - sampling group leader */ -	evsel = leader = evlist__first(evlist); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong group name", !evsel->group_name); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read); - -	/* 
cache-misses - not sampling */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CACHE_MISSES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read); +	for (int i = 0; i < perf_pmus__num_core_pmus(); i++) { +		/* cycles - sampling group leader */ +		evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel)); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong group name", !evsel->group_name); +		TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); +		TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read); -	/* branch-misses - not sampling */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_BRANCH_MISSES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong group name", !evsel->group_name); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read); +		/* cache-misses - not sampling */ +		evsel = evsel__next(evsel); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); +		TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read); +		/* branch-misses - not sampling */ +		evsel = evsel__next(evsel); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == 
evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_MISSES)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong group name", !evsel->group_name); +		TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); +		TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read); +	}  	return TEST_OK;  }  static int test__leader_sample2(struct evlist *evlist __maybe_unused)  { -	struct evsel *evsel, *leader; +	struct evsel *evsel = NULL, *leader; -	TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); +	TEST_ASSERT_VAL("wrong number of entries", +			evlist->core.nr_entries == (2 * perf_pmus__num_core_pmus())); -	/* instructions - sampling group leader */ -	evsel = leader = evlist__first(evlist); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_INSTRUCTIONS == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong group name", !evsel->group_name); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read); - -	/* branch-misses - not sampling */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_BRANCH_MISSES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); -	TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); -	TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong group name", !evsel->group_name); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read); +	for (int i = 0; i < perf_pmus__num_core_pmus(); i++) { +		/* instructions - sampling group leader */ +		evsel = leader = (i == 0 ? 
evlist__first(evlist) : evsel__next(evsel)); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong group name", !evsel->group_name); +		TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); +		TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read); +		/* branch-misses - not sampling */ +		evsel = evsel__next(evsel); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_MISSES)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); +		TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); +		TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong group name", !evsel->group_name); +		TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); +		TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read); +	}  	return TEST_OK;  }  static int test__checkevent_pinned_modifier(struct evlist *evlist)  { -	struct evsel *evsel = evlist__first(evlist); +	struct evsel *evsel = NULL; -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); -	TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); -	TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip); -	TEST_ASSERT_VAL("wrong pinned", evsel->core.attr.pinned); +	TEST_ASSERT_VAL("wrong number of entries", +			evlist->core.nr_entries == perf_pmus__num_core_pmus()); +	for (int i = 0; i < perf_pmus__num_core_pmus(); i++) { +		evsel = (i == 0 ? 
evlist__first(evlist) : evsel__next(evsel)); +		TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); +		TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); +		TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); +		TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip); +		TEST_ASSERT_VAL("wrong pinned", evsel->core.attr.pinned); +	}  	return test__checkevent_symbolic_name(evlist);  }  static int test__pinned_group(struct evlist *evlist)  { -	struct evsel *evsel, *leader; +	struct evsel *evsel = NULL, *leader; -	TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->core.nr_entries); +	TEST_ASSERT_VAL("wrong number of entries", +			evlist->core.nr_entries == (3 * perf_pmus__num_core_pmus())); -	/* cycles - group leader */ -	evsel = leader = evlist__first(evlist); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong group name", !evsel->group_name); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	TEST_ASSERT_VAL("wrong pinned", evsel->core.attr.pinned); +	for (int i = 0; i < perf_pmus__num_core_pmus(); i++) { +		/* cycles - group leader */ +		evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel)); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); +		TEST_ASSERT_VAL("wrong group name", !evsel->group_name); +		TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); +		/* TODO: The group modifier is not copied to the split group leader. */ +		if (perf_pmus__num_core_pmus() == 1) +			TEST_ASSERT_VAL("wrong pinned", evsel->core.attr.pinned); -	/* cache-misses - can not be pinned, but will go on with the leader */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CACHE_MISSES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned); - -	/* branch-misses - ditto */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_BRANCH_MISSES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned); +		/* cache-misses - can not be pinned, but will go on with the leader */ +		evsel = evsel__next(evsel); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES)); +		TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned); +		/* branch-misses - ditto */ +		evsel = evsel__next(evsel); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_MISSES)); +		TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned); +	}  	return TEST_OK;  } @@ -1315,32 +1485,33 @@ static int test__checkevent_exclusive_modifier(struct evlist *evlist)  static int test__exclusive_group(struct evlist *evlist)  { -	struct evsel *evsel, *leader; +	struct evsel *evsel = NULL, *leader; -	TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->core.nr_entries); +	TEST_ASSERT_VAL("wrong number of entries", +			evlist->core.nr_entries == (3 * perf_pmus__num_core_pmus())); -	/* cycles - group leader */ -	evsel = leader = evlist__first(evlist); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			
PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong group name", !evsel->group_name); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	TEST_ASSERT_VAL("wrong exclusive", evsel->core.attr.exclusive); +	for (int i = 0; i < perf_pmus__num_core_pmus(); i++) { +		/* cycles - group leader */ +		evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel)); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); +		TEST_ASSERT_VAL("wrong group name", !evsel->group_name); +		TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); +		/* TODO: The group modifier is not copied to the split group leader. */ +		if (perf_pmus__num_core_pmus() == 1) +			TEST_ASSERT_VAL("wrong exclusive", evsel->core.attr.exclusive); -	/* cache-misses - can not be pinned, but will go on with the leader */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_CACHE_MISSES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive); - -	/* branch-misses - ditto */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_HW_BRANCH_MISSES == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive); +		/* cache-misses - can not be pinned, but will go on with the leader */ +		evsel = evsel__next(evsel); +		TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES)); +		TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive); +		/* branch-misses - ditto */ +		evsel = evsel__next(evsel); +		TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_MISSES)); +		TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive); +	}  	return TEST_OK;  }  static int test__checkevent_breakpoint_len(struct evlist *evlist) @@ -1349,7 +1520,7 @@ static int test__checkevent_breakpoint_len(struct evlist *evlist)  	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);  	TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 0 == evsel->core.attr.config); +	TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));  	TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) ==  					 evsel->core.attr.bp_type);  	TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_1 == @@ -1364,7 +1535,7 @@ static int test__checkevent_breakpoint_len_w(struct evlist *evlist)  	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);  	TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 0 == evsel->core.attr.config); +	TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));  	TEST_ASSERT_VAL("wrong bp_type", HW_BREAKPOINT_W ==  					 evsel->core.attr.bp_type);  	TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_2 == @@ -1390,10 +1561,10 @@ static int test__checkevent_precise_max_modifier(struct evlist *evlist)  {  	struct evsel *evsel = evlist__first(evlist); -	TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); +	TEST_ASSERT_VAL("wrong number of entries", +			evlist->core.nr_entries == (1 + perf_pmus__num_core_pmus()));  	TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type); -	
TEST_ASSERT_VAL("wrong config", -			PERF_COUNT_SW_TASK_CLOCK == evsel->core.attr.config); +	TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_SW_TASK_CLOCK));  	return TEST_OK;  } @@ -1426,12 +1597,17 @@ static int test__checkevent_config_cache(struct evlist *evlist)  	struct evsel *evsel = evlist__first(evlist);  	TEST_ASSERT_VAL("wrong name setting", evsel__name_is(evsel, "cachepmu")); -	return TEST_OK; +	return test__checkevent_genhw(evlist); +} + +static bool test__pmu_cpu_valid(void) +{ +	return !!perf_pmus__find("cpu");  }  static bool test__intel_pt_valid(void)  { -	return !!perf_pmu__find("intel_pt"); +	return !!perf_pmus__find("intel_pt");  }  static int test__intel_pt(struct evlist *evlist) @@ -1446,7 +1622,9 @@ static int test__checkevent_complex_name(struct evlist *evlist)  {  	struct evsel *evsel = evlist__first(evlist); -	TEST_ASSERT_VAL("wrong complex name parsing", evsel__name_is(evsel, "COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks")); +	TEST_ASSERT_VAL("wrong complex name parsing", +			evsel__name_is(evsel, +				       "COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks"));  	return TEST_OK;  } @@ -1456,7 +1634,7 @@ static int test__checkevent_raw_pmu(struct evlist *evlist)  	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);  	TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config); +	TEST_ASSERT_VAL("wrong config", test_config(evsel, 0x1a));  	return TEST_OK;  } @@ -1465,7 +1643,7 @@ static int test__sym_event_slash(struct evlist *evlist)  	struct evsel *evsel = evlist__first(evlist);  	TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE); -	TEST_ASSERT_VAL("wrong config", evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES); +	TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));  	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);  	return TEST_OK;  } @@ -1475,11 +1653,31 @@ static int test__sym_event_dc(struct evlist *evlist)  	struct evsel *evsel = evlist__first(evlist);  	TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE); -	TEST_ASSERT_VAL("wrong config", evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES); +	TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));  	TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);  	return TEST_OK;  } +static int test__term_equal_term(struct evlist *evlist) +{ +	struct evsel *evsel = evlist__first(evlist); + +	TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE); +	TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); +	TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "name") == 0); +	return TEST_OK; +} + +static int test__term_equal_legacy(struct evlist *evlist) +{ +	struct evsel *evsel = evlist__first(evlist); + +	TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE); +	TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); +	TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "l1d") == 0); +	return TEST_OK; +} +  #ifdef HAVE_LIBTRACEEVENT  static int count_tracepoints(void)  { @@ -1536,127 +1734,6 @@ static int test__all_tracepoints(struct evlist *evlist)  }  #endif /* HAVE_LIBTRACEVENT */ -static int test__hybrid_hw_event_with_pmu(struct evlist *evlist) -{ -	struct evsel *evsel = evlist__first(evlist); - -	TEST_ASSERT_VAL("wrong number of entries", 1 == 
evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config); -	return TEST_OK; -} - -static int test__hybrid_hw_group_event(struct evlist *evlist) -{ -	struct evsel *evsel, *leader; - -	evsel = leader = evlist__first(evlist); -	TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); - -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 0xc0 == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	return TEST_OK; -} - -static int test__hybrid_sw_hw_group_event(struct evlist *evlist) -{ -	struct evsel *evsel, *leader; - -	evsel = leader = evlist__first(evlist); -	TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); - -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	return TEST_OK; -} - -static int test__hybrid_hw_sw_group_event(struct evlist *evlist) -{ -	struct evsel *evsel, *leader; - -	evsel = leader = evlist__first(evlist); -	TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); - -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	return TEST_OK; -} - -static int test__hybrid_group_modifier1(struct evlist *evlist) -{ -	struct evsel *evsel, *leader; - -	evsel = leader = evlist__first(evlist); -	TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); - -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 0xc0 == evsel->core.attr.config); -	TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); -	TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); -	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); -	return TEST_OK; -} - -static int test__hybrid_raw1(struct evlist *evlist) -{ -	struct evsel *evsel = evlist__first(evlist); - -	if (!perf_pmu__hybrid_mounted("cpu_atom")) { -		TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); -		TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); -		TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config); -		return TEST_OK; -	} - -	
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config); - -	/* The type of second event is randome value */ -	evsel = evsel__next(evsel); -	TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config); -	return TEST_OK; -} - -static int test__hybrid_raw2(struct evlist *evlist) -{ -	struct evsel *evsel = evlist__first(evlist); - -	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config); -	return TEST_OK; -} - -static int test__hybrid_cache_event(struct evlist *evlist) -{ -	struct evsel *evsel = evlist__first(evlist); - -	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); -	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type); -	TEST_ASSERT_VAL("wrong config", 0x2 == (evsel->core.attr.config & 0xffffffff)); -	return TEST_OK; -} -  struct evlist_test {  	const char *name;  	bool (*valid)(void); @@ -1973,26 +2050,110 @@ static const struct evlist_test test__events[] = {  		.check = test__exclusive_group,  		/* 7 */  	}, +	{ +		.name  = "cycles/name=name/", +		.check = test__term_equal_term, +		/* 8 */ +	}, +	{ +		.name  = "cycles/name=l1d/", +		.check = test__term_equal_legacy, +		/* 9 */ +	}, +	{ +		.name  = "mem:0/name=breakpoint/", +		.check = test__checkevent_breakpoint, +		/* 0 */ +	}, +	{ +		.name  = "mem:0:x/name=breakpoint/", +		.check = test__checkevent_breakpoint_x, +		/* 1 */ +	}, +	{ +		.name  = "mem:0:r/name=breakpoint/", +		.check = test__checkevent_breakpoint_r, +		/* 2 */ +	}, +	{ +		.name  = "mem:0:w/name=breakpoint/", +		.check = test__checkevent_breakpoint_w, +		/* 3 */ +	}, +	{ +		.name  = "mem:0/name=breakpoint/u", +		.check = test__checkevent_breakpoint_modifier_name, +		/* 4 */ +	}, +	{ +		.name  = "mem:0:x/name=breakpoint/k", +		.check = test__checkevent_breakpoint_x_modifier_name, +		/* 5 */ +	}, +	{ +		.name  = "mem:0:r/name=breakpoint/hp", +		.check = test__checkevent_breakpoint_r_modifier_name, +		/* 6 */ +	}, +	{ +		.name  = "mem:0:w/name=breakpoint/up", +		.check = test__checkevent_breakpoint_w_modifier_name, +		/* 7 */ +	}, +	{ +		.name  = "mem:0:rw/name=breakpoint/", +		.check = test__checkevent_breakpoint_rw, +		/* 8 */ +	}, +	{ +		.name  = "mem:0:rw/name=breakpoint/kp", +		.check = test__checkevent_breakpoint_rw_modifier_name, +		/* 9 */ +	}, +	{ +		.name  = "mem:0/1/name=breakpoint/", +		.check = test__checkevent_breakpoint_len, +		/* 0 */ +	}, +	{ +		.name  = "mem:0/2:w/name=breakpoint/", +		.check = test__checkevent_breakpoint_len_w, +		/* 1 */ +	}, +	{ +		.name  = "mem:0/4:rw/name=breakpoint/u", +		.check = test__checkevent_breakpoint_len_rw_modifier, +		/* 2 */ +	}, +	{ +		.name  = "mem:0/1/name=breakpoint1/,mem:0/4:rw/name=breakpoint2/", +		.check = test__checkevent_breakpoint_2_events, +		/* 3 */ +	},  };  static const struct evlist_test test__events_pmu[] = {  	{  		.name  = "cpu/config=10,config1,config2=3,period=1000/u", +		.valid = test__pmu_cpu_valid,  		.check = test__checkevent_pmu,  		/* 0 */  	},  	{  		.name  = "cpu/config=1,name=krava/u,cpu/config=2/u", +		.valid = test__pmu_cpu_valid,  		.check = test__checkevent_pmu_name,  		/* 1 */  	},  	{  		.name  = "cpu/config=1,call-graph=fp,time,period=100000/,cpu/config=2,call-graph=no,time=0,period=2000/", +		.valid = test__pmu_cpu_valid,  
		.check = test__checkevent_pmu_partial_time_callgraph,  		/* 2 */  	},  	{  		.name  = "cpu/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks',period=0x1,event=0x2/ukp", +		.valid = test__pmu_cpu_valid,  		.check = test__checkevent_complex_name,  		/* 3 */  	}, @@ -2006,66 +2167,174 @@ static const struct evlist_test test__events_pmu[] = {  		.check = test__checkevent_raw_pmu,  		/* 5 */  	}, -}; - -struct terms_test { -	const char *str; -	int (*check)(struct list_head *terms); -}; - -static const struct terms_test test__terms[] = { -	[0] = { -		.str   = "config=10,config1,config2=3,config3=4,umask=1,read,r0xead", -		.check = test__checkterms_simple, +	{ +		.name  = "cpu/L1-dcache-load-miss/", +		.valid = test__pmu_cpu_valid, +		.check = test__checkevent_genhw, +		/* 6 */ +	}, +	{ +		.name  = "cpu/L1-dcache-load-miss/kp", +		.valid = test__pmu_cpu_valid, +		.check = test__checkevent_genhw_modifier, +		/* 7 */ +	}, +	{ +		.name  = "cpu/L1-dcache-misses,name=cachepmu/", +		.valid = test__pmu_cpu_valid, +		.check = test__checkevent_config_cache, +		/* 8 */  	}, -}; - -static const struct evlist_test test__hybrid_events[] = {  	{ -		.name  = "cpu_core/cpu-cycles/", -		.check = test__hybrid_hw_event_with_pmu, +		.name  = "cpu/instructions/", +		.valid = test__pmu_cpu_valid, +		.check = test__checkevent_symbolic_name, +		/* 9 */ +	}, +	{ +		.name  = "cpu/cycles,period=100000,config2/", +		.valid = test__pmu_cpu_valid, +		.check = test__checkevent_symbolic_name_config,  		/* 0 */  	},  	{ -		.name  = "{cpu_core/cpu-cycles/,cpu_core/instructions/}", -		.check = test__hybrid_hw_group_event, +		.name  = "cpu/instructions/h", +		.valid = test__pmu_cpu_valid, +		.check = test__checkevent_symbolic_name_modifier,  		/* 1 */  	},  	{ -		.name  = "{cpu-clock,cpu_core/cpu-cycles/}", -		.check = test__hybrid_sw_hw_group_event, +		.name  = "cpu/instructions/G", +		.valid = test__pmu_cpu_valid, +		.check = test__checkevent_exclude_host_modifier,  		/* 2 */  	},  	{ -		.name  = "{cpu_core/cpu-cycles/,cpu-clock}", -		.check = test__hybrid_hw_sw_group_event, +		.name  = "cpu/instructions/H", +		.valid = test__pmu_cpu_valid, +		.check = test__checkevent_exclude_guest_modifier,  		/* 3 */  	},  	{ -		.name  = "{cpu_core/cpu-cycles/k,cpu_core/instructions/u}", -		.check = test__hybrid_group_modifier1, +		.name  = "{cpu/instructions/k,cpu/cycles/upp}", +		.valid = test__pmu_cpu_valid, +		.check = test__group1,  		/* 4 */  	},  	{ -		.name  = "r1a", -		.check = test__hybrid_raw1, +		.name  = "{cpu/cycles/u,cpu/instructions/kp}:p", +		.valid = test__pmu_cpu_valid, +		.check = test__group4,  		/* 5 */  	},  	{ -		.name  = "cpu_core/r1a/", -		.check = test__hybrid_raw2, +		.name  = "{cpu/cycles/,cpu/cache-misses/G}:H", +		.valid = test__pmu_cpu_valid, +		.check = test__group_gh1,  		/* 6 */  	},  	{ -		.name  = "cpu_core/config=10,config1,config2=3,period=1000/u", -		.check = test__checkevent_pmu, +		.name  = "{cpu/cycles/,cpu/cache-misses/H}:G", +		.valid = test__pmu_cpu_valid, +		.check = test__group_gh2,  		/* 7 */  	},  	{ -		.name  = "cpu_core/LLC-loads/", -		.check = test__hybrid_cache_event, +		.name  = "{cpu/cycles/G,cpu/cache-misses/H}:u", +		.valid = test__pmu_cpu_valid, +		.check = test__group_gh3,  		/* 8 */  	}, +	{ +		.name  = "{cpu/cycles/G,cpu/cache-misses/H}:uG", +		.valid = test__pmu_cpu_valid, +		.check = test__group_gh4, +		/* 9 */ +	}, +	{ +		.name  = "{cpu/cycles/,cpu/cache-misses/,cpu/branch-misses/}:S", +		.valid = test__pmu_cpu_valid, +		.check = test__leader_sample1, +		/* 0 */ +	}, 
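+	/*
+	 * The remaining entries spell the same events via an explicit "cpu" PMU
+	 * and re-use the legacy check functions; test__pmu_cpu_valid skips them
+	 * on systems without a PMU named "cpu".
+	 */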
+	{ +		.name  = "{cpu/instructions/,cpu/branch-misses/}:Su", +		.valid = test__pmu_cpu_valid, +		.check = test__leader_sample2, +		/* 1 */ +	}, +	{ +		.name  = "cpu/instructions/uDp", +		.valid = test__pmu_cpu_valid, +		.check = test__checkevent_pinned_modifier, +		/* 2 */ +	}, +	{ +		.name  = "{cpu/cycles/,cpu/cache-misses/,cpu/branch-misses/}:D", +		.valid = test__pmu_cpu_valid, +		.check = test__pinned_group, +		/* 3 */ +	}, +	{ +		.name  = "cpu/instructions/I", +		.valid = test__pmu_cpu_valid, +		.check = test__checkevent_exclude_idle_modifier, +		/* 4 */ +	}, +	{ +		.name  = "cpu/instructions/kIG", +		.valid = test__pmu_cpu_valid, +		.check = test__checkevent_exclude_idle_modifier_1, +		/* 5 */ +	}, +	{ +		.name  = "cpu/cycles/u", +		.valid = test__pmu_cpu_valid, +		.check = test__sym_event_slash, +		/* 6 */ +	}, +	{ +		.name  = "cpu/cycles/k", +		.valid = test__pmu_cpu_valid, +		.check = test__sym_event_dc, +		/* 7 */ +	}, +	{ +		.name  = "cpu/instructions/uep", +		.valid = test__pmu_cpu_valid, +		.check = test__checkevent_exclusive_modifier, +		/* 8 */ +	}, +	{ +		.name  = "{cpu/cycles/,cpu/cache-misses/,cpu/branch-misses/}:e", +		.valid = test__pmu_cpu_valid, +		.check = test__exclusive_group, +		/* 9 */ +	}, +	{ +		.name  = "cpu/cycles,name=name/", +		.valid = test__pmu_cpu_valid, +		.check = test__term_equal_term, +		/* 0 */ +	}, +	{ +		.name  = "cpu/cycles,name=l1d/", +		.valid = test__pmu_cpu_valid, +		.check = test__term_equal_legacy, +		/* 1 */ +	}, +}; + +struct terms_test { +	const char *str; +	int (*check)(struct list_head *terms); +}; + +static const struct terms_test test__terms[] = { +	[0] = { +		.str   = "config=10,config1,config2=3,config3=4,umask=1,read,r0xead", +		.check = test__checkterms_simple, +	},  };  static int test_event(const struct evlist_test *e) @@ -2091,7 +2360,7 @@ static int test_event(const struct evlist_test *e)  			 e->name, ret, err.str);  		parse_events_error__print(&err, e->name);  		ret = TEST_FAIL; -		if (strstr(err.str, "can't access trace events")) +		if (err.str && strstr(err.str, "can't access trace events"))  			ret = TEST_SKIP;  	} else {  		ret = e->check(evlist); @@ -2113,8 +2382,8 @@ static int test_event_fake_pmu(const char *str)  		return -ENOMEM;  	parse_events_error__init(&err); -	perf_pmu__test_parse_init(); -	ret = __parse_events(evlist, str, &err, &perf_pmu__fake, /*warn_if_reordered=*/true); +	ret = __parse_events(evlist, str, /*pmu_filter=*/NULL, &err, +			     &perf_pmu__fake, /*warn_if_reordered=*/true);  	if (ret) {  		pr_debug("failed to parse event '%s', err %d, str '%s'\n",  			 str, ret, err.str); @@ -2167,13 +2436,6 @@ static int test_term(const struct terms_test *t)  	INIT_LIST_HEAD(&terms); -	/* -	 * The perf_pmu__test_parse_init prepares perf_pmu_events_list -	 * which gets freed in parse_events_terms. 
-	 */ -	if (perf_pmu__test_parse_init()) -		return -1; -  	ret = parse_events_terms(&terms, t->str);  	if (ret) {  		pr_debug("failed to parse terms '%s', err %d\n", @@ -2208,99 +2470,88 @@ static int test__terms2(struct test_suite *test __maybe_unused, int subtest __ma  	return test_terms(test__terms, ARRAY_SIZE(test__terms));  } -static int test_pmu(void) +static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest __maybe_unused)  { -	struct stat st; -	char path[PATH_MAX]; -	int ret; +	struct perf_pmu *pmu = NULL; +	int ret = TEST_OK; -	snprintf(path, PATH_MAX, "%s/bus/event_source/devices/cpu/format/", -		 sysfs__mountpoint()); +	while ((pmu = perf_pmus__scan(pmu)) != NULL) { +		struct stat st; +		char path[PATH_MAX]; +		struct dirent *ent; +		DIR *dir; +		int err; -	ret = stat(path, &st); -	if (ret) -		pr_debug("omitting PMU cpu tests\n"); -	return !ret; -} +		snprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/events/", +			sysfs__mountpoint(), pmu->name); -static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest __maybe_unused) -{ -	struct stat st; -	char path[PATH_MAX]; -	struct dirent *ent; -	DIR *dir; -	int ret; +		err = stat(path, &st); +		if (err) { +			pr_debug("skipping PMU %s events tests: %s\n", pmu->name, path); +			continue; +		} -	if (!test_pmu()) -		return TEST_SKIP; +		dir = opendir(path); +		if (!dir) { +			pr_debug("can't open pmu event dir: %s\n", path); +			ret = combine_test_results(ret, TEST_SKIP); +			continue; +		} -	snprintf(path, PATH_MAX, "%s/bus/event_source/devices/cpu/events/", -		 sysfs__mountpoint()); +		while ((ent = readdir(dir))) { +			struct evlist_test e = { .name = NULL, }; +			char name[2 * NAME_MAX + 1 + 12 + 3]; +			int test_ret; -	ret = stat(path, &st); -	if (ret) { -		pr_debug("omitting PMU cpu events tests: %s\n", path); -		return TEST_OK; -	} +			/* Names containing . are special and cannot be used directly */ +			if (strchr(ent->d_name, '.')) +				continue; -	dir = opendir(path); -	if (!dir) { -		pr_debug("can't open pmu event dir: %s\n", path); -		return TEST_FAIL; -	} +			snprintf(name, sizeof(name), "%s/event=%s/u", pmu->name, ent->d_name); -	ret = TEST_OK; -	while ((ent = readdir(dir))) { -		struct evlist_test e = { .name = NULL, }; -		char name[2 * NAME_MAX + 1 + 12 + 3]; -		int test_ret; +			e.name  = name; +			e.check = test__checkevent_pmu_events; -		/* Names containing . are special and cannot be used directly */ -		if (strchr(ent->d_name, '.')) -			continue; +			test_ret = test_event(&e); +			if (test_ret != TEST_OK) { +				pr_debug("Test PMU event failed for '%s'", name); +				ret = combine_test_results(ret, test_ret); +			} -		snprintf(name, sizeof(name), "cpu/event=%s/u", ent->d_name); +			if (!is_pmu_core(pmu->name)) +				continue; -		e.name  = name; -		e.check = test__checkevent_pmu_events; +			/* +			 * Names containing '-' are recognized as prefixes and suffixes +			 * due to '-' being a legacy PMU separator. This fails when the +			 * prefix or suffix collides with an existing legacy token. For +			 * example, branch-brs has a prefix (branch) that collides with +			 * a PE_NAME_CACHE_TYPE token causing a parse error as a suffix +			 * isn't expected after this. As event names in the config +			 * slashes are allowed a '-' in the name we check this works +			 * above. 
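+			 * For example (illustrative), a combined string such as
+			 * "branch-brs:u,cpu/event=branch-brs/u" would fail on the bare
+			 * "branch-brs" token, so names containing '-' are only exercised
+			 * in the PMU-config form.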
+			 */ +			if (strchr(ent->d_name, '-')) +				continue; -		test_ret = test_event(&e); -		if (test_ret != TEST_OK) { -			pr_debug("Test PMU event failed for '%s'", name); -			ret = combine_test_results(ret, test_ret); +			snprintf(name, sizeof(name), "%s:u,%s/event=%s/u", +				 ent->d_name, pmu->name, ent->d_name); +			e.name  = name; +			e.check = test__checkevent_pmu_events_mix; +			test_ret = test_event(&e); +			if (test_ret != TEST_OK) { +				pr_debug("Test PMU event failed for '%s'", name); +				ret = combine_test_results(ret, test_ret); +			}  		} -		/* -		 * Names containing '-' are recognized as prefixes and suffixes -		 * due to '-' being a legacy PMU separator. This fails when the -		 * prefix or suffix collides with an existing legacy token. For -		 * example, branch-brs has a prefix (branch) that collides with -		 * a PE_NAME_CACHE_TYPE token causing a parse error as a suffix -		 * isn't expected after this. As event names in the config -		 * slashes are allowed a '-' in the name we check this works -		 * above. -		 */ -		if (strchr(ent->d_name, '-')) -			continue; -		snprintf(name, sizeof(name), "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name); -		e.name  = name; -		e.check = test__checkevent_pmu_events_mix; -		test_ret = test_event(&e); -		if (test_ret != TEST_OK) { -			pr_debug("Test PMU event failed for '%s'", name); -			ret = combine_test_results(ret, test_ret); -		} +		closedir(dir);  	} - -	closedir(dir);  	return ret;  }  static int test__pmu_events2(struct test_suite *test __maybe_unused, int subtest __maybe_unused)  { -	if (!test_pmu()) -		return TEST_SKIP; -  	return test_events(test__events_pmu, ARRAY_SIZE(test__events_pmu));  } @@ -2362,14 +2613,6 @@ static bool test_alias(char **event, char **alias)  	return false;  } -static int test__hybrid(struct test_suite *test __maybe_unused, int subtest __maybe_unused) -{ -	if (!perf_pmu__has_hybrid()) -		return TEST_SKIP; - -	return test_events(test__hybrid_events, ARRAY_SIZE(test__hybrid_events)); -} -  static int test__checkevent_pmu_events_alias(struct evlist *evlist)  {  	struct evsel *evsel1 = evlist__first(evlist); @@ -2433,9 +2676,6 @@ static struct test_case tests__parse_events[] = {  	TEST_CASE_REASON("Test event parsing",  			 events2,  			 "permissions"), -	TEST_CASE_REASON("Test parsing of \"hybrid\" CPU events", -			 hybrid, -			"not hybrid"),  	TEST_CASE_REASON("Parsing of all PMU events from sysfs",  			 pmu_events,  			 "permissions"), diff --git a/tools/perf/tests/parse-metric.c b/tools/perf/tests/parse-metric.c index c05148ea400c..2c28fb50dc24 100644 --- a/tools/perf/tests/parse-metric.c +++ b/tools/perf/tests/parse-metric.c @@ -11,7 +11,7 @@  #include "debug.h"  #include "expr.h"  #include "stat.h" -#include "pmu.h" +#include "pmus.h"  struct value {  	const char	*event; @@ -302,11 +302,8 @@ static int test__parse_metric(struct test_suite *test __maybe_unused, int subtes  	TEST_ASSERT_VAL("DCache_L2 failed", test_dcache_l2() == 0);  	TEST_ASSERT_VAL("recursion fail failed", test_recursion_fail() == 0);  	TEST_ASSERT_VAL("Memory bandwidth", test_memory_bandwidth() == 0); - -	if (!perf_pmu__has_hybrid()) { -		TEST_ASSERT_VAL("cache_miss_cycles failed", test_cache_miss_cycles() == 0); -		TEST_ASSERT_VAL("test metric group", test_metric_group() == 0); -	} +	TEST_ASSERT_VAL("cache_miss_cycles failed", test_cache_miss_cycles() == 0); +	TEST_ASSERT_VAL("test metric group", test_metric_group() == 0);  	return 0;  } diff --git a/tools/perf/tests/perf-targz-src-pkg b/tools/perf/tests/perf-targz-src-pkg index 
fae26b1cf08f..b3075c168cb2 100755 --- a/tools/perf/tests/perf-targz-src-pkg +++ b/tools/perf/tests/perf-targz-src-pkg @@ -7,16 +7,17 @@  # be in such tarball, which sometimes gets broken when we move files around,  # like when we made some files that were in tools/perf/ available to other tools/  # codebases by moving it to tools/include/, etc. +set -e  PERF=$1  cd ${PERF}/../.. -make perf-targz-src-pkg > /dev/null +make perf-targz-src-pkg  TARBALL=$(ls -rt perf-*.tar.gz)  TMP_DEST=$(mktemp -d)  tar xf ${TARBALL} -C $TMP_DEST  rm -f ${TARBALL}  cd - > /dev/null -make -C $TMP_DEST/perf*/tools/perf > /dev/null +make -C $TMP_DEST/perf*/tools/perf  RC=$?  rm -rf ${TMP_DEST}  exit $RC diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c index 1dff863b9711..64383fc34ef1 100644 --- a/tools/perf/tests/pmu-events.c +++ b/tools/perf/tests/pmu-events.c @@ -2,6 +2,7 @@  #include "math.h"  #include "parse-events.h"  #include "pmu.h" +#include "pmus.h"  #include "tests.h"  #include <errno.h>  #include <stdio.h> @@ -708,12 +709,9 @@ static int test__aliases(struct test_suite *test __maybe_unused,  	struct perf_pmu *pmu = NULL;  	unsigned long i; -	while ((pmu = perf_pmu__scan(pmu)) != NULL) { +	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {  		int count = 0; -		if (!is_pmu_core(pmu->name)) -			continue; -  		if (list_empty(&pmu->format)) {  			pr_debug2("skipping testing core PMU %s\n", pmu->name);  			continue; @@ -776,16 +774,8 @@ static int check_parse_id(const char *id, struct parse_events_error *error,  	for (cur = strchr(dup, '@') ; cur; cur = strchr(++cur, '@'))  		*cur = '/'; -	if (fake_pmu) { -		/* -		 * Every call to __parse_events will try to initialize the PMU -		 * state from sysfs and then clean it up at the end. Reset the -		 * PMU events to the test state so that we don't pick up -		 * erroneous prefixes and suffixes. -		 */ -		perf_pmu__test_parse_init(); -	} -	ret = __parse_events(evlist, dup, error, fake_pmu, /*warn_if_reordered=*/true); +	ret = __parse_events(evlist, dup, /*pmu_filter=*/NULL, error, fake_pmu, +			     /*warn_if_reordered=*/true);  	free(dup);  	evlist__delete(evlist); diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c index 3cf25f883df7..a4452639a3d4 100644 --- a/tools/perf/tests/pmu.c +++ b/tools/perf/tests/pmu.c @@ -86,17 +86,16 @@ static struct parse_events_term test_terms[] = {   * Prepare format directory data, exported by kernel   * at /sys/bus/event_source/devices/<dev>/format.   */ -static char *test_format_dir_get(void) +static char *test_format_dir_get(char *dir, size_t sz)  { -	static char dir[PATH_MAX];  	unsigned int i; -	snprintf(dir, PATH_MAX, "/tmp/perf-pmu-test-format-XXXXXX"); +	snprintf(dir, sz, "/tmp/perf-pmu-test-format-XXXXXX");  	if (!mkdtemp(dir))  		return NULL;  	for (i = 0; i < ARRAY_SIZE(test_formats); i++) { -		static char name[PATH_MAX]; +		char name[PATH_MAX];  		struct test_format *format = &test_formats[i];  		FILE *file; @@ -118,12 +117,13 @@ static char *test_format_dir_get(void)  /* Cleanup format directory. 
*/  static int test_format_dir_put(char *dir)  { -	char buf[PATH_MAX]; -	snprintf(buf, PATH_MAX, "rm -f %s/*\n", dir); +	char buf[PATH_MAX + 20]; + +	snprintf(buf, sizeof(buf), "rm -f %s/*\n", dir);  	if (system(buf))  		return -1; -	snprintf(buf, PATH_MAX, "rmdir %s\n", dir); +	snprintf(buf, sizeof(buf), "rmdir %s\n", dir);  	return system(buf);  } @@ -140,7 +140,8 @@ static struct list_head *test_terms_list(void)  static int test__pmu(struct test_suite *test __maybe_unused, int subtest __maybe_unused)  { -	char *format = test_format_dir_get(); +	char dir[PATH_MAX]; +	char *format = test_format_dir_get(dir, sizeof(dir));  	LIST_HEAD(formats);  	struct list_head *terms = test_terms_list();  	int ret; diff --git a/tools/perf/tests/python-use.c b/tools/perf/tests/python-use.c index 6b990ee38575..0ebc22ac8d5b 100644 --- a/tools/perf/tests/python-use.c +++ b/tools/perf/tests/python-use.c @@ -14,7 +14,7 @@ static int test__python_use(struct test_suite *test __maybe_unused, int subtest  	char *cmd;  	int ret; -	if (asprintf(&cmd, "echo \"import sys ; sys.path.append('%s'); import perf\" | %s %s", +	if (asprintf(&cmd, "echo \"import sys ; sys.path.insert(0, '%s'); import perf\" | %s %s",  		     PYTHONPATH, PYTHON, verbose > 0 ? "" : "2> /dev/null") < 0)  		return -1; diff --git a/tools/perf/tests/shell/buildid.sh b/tools/perf/tests/shell/buildid.sh index 0ce22ea0a7f1..3383ca3399d4 100755 --- a/tools/perf/tests/shell/buildid.sh +++ b/tools/perf/tests/shell/buildid.sh @@ -83,12 +83,12 @@ check()  	# in case of pe-file.exe file  	echo $1 | grep ".exe"  	if [ $? -eq 0 ]; then -		if [ -x $1  -a ! -x $file ]; then +		if [ -x $1 ] && [ ! -x $file ]; then  			echo "failed: file ${file} executable does not exist"  			exit 1  		fi -		if [ ! -x $file -a ! -e $file ]; then +		if [ ! -x $file ] && [ ! -e $file ]; then  			echo "failed: file ${file} does not exist"  			exit 1  		fi @@ -136,10 +136,10 @@ test_record()  	log_err=$(mktemp /tmp/perf.log.err.XXX)  	perf="perf --buildid-dir ${build_id_dir}" -	echo "running: perf record $@" -	${perf} record --buildid-all -o ${data} $@ 1>${log_out} 2>${log_err} +	echo "running: perf record $*" +	${perf} record --buildid-all -o ${data} "$@" 1>${log_out} 2>${log_err}  	if [ $? 
-ne 0 ]; then -		echo "failed: record $@" +		echo "failed: record $*"  		echo "see log: ${log_err}"  		exit 1  	fi @@ -172,4 +172,4 @@ if [ ${run_pe} -eq 1 ]; then  	rm -r ${wineprefix}  fi -exit ${err} +exit 0 diff --git a/tools/perf/tests/shell/daemon.sh b/tools/perf/tests/shell/daemon.sh index 45fc24af5b07..4c598cfc5afa 100755 --- a/tools/perf/tests/shell/daemon.sh +++ b/tools/perf/tests/shell/daemon.sh @@ -11,11 +11,16 @@ check_line_first()  	local lock=$5  	local up=$6 -	local line_name=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $2 }'` -	local line_base=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $3 }'` -	local line_output=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $4 }'` -	local line_lock=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $5 }'` -	local line_up=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $6 }'` +	local line_name +	line_name=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $2 }'` +	local line_base +	line_base=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $3 }'` +	local line_output +	line_output=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $4 }'` +	local line_lock +	line_lock=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $5 }'` +	local line_up +	line_up=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $6 }'`  	if [ "${name}" != "${line_name}" ]; then  		echo "FAILED: wrong name" @@ -54,13 +59,20 @@ check_line_other()  	local ack=$7  	local up=$8 -	local line_name=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $2 }'` -	local line_run=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $3 }'` -	local line_base=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $4 }'` -	local line_output=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $5 }'` -	local line_control=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $6 }'` -	local line_ack=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $7 }'` -	local line_up=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $8 }'` +	local line_name +	line_name=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $2 }'` +	local line_run +	line_run=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $3 }'` +	local line_base +	line_base=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $4 }'` +	local line_output +	line_output=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $5 }'` +	local line_control +	line_control=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $6 }'` +	local line_ack +	line_ack=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $7 }'` +	local line_up +	line_up=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $8 }'`  	if [ "${name}" != "${line_name}" ]; then  		echo "FAILED: wrong name" @@ -102,8 +114,10 @@ daemon_exit()  {  	local config=$1 -	local line=`perf daemon --config ${config} -x: | head -1` -	local pid=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $1 }'` +	local line +	line=`perf daemon --config ${config} -x: | head -1` +	local pid +	pid=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $1 }'`  	# Reset trap handler.  	trap - SIGINT SIGTERM @@ -123,7 +137,7 @@ daemon_start()  	perf daemon start --config ${config}  	# Clean up daemon if interrupted. 
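+	# Note: single-quoting the trap body defers expansion of the command
+	# (and of ${config}) until the signal is actually caught.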
-	trap "echo 'FAILED: Signal caught'; daemon_exit ${config}; exit 1" SIGINT SIGTERM +	trap 'echo "FAILED: Signal caught"; daemon_exit "${config}"; exit 1' SIGINT SIGTERM  	# wait for the session to ping  	local state="FAIL" @@ -144,8 +158,10 @@ test_list()  {  	echo "test daemon list" -	local config=$(mktemp /tmp/perf.daemon.config.XXX) -	local base=$(mktemp -d /tmp/perf.daemon.base.XXX) +	local config +	config=$(mktemp /tmp/perf.daemon.config.XXX) +	local base +	base=$(mktemp -d /tmp/perf.daemon.base.XXX)  	cat <<EOF > ${config}  [daemon] @@ -165,19 +181,22 @@ EOF  	# check first line  	# pid:daemon:base:base/output:base/lock -	local line=`perf daemon --config ${config} -x: | head -1` +	local line +	line=`perf daemon --config ${config} -x: | head -1`  	check_line_first ${line} daemon ${base} ${base}/output ${base}/lock "0"  	# check 1st session  	# pid:size:-e cpu-clock:base/size:base/size/output:base/size/control:base/size/ack:0 -	local line=`perf daemon --config ${config} -x: | head -2 | tail -1` +	local line +	line=`perf daemon --config ${config} -x: | head -2 | tail -1`  	check_line_other "${line}" size "-e cpu-clock -m 1 sleep 10" ${base}/session-size \  			 ${base}/session-size/output ${base}/session-size/control \  			 ${base}/session-size/ack "0"  	# check 2nd session  	# pid:time:-e task-clock:base/time:base/time/output:base/time/control:base/time/ack:0 -	local line=`perf daemon --config ${config} -x: | head -3 | tail -1` +	local line +	line=`perf daemon --config ${config} -x: | head -3 | tail -1`  	check_line_other "${line}" time "-e task-clock -m 1 sleep 10" ${base}/session-time \  			 ${base}/session-time/output ${base}/session-time/control \  			 ${base}/session-time/ack "0" @@ -193,8 +212,10 @@ test_reconfig()  {  	echo "test daemon reconfig" -	local config=$(mktemp /tmp/perf.daemon.config.XXX) -	local base=$(mktemp -d /tmp/perf.daemon.base.XXX) +	local config +	config=$(mktemp /tmp/perf.daemon.config.XXX) +	local base +	base=$(mktemp -d /tmp/perf.daemon.base.XXX)  	# prepare config  	cat <<EOF > ${config} @@ -215,10 +236,12 @@ EOF  	# check 2nd session  	# pid:time:-e task-clock:base/time:base/time/output:base/time/control:base/time/ack:0 -	local line=`perf daemon --config ${config} -x: | head -3 | tail -1` +	local line +	line=`perf daemon --config ${config} -x: | head -3 | tail -1`  	check_line_other "${line}" time "-e task-clock -m 1 sleep 10" ${base}/session-time \  			 ${base}/session-time/output ${base}/session-time/control ${base}/session-time/ack "0" -	local pid=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $1 }'` +	local pid +	pid=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $1 }'`  	# prepare new config  	local config_new=${config}.new @@ -249,7 +272,8 @@ EOF  	# check reconfigured 2nd session  	# pid:time:-e task-clock:base/time:base/time/output:base/time/control:base/time/ack:0 -	local line=`perf daemon --config ${config} -x: | head -3 | tail -1` +	local line +	line=`perf daemon --config ${config} -x: | head -3 | tail -1`  	check_line_other "${line}" time "-e cpu-clock -m 1 sleep 10" ${base}/session-time \  			 ${base}/session-time/output ${base}/session-time/control ${base}/session-time/ack "0" @@ -276,7 +300,8 @@ EOF  		state=`perf daemon ping --config ${config} --session size | awk '{ print $1 }'`  	done -	local one=`perf daemon --config ${config} -x: | wc -l` +	local one +	one=`perf daemon --config ${config} -x: | wc -l`  	if [ ${one} -ne "1" ]; then  		echo "FAILED: wrong list output" @@ -312,8 +337,10 @@ test_stop()  {  	echo "test daemon stop" 
-	local config=$(mktemp /tmp/perf.daemon.config.XXX) -	local base=$(mktemp -d /tmp/perf.daemon.base.XXX) +	local config +	config=$(mktemp /tmp/perf.daemon.config.XXX) +	local base +	base=$(mktemp -d /tmp/perf.daemon.base.XXX)  	# prepare config  	cat <<EOF > ${config} @@ -332,8 +359,12 @@ EOF  	# start daemon  	daemon_start ${config} size -	local pid_size=`perf daemon --config ${config} -x: | head -2 | tail -1 | awk 'BEGIN { FS = ":" } ; { print $1 }'` -	local pid_time=`perf daemon --config ${config} -x: | head -3 | tail -1 | awk 'BEGIN { FS = ":" } ; { print $1 }'` +	local pid_size +	pid_size=`perf daemon --config ${config} -x: | head -2 | tail -1 | +		  awk 'BEGIN { FS = ":" } ; { print $1 }'` +	local pid_time +	pid_time=`perf daemon --config ${config} -x: | head -3 | tail -1 | +		  awk 'BEGIN { FS = ":" } ; { print $1 }'`  	# check that sessions are running  	if [ ! -d "/proc/${pid_size}" ]; then @@ -364,8 +395,10 @@ test_signal()  {  	echo "test daemon signal" -	local config=$(mktemp /tmp/perf.daemon.config.XXX) -	local base=$(mktemp -d /tmp/perf.daemon.base.XXX) +	local config +	config=$(mktemp /tmp/perf.daemon.config.XXX) +	local base +	base=$(mktemp -d /tmp/perf.daemon.base.XXX)  	# prepare config  	cat <<EOF > ${config} @@ -389,7 +422,7 @@ EOF  	daemon_exit ${config}  	# count is 2 perf.data for signals and 1 for perf record finished -	count=`ls ${base}/session-test/ | grep perf.data | wc -l` +	count=`ls ${base}/session-test/*perf.data* | wc -l`  	if [ ${count} -ne 3 ]; then  		error=1  		echo "FAILED: perf data no generated" @@ -403,8 +436,10 @@ test_ping()  {  	echo "test daemon ping" -	local config=$(mktemp /tmp/perf.daemon.config.XXX) -	local base=$(mktemp -d /tmp/perf.daemon.base.XXX) +	local config +	config=$(mktemp /tmp/perf.daemon.config.XXX) +	local base +	base=$(mktemp -d /tmp/perf.daemon.base.XXX)  	# prepare config  	cat <<EOF > ${config} @@ -426,7 +461,7 @@ EOF  	size=`perf daemon ping --config ${config} --session size | awk '{ print $1 }'`  	type=`perf daemon ping --config ${config} --session time | awk '{ print $1 }'` -	if [ ${size} != "OK" -o ${type} != "OK" ]; then +	if [ ${size} != "OK" ] || [ ${type} != "OK" ]; then  		error=1  		echo "FAILED: daemon ping failed"  	fi @@ -442,8 +477,10 @@ test_lock()  {  	echo "test daemon lock" -	local config=$(mktemp /tmp/perf.daemon.config.XXX) -	local base=$(mktemp -d /tmp/perf.daemon.base.XXX) +	local config +	config=$(mktemp /tmp/perf.daemon.config.XXX) +	local base +	base=$(mktemp -d /tmp/perf.daemon.base.XXX)  	# prepare config  	cat <<EOF > ${config} diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py index 61f3059ca54b..ea55d5ea1ced 100644 --- a/tools/perf/tests/shell/lib/perf_json_output_lint.py +++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py @@ -14,6 +14,7 @@ ap.add_argument('--system-wide', action='store_true')  ap.add_argument('--event', action='store_true')  ap.add_argument('--per-core', action='store_true')  ap.add_argument('--per-thread', action='store_true') +ap.add_argument('--per-cache', action='store_true')  ap.add_argument('--per-die', action='store_true')  ap.add_argument('--per-node', action='store_true')  ap.add_argument('--per-socket', action='store_true') @@ -47,12 +48,14 @@ def check_json_output(expected_items):        'counter-value': lambda x: is_counter_value(x),        'cgroup': lambda x: True,        'cpu': lambda x: isint(x), +      'cache': lambda x: True,        'die': lambda x: True,        'event': lambda x: True,    
    'event-runtime': lambda x: isfloat(x),        'interval': lambda x: isfloat(x),        'metric-unit': lambda x: True,        'metric-value': lambda x: isfloat(x), +      'metricgroup': lambda x: True,        'node': lambda x: True,        'pcnt-running': lambda x: isfloat(x),        'socket': lambda x: True, @@ -63,10 +66,12 @@ def check_json_output(expected_items):    for item in json.loads(input):      if expected_items != -1:        count = len(item) -      if count != expected_items and count >= 1 and count <= 4 and 'metric-value' in item: +      if count != expected_items and count >= 1 and count <= 6 and 'metric-value' in item:          # Events that generate >1 metric may have isolated metric -        # values and possibly other prefixes like interval, core and -        # aggregate-number. +        # values and possibly other prefixes like interval, core, +        # aggregate-number, or event-runtime/pcnt-running from multiplexing. +        pass +      elif count != expected_items and count >= 1 and count <= 5 and 'metricgroup' in item:          pass        elif count != expected_items:          raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}' @@ -83,7 +88,7 @@ try:      expected_items = 7    elif args.interval or args.per_thread or args.system_wide_no_aggr:      expected_items = 8 -  elif args.per_core or args.per_socket or args.per_node or args.per_die: +  elif args.per_core or args.per_socket or args.per_node or args.per_die or args.per_cache:      expected_items = 9    else:      # If no option is specified, don't check the number of items. diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py new file mode 100644 index 000000000000..50a34a9cc040 --- /dev/null +++ b/tools/perf/tests/shell/lib/perf_metric_validation.py @@ -0,0 +1,574 @@ +#SPDX-License-Identifier: GPL-2.0 +import re +import csv +import json +import argparse +from pathlib import Path +import subprocess + +class Validator: +    def __init__(self, rulefname, reportfname='', t=5, debug=False, datafname='', fullrulefname='', workload='true', metrics=''): +        self.rulefname = rulefname +        self.reportfname = reportfname +        self.rules = None +        self.collectlist:str = metrics +        self.metrics = self.__set_metrics(metrics) +        self.skiplist = set() +        self.tolerance = t + +        self.workloads = [x for x in workload.split(",") if x] +        self.wlidx = 0 # idx of current workloads +        self.allresults = dict() # metric results of all workload +        self.allignoremetrics = dict() # metrics with no results or negative results +        self.allfailtests = dict() +        self.alltotalcnt = dict() +        self.allpassedcnt = dict() +        self.allerrlist = dict() + +        self.results = dict() # metric results of current workload +        # vars for test pass/failure statistics +        self.ignoremetrics= set() # metrics with no results or negative results, neg result counts as a failed test +        self.failtests = dict() +        self.totalcnt = 0 +        self.passedcnt = 0 +        # vars for errors +        self.errlist = list() + +        # vars for Rule Generator +        self.pctgmetrics = set() # Percentage rule + +        # vars for debug +        self.datafname = datafname +        self.debug = debug +        self.fullrulefname = fullrulefname + +    def __set_metrics(self, metrics=''): +        if metrics != '': +            return 
set(metrics.split(","))
+        else:
+            return set()
+
+    def read_json(self, filename: str) -> dict:
+        try:
+            with open(Path(filename).resolve(), "r") as f:
+                data = json.loads(f.read())
+        except OSError as e:
+            print(f"Error when reading file {e}")
+            sys.exit()
+
+        return data
+
+    def json_dump(self, data, output_file):
+        parent = Path(output_file).parent
+        if not parent.exists():
+            parent.mkdir(parents=True)
+
+        with open(output_file, "w+") as output_file:
+            json.dump(data,
+                      output_file,
+                      ensure_ascii=True,
+                      indent=4)
+
+    def get_results(self, idx:int = 0):
+        return self.results[idx]
+
+    def get_bounds(self, lb, ub, error, alias={}, ridx:int = 0) -> list:
+        """
+        Get bounds and tolerance from lb, ub, and error.
+        If missing lb, use 0.0; missing ub, use float('inf'); missing error, use self.tolerance.
+
+        @param lb: str/float, lower bound
+        @param ub: str/float, upper bound
+        @param error: float/str, error tolerance
+        @returns: lower bound, return inf if the lower bound is a metric value and is not collected
+                  upper bound, return -1 if the upper bound is a metric value and is not collected
+                  tolerance, denormalized based on the upper bound value
+        """
+        # init ubv and lbv to invalid values
+        def get_bound_value (bound, initval, ridx):
+            val = initval
+            if isinstance(bound, int) or isinstance(bound, float):
+                val = bound
+            elif isinstance(bound, str):
+                if bound == '':
+                    val = float("inf")
+                elif bound in alias:
+                    vall = self.get_value(alias[ub], ridx)
+                    if vall:
+                        val = vall[0]
+                elif bound.replace('.', '1').isdigit():
+                    val = float(bound)
+                else:
+                    print("Wrong bound: {0}".format(bound))
+            else:
+                print("Wrong bound: {0}".format(bound))
+            return val
+
+        ubv = get_bound_value(ub, -1, ridx)
+        lbv = get_bound_value(lb, float('inf'), ridx)
+        t = get_bound_value(error, self.tolerance, ridx)
+
+        # denormalize error threshold
+        denormerr = t * ubv / 100 if ubv != 100 and ubv > 0 else t
+
+        return lbv, ubv, denormerr
+
+    def get_value(self, name:str, ridx:int = 0) -> list:
+        """
+        Get value of the metric from self.results.
+        If result of this metric is not provided, the metric name will be added into self.ignoremetrics and self.errlist.
+        All future test(s) on this metric will fail.
+
+        @param name: name of the metric
+        @returns: list with value found in self.results; list is empty when value is not found.
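+        Example (illustrative): if self.results[0] is {'ipc': 1.2}, then
+        get_value('ipc') returns [1.2], get_value('0.5') returns [0.5], and an
+        unknown metric name returns [] and is added to self.ignoremetrics.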
+        """ +        results = [] +        data = self.results[ridx] if ridx in self.results else self.results[0] +        if name not in self.ignoremetrics: +            if name in data: +                results.append(data[name]) +            elif name.replace('.', '1').isdigit(): +                results.append(float(name)) +            else: +                self.ignoremetrics.add(name) +        return results + +    def check_bound(self, val, lb, ub, err): +        return True if val <= ub + err and val >= lb - err else False + +    # Positive Value Sanity check +    def pos_val_test(self): +        """ +        Check if metrics value are non-negative. +        One metric is counted as one test. +        Failure: when metric value is negative or not provided. +        Metrics with negative value will be added into the self.failtests['PositiveValueTest'] and self.ignoremetrics. +        """ +        negmetric = dict() +        pcnt = 0 +        tcnt = 0 +        rerun = list() +        for name, val in self.get_results().items(): +            if val < 0: +                negmetric[name] = val +                rerun.append(name) +            else: +                pcnt += 1 +            tcnt += 1 +        if len(rerun) > 0 and len(rerun) < 20: +            second_results = dict() +            self.second_test(rerun, second_results) +            for name, val in second_results.items(): +                if name not in negmetric: continue +                if val >= 0: +                    del negmetric[name] +                    pcnt += 1 + +        self.failtests['PositiveValueTest']['Total Tests'] = tcnt +        self.failtests['PositiveValueTest']['Passed Tests'] = pcnt +        if len(negmetric.keys()): +            self.ignoremetrics.update(negmetric.keys()) +            negmessage = ["{0}(={1:.4f})".format(name, val) for name, val in negmetric.items()] +            self.failtests['PositiveValueTest']['Failed Tests'].append({'NegativeValue': negmessage}) + +        return + +    def evaluate_formula(self, formula:str, alias:dict, ridx:int = 0): +        """ +        Evaluate the value of formula. + +        @param formula: the formula to be evaluated +        @param alias: the dict has alias to metric name mapping +        @returns: value of the formula is success; -1 if the one or more metric value not provided +        """ +        stack = [] +        b = 0 +        errs = [] +        sign = "+" +        f = str() + +        #TODO: support parenthesis? 
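+        # Illustrative walk-through of the loop below: each alias between
+        # operators is resolved via get_value(); '+' and '-' push a new
+        # (possibly negated) term onto 'stack' and the result is sum(stack).
+        # For example, formula "a+b-c" with alias {'a': 'm1', 'b': 'm2', 'c': 'm3'}
+        # and collected values m1=4, m2=1, m3=2 builds stack == [4.0, 1.0, -2.0]
+        # and returns (3.0, "m1(=4.0000)+m2(=1.0000)-m3(=2.0000)").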
+        for i in range(len(formula)): +            if i+1 == len(formula) or formula[i] in ('+', '-', '*', '/'): +                s = alias[formula[b:i]] if i+1 < len(formula) else alias[formula[b:]] +                v = self.get_value(s, ridx) +                if not v: +                    errs.append(s) +                else: +                    f = f + "{0}(={1:.4f})".format(s, v[0]) +                    if sign == "*": +                        stack[-1] = stack[-1] * v +                    elif sign == "/": +                        stack[-1] = stack[-1] / v +                    elif sign == '-': +                        stack.append(-v[0]) +                    else: +                        stack.append(v[0]) +                if i + 1 < len(formula): +                    sign = formula[i] +                    f += sign +                    b = i + 1 + +        if len(errs) > 0: +            return -1, "Metric value missing: "+','.join(errs) + +        val = sum(stack) +        return val, f + +    # Relationships Tests +    def relationship_test(self, rule: dict): +        """ +        Validate if the metrics follow the required relationship in the rule. +        eg. lower_bound <= eval(formula)<= upper_bound +        One rule is counted as ont test. +        Failure: when one or more metric result(s) not provided, or when formula evaluated outside of upper/lower bounds. + +        @param rule: dict with metric name(+alias), formula, and required upper and lower bounds. +        """ +        alias = dict() +        for m in rule['Metrics']: +            alias[m['Alias']] = m['Name'] +        lbv, ubv, t = self.get_bounds(rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'], alias, ridx=rule['RuleIndex']) +        val, f = self.evaluate_formula(rule['Formula'], alias, ridx=rule['RuleIndex']) +        if val == -1: +            self.failtests['RelationshipTest']['Failed Tests'].append({'RuleIndex': rule['RuleIndex'], 'Description':f}) +        elif not self.check_bound(val, lbv, ubv, t): +            lb = rule['RangeLower'] +            ub = rule['RangeUpper'] +            if isinstance(lb, str): +                if lb in alias: +                    lb = alias[lb] +            if isinstance(ub, str): +                if ub in alias: +                    ub = alias[ub] +            self.failtests['RelationshipTest']['Failed Tests'].append({'RuleIndex': rule['RuleIndex'], 'Formula':f, +                                                                       'RangeLower': lb, 'LowerBoundValue': self.get_value(lb), +                                                                       'RangeUpper': ub, 'UpperBoundValue':self.get_value(ub), +                                                                       'ErrorThreshold': t, 'CollectedValue': val}) +        else: +            self.passedcnt += 1 +            self.failtests['RelationshipTest']['Passed Tests'] += 1 +        self.totalcnt += 1 +        self.failtests['RelationshipTest']['Total Tests'] += 1 + +        return + + +    # Single Metric Test +    def single_test(self, rule:dict): +        """ +        Validate if the metrics are in the required value range. +        eg. lower_bound <= metrics_value <= upper_bound +        One metric is counted as one test in this type of test. +        One rule may include one or more metrics. +        Failure: when the metric value not provided or the value is outside the bounds. 
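+        Example (illustrative): with RangeLower '0', RangeUpper '100' and
+        ErrorThreshold 5, a collected value of 103 still passes (tolerance 5 on
+        top of the upper bound) while 106 fails.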
+        This test updates self.totalcnt and records failed tests in self.failtests['SingleMetricTest'].
+
+        @param rule: dict with metrics to validate and the value range requirement
+        """
+        lbv, ubv, t = self.get_bounds(rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'])
+        metrics = rule['Metrics']
+        passcnt = 0
+        totalcnt = 0
+        faillist = list()
+        failures = dict()
+        rerun = list()
+        for m in metrics:
+            totalcnt += 1
+            result = self.get_value(m['Name'])
+            if len(result) > 0 and self.check_bound(result[0], lbv, ubv, t) or m['Name'] in self.skiplist:
+                passcnt += 1
+            else:
+                failures[m['Name']] = result
+                rerun.append(m['Name'])
+
+        if len(rerun) > 0 and len(rerun) < 20:
+            second_results = dict()
+            self.second_test(rerun, second_results)
+            for name, val in second_results.items():
+                if name not in failures: continue
+                if self.check_bound(val, lbv, ubv, t):
+                    passcnt += 1
+                    del failures[name]
+                else:
+                    failures[name] = val
+                    self.results[0][name] = val
+
+        self.totalcnt += totalcnt
+        self.passedcnt += passcnt
+        self.failtests['SingleMetricTest']['Total Tests'] += totalcnt
+        self.failtests['SingleMetricTest']['Passed Tests'] += passcnt
+        if len(failures.keys()) != 0:
+            faillist = [{'MetricName':name, 'CollectedValue':val} for name, val in failures.items()]
+            self.failtests['SingleMetricTest']['Failed Tests'].append({'RuleIndex':rule['RuleIndex'],
+                                                                       'RangeLower': rule['RangeLower'],
+                                                                       'RangeUpper': rule['RangeUpper'],
+                                                                       'ErrorThreshold':rule['ErrorThreshold'],
+                                                                       'Failure':faillist})
+
+        return
+
+    def create_report(self):
+        """
+        Create final report and write into a JSON file.
+        """
+        alldata = list()
+        for i in range(0, len(self.workloads)):
+            reportstas = {"Total Rule Count": self.alltotalcnt[i], "Passed Rule Count": self.allpassedcnt[i]}
+            data = {"Metric Validation Statistics": reportstas, "Tests in Category": self.allfailtests[i],
+                    "Errors":self.allerrlist[i]}
+            alldata.append({"Workload": self.workloads[i], "Report": data})
+
+        json_str = json.dumps(alldata, indent=4)
+        print("Test validation finished. Final report: ")
+        print(json_str)
+
+        if self.debug:
+            allres = [{"Workload": self.workloads[i], "Results": self.allresults[i]} for i in range(0, len(self.workloads))]
+            self.json_dump(allres, self.datafname)
+
+    def check_rule(self, testtype, metric_list):
+        """
+        Check if the rule uses metric(s) that do not exist on the current platform.
+
+        @param metric_list: list of metrics from the rule.
+        @return: False when at least one metric used in the rule is not found in the metric file (the rule will be skipped).
+                 True when all metrics used in the rule are found in the metric file.
+        """ +        if testtype == "RelationshipTest": +            for m in metric_list: +                if m['Name'] not in self.metrics: +                    return False +        return True + +    # Start of Collector and Converter +    def convert(self, data: list, metricvalues:dict): +        """ +        Convert collected metric data from the -j output to dict of {metric_name:value}. +        """ +        for json_string in data: +            try: +                result =json.loads(json_string) +                if "metric-unit" in result and result["metric-unit"] != "(null)" and result["metric-unit"] != "": +                    name = result["metric-unit"].split("  ")[1] if len(result["metric-unit"].split("  ")) > 1 \ +                        else result["metric-unit"] +                    metricvalues[name.lower()] = float(result["metric-value"]) +            except ValueError as error: +                continue +        return + +    def _run_perf(self, metric, workload: str): +        tool = 'perf' +        command = [tool, 'stat', '-j', '-M', f"{metric}", "-a"] +        wl = workload.split() +        command.extend(wl) +        print(" ".join(command)) +        cmd = subprocess.run(command, stderr=subprocess.PIPE, encoding='utf-8') +        data = [x+'}' for x in cmd.stderr.split('}\n') if x] +        return data + + +    def collect_perf(self, workload: str): +        """ +        Collect metric data with "perf stat -M" on given workload with -a and -j. +        """ +        self.results = dict() +        print(f"Starting perf collection") +        print(f"Long workload: {workload}") +        collectlist = dict() +        if self.collectlist != "": +            collectlist[0] = {x for x in self.collectlist.split(",")} +        else: +            collectlist[0] = set(list(self.metrics)) +        # Create metric set for relationship rules +        for rule in self.rules: +            if rule["TestType"] == "RelationshipTest": +                metrics = [m["Name"] for m in rule["Metrics"]] +                if not any(m not in collectlist[0] for m in metrics): +                    collectlist[rule["RuleIndex"]] = [",".join(list(set(metrics)))] + +        for idx, metrics in collectlist.items(): +            if idx == 0: wl = "true" +            else: wl = workload +            for metric in metrics: +                data = self._run_perf(metric, wl) +                if idx not in self.results: self.results[idx] = dict() +                self.convert(data, self.results[idx]) +        return + +    def second_test(self, collectlist, second_results): +        workload = self.workloads[self.wlidx] +        for metric in collectlist: +            data = self._run_perf(metric, workload) +            self.convert(data, second_results) + +    # End of Collector and Converter + +    # Start of Rule Generator +    def parse_perf_metrics(self): +        """ +        Read and parse perf metric file: +        1) find metrics with '1%' or '100%' as ScaleUnit for Percent check +        2) create metric name list +        """ +        command = ['perf', 'list', '-j', '--details', 'metrics'] +        cmd = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8') +        try: +            data = json.loads(cmd.stdout) +            for m in data: +                if 'MetricName' not in m: +                    print("Warning: no metric name") +                    continue +                name = m['MetricName'].lower() +                self.metrics.add(name) +    
            if 'ScaleUnit' in m and (m['ScaleUnit'] == '1%' or m['ScaleUnit'] == '100%'):
+                    self.pctgmetrics.add(name.lower())
+        except ValueError as error:
+            print("Error when parsing metric data")
+            sys.exit()
+
+        return
+
+    def remove_unsupported_rules(self, rules):
+        new_rules = []
+        for rule in rules:
+            add_rule = True
+            for m in rule["Metrics"]:
+                if m["Name"] in self.skiplist or m["Name"] not in self.metrics:
+                    add_rule = False
+                    break
+            if add_rule:
+                new_rules.append(rule)
+        return new_rules
+
+    def create_rules(self):
+        """
+        Create the full rule set, which includes:
+        1) All the rules from the relationship rules file
+        2) A SingleMetricTest rule for all the 'percent' metrics
+
+        Reindex all the rules to avoid repeated RuleIndex values.
+        """
+        data = self.read_json(self.rulefname)
+        rules = data['RelationshipRules']
+        self.skiplist = set([name.lower() for name in data['SkipList']])
+        self.rules = self.remove_unsupported_rules(rules)
+        pctgrule = {'RuleIndex':0,
+                    'TestType':'SingleMetricTest',
+                    'RangeLower':'0',
+                    'RangeUpper': '100',
+                    'ErrorThreshold': self.tolerance,
+                    'Description':'Metrics in percent unit have value within [0, 100]',
+                    'Metrics': [{'Name': m.lower()} for m in self.pctgmetrics]}
+        self.rules.append(pctgrule)
+
+        # Re-index all rules to avoid repeated RuleIndex
+        idx = 1
+        for r in self.rules:
+            r['RuleIndex'] = idx
+            idx += 1
+
+        if self.debug:
+            #TODO: need to test and generate file name correctly
+            data = {'RelationshipRules':self.rules, 'SupportedMetrics': [{"MetricName": name} for name in self.metrics]}
+            self.json_dump(data, self.fullrulefname)
+
+        return
+    # End of Rule Generator
+
+    def _storewldata(self, key):
+        '''
+        Store all the data of one workload into the corresponding data structure for all workloads.
+        @param key: key to the dictionaries (index of self.workloads).
+        '''
+        self.allresults[key] = self.results
+        self.allignoremetrics[key] = self.ignoremetrics
+        self.allfailtests[key] = self.failtests
+        self.alltotalcnt[key] = self.totalcnt
+        self.allpassedcnt[key] = self.passedcnt
+        self.allerrlist[key] = self.errlist
+
+    # Initialize data structures before data validation of each workload
+    def _init_data(self):
+
+        testtypes = ['PositiveValueTest', 'RelationshipTest', 'SingleMetricTest']
+        self.results = dict()
+        self.ignoremetrics = set()
+        self.errlist = list()
+        self.failtests = {k:{'Total Tests':0, 'Passed Tests':0, 'Failed Tests':[]} for k in testtypes}
+        self.totalcnt = 0
+        self.passedcnt = 0
+
+    def test(self):
+        '''
+        The real entry point of the test framework.
+        This function loads the validation rule JSON file and the standard metric file to create the
+        rules for testing and the namemap dictionaries.
+        It also reads in the result JSON file for testing.
+
+        In the test process, it walks through each rule and launches the correct test function based on
+        the 'TestType' field of the rule.
+
+        The final report is written into a JSON file.
+        '''
+        if not self.collectlist:
+            self.parse_perf_metrics()
+        self.create_rules()
+        for i in range(0, len(self.workloads)):
+            self.wlidx = i
+            self._init_data()
+            self.collect_perf(self.workloads[i])
+            # Run positive value test
+            self.pos_val_test()
+            for r in self.rules:
+                # skip rules that use metrics which don't exist on this platform
+                testtype = r['TestType']
+                if not self.check_rule(testtype, r['Metrics']):
+                    continue
+                if testtype == 'RelationshipTest':
+                    self.relationship_test(r)
+                elif testtype == 'SingleMetricTest':
+                    self.single_test(r)
+                else:
+                    print("Unsupported Test Type: ", testtype)
+                    self.errlist.append("Unsupported Test Type from rule: " + str(r['RuleIndex']))
+            self._storewldata(i)
+            print("Workload: ", self.workloads[i])
+            print("Total metrics collected: ", self.failtests['PositiveValueTest']['Total Tests'])
+            print("Non-negative metric count: ", self.failtests['PositiveValueTest']['Passed Tests'])
+            print("Total Test Count: ", self.totalcnt)
+            print("Passed Test Count: ", self.passedcnt)
+
+        self.create_report()
+        return sum(self.alltotalcnt.values()) != sum(self.allpassedcnt.values())
+# End of Class Validator
+
+
+def main() -> int:
+    parser = argparse.ArgumentParser(description="Launch metric value validation")
+
+    parser.add_argument("-rule", help="Base validation rule file", required=True)
+    parser.add_argument("-output_dir", help="Path for validator output file and report file", required=True)
+    parser.add_argument("-debug", help="Debug run, save intermediate data to files", action="store_true", default=False)
+    parser.add_argument("-wl", help="Workload to run during data collection", default="true")
+    parser.add_argument("-m", help="Metric list to validate", default="")
+    args = parser.parse_args()
+    outpath = Path(args.output_dir)
+    reportf = Path.joinpath(outpath, 'perf_report.json')
+    fullrule = Path.joinpath(outpath, 'full_rule.json')
+    datafile = Path.joinpath(outpath, 'perf_data.json')
+
+    validator = Validator(args.rule, reportf, debug=args.debug,
+                        datafname=datafile, fullrulefname=fullrule, workload=args.wl,
+                        metrics=args.m)
+    ret = validator.test()
+
+    return ret
+
+
+if __name__ == "__main__":
+    import sys
+    sys.exit(main())
+
+
+
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation_rules.json b/tools/perf/tests/shell/lib/perf_metric_validation_rules.json
new file mode 100644
index 000000000000..eb6f59e018b7
--- /dev/null
+++ b/tools/perf/tests/shell/lib/perf_metric_validation_rules.json
@@ -0,0 +1,398 @@
+{
+    "SkipList": [
+        "tsx_aborted_cycles",
+        "tsx_transactional_cycles",
+        "C2_Pkg_Residency",
+        "C6_Pkg_Residency",
+        "C1_Core_Residency",
+        "C6_Core_Residency",
+        "tma_false_sharing",
+        "tma_remote_cache",
+        "tma_contested_accesses"
+    ],
+    "RelationshipRules": [
+        {
+            "RuleIndex": 1,
+            "Formula": "a+b",
+            "TestType": "RelationshipTest",
+            "RangeLower": "c",
+            "RangeUpper": "c",
+            
"ErrorThreshold": 5.0, +            "Description": "Intel(R) Optane(TM) Persistent Memory(PMEM)  bandwidth total includes Intel(R) Optane(TM) Persistent Memory(PMEM) read bandwidth and Intel(R) Optane(TM) Persistent Memory(PMEM) write bandwidth", +            "Metrics": [ +                { +                    "Name": "pmem_memory_bandwidth_read", +                    "Alias": "a" +                }, +                { +                    "Name": "pmem_memory_bandwidth_write", +                    "Alias": "b" +                }, +                { +                    "Name": "pmem_memory_bandwidth_total", +                    "Alias": "c" +                } +            ] +        }, +        { +            "RuleIndex": 2, +            "Formula": "a+b", +            "TestType": "RelationshipTest", +            "RangeLower": "c", +            "RangeUpper": "c", +            "ErrorThreshold": 5.0, +            "Description": "DDR memory bandwidth total includes DDR memory read bandwidth and DDR memory write bandwidth", +            "Metrics": [ +                { +                    "Name": "memory_bandwidth_read", +                    "Alias": "a" +                }, +                { +                    "Name": "memory_bandwidth_write", +                    "Alias": "b" +                }, +                { +                    "Name": "memory_bandwidth_total", +                    "Alias": "c" +                } +            ] +        }, +        { +            "RuleIndex": 3, +            "Formula": "a+b", +            "TestType": "RelationshipTest", +            "RangeLower": "100", +            "RangeUpper": "100", +            "ErrorThreshold": 5.0, +            "Description": "Total memory read accesses includes memory reads from last level cache (LLC) addressed to local DRAM and memory reads from the last level cache (LLC) addressed to remote DRAM.", +            "Metrics": [ +                { +                    "Name": "numa_reads_addressed_to_local_dram", +                    "Alias": "a" +                }, +                { +                    "Name": "numa_reads_addressed_to_remote_dram", +                    "Alias": "b" +                } +            ] +        }, +        { +            "RuleIndex": 4, +            "Formula": "a", +            "TestType": "SingleMetricTest", +            "RangeLower": "0.125", +            "RangeUpper": "", +            "ErrorThreshold": "", +            "Description": "", +            "Metrics": [ +                { +                    "Name": "cpi", +                    "Alias": "a" +                } +            ] +        }, +        { +            "RuleIndex": 5, +            "Formula": "", +            "TestType": "SingleMetricTest", +            "RangeLower": "0", +            "RangeUpper": "1", +            "ErrorThreshold": 5.0, +            "Description": "Ratio values should be within value range [0,1)", +            "Metrics": [ +                { +                    "Name": "loads_per_instr", +                    "Alias": "" +                }, +                { +                    "Name": "stores_per_instr", +                    "Alias": "" +                }, +                { +                    "Name": "l1d_mpi", +                    "Alias": "" +                }, +                { +                    "Name": "l1d_demand_data_read_hits_per_instr", +                    "Alias": "" +                }, +                { +                    "Name": "l1_i_code_read_misses_with_prefetches_per_instr", +   
                 "Alias": "" +                }, +                { +                    "Name": "l2_demand_data_read_hits_per_instr", +                    "Alias": "" +                }, +                { +                    "Name": "l2_mpi", +                    "Alias": "" +                }, +                { +                    "Name": "l2_demand_data_read_mpi", +                    "Alias": "" +                }, +                { +                    "Name": "l2_demand_code_mpi", +                    "Alias": "" +                } +            ] +        }, +        { +            "RuleIndex": 6, +            "Formula": "a+b+c+d", +            "TestType": "RelationshipTest", +            "RangeLower": "100", +            "RangeUpper": "100", +            "ErrorThreshold": 5.0, +            "Description": "Sum of TMA level 1 metrics should be 100%", +            "Metrics": [ +                { +                    "Name": "tma_frontend_bound", +                    "Alias": "a" +                }, +                { +                    "Name": "tma_bad_speculation", +                    "Alias": "b" +                }, +                { +                    "Name": "tma_backend_bound", +                    "Alias": "c" +                }, +                { +                    "Name": "tma_retiring", +                    "Alias": "d" +                } +            ] +        }, +        { +            "RuleIndex": 7, +            "Formula": "a+b", +            "TestType": "RelationshipTest", +            "RangeLower": "c", +            "RangeUpper": "c", +            "ErrorThreshold": 5.0, +            "Description": "Sum of the level 2 children should equal level 1 parent", +            "Metrics": [ +                { +                    "Name": "tma_fetch_latency", +                    "Alias": "a" +                }, +                { +                    "Name": "tma_fetch_bandwidth", +                    "Alias": "b" +                }, +                { +                    "Name": "tma_frontend_bound", +                    "Alias": "c" +                } +            ] +        }, +        { +            "RuleIndex": 8, +            "Formula": "a+b", +            "TestType": "RelationshipTest", +            "RangeLower": "c", +            "RangeUpper": "c", +            "ErrorThreshold": 5.0, +            "Description": "Sum of the level 2 children should equal level 1 parent", +            "Metrics": [ +                { +                    "Name": "tma_branch_mispredicts", +                    "Alias": "a" +                }, +                { +                    "Name": "tma_machine_clears", +                    "Alias": "b" +                }, +                { +                    "Name": "tma_bad_speculation", +                    "Alias": "c" +                } +            ] +        }, +        { +            "RuleIndex": 9, +            "Formula": "a+b", +            "TestType": "RelationshipTest", +            "RangeLower": "c", +            "RangeUpper": "c", +            "ErrorThreshold": 5.0, +            "Description": "Sum of the level 2 children should equal level 1 parent", +            "Metrics": [ +                { +                    "Name": "tma_memory_bound", +                    "Alias": "a" +                }, +                { +                    "Name": "tma_core_bound", +                    "Alias": "b" +                }, +                { +                    "Name": "tma_backend_bound", +                    "Alias": "c" +      
          } +            ] +        }, +        { +            "RuleIndex": 10, +            "Formula": "a+b", +            "TestType": "RelationshipTest", +            "RangeLower": "c", +            "RangeUpper": "c", +            "ErrorThreshold": 5.0, +            "Description": "Sum of the level 2 children should equal level 1 parent", +            "Metrics": [ +                { +                    "Name": "tma_light_operations", +                    "Alias": "a" +                }, +                { +                    "Name": "tma_heavy_operations", +                    "Alias": "b" +                }, +                { +                    "Name": "tma_retiring", +                    "Alias": "c" +                } +            ] +        }, +        { +            "RuleIndex": 11, +            "Formula": "a+b+c", +            "TestType": "RelationshipTest", +            "RangeLower": "100", +            "RangeUpper": "100", +            "ErrorThreshold": 5.0, +            "Description": "The all_requests includes the memory_page_empty, memory_page_misses, and memory_page_hits equals.", +            "Metrics": [ +                { +                    "Name": "memory_page_empty_vs_all_requests", +                    "Alias": "a" +                }, +                { +                    "Name": "memory_page_misses_vs_all_requests", +                    "Alias": "b" +                }, +                { +                    "Name": "memory_page_hits_vs_all_requests", +                    "Alias": "c" +                } +            ] +        }, +        { +            "RuleIndex": 12, +            "Formula": "a-b", +            "TestType": "RelationshipTest", +            "RangeLower": "0", +            "RangeUpper": "", +            "ErrorThreshold": 5.0, +            "Description": "CPU utilization in kernel mode should always be <= cpu utilization", +            "Metrics": [ +                { +                    "Name": "cpu_utilization", +                    "Alias": "a" +                }, +                { +                    "Name": "cpu_utilization_in_kernel_mode", +                    "Alias": "b" +                } +            ] +        }, +        { +            "RuleIndex": 13, +            "Formula": "a-b", +            "TestType": "RelationshipTest", +            "RangeLower": "0", +            "RangeUpper": "", +            "ErrorThreshold": 5.0, +            "Description": "Total L2 misses per instruction should be >= L2 demand data read misses per instruction", +            "Metrics": [ +                { +                    "Name": "l2_mpi", +                    "Alias": "a" +                }, +                { +                    "Name": "l2_demand_data_read_mpi", +                    "Alias": "b" +                } +            ] +        }, +        { +            "RuleIndex": 14, +            "Formula": "a-b", +            "TestType": "RelationshipTest", +            "RangeLower": "0", +            "RangeUpper": "", +            "ErrorThreshold": 5.0, +            "Description": "Total L2 misses per instruction should be >= L2 demand code misses per instruction", +            "Metrics": [ +                { +                    "Name": "l2_mpi", +                    "Alias": "a" +                }, +                { +                    "Name": "l2_demand_code_mpi", +                    "Alias": "b" +                } +            ] +        }, +        { +            "RuleIndex": 15, +            "Formula": "b+c+d", +            "TestType": 
"RelationshipTest", +            "RangeLower": "a", +            "RangeUpper": "a", +            "ErrorThreshold": 5.0, +            "Description": "L3 data read, rfo, code misses per instruction equals total L3 misses per instruction.", +            "Metrics": [ +                { +                    "Name": "llc_mpi", +                    "Alias": "a" +                }, +                { +                    "Name": "llc_data_read_mpi_demand_plus_prefetch", +                    "Alias": "b" +                }, +                { +                    "Name": "llc_rfo_read_mpi_demand_plus_prefetch", +                    "Alias": "c" +                }, +                { +                    "Name": "llc_code_read_mpi_demand_plus_prefetch", +                    "Alias": "d" +                } +            ] +        }, +        { +            "RuleIndex": 16, +            "Formula": "a", +            "TestType": "SingleMetricTest", +            "RangeLower": "0", +            "RangeUpper": "8", +            "ErrorThreshold": 0.0, +            "Description": "Setting generous range for allowable frequencies", +            "Metrics": [ +                { +                    "Name": "uncore_freq", +                    "Alias": "a" +                } +            ] +        }, +        { +            "RuleIndex": 17, +            "Formula": "a", +            "TestType": "SingleMetricTest", +            "RangeLower": "0", +            "RangeUpper": "8", +            "ErrorThreshold": 0.0, +            "Description": "Setting generous range for allowable frequencies", +            "Metrics": [ +                { +                    "Name": "cpu_operating_frequency", +                    "Alias": "a" +                } +            ] +        } +    ] +}
\ No newline at end of file diff --git a/tools/perf/tests/shell/lib/stat_output.sh b/tools/perf/tests/shell/lib/stat_output.sh new file mode 100644 index 000000000000..698343f0ecf9 --- /dev/null +++ b/tools/perf/tests/shell/lib/stat_output.sh @@ -0,0 +1,169 @@ +# SPDX-License-Identifier: GPL-2.0 + +# Return true if perf_event_paranoid is > $1 and not running as root. +function ParanoidAndNotRoot() +{ +	 [ "$(id -u)" != 0 ] && [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt $1 ] +} + +# $1 name $2 extra_opt +check_no_args() +{ +        echo -n "Checking $1 output: no args " +        perf stat $2 true +        commachecker --no-args +        echo "[Success]" +} + +check_system_wide() +{ +	echo -n "Checking $1 output: system wide " +	if ParanoidAndNotRoot 0 +	then +		echo "[Skip] paranoid and not root" +		return +	fi +	perf stat -a $2 true +	commachecker --system-wide +	echo "[Success]" +} + +check_system_wide_no_aggr() +{ +	echo -n "Checking $1 output: system wide no aggregation " +	if ParanoidAndNotRoot 0 +	then +		echo "[Skip] paranoid and not root" +		return +	fi +	perf stat -A -a --no-merge $2 true +	commachecker --system-wide-no-aggr +	echo "[Success]" +} + +check_interval() +{ +	echo -n "Checking $1 output: interval " +	perf stat -I 1000 $2 true +	commachecker --interval +	echo "[Success]" +} + +check_event() +{ +	echo -n "Checking $1 output: event " +	perf stat -e cpu-clock $2 true +	commachecker --event +	echo "[Success]" +} + +check_per_core() +{ +	echo -n "Checking $1 output: per core " +	if ParanoidAndNotRoot 0 +	then +		echo "[Skip] paranoid and not root" +		return +	fi +	perf stat --per-core -a $2 true +	commachecker --per-core +	echo "[Success]" +} + +check_per_thread() +{ +	echo -n "Checking $1 output: per thread " +	if ParanoidAndNotRoot 0 +	then +		echo "[Skip] paranoid and not root" +		return +	fi +	perf stat --per-thread -a $2 true +	commachecker --per-thread +	echo "[Success]" +} + +check_per_cache_instance() +{ +	echo -n "Checking $1 output: per cache instance " +	if ParanoidAndNotRoot 0 +	then +		echo "[Skip] paranoid and not root" +		return +	fi +	perf stat --per-cache -a $2 true +	commachecker --per-cache +	echo "[Success]" +} + +check_per_die() +{ +	echo -n "Checking $1 output: per die " +	if ParanoidAndNotRoot 0 +	then +		echo "[Skip] paranoid and not root" +		return +	fi +	perf stat --per-die -a $2 true +	commachecker --per-die +	echo "[Success]" +} + +check_per_node() +{ +	echo -n "Checking $1 output: per node " +	if ParanoidAndNotRoot 0 +	then +		echo "[Skip] paranoid and not root" +		return +	fi +	perf stat --per-node -a $2 true +	commachecker --per-node +	echo "[Success]" +} + +check_per_socket() +{ +	echo -n "Checking $1 output: per socket " +	if ParanoidAndNotRoot 0 +	then +		echo "[Skip] paranoid and not root" +		return +	fi +	perf stat --per-socket -a $2 true +	commachecker --per-socket +	echo "[Success]" +} + +# The perf stat options for per-socket, per-core, per-die +# and -A ( no_aggr mode ) uses the info fetched from this +# directory: "/sys/devices/system/cpu/cpu*/topology". For +# example, socket value is fetched from "physical_package_id" +# file in topology directory. +# Reference: cpu__get_topology_int in util/cpumap.c +# If the platform doesn't expose topology information, values +# will be set to -1. For example, incase of pSeries platform +# of powerpc, value for  "physical_package_id" is restricted +# and set to -1. 
Check here validates the socket-id read from +# topology file before proceeding further + +FILE_LOC="/sys/devices/system/cpu/cpu*/topology/" +FILE_NAME="physical_package_id" + +function check_for_topology() +{ +	if ! ParanoidAndNotRoot 0 +	then +		socket_file=`ls $FILE_LOC/$FILE_NAME | head -n 1` +		[ -z $socket_file ] && { +			echo 0 +			return +		} +		socket_id=`cat $socket_file` +		[ $socket_id == -1 ] && { +			echo 1 +			return +		} +	fi +	echo 0 +} diff --git a/tools/perf/tests/shell/lock_contention.sh b/tools/perf/tests/shell/lock_contention.sh index be5fcafb26aa..f2cc187b6186 100755 --- a/tools/perf/tests/shell/lock_contention.sh +++ b/tools/perf/tests/shell/lock_contention.sh @@ -11,14 +11,14 @@ result=$(mktemp /tmp/__perf_test.result.XXXXX)  cleanup() {  	rm -f ${perfdata}  	rm -f ${result} -	trap - exit term int +	trap - EXIT TERM INT  }  trap_cleanup() {  	cleanup  	exit ${err}  } -trap trap_cleanup exit term int +trap trap_cleanup EXIT TERM INT  check() {  	if [ `id -u` != 0 ]; then @@ -40,8 +40,8 @@ test_record()  	perf lock record -o ${perfdata} -- perf bench sched messaging > /dev/null 2>&1  	# the output goes to the stderr and we expect only 1 output (-E 1)  	perf lock contention -i ${perfdata} -E 1 -q 2> ${result} -	if [ $(cat "${result}" | wc -l) != "1" ]; then -		echo "[Fail] Recorded result count is not 1:" $(cat "${result}" | wc -l) +	if [ "$(cat "${result}" | wc -l)" != "1" ]; then +		echo "[Fail] Recorded result count is not 1:" "$(cat "${result}" | wc -l)"  		err=1  		exit  	fi @@ -58,8 +58,8 @@ test_bpf()  	# the perf lock contention output goes to the stderr  	perf lock con -a -b -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result} -	if [ $(cat "${result}" | wc -l) != "1" ]; then -		echo "[Fail] BPF result count is not 1:" $(cat "${result}" | wc -l) +	if [ "$(cat "${result}" | wc -l)" != "1" ]; then +		echo "[Fail] BPF result count is not 1:" "$(cat "${result}" | wc -l)"  		err=1  		exit  	fi @@ -70,8 +70,8 @@ test_record_concurrent()  	echo "Testing perf lock record and perf lock contention at the same time"  	perf lock record -o- -- perf bench sched messaging 2> /dev/null | \  	perf lock contention -i- -E 1 -q 2> ${result} -	if [ $(cat "${result}" | wc -l) != "1" ]; then -		echo "[Fail] Recorded result count is not 1:" $(cat "${result}" | wc -l) +	if [ "$(cat "${result}" | wc -l)" != "1" ]; then +		echo "[Fail] Recorded result count is not 1:" "$(cat "${result}" | wc -l)"  		err=1  		exit  	fi @@ -81,8 +81,8 @@ test_aggr_task()  {  	echo "Testing perf lock contention --threads"  	perf lock contention -i ${perfdata} -t -E 1 -q 2> ${result} -	if [ $(cat "${result}" | wc -l) != "1" ]; then -		echo "[Fail] Recorded result count is not 1:" $(cat "${result}" | wc -l) +	if [ "$(cat "${result}" | wc -l)" != "1" ]; then +		echo "[Fail] Recorded result count is not 1:" "$(cat "${result}" | wc -l)"  		err=1  		exit  	fi @@ -93,8 +93,8 @@ test_aggr_task()  	# the perf lock contention output goes to the stderr  	perf lock con -a -b -t -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result} -	if [ $(cat "${result}" | wc -l) != "1" ]; then -		echo "[Fail] BPF result count is not 1:" $(cat "${result}" | wc -l) +	if [ "$(cat "${result}" | wc -l)" != "1" ]; then +		echo "[Fail] BPF result count is not 1:" "$(cat "${result}" | wc -l)"  		err=1  		exit  	fi @@ -104,8 +104,8 @@ test_aggr_addr()  {  	echo "Testing perf lock contention --lock-addr"  	perf lock contention -i ${perfdata} -l -E 1 -q 2> ${result} -	if [ $(cat "${result}" | wc -l) != "1" ]; then -		echo 
"[Fail] Recorded result count is not 1:" $(cat "${result}" | wc -l) +	if [ "$(cat "${result}" | wc -l)" != "1" ]; then +		echo "[Fail] Recorded result count is not 1:" "$(cat "${result}" | wc -l)"  		err=1  		exit  	fi @@ -116,8 +116,8 @@ test_aggr_addr()  	# the perf lock contention output goes to the stderr  	perf lock con -a -b -l -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result} -	if [ $(cat "${result}" | wc -l) != "1" ]; then -		echo "[Fail] BPF result count is not 1:" $(cat "${result}" | wc -l) +	if [ "$(cat "${result}" | wc -l)" != "1" ]; then +		echo "[Fail] BPF result count is not 1:" "$(cat "${result}" | wc -l)"  		err=1  		exit  	fi @@ -127,8 +127,8 @@ test_type_filter()  {  	echo "Testing perf lock contention --type-filter (w/ spinlock)"  	perf lock contention -i ${perfdata} -Y spinlock -q 2> ${result} -	if [ $(grep -c -v spinlock "${result}") != "0" ]; then -		echo "[Fail] Recorded result should not have non-spinlocks:" $(cat "${result}") +	if [ "$(grep -c -v spinlock "${result}")" != "0" ]; then +		echo "[Fail] Recorded result should not have non-spinlocks:" "$(cat "${result}")"  		err=1  		exit  	fi @@ -138,8 +138,8 @@ test_type_filter()  	fi  	perf lock con -a -b -Y spinlock -q -- perf bench sched messaging > /dev/null 2> ${result} -	if [ $(grep -c -v spinlock "${result}") != "0" ]; then -		echo "[Fail] BPF result should not have non-spinlocks:" $(cat "${result}") +	if [ "$(grep -c -v spinlock "${result}")" != "0" ]; then +		echo "[Fail] BPF result should not have non-spinlocks:" "$(cat "${result}")"  		err=1  		exit  	fi @@ -149,7 +149,7 @@ test_lock_filter()  {  	echo "Testing perf lock contention --lock-filter (w/ tasklist_lock)"  	perf lock contention -i ${perfdata} -l -q 2> ${result} -	if [ $(grep -c tasklist_lock "${result}") != "1" ]; then +	if [ "$(grep -c tasklist_lock "${result}")" != "1" ]; then  		echo "[Skip] Could not find 'tasklist_lock'"  		return  	fi @@ -159,8 +159,8 @@ test_lock_filter()  	# find out the type of tasklist_lock  	local type=$(head -1 "${result}" | awk '{ print $8 }' | sed -e 's/:.*//') -	if [ $(grep -c -v "${type}" "${result}") != "0" ]; then -		echo "[Fail] Recorded result should not have non-${type} locks:" $(cat "${result}") +	if [ "$(grep -c -v "${type}" "${result}")" != "0" ]; then +		echo "[Fail] Recorded result should not have non-${type} locks:" "$(cat "${result}")"  		err=1  		exit  	fi @@ -170,8 +170,8 @@ test_lock_filter()  	fi  	perf lock con -a -b -L tasklist_lock -q -- perf bench sched messaging > /dev/null 2> ${result} -	if [ $(grep -c -v "${type}" "${result}") != "0" ]; then -		echo "[Fail] BPF result should not have non-${type} locks:" $(cat "${result}") +	if [ "$(grep -c -v "${type}" "${result}")" != "0" ]; then +		echo "[Fail] BPF result should not have non-${type} locks:" "$(cat "${result}")"  		err=1  		exit  	fi @@ -181,14 +181,14 @@ test_stack_filter()  {  	echo "Testing perf lock contention --callstack-filter (w/ unix_stream)"  	perf lock contention -i ${perfdata} -v -q 2> ${result} -	if [ $(grep -c unix_stream "${result}") == "0" ]; then +	if [ "$(grep -c unix_stream "${result}")" = "0" ]; then  		echo "[Skip] Could not find 'unix_stream'"  		return  	fi  	perf lock contention -i ${perfdata} -E 1 -S unix_stream -q 2> ${result} -	if [ $(cat "${result}" | wc -l) != "1" ]; then -		echo "[Fail] Recorded result should have a lock from unix_stream:" $(cat "${result}") +	if [ "$(cat "${result}" | wc -l)" != "1" ]; then +		echo "[Fail] Recorded result should have a lock from unix_stream:" "$(cat "${result}")"  	
	err=1  		exit  	fi @@ -198,8 +198,8 @@ test_stack_filter()  	fi  	perf lock con -a -b -S unix_stream -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result} -	if [ $(cat "${result}" | wc -l) != "1" ]; then -		echo "[Fail] BPF result should have a lock from unix_stream:" $(cat "${result}") +	if [ "$(cat "${result}" | wc -l)" != "1" ]; then +		echo "[Fail] BPF result should have a lock from unix_stream:" "$(cat "${result}")"  		err=1  		exit  	fi @@ -209,14 +209,14 @@ test_aggr_task_stack_filter()  {  	echo "Testing perf lock contention --callstack-filter with task aggregation"  	perf lock contention -i ${perfdata} -v -q 2> ${result} -	if [ $(grep -c unix_stream "${result}") == "0" ]; then +	if [ "$(grep -c unix_stream "${result}")" = "0" ]; then  		echo "[Skip] Could not find 'unix_stream'"  		return  	fi  	perf lock contention -i ${perfdata} -t -E 1 -S unix_stream -q 2> ${result} -	if [ $(cat "${result}" | wc -l) != "1" ]; then -		echo "[Fail] Recorded result should have a task from unix_stream:" $(cat "${result}") +	if [ "$(cat "${result}" | wc -l)" != "1" ]; then +		echo "[Fail] Recorded result should have a task from unix_stream:" "$(cat "${result}")"  		err=1  		exit  	fi @@ -226,8 +226,8 @@ test_aggr_task_stack_filter()  	fi  	perf lock con -a -b -t -S unix_stream -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result} -	if [ $(cat "${result}" | wc -l) != "1" ]; then -		echo "[Fail] BPF result should have a task from unix_stream:" $(cat "${result}") +	if [ "$(cat "${result}" | wc -l)" != "1" ]; then +		echo "[Fail] BPF result should have a task from unix_stream:" "$(cat "${result}")"  		err=1  		exit  	fi diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh index bbb5b3d185fa..89214a6d9951 100755 --- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh @@ -10,11 +10,11 @@  # SPDX-License-Identifier: GPL-2.0  # Arnaldo Carvalho de Melo <acme@kernel.org>, 2017 -. $(dirname $0)/lib/probe.sh -. $(dirname $0)/lib/probe_vfs_getname.sh +. "$(dirname "$0")/lib/probe.sh" +. "$(dirname "$0")/lib/probe_vfs_getname.sh"  libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g') -nm -Dg $libc 2>/dev/null | fgrep -q inet_pton || exit 254 +nm -Dg $libc 2>/dev/null | grep -F -q inet_pton || exit 254  event_pattern='probe_libc:inet_pton(\_[[:digit:]]+)?' @@ -23,7 +23,7 @@ add_libc_inet_pton_event() {  	event_name=$(perf probe -f -x $libc -a inet_pton 2>&1 | tail -n +2 | head -n -5 | \  			grep -P -o "$event_pattern(?=[[:space:]]\(on inet_pton in $libc\))") -	if [ $? -ne 0 -o -z "$event_name" ] ; then +	if [ $? -ne 0 ] || [ -z "$event_name" ] ; then  		printf "FAIL: could not add event\n"  		return 1  	fi @@ -94,7 +94,7 @@ delete_libc_inet_pton_event() {  }  # Check for IPv6 interface existence -ip a sh lo | fgrep -q inet6 || exit 2 +ip a sh lo | grep -F -q inet6 || exit 2  skip_if_no_perf_probe && \  add_libc_inet_pton_event && \ diff --git a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh index 1341437e1bd9..7f664f1889d9 100755 --- a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh +++ b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh @@ -9,11 +9,11 @@  # SPDX-License-Identifier: GPL-2.0  # Arnaldo Carvalho de Melo <acme@kernel.org>, 2017 -. $(dirname $0)/lib/probe.sh +. "$(dirname "$0")/lib/probe.sh"  skip_if_no_perf_probe || exit 2 -. 
$(dirname $0)/lib/probe_vfs_getname.sh +. "$(dirname "$0")/lib/probe_vfs_getname.sh"  record_open_file() {  	echo "Recording open file:" diff --git a/tools/perf/tests/shell/stat+csv_output.sh b/tools/perf/tests/shell/stat+csv_output.sh index fb78b6251a4e..34a0701fee05 100755 --- a/tools/perf/tests/shell/stat+csv_output.sh +++ b/tools/perf/tests/shell/stat+csv_output.sh @@ -6,7 +6,8 @@  set -e -skip_test=0 +. $(dirname $0)/lib/stat_output.sh +  csv_sep=@  stat_output=$(mktemp /tmp/__perf_test.stat_output.csv.XXXXX) @@ -35,11 +36,12 @@ function commachecker()  	;; "--interval")	exp=7  	;; "--per-thread")	exp=7  	;; "--system-wide-no-aggr")	exp=7 -				[ $(uname -m) = "s390x" ] && exp='^[6-7]$' +				[ "$(uname -m)" = "s390x" ] && exp='^[6-7]$'  	;; "--per-core")	exp=8  	;; "--per-socket")	exp=8  	;; "--per-node")	exp=8  	;; "--per-die")		exp=8 +	;; "--per-cache")	exp=8  	esac  	while read line @@ -62,168 +64,22 @@ function commachecker()  	return 0  } -# Return true if perf_event_paranoid is > $1 and not running as root. -function ParanoidAndNotRoot() -{ -	 [ $(id -u) != 0 ] && [ $(cat /proc/sys/kernel/perf_event_paranoid) -gt $1 ] -} - -check_no_args() -{ -	echo -n "Checking CSV output: no args " -	perf stat -x$csv_sep -o "${stat_output}" true -        commachecker --no-args -	echo "[Success]" -} - -check_system_wide() -{ -	echo -n "Checking CSV output: system wide " -	if ParanoidAndNotRoot 0 -	then -		echo "[Skip] paranoid and not root" -		return -	fi -	perf stat -x$csv_sep -a -o "${stat_output}" true -        commachecker --system-wide -	echo "[Success]" -} - -check_system_wide_no_aggr() -{ -	echo -n "Checking CSV output: system wide no aggregation " -	if ParanoidAndNotRoot 0 -	then -		echo "[Skip] paranoid and not root" -		return -	fi -	perf stat -x$csv_sep -A -a --no-merge -o "${stat_output}" true -        commachecker --system-wide-no-aggr -	echo "[Success]" -} - -check_interval() -{ -	echo -n "Checking CSV output: interval " -	perf stat -x$csv_sep -I 1000 -o "${stat_output}" true -        commachecker --interval -	echo "[Success]" -} - - -check_event() -{ -	echo -n "Checking CSV output: event " -	perf stat -x$csv_sep -e cpu-clock -o "${stat_output}" true -        commachecker --event -	echo "[Success]" -} - -check_per_core() -{ -	echo -n "Checking CSV output: per core " -	if ParanoidAndNotRoot 0 -	then -		echo "[Skip] paranoid and not root" -		return -	fi -	perf stat -x$csv_sep --per-core -a -o "${stat_output}" true -        commachecker --per-core -	echo "[Success]" -} - -check_per_thread() -{ -	echo -n "Checking CSV output: per thread " -	if ParanoidAndNotRoot 0 -	then -		echo "[Skip] paranoid and not root" -		return -	fi -	perf stat -x$csv_sep --per-thread -a -o "${stat_output}" true -        commachecker --per-thread -	echo "[Success]" -} - -check_per_die() -{ -	echo -n "Checking CSV output: per die " -	if ParanoidAndNotRoot 0 -	then -		echo "[Skip] paranoid and not root" -		return -	fi -	perf stat -x$csv_sep --per-die -a -o "${stat_output}" true -        commachecker --per-die -	echo "[Success]" -} - -check_per_node() -{ -	echo -n "Checking CSV output: per node " -	if ParanoidAndNotRoot 0 -	then -		echo "[Skip] paranoid and not root" -		return -	fi -	perf stat -x$csv_sep --per-node -a -o "${stat_output}" true -        commachecker --per-node -	echo "[Success]" -} - -check_per_socket() -{ -	echo -n "Checking CSV output: per socket " -	if ParanoidAndNotRoot 0 -	then -		echo "[Skip] paranoid and not root" -		return -	fi -	perf stat -x$csv_sep --per-socket -a -o "${stat_output}" true - 
       commachecker --per-socket -	echo "[Success]" -} - -# The perf stat options for per-socket, per-core, per-die -# and -A ( no_aggr mode ) uses the info fetched from this -# directory: "/sys/devices/system/cpu/cpu*/topology". For -# example, socket value is fetched from "physical_package_id" -# file in topology directory. -# Reference: cpu__get_topology_int in util/cpumap.c -# If the platform doesn't expose topology information, values -# will be set to -1. For example, incase of pSeries platform -# of powerpc, value for  "physical_package_id" is restricted -# and set to -1. Check here validates the socket-id read from -# topology file before proceeding further - -FILE_LOC="/sys/devices/system/cpu/cpu*/topology/" -FILE_NAME="physical_package_id" - -check_for_topology() -{ -	if ! ParanoidAndNotRoot 0 -	then -		socket_file=`ls $FILE_LOC/$FILE_NAME | head -n 1` -		[ -z $socket_file ] && return 0 -		socket_id=`cat $socket_file` -		[ $socket_id == -1 ] && skip_test=1 -		return 0 -	fi -} +perf_cmd="-x$csv_sep -o ${stat_output}" -check_for_topology -check_no_args -check_system_wide -check_interval -check_event -check_per_thread -check_per_node +skip_test=$(check_for_topology) +check_no_args "CSV" "$perf_cmd" +check_system_wide "CSV" "$perf_cmd" +check_interval "CSV" "$perf_cmd" +check_event "CSV" "$perf_cmd" +check_per_thread "CSV" "$perf_cmd" +check_per_node "CSV" "$perf_cmd"  if [ $skip_test -ne 1 ]  then -	check_system_wide_no_aggr -	check_per_core -	check_per_die -	check_per_socket +	check_system_wide_no_aggr "CSV" "$perf_cmd" +	check_per_core "CSV" "$perf_cmd" +	check_per_cache_instance "CSV" "$perf_cmd" +	check_per_die "CSV" "$perf_cmd" +	check_per_socket "CSV" "$perf_cmd"  else  	echo "[Skip] Skipping tests for system_wide_no_aggr, per_core, per_die and per_socket since socket id exposed via topology is invalid"  fi diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh index f3e4967cc72e..196e22672c50 100755 --- a/tools/perf/tests/shell/stat+json_output.sh +++ b/tools/perf/tests/shell/stat+json_output.sh @@ -40,7 +40,7 @@ trap trap_cleanup EXIT TERM INT  # Return true if perf_event_paranoid is > $1 and not running as root.  
function ParanoidAndNotRoot()  { -	 [ $(id -u) != 0 ] && [ $(cat /proc/sys/kernel/perf_event_paranoid) -gt $1 ] +	 [ "$(id -u)" != 0 ] && [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt $1 ]  }  check_no_args() @@ -120,6 +120,18 @@ check_per_thread()  	echo "[Success]"  } +check_per_cache_instance() +{ +	echo -n "Checking json output: per cache_instance " +	if ParanoidAndNotRoot 0 +	then +		echo "[Skip] paranoia and not root" +		return +	fi +	perf stat -j --per-cache -a true 2>&1 | $PYTHON $pythonchecker --per-cache +	echo "[Success]" +} +  check_per_die()  {  	echo -n "Checking json output: per die " @@ -197,6 +209,7 @@ if [ $skip_test -ne 1 ]  then  	check_system_wide_no_aggr  	check_per_core +	check_per_cache_instance  	check_per_die  	check_per_socket  else diff --git a/tools/perf/tests/shell/stat+shadow_stat.sh b/tools/perf/tests/shell/stat+shadow_stat.sh index e6e35fc6c882..0e9cba84e757 100755 --- a/tools/perf/tests/shell/stat+shadow_stat.sh +++ b/tools/perf/tests/shell/stat+shadow_stat.sh @@ -33,7 +33,7 @@ test_global_aggr()  		fi  		# use printf for rounding and a leading zero -		res=`printf "%.2f" $(echo "scale=6; $num / $cyc" | bc -q)` +		res=`printf "%.2f" "$(echo "scale=6; $num / $cyc" | bc -q)"`  		if [ "$ipc" != "$res" ]; then  			echo "IPC is different: $res != $ipc  ($num / $cyc)"  			exit 1 @@ -67,7 +67,7 @@ test_no_aggr()  		fi  		# use printf for rounding and a leading zero -		res=`printf "%.2f" $(echo "scale=6; $num / $cyc" | bc -q)` +		res=`printf "%.2f" "$(echo "scale=6; $num / $cyc" | bc -q)"`  		if [ "$ipc" != "$res" ]; then  			echo "IPC is different for $cpu: $res != $ipc  ($num / $cyc)"  			exit 1 diff --git a/tools/perf/tests/shell/stat+std_output.sh b/tools/perf/tests/shell/stat+std_output.sh new file mode 100755 index 000000000000..f972b31fa0c2 --- /dev/null +++ b/tools/perf/tests/shell/stat+std_output.sh @@ -0,0 +1,108 @@ +#!/bin/bash +# perf stat STD output linter +# SPDX-License-Identifier: GPL-2.0 +# Tests various perf stat STD output commands for +# default event and metricgroup + +set -e + +. $(dirname $0)/lib/stat_output.sh + +stat_output=$(mktemp /tmp/__perf_test.stat_output.std.XXXXX) + +event_name=(cpu-clock task-clock context-switches cpu-migrations page-faults stalled-cycles-frontend stalled-cycles-backend cycles instructions branches branch-misses) +event_metric=("CPUs utilized" "CPUs utilized" "/sec" "/sec" "/sec" "frontend cycles idle" "backend cycles idle" "GHz" "insn per cycle" "/sec" "of all branches") +skip_metric=("stalled cycles per insn" "tma_") + +cleanup() { +  rm -f "${stat_output}" + +  trap - EXIT TERM INT +} + +trap_cleanup() { +  cleanup +  exit 1 +} +trap trap_cleanup EXIT TERM INT + +function commachecker() +{ +	local -i cnt=0 +	local prefix=1 + +	case "$1" +	in "--interval")	prefix=2 +	;; "--per-thread")	prefix=2 +	;; "--system-wide-no-aggr")	prefix=2 +	;; "--per-core")	prefix=3 +	;; "--per-socket")	prefix=3 +	;; "--per-node")	prefix=3 +	;; "--per-die")		prefix=3 +	;; "--per-cache")	prefix=3 +	esac + +	while read line +	do +		# Ignore initial "started on" comment. +		x=${line:0:1} +		[ "$x" = "#" ] && continue +		# Ignore initial blank line. 
+		[ "$line" = "" ] && continue +		# Ignore "Performance counter stats" +		x=${line:0:25} +		[ "$x" = "Performance counter stats" ] && continue +		# Ignore "seconds time elapsed" and break +		[[ "$line" == *"time elapsed"* ]] && break + +		main_body=$(echo $line | cut -d' ' -f$prefix-) +		x=${main_body%#*} +		[ "$x" = "" ] && continue + +		# Skip metrics without event name +		y=${main_body#*#} +		for i in "${!skip_metric[@]}"; do +			[[ "$y" == *"${skip_metric[$i]}"* ]] && break +		done +		[[ "$y" == *"${skip_metric[$i]}"* ]] && continue + +		# Check default event +		for i in "${!event_name[@]}"; do +			[[ "$x" == *"${event_name[$i]}"* ]] && break +		done + +		[[ ! "$x" == *"${event_name[$i]}"* ]] && { +			echo "Unknown event name in $line" 1>&2 +			exit 1; +		} + +		# Check event metric if it exists +		[[ ! "$main_body" == *"#"* ]] && continue +		[[ ! "$main_body" == *"${event_metric[$i]}"* ]] && { +			echo "wrong event metric. expected ${event_metric[$i]} in $line" 1>&2 +			exit 1; +		} +	done < "${stat_output}" +	return 0 +} + +perf_cmd="-o ${stat_output}" + +skip_test=$(check_for_topology) +check_no_args "STD" "$perf_cmd" +check_system_wide "STD" "$perf_cmd" +check_interval "STD" "$perf_cmd" +check_per_thread "STD" "$perf_cmd" +check_per_node "STD" "$perf_cmd" +if [ $skip_test -ne 1 ] +then +	check_system_wide_no_aggr "STD" "$perf_cmd" +	check_per_core "STD" "$perf_cmd" +	check_per_cache_instance "STD" "$perf_cmd" +	check_per_die "STD" "$perf_cmd" +	check_per_socket "STD" "$perf_cmd" +else +	echo "[Skip] Skipping tests for system_wide_no_aggr, per_core, per_die and per_socket since socket id exposed via topology is invalid" +fi +cleanup +exit 0 diff --git a/tools/perf/tests/shell/stat.sh b/tools/perf/tests/shell/stat.sh index b154fbb15d54..3f1e67795490 100755 --- a/tools/perf/tests/shell/stat.sh +++ b/tools/perf/tests/shell/stat.sh @@ -103,10 +103,54 @@ test_topdown_weak_groups() {    echo "Topdown weak groups test [Success]"  } +test_cputype() { +  # Test --cputype argument. +  echo "cputype test" + +  # Bogus PMU should fail. +  if perf stat --cputype="123" -e instructions true > /dev/null 2>&1 +  then +    echo "cputype test [Bogus PMU didn't fail]" +    err=1 +    return +  fi + +  # Find a known PMU for cputype. +  pmu="" +  for i in cpu cpu_atom armv8_pmuv3_0 +  do +    if test -d "/sys/devices/$i" +    then +      pmu="$i" +      break +    fi +    if perf stat -e "$i/instructions/" true > /dev/null 2>&1 +    then +      pmu="$i" +      break +    fi +  done +  if test "x$pmu" = "x" +  then +    echo "cputype test [Skipped known PMU not found]" +    return +  fi + +  # Test running with cputype produces output. +  if ! 
perf stat --cputype="$pmu" -e instructions true 2>&1 | grep -E -q "instructions" +  then +    echo "cputype test [Failed count missed with given filter]" +    err=1 +    return +  fi +  echo "cputype test [Success]" +} +  test_default_stat  test_stat_record_report  test_stat_record_script  test_stat_repeat_weak_groups  test_topdown_groups  test_topdown_weak_groups +test_cputype  exit $err diff --git a/tools/perf/tests/shell/stat_all_metrics.sh b/tools/perf/tests/shell/stat_all_metrics.sh index 22e9cb294b40..54774525e18a 100755 --- a/tools/perf/tests/shell/stat_all_metrics.sh +++ b/tools/perf/tests/shell/stat_all_metrics.sh @@ -6,20 +6,20 @@ err=0  for m in $(perf list --raw-dump metrics); do    echo "Testing $m"    result=$(perf stat -M "$m" true 2>&1) -  if [[ "$result" =~ "${m:0:50}" ]] || [[ "$result" =~ "<not supported>" ]] +  if [[ "$result" =~ ${m:0:50} ]] || [[ "$result" =~ "<not supported>" ]]    then      continue    fi    # Failed so try system wide.    result=$(perf stat -M "$m" -a sleep 0.01 2>&1) -  if [[ "$result" =~ "${m:0:50}" ]] +  if [[ "$result" =~ ${m:0:50} ]]    then      continue    fi    # Failed again, possibly the workload was too small so retry with something    # longer.    result=$(perf stat -M "$m" perf bench internals synthesize 2>&1) -  if [[ "$result" =~ "${m:0:50}" ]] +  if [[ "$result" =~ ${m:0:50} ]]    then      continue    fi diff --git a/tools/perf/tests/shell/stat_all_pfm.sh b/tools/perf/tests/shell/stat_all_pfm.sh new file mode 100755 index 000000000000..4d004f777a6e --- /dev/null +++ b/tools/perf/tests/shell/stat_all_pfm.sh @@ -0,0 +1,51 @@ +#!/bin/sh +# perf all libpfm4 events test +# SPDX-License-Identifier: GPL-2.0 + +if perf version --build-options | grep HAVE_LIBPFM | grep -q OFF +then +  echo "Skipping, no libpfm4 support" +  exit 2 +fi + +err=0 +for p in $(perf list --raw-dump pfm) +do +  if echo "$p" | grep -q unc_ +  then +    echo "Skipping uncore event '$p' that may require additional options." +    continue +  fi +  echo "Testing $p" +  result=$(perf stat --pfm-events "$p" true 2>&1) +  x=$? +  if echo "$result" | grep -q "failed to parse event $p : invalid or missing unit mask" +  then +    continue +  fi +  if test "$x" -ne "0" +  then +    echo "Unexpected exit code '$x'" +    err=1 +  fi +  if ! echo "$result" | grep -q "$p" && ! echo "$result" | grep -q "<not supported>" +  then +    # We failed to see the event and it is supported. Possibly the workload was +    # too small so retry with something longer. +    result=$(perf stat --pfm-events "$p" perf bench internals synthesize 2>&1) +    x=$? +    if test "$x" -ne "0" +    then +      echo "Unexpected exit code '$x'" +      err=1 +    fi +    if ! echo "$result" | grep -q "$p" +    then +      echo "Event '$p' not printed in:" +      echo "$result" +      err=1 +    fi +  fi +done + +exit "$err" diff --git a/tools/perf/tests/shell/stat_metrics_values.sh b/tools/perf/tests/shell/stat_metrics_values.sh new file mode 100755 index 000000000000..ad94c936de7e --- /dev/null +++ b/tools/perf/tests/shell/stat_metrics_values.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# perf metrics value validation +# SPDX-License-Identifier: GPL-2.0 +if [ "x$PYTHON" == "x" ] +then +	if which python3 > /dev/null +	then +		PYTHON=python3 +	else +		echo Skipping test, python3 not detected please set environment variable PYTHON. 
+		exit 2 +	fi +fi + +grep -q GenuineIntel /proc/cpuinfo || { echo Skipping non-Intel; exit 2; } + +pythonvalidator=$(dirname $0)/lib/perf_metric_validation.py +rulefile=$(dirname $0)/lib/perf_metric_validation_rules.json +tmpdir=$(mktemp -d /tmp/__perf_test.program.XXXXX) +workload="perf bench futex hash -r 2 -s" + +# Add -debug, save data file and full rule file +echo "Launch python validation script $pythonvalidator" +echo "Output will be stored in: $tmpdir" +$PYTHON $pythonvalidator -rule $rulefile -output_dir $tmpdir -wl "${workload}" +ret=$? +rm -rf $tmpdir + +exit $ret + diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh index e61d8deaa0c4..66dfdfdad553 100755 --- a/tools/perf/tests/shell/test_arm_callgraph_fp.sh +++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh @@ -9,13 +9,14 @@ TEST_PROGRAM="perf test -w leafloop"  cleanup_files()  { -	rm -f $PERF_DATA +	rm -f "$PERF_DATA"  } -trap cleanup_files exit term int +trap cleanup_files EXIT TERM INT  # Add a 1 second delay to skip samples that are not in the leaf() function -perf record -o $PERF_DATA --call-graph fp -e cycles//u -D 1000 --user-callchains -- $TEST_PROGRAM 2> /dev/null & +# shellcheck disable=SC2086 +perf record -o "$PERF_DATA" --call-graph fp -e cycles//u -D 1000 --user-callchains -- $TEST_PROGRAM 2> /dev/null &  PID=$!  echo " + Recording (PID=$PID)..." @@ -33,8 +34,8 @@ wait $PID  # 	76c leafloop  # ... -perf script -i $PERF_DATA -F comm,ip,sym | head -n4 -perf script -i $PERF_DATA -F comm,ip,sym | head -n4 | \ +perf script -i "$PERF_DATA" -F comm,ip,sym | head -n4 +perf script -i "$PERF_DATA" -F comm,ip,sym | head -n4 | \  	awk '{ if ($2 != "") sym[i++] = $2 } END { if (sym[0] != "leaf" ||  						       sym[1] != "parent" ||  						       sym[2] != "leafloop") exit 1 }' diff --git a/tools/perf/tests/shell/test_arm_coresight.sh b/tools/perf/tests/shell/test_arm_coresight.sh index 482009e17bda..f1bf5621160f 100755 --- a/tools/perf/tests/shell/test_arm_coresight.sh +++ b/tools/perf/tests/shell/test_arm_coresight.sh @@ -28,11 +28,11 @@ cleanup_files()  	rm -f ${perfdata}  	rm -f ${file}  	rm -f "${perfdata}.old" -	trap - exit term int +	trap - EXIT TERM INT  	exit $glb_err  } -trap cleanup_files exit term int +trap cleanup_files EXIT TERM INT  record_touch_file() {  	echo "Recording trace (only user mode) with path: CPU$2 => $1" @@ -89,7 +89,7 @@ is_device_sink() {  	# cannot support perf PMU.  	echo "$1" | grep -E -q -v "tpiu" -	if [ $? -eq 0 -a -e "$1/enable_sink" ]; then +	if [ $? 
-eq 0 ] && [ -e "$1/enable_sink" ]; then  		pmu_dev="/sys/bus/event_source/devices/cs_etm/sinks/$2" diff --git a/tools/perf/tests/shell/test_arm_spe.sh b/tools/perf/tests/shell/test_arm_spe.sh index aa094d71f5b4..03d5c7d12ee5 100755 --- a/tools/perf/tests/shell/test_arm_spe.sh +++ b/tools/perf/tests/shell/test_arm_spe.sh @@ -27,7 +27,7 @@ cleanup_files()  	exit $glb_err  } -trap cleanup_files exit term int +trap cleanup_files EXIT TERM INT  arm_spe_report() {  	if [ $2 = 0 ]; then diff --git a/tools/perf/tests/shell/test_brstack.sh b/tools/perf/tests/shell/test_brstack.sh index 1c49d8293003..09908d71c994 100755 --- a/tools/perf/tests/shell/test_brstack.sh +++ b/tools/perf/tests/shell/test_brstack.sh @@ -18,7 +18,7 @@ cleanup() {  	rm -rf $TMPDIR  } -trap cleanup exit term int +trap cleanup EXIT TERM INT  test_user_branches() {  	echo "Testing user branch stack sampling" @@ -47,17 +47,17 @@ test_user_branches() {  # first argument <arg0> is the argument passed to "--branch-stack <arg0>,save_type,u"  # second argument are the expected branch types for the given filter  test_filter() { -	local filter=$1 -	local expect=$2 +	test_filter_filter=$1 +	test_filter_expect=$2 -	echo "Testing branch stack filtering permutation ($filter,$expect)" +	echo "Testing branch stack filtering permutation ($test_filter_filter,$test_filter_expect)" -	perf record -o $TMPDIR/perf.data --branch-filter $filter,save_type,u -- ${TESTPROG} > /dev/null 2>&1 +	perf record -o $TMPDIR/perf.data --branch-filter $test_filter_filter,save_type,u -- ${TESTPROG} > /dev/null 2>&1  	perf script -i $TMPDIR/perf.data --fields brstack | xargs -n1 > $TMPDIR/perf.script  	# fail if we find any branch type that doesn't match any of the expected ones  	# also consider UNKNOWN branch types (-) -	if grep -E -vm1 "^[^ ]*/($expect|-|( *))/.*$" $TMPDIR/perf.script; then +	if grep -E -vm1 "^[^ ]*/($test_filter_expect|-|( *))/.*$" $TMPDIR/perf.script; then  		return 1  	fi  } diff --git a/tools/perf/tests/shell/test_perf_data_converter_json.sh b/tools/perf/tests/shell/test_perf_data_converter_json.sh new file mode 100755 index 000000000000..72ac6c83231c --- /dev/null +++ b/tools/perf/tests/shell/test_perf_data_converter_json.sh @@ -0,0 +1,72 @@ +#!/bin/bash +# 'perf data convert --to-json' command test +# SPDX-License-Identifier: GPL-2.0 + +set -e + +err=0 + +if [ "$PYTHON" = "" ] ; then +	if which python3 > /dev/null ; then +		PYTHON=python3 +	elif which python > /dev/null ; then +		PYTHON=python +	else +		echo Skipping test, python not detected please set environment variable PYTHON. 
+		exit 2 +	fi +fi + +perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX) +result=$(mktemp /tmp/__perf_test.output.json.XXXXX) + +cleanup() +{ +	rm -f "${perfdata}" +	rm -f "${result}" +	trap - exit term int +} + +trap_cleanup() +{ +	cleanup +	exit ${err} +} +trap trap_cleanup exit term int + +test_json_converter_command() +{ +	echo "Testing Perf Data Convertion Command to JSON" +	perf record -o "$perfdata" -F 99 -g -- perf test -w noploop > /dev/null 2>&1 +	perf data convert --to-json "$result" --force -i "$perfdata" >/dev/null 2>&1 +	if [ $(cat "${result}" | wc -l) -gt "0" ] ; then +		echo "Perf Data Converter Command to JSON [SUCCESS]" +	else +		echo "Perf Data Converter Command to JSON [FAILED]" +		err=1 +		exit +	fi +} + +validate_json_format() +{ +	echo "Validating Perf Data Converted JSON file" +	if [ -f "$result" ] ; then +		if $PYTHON -c  "import json; json.load(open('$result'))" >/dev/null 2>&1 ; then +			echo "The file contains valid JSON format [SUCCESS]" +		else +			echo "The file does not contain valid JSON format [FAILED]" +			err=1 +			exit +		fi +	else +		echo "File not found [FAILED]" +		err=2 +		exit +	fi +} + +test_json_converter_command +validate_json_format + +exit ${err} diff --git a/tools/perf/tests/shell/test_task_analyzer.sh b/tools/perf/tests/shell/test_task_analyzer.sh index a98e4ab66040..0095abbe20ca 100755 --- a/tools/perf/tests/shell/test_task_analyzer.sh +++ b/tools/perf/tests/shell/test_task_analyzer.sh @@ -5,12 +5,18 @@  tmpdir=$(mktemp -d /tmp/perf-script-task-analyzer-XXXXX)  err=0 +# set PERF_EXEC_PATH to find scripts in the source directory +perfdir=$(dirname "$0")/../.. +if [ -e "$perfdir/scripts/python/Perf-Trace-Util" ]; then +  export PERF_EXEC_PATH=$perfdir +fi +  cleanup() {    rm -f perf.data    rm -f perf.data.old    rm -f csv    rm -f csvsummary -  rm -rf $tmpdir +  rm -rf "$tmpdir"    trap - exit term int  } @@ -21,7 +27,7 @@ trap_cleanup() {  trap trap_cleanup exit term int  report() { -	if [ $1 = 0 ]; then +	if [ "$1" = 0 ]; then  		echo "PASS: \"$2\""  	else  		echo "FAIL: \"$2\" Error message: \"$3\"" @@ -31,109 +37,127 @@ report() {  check_exec_0() {  	if [ $? != 0 ]; then -		report 1 "invokation of ${$1} command failed" +		report 1 "invocation of $1 command failed"  	fi  }  find_str_or_fail() { -	grep -q "$1" $2 -	if [ $? != 0 ]; then -		report 1 $3 "Failed to find required string:'${1}'." +	grep -q "$1" "$2" +	if [ "$?" != 0 ]; then +		report 1 "$3" "Failed to find required string:'${1}'."  	else -		report 0 $3 +		report 0 "$3"  	fi  } +# check if perf is compiled with libtraceevent support +skip_no_probe_record_support() { +	perf record -e "sched:sched_switch" -a -- sleep 1 2>&1 | grep "libtraceevent is necessary for tracepoint support" && return 2 +	return 0 +} +  prepare_perf_data() {  	# 1s should be sufficient to catch at least some switches  	perf record -e sched:sched_switch -a -- sleep 1 > /dev/null 2>&1 +	# check if perf data file got created in above step. +	if [ ! 
-e "perf.data" ]; then +		printf "FAIL: perf record failed to create \"perf.data\" \n" +		return 1 +	fi  }  # check standard inkvokation with no arguments  test_basic() {  	out="$tmpdir/perf.out" -	perf script report task-analyzer > $out -	check_exec_0 "perf" -	find_str_or_fail "Comm" $out ${FUNCNAME[0]} +	perf script report task-analyzer > "$out" +	check_exec_0 "perf script report task-analyzer" +	find_str_or_fail "Comm" "$out" "${FUNCNAME[0]}"  }  test_ns_rename(){  	out="$tmpdir/perf.out" -	perf script report task-analyzer --ns --rename-comms-by-tids 0:random > $out -	check_exec_0 "perf" -	find_str_or_fail "Comm" $out ${FUNCNAME[0]} +	perf script report task-analyzer --ns --rename-comms-by-tids 0:random > "$out" +	check_exec_0 "perf script report task-analyzer --ns --rename-comms-by-tids 0:random" +	find_str_or_fail "Comm" "$out" "${FUNCNAME[0]}"  }  test_ms_filtertasks_highlight(){  	out="$tmpdir/perf.out"  	perf script report task-analyzer --ms --filter-tasks perf --highlight-tasks perf \ -	> $out -	check_exec_0 "perf" -	find_str_or_fail "Comm" $out ${FUNCNAME[0]} +	> "$out" +	check_exec_0 "perf script report task-analyzer --ms --filter-tasks perf --highlight-tasks perf" +	find_str_or_fail "Comm" "$out" "${FUNCNAME[0]}"  }  test_extended_times_timelimit_limittasks() {  	out="$tmpdir/perf.out"  	perf script report task-analyzer --extended-times --time-limit :99999 \ -	--limit-to-tasks perf > $out -	check_exec_0 "perf" -	find_str_or_fail "Out-Out" $out ${FUNCNAME[0]} +	--limit-to-tasks perf > "$out" +	check_exec_0 "perf script report task-analyzer --extended-times --time-limit :99999 --limit-to-tasks perf" +	find_str_or_fail "Out-Out" "$out" "${FUNCNAME[0]}"  }  test_summary() {  	out="$tmpdir/perf.out" -	perf script report task-analyzer --summary > $out -	check_exec_0 "perf" -	find_str_or_fail "Summary" $out ${FUNCNAME[0]} +	perf script report task-analyzer --summary > "$out" +	check_exec_0 "perf script report task-analyzer --summary" +	find_str_or_fail "Summary" "$out" "${FUNCNAME[0]}"  }  test_summaryextended() {  	out="$tmpdir/perf.out" -	perf script report task-analyzer --summary-extended > $out -	check_exec_0 "perf" -	find_str_or_fail "Inter Task Times" $out ${FUNCNAME[0]} +	perf script report task-analyzer --summary-extended > "$out" +	check_exec_0 "perf script report task-analyzer --summary-extended" +	find_str_or_fail "Inter Task Times" "$out" "${FUNCNAME[0]}"  }  test_summaryonly() {  	out="$tmpdir/perf.out" -	perf script report task-analyzer --summary-only > $out -	check_exec_0 "perf" -	find_str_or_fail "Summary" $out ${FUNCNAME[0]} +	perf script report task-analyzer --summary-only > "$out" +	check_exec_0 "perf script report task-analyzer --summary-only" +	find_str_or_fail "Summary" "$out" "${FUNCNAME[0]}"  }  test_extended_times_summary_ns() {  	out="$tmpdir/perf.out" -	perf script report task-analyzer --extended-times --summary --ns > $out -	check_exec_0 "perf" -	find_str_or_fail "Out-Out" $out ${FUNCNAME[0]} -	find_str_or_fail "Summary" $out ${FUNCNAME[0]} +	perf script report task-analyzer --extended-times --summary --ns > "$out" +	check_exec_0 "perf script report task-analyzer --extended-times --summary --ns" +	find_str_or_fail "Out-Out" "$out" "${FUNCNAME[0]}" +	find_str_or_fail "Summary" "$out" "${FUNCNAME[0]}"  }  test_csv() {  	perf script report task-analyzer --csv csv > /dev/null -	check_exec_0 "perf" -	find_str_or_fail "Comm;" csv ${FUNCNAME[0]} +	check_exec_0 "perf script report task-analyzer --csv csv" +	find_str_or_fail "Comm;" csv "${FUNCNAME[0]}"  }  
 test_csv_extended_times() {
 	perf script report task-analyzer --csv csv --extended-times > /dev/null
-	check_exec_0 "perf"
-	find_str_or_fail "Out-Out;" csv ${FUNCNAME[0]}
+	check_exec_0 "perf script report task-analyzer --csv csv --extended-times"
+	find_str_or_fail "Out-Out;" csv "${FUNCNAME[0]}"
 }
 
 test_csvsummary() {
 	perf script report task-analyzer --csv-summary csvsummary > /dev/null
-	check_exec_0 "perf"
-	find_str_or_fail "Comm;" csvsummary ${FUNCNAME[0]}
+	check_exec_0 "perf script report task-analyzer --csv-summary csvsummary"
+	find_str_or_fail "Comm;" csvsummary "${FUNCNAME[0]}"
 }
 
 test_csvsummary_extended() {
 	perf script report task-analyzer --csv-summary csvsummary --summary-extended \
 	>/dev/null
-	check_exec_0 "perf"
-	find_str_or_fail "Out-Out;" csvsummary ${FUNCNAME[0]}
+	check_exec_0 "perf script report task-analyzer --csv-summary csvsummary --summary-extended"
+	find_str_or_fail "Out-Out;" csvsummary "${FUNCNAME[0]}"
 }
 
+skip_no_probe_record_support
+err=$?
+if [ $err -ne 0 ]; then
+	echo "WARN: Skipping tests. No libtraceevent support"
+	cleanup
+	exit $err
+fi
 prepare_perf_data
 test_basic
 test_ns_rename
diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
index b3bd14b025a8..e52b031bedc5 100644
--- a/tools/perf/tests/switch-tracking.c
+++ b/tools/perf/tests/switch-tracking.c
@@ -20,7 +20,7 @@
 #include "tests.h"
 #include "util/mmap.h"
 #include "util/sample.h"
-#include "pmu.h"
+#include "pmus.h"
 
 static int spin_sleep(void)
 {
@@ -375,17 +375,7 @@ static int test__switch_tracking(struct test_suite *test __maybe_unused, int sub
 	cpu_clocks_evsel = evlist__last(evlist);
 
 	/* Second event */
-	if (perf_pmu__has_hybrid()) {
-		cycles = "cpu_core/cycles/u";
-		err = parse_event(evlist, cycles);
-		if (err) {
-			cycles = "cpu_atom/cycles/u";
-			pr_debug("Trying %s\n", cycles);
-			err = parse_event(evlist, cycles);
-		}
-	} else {
-		err = parse_event(evlist, cycles);
-	}
+	err = parse_event(evlist, cycles);
 	if (err) {
 		pr_debug("Failed to parse event %s\n", cycles);
 		goto out_err;
diff --git a/tools/perf/tests/symbols.c b/tools/perf/tests/symbols.c
index 2d1aa42d36a9..16e1c5502b09 100644
--- a/tools/perf/tests/symbols.c
+++ b/tools/perf/tests/symbols.c
@@ -38,7 +38,6 @@ static int init_test_info(struct test_info *ti)
 static void exit_test_info(struct test_info *ti)
 {
 	thread__put(ti->thread);
-	machine__delete_threads(ti->machine);
 	machine__delete(ti->machine);
 }
 
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index 9a0f3904e53d..f424c0b7f43f 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -118,15 +118,13 @@ DECLARE_SUITE(bpf);
 DECLARE_SUITE(session_topology);
 DECLARE_SUITE(thread_map_synthesize);
 DECLARE_SUITE(thread_map_remove);
-DECLARE_SUITE(cpu_map_synthesize);
+DECLARE_SUITE(cpu_map);
 DECLARE_SUITE(synthesize_stat_config);
 DECLARE_SUITE(synthesize_stat);
 DECLARE_SUITE(synthesize_stat_round);
 DECLARE_SUITE(event_update);
 DECLARE_SUITE(event_times);
 DECLARE_SUITE(backward_ring_buffer);
-DECLARE_SUITE(cpu_map_print);
-DECLARE_SUITE(cpu_map_merge);
 DECLARE_SUITE(sdt_event);
 DECLARE_SUITE(is_printable_array);
 DECLARE_SUITE(bitmap_print);
diff --git a/tools/perf/tests/thread-maps-share.c b/tools/perf/tests/thread-maps-share.c
index 858e725318a9..faf980b26252 100644
--- a/tools/perf/tests/thread-maps-share.c
+++ b/tools/perf/tests/thread-maps-share.c
@@ -42,13 +42,13 @@ static int test__thread_maps_share(struct test_suite *test __maybe_unused, int s
 	TEST_ASSERT_VAL("failed to create threads",
 			leader && t1 && t2 && t3 && other);
 
-	maps = leader->maps;
+	maps = thread__maps(leader);
 	TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(maps)), 4);
 
 	/* test the maps pointer is shared */
-	TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(maps) == RC_CHK_ACCESS(t1->maps));
-	TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(maps) == RC_CHK_ACCESS(t2->maps));
-	TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(maps) == RC_CHK_ACCESS(t3->maps));
+	TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(maps) == RC_CHK_ACCESS(thread__maps(t1)));
+	TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(maps) == RC_CHK_ACCESS(thread__maps(t2)));
+	TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(maps) == RC_CHK_ACCESS(thread__maps(t3)));
 
 	/*
 	 * Verify the other leader was created by previous call.
@@ -70,10 +70,11 @@ static int test__thread_maps_share(struct test_suite *test __maybe_unused, int s
 	machine__remove_thread(machine, other);
 	machine__remove_thread(machine, other_leader);
 
-	other_maps = other->maps;
+	other_maps = thread__maps(other);
 	TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(other_maps)), 2);
 
-	TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(other_maps) == RC_CHK_ACCESS(other_leader->maps));
+	TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(other_maps) ==
+					    RC_CHK_ACCESS(thread__maps(other_leader)));
 
 	/* release thread group */
 	thread__put(t3);
diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
index c4630cfc80ea..9dee63734e66 100644
--- a/tools/perf/tests/topology.c
+++ b/tools/perf/tests/topology.c
@@ -8,7 +8,7 @@
 #include "session.h"
 #include "evlist.h"
 #include "debug.h"
-#include "pmu.h"
+#include "pmus.h"
 #include <linux/err.h>
 
 #define TEMPL "/tmp/perf-test-XXXXXX"
@@ -41,18 +41,8 @@ static int session_write_header(char *path)
 	session = perf_session__new(&data, NULL);
 	TEST_ASSERT_VAL("can't get session", !IS_ERR(session));
 
-	if (!perf_pmu__has_hybrid()) {
-		session->evlist = evlist__new_default();
-		TEST_ASSERT_VAL("can't get evlist", session->evlist);
-	} else {
-		struct parse_events_error err;
-
-		session->evlist = evlist__new();
-		TEST_ASSERT_VAL("can't get evlist", session->evlist);
-		parse_events_error__init(&err);
-		parse_events(session->evlist, "cpu_core/cycles/", &err);
-		parse_events_error__exit(&err);
-	}
+	session->evlist = evlist__new_default();
+	TEST_ASSERT_VAL("can't get evlist", session->evlist);
 
 	perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
 	perf_header__set_feat(&session->header, HEADER_NRCPUS);
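As context for the JSON data-conversion shell test earlier in this section: it reduces to a record/convert/validate cycle. Below is a minimal standalone sketch of that cycle, assuming a perf build that supports "perf data convert --to-json" and a python3 interpreter on PATH; the temporary file names and the direct python3 call are illustrative only and not part of the patch.

# Sketch only: mirrors the record -> convert -> validate flow of the new test.
# Temporary file names and the python3 invocation are assumptions for illustration.
perfdata=$(mktemp /tmp/example.perf.data.XXXXX)
result=$(mktemp /tmp/example.output.json.XXXXX)

# Record a short call-graph profile of the synthetic noploop workload.
perf record -o "$perfdata" -F 99 -g -- perf test -w noploop >/dev/null 2>&1

# Convert the recorded data to JSON.
perf data convert --to-json "$result" --force -i "$perfdata" >/dev/null 2>&1

# Accept the result only if it is non-empty and parses as JSON,
# the same pair of checks the shell test performs.
if [ -s "$result" ] && python3 -c "import json; json.load(open('$result'))" 2>/dev/null; then
	echo "JSON conversion OK"
else
	echo "JSON conversion FAILED"
fi

rm -f "$perfdata" "$result"

The test itself splits these checks across test_json_converter_command (non-empty output) and validate_json_format (parseable JSON), so a failure report points at the stage that broke.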
