Diffstat (limited to 'tools/perf/bench/numa.c')
-rw-r--r--	tools/perf/bench/numa.c | 138
1 file changed, 103 insertions(+), 35 deletions(-)
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index f2640179ada9..d5289fa58a4f 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -34,6 +34,7 @@
 #include <linux/numa.h>
 #include <linux/zalloc.h>
 
+#include "../util/header.h"
 #include <numa.h>
 #include <numaif.h>
 
@@ -54,7 +55,7 @@
 
 struct thread_data {
 	int			curr_cpu;
-	cpu_set_t		bind_cpumask;
+	cpu_set_t		*bind_cpumask;
 	int			bind_node;
 	u8			*process_data;
 	int			process_nr;
@@ -266,71 +267,117 @@ static bool node_has_cpus(int node)
 	return ret;
 }
 
-static cpu_set_t bind_to_cpu(int target_cpu)
+static cpu_set_t *bind_to_cpu(int target_cpu)
 {
-	cpu_set_t orig_mask, mask;
-	int ret;
+	int nrcpus = numa_num_possible_cpus();
+	cpu_set_t *orig_mask, *mask;
+	size_t size;
 
-	ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
-	BUG_ON(ret);
+	orig_mask = CPU_ALLOC(nrcpus);
+	BUG_ON(!orig_mask);
+	size = CPU_ALLOC_SIZE(nrcpus);
+	CPU_ZERO_S(size, orig_mask);
+
+	if (sched_getaffinity(0, size, orig_mask))
+		goto err_out;
 
-	CPU_ZERO(&mask);
+	mask = CPU_ALLOC(nrcpus);
+	if (!mask)
+		goto err_out;
+
+	CPU_ZERO_S(size, mask);
 
 	if (target_cpu == -1) {
 		int cpu;
 
 		for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
-			CPU_SET(cpu, &mask);
+			CPU_SET_S(cpu, size, mask);
 	} else {
-		BUG_ON(target_cpu < 0 || target_cpu >= g->p.nr_cpus);
-		CPU_SET(target_cpu, &mask);
+		if (target_cpu < 0 || target_cpu >= g->p.nr_cpus)
+			goto err;
+
+		CPU_SET_S(target_cpu, size, mask);
 	}
 
-	ret = sched_setaffinity(0, sizeof(mask), &mask);
-	BUG_ON(ret);
+	if (sched_setaffinity(0, size, mask))
+		goto err;
 
 	return orig_mask;
+
+err:
+	CPU_FREE(mask);
+err_out:
+	CPU_FREE(orig_mask);
+
+	/* BUG_ON due to failure in allocation of orig_mask/mask */
+	BUG_ON(-1);
+	return NULL;
 }
 
-static cpu_set_t bind_to_node(int target_node)
+static cpu_set_t *bind_to_node(int target_node)
 {
-	cpu_set_t orig_mask, mask;
+	int nrcpus = numa_num_possible_cpus();
+	size_t size;
+	cpu_set_t *orig_mask, *mask;
 	int cpu;
-	int ret;
 
-	ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
-	BUG_ON(ret);
+	orig_mask = CPU_ALLOC(nrcpus);
+	BUG_ON(!orig_mask);
+	size = CPU_ALLOC_SIZE(nrcpus);
+	CPU_ZERO_S(size, orig_mask);
 
-	CPU_ZERO(&mask);
+	if (sched_getaffinity(0, size, orig_mask))
+		goto err_out;
+
+	mask = CPU_ALLOC(nrcpus);
+	if (!mask)
+		goto err_out;
+
+	CPU_ZERO_S(size, mask);
 
 	if (target_node == NUMA_NO_NODE) {
 		for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
-			CPU_SET(cpu, &mask);
+			CPU_SET_S(cpu, size, mask);
 	} else {
 		struct bitmask *cpumask = numa_allocate_cpumask();
 
-		BUG_ON(!cpumask);
+		if (!cpumask)
+			goto err;
+
 		if (!numa_node_to_cpus(target_node, cpumask)) {
 			for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
 				if (numa_bitmask_isbitset(cpumask, cpu))
-					CPU_SET(cpu, &mask);
+					CPU_SET_S(cpu, size, mask);
 			}
 		}
 		numa_free_cpumask(cpumask);
 	}
 
-	ret = sched_setaffinity(0, sizeof(mask), &mask);
-	BUG_ON(ret);
+	if (sched_setaffinity(0, size, mask))
+		goto err;
 
 	return orig_mask;
+
+err:
+	CPU_FREE(mask);
+err_out:
+	CPU_FREE(orig_mask);
+
+	/* BUG_ON due to failure in allocation of orig_mask/mask */
+	BUG_ON(-1);
+	return NULL;
 }
 
-static void bind_to_cpumask(cpu_set_t mask)
+static void bind_to_cpumask(cpu_set_t *mask)
 {
 	int ret;
+	size_t size = CPU_ALLOC_SIZE(numa_num_possible_cpus());
 
-	ret = sched_setaffinity(0, sizeof(mask), &mask);
-	BUG_ON(ret);
+	ret = sched_setaffinity(0, size, mask);
+	if (ret) {
+		CPU_FREE(mask);
+		BUG_ON(ret);
+	}
 }
 
 static void mempol_restore(void)
@@ -376,7 +423,7 @@ do {							\
 static u8 *alloc_data(ssize_t bytes0, int map_flags,
 		      int init_zero, int init_cpu0, int thp, int init_random)
 {
-	cpu_set_t orig_mask;
+	cpu_set_t *orig_mask = NULL;
 	ssize_t bytes;
 	u8 *buf;
 	int ret;
@@ -434,6 +481,7 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
 	/* Restore affinity: */
 	if (init_cpu0) {
 		bind_to_cpumask(orig_mask);
+		CPU_FREE(orig_mask);
 		mempol_restore();
 	}
 
@@ -585,10 +633,16 @@ static int parse_setup_cpu_list(void)
 			return -1;
 		}
 
+		if (is_cpu_online(bind_cpu_0) != 1 || is_cpu_online(bind_cpu_1) != 1) {
+			printf("\nTest not applicable, bind_cpu_0 or bind_cpu_1 is offline\n");
+			return -1;
+		}
+
 		BUG_ON(bind_cpu_0 < 0 || bind_cpu_1 < 0);
 		BUG_ON(bind_cpu_0 > bind_cpu_1);
 
 		for (bind_cpu = bind_cpu_0; bind_cpu <= bind_cpu_1; bind_cpu += step) {
+			size_t size = CPU_ALLOC_SIZE(g->p.nr_cpus);
 			int i;
 
 			for (i = 0; i < mul; i++) {
@@ -608,10 +662,15 @@ static int parse_setup_cpu_list(void)
 					tprintf("%2d", bind_cpu);
 				}
 
-				CPU_ZERO(&td->bind_cpumask);
+				td->bind_cpumask = CPU_ALLOC(g->p.nr_cpus);
+				BUG_ON(!td->bind_cpumask);
+				CPU_ZERO_S(size, td->bind_cpumask);
 				for (cpu = bind_cpu; cpu < bind_cpu+bind_len; cpu++) {
-					BUG_ON(cpu < 0 || cpu >= g->p.nr_cpus);
-					CPU_SET(cpu, &td->bind_cpumask);
+					if (cpu < 0 || cpu >= g->p.nr_cpus) {
+						CPU_FREE(td->bind_cpumask);
+						BUG_ON(-1);
+					}
+					CPU_SET_S(cpu, size, td->bind_cpumask);
 				}
 				t++;
 			}
@@ -752,8 +811,6 @@ static int parse_nodes_opt(const struct option *opt __maybe_unused,
 	return parse_node_list(arg);
 }
 
-#define BIT(x) (1ul << x)
-
 static inline uint32_t lfsr_32(uint32_t lfsr)
 {
 	const uint32_t taps = BIT(1) | BIT(5) | BIT(6) | BIT(31);
@@ -1241,7 +1298,7 @@ static void *worker_thread(void *__tdata)
 		 * by migrating to CPU#0:
 		 */
 		if (first_task && g->p.perturb_secs && (int)(stop.tv_sec - last_perturbance) >= g->p.perturb_secs) {
-			cpu_set_t orig_mask;
+			cpu_set_t *orig_mask;
 			int target_cpu;
 			int this_cpu;
 
@@ -1265,6 +1322,7 @@ static void *worker_thread(void *__tdata)
 				printf(" (injecting perturbalance, moved to CPU#%d)\n", target_cpu);
 
 			bind_to_cpumask(orig_mask);
+			CPU_FREE(orig_mask);
 		}
 
 		if (details >= 3) {
@@ -1398,21 +1456,31 @@ static void init_thread_data(void)
 
 	for (t = 0; t < g->p.nr_tasks; t++) {
 		struct thread_data *td = g->threads + t;
+		size_t cpuset_size = CPU_ALLOC_SIZE(g->p.nr_cpus);
 		int cpu;
 
 		/* Allow all nodes by default: */
 		td->bind_node = NUMA_NO_NODE;
 
 		/* Allow all CPUs by default: */
-		CPU_ZERO(&td->bind_cpumask);
+		td->bind_cpumask = CPU_ALLOC(g->p.nr_cpus);
+		BUG_ON(!td->bind_cpumask);
+		CPU_ZERO_S(cpuset_size, td->bind_cpumask);
 		for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
-			CPU_SET(cpu, &td->bind_cpumask);
+			CPU_SET_S(cpu, cpuset_size, td->bind_cpumask);
 	}
 }
 
 static void deinit_thread_data(void)
 {
 	ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
+	int t;
+
+	/* Free the bind_cpumask allocated for thread_data */
+	for (t = 0; t < g->p.nr_tasks; t++) {
+		struct thread_data *td = g->threads + t;
+		CPU_FREE(td->bind_cpumask);
+	}
 
 	free_data(g->threads, size);
 }
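For context, the pattern the patch switches to is glibc's dynamically sized CPU-set API: the plain cpu_set_t used before is a fixed 1024-bit bitmap, so the sizeof()-based sched_getaffinity()/sched_setaffinity() calls it removes stop working on machines with more than 1K CPUs, while CPU_ALLOC()/CPU_ALLOC_SIZE() size the mask from the possible-CPU count. A minimal, self-contained sketch of that pattern follows; it is not taken from the perf code and substitutes sysconf(_SC_NPROCESSORS_CONF) for numa_num_possible_cpus() so that it needs only libc, with CPU 0 chosen purely as an example.

/* Sketch of the dynamically sized CPU-set API used by the patch above.
 * CPU_ALLOC() sizes the mask from the CPU count instead of relying on the
 * fixed 1024-bit cpu_set_t, so affinity calls keep working past 1K CPUs. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	/* Stand-in for numa_num_possible_cpus(); keeps the sketch libc-only. */
	long nrcpus = sysconf(_SC_NPROCESSORS_CONF);
	cpu_set_t *mask;
	size_t size;

	if (nrcpus < 1)
		return EXIT_FAILURE;

	mask = CPU_ALLOC(nrcpus);
	if (!mask)
		return EXIT_FAILURE;

	size = CPU_ALLOC_SIZE(nrcpus);	/* bytes actually backing the mask */
	CPU_ZERO_S(size, mask);
	CPU_SET_S(0, size, mask);	/* bind the calling thread to CPU 0 */

	/* The *_S macros and the explicit size replace sizeof(cpu_set_t). */
	if (sched_setaffinity(0, size, mask)) {
		perror("sched_setaffinity");
		CPU_FREE(mask);
		return EXIT_FAILURE;
	}

	printf("bound to CPU 0 using a %zu-byte CPU mask\n", size);
	CPU_FREE(mask);
	return EXIT_SUCCESS;
}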
