author     Linus Torvalds <torvalds@linux-foundation.org>   2020-11-17 10:47:45 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2020-11-17 10:47:45 -0800
commit     be1dd6692adbdb1d70da47da124ac8376bba5ad5 (patch)
tree       60c89b11bb1a2a74f37c336bc07d7415d672dd8c /tools
parent     9dacf44c3837b7f1cf460de904f352714e7cd107 (diff)
parent     568beb27959b0515d325ea1c6cf211eed2d66740 (diff)
Merge tag 'perf-tools-fixes-for-v5.10-2020-11-17' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux
Pull perf tools fixes from Arnaldo Carvalho de Melo:

 - Fix file corruption due to event deletion in 'perf inject'.

 - Update arch/x86/lib/mem{cpy,set}_64.S copies used in 'perf bench mem
   memcpy', silencing perf build warning.

 - Avoid an msan warning in a copied stack in 'perf test'.

 - Correct tracepoint field name "flags" in ARM's CS-ETM hardware
   tracing 'perf test' entry.

 - Update branch sample pattern for cs-etm to cope with excluding guest
   in userspace counting.

 - Don't free "lock_seq_stat" if read_count isn't zero in 'perf lock'.

* tag 'perf-tools-fixes-for-v5.10-2020-11-17' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux:
  perf test: Avoid an msan warning in a copied stack.
  perf inject: Fix file corruption due to event deletion
  perf test: Update branch sample pattern for cs-etm
  perf test: Fix a typo in cs-etm testing
  tools arch: Update arch/x86/lib/mem{cpy,set}_64.S copies used in 'perf bench mem memcpy'
  perf lock: Don't free "lock_seq_stat" if read_count isn't zero
  perf lock: Correct field name "flags"
Diffstat (limited to 'tools')
-rw-r--r--  tools/arch/x86/lib/memcpy_64.S                |  8
-rw-r--r--  tools/arch/x86/lib/memset_64.S                | 11
-rw-r--r--  tools/perf/arch/x86/tests/dwarf-unwind.c      |  7
-rw-r--r--  tools/perf/bench/mem-memcpy-x86-64-asm.S      |  3
-rw-r--r--  tools/perf/bench/mem-memset-x86-64-asm.S      |  3
-rw-r--r--  tools/perf/builtin-inject.c                   | 12
-rw-r--r--  tools/perf/builtin-lock.c                     |  4
-rwxr-xr-x  tools/perf/tests/shell/test_arm_coresight.sh  |  4
-rw-r--r--  tools/perf/util/include/linux/linkage.h       |  7
9 files changed, 34 insertions, 25 deletions
diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S
index 0b5b8ae56bd9..1e299ac73c86 100644
--- a/tools/arch/x86/lib/memcpy_64.S
+++ b/tools/arch/x86/lib/memcpy_64.S
@@ -16,8 +16,6 @@
* to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
*/
-.weak memcpy
-
/*
* memcpy - Copy a memory block.
*
@@ -30,7 +28,7 @@
* rax original destination
*/
SYM_FUNC_START_ALIAS(__memcpy)
-SYM_FUNC_START_LOCAL(memcpy)
+SYM_FUNC_START_WEAK(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
"jmp memcpy_erms", X86_FEATURE_ERMS
@@ -51,14 +49,14 @@ EXPORT_SYMBOL(__memcpy)
* memcpy_erms() - enhanced fast string memcpy. This is faster and
* simpler than memcpy. Use memcpy_erms when possible.
*/
-SYM_FUNC_START(memcpy_erms)
+SYM_FUNC_START_LOCAL(memcpy_erms)
movq %rdi, %rax
movq %rdx, %rcx
rep movsb
ret
SYM_FUNC_END(memcpy_erms)
-SYM_FUNC_START(memcpy_orig)
+SYM_FUNC_START_LOCAL(memcpy_orig)
movq %rdi, %rax
cmpq $0x20, %rdx
diff --git a/tools/arch/x86/lib/memset_64.S b/tools/arch/x86/lib/memset_64.S
index fd5d25a474b7..0bfd26e4ca9e 100644
--- a/tools/arch/x86/lib/memset_64.S
+++ b/tools/arch/x86/lib/memset_64.S
@@ -4,8 +4,7 @@
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
-
-.weak memset
+#include <asm/export.h>
/*
* ISO C memset - set a memory block to a byte value. This function uses fast
@@ -18,7 +17,7 @@
*
* rax original destination
*/
-SYM_FUNC_START_ALIAS(memset)
+SYM_FUNC_START_WEAK(memset)
SYM_FUNC_START(__memset)
/*
* Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
@@ -44,6 +43,8 @@ SYM_FUNC_START(__memset)
ret
SYM_FUNC_END(__memset)
SYM_FUNC_END_ALIAS(memset)
+EXPORT_SYMBOL(memset)
+EXPORT_SYMBOL(__memset)
/*
* ISO C memset - set a memory block to a byte value. This function uses
@@ -56,7 +57,7 @@ SYM_FUNC_END_ALIAS(memset)
*
* rax original destination
*/
-SYM_FUNC_START(memset_erms)
+SYM_FUNC_START_LOCAL(memset_erms)
movq %rdi,%r9
movb %sil,%al
movq %rdx,%rcx
@@ -65,7 +66,7 @@ SYM_FUNC_START(memset_erms)
ret
SYM_FUNC_END(memset_erms)
-SYM_FUNC_START(memset_orig)
+SYM_FUNC_START_LOCAL(memset_orig)
movq %rdi,%r10
/* expand byte value */
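The two copies above replace a bare ".weak memcpy"/".weak memset" directive with SYM_FUNC_START_WEAK. The linkage property relied on is unchanged: a weak definition yields to any strong definition at link time. Below is a minimal stand-alone C illustration of that behaviour; the file and function names are invented for this sketch and are not part of the patch.

/* weak_demo.c - hypothetical illustration, not part of the patch. */
#include <stdio.h>
#include <string.h>

/* Analogous to SYM_FUNC_START_WEAK(memcpy): a weak, overridable symbol. */
__attribute__((weak)) void *demo_copy(void *dst, const void *src, size_t n)
{
	return memcpy(dst, src, n);
}

int main(void)
{
	char buf[16];

	demo_copy(buf, "weak fallback", 14);
	/* If another object file supplied a strong demo_copy(), the linker
	 * would pick that definition instead of this weak one. */
	printf("%s\n", buf);
	return 0;
}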
diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c
index 4e40402a4f81..478078fb0f22 100644
--- a/tools/perf/arch/x86/tests/dwarf-unwind.c
+++ b/tools/perf/arch/x86/tests/dwarf-unwind.c
@@ -38,6 +38,13 @@ static int sample_ustack(struct perf_sample *sample,
stack_size = stack_size > STACK_SIZE ? STACK_SIZE : stack_size;
memcpy(buf, (void *) sp, stack_size);
+#ifdef MEMORY_SANITIZER
+ /*
+ * Copying the stack may copy msan poison, avoid false positives in the
+ * unwinder by removing the poison here.
+ */
+ __msan_unpoison(buf, stack_size);
+#endif
stack->data = (char *) buf;
stack->size = stack_size;
return 0;
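The guarded __msan_unpoison() call is the whole fix: the raw memcpy() of the stack propagates whatever poison the source bytes carried, so the test explicitly marks the copied buffer as initialized before the unwinder reads it. A rough stand-alone sketch of the same pattern, assuming perf's MEMORY_SANITIZER define and the standard <sanitizer/msan_interface.h> header; the helper name is invented.

/* copy_stack_sketch.c - hypothetical sketch, not part of the patch. */
#include <string.h>

#ifdef MEMORY_SANITIZER
#include <sanitizer/msan_interface.h>
#endif

/* Copy a raw stack region and strip any MSan poison the copy inherited,
 * so later consumers (here, the DWARF unwinder) treat it as initialized. */
void copy_stack_region(void *dst, const void *sp, size_t size)
{
	memcpy(dst, sp, size);
#ifdef MEMORY_SANITIZER
	/* Declare the copy initialized to avoid false-positive reports. */
	__msan_unpoison(dst, size);
#endif
}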
diff --git a/tools/perf/bench/mem-memcpy-x86-64-asm.S b/tools/perf/bench/mem-memcpy-x86-64-asm.S
index 9ad015a1e202..6eb45a2aa8db 100644
--- a/tools/perf/bench/mem-memcpy-x86-64-asm.S
+++ b/tools/perf/bench/mem-memcpy-x86-64-asm.S
@@ -2,6 +2,9 @@
/* Various wrappers to make the kernel .S file build in user-space: */
+// memcpy_orig and memcpy_erms are being defined as SYM_L_LOCAL but we need it
+#define SYM_FUNC_START_LOCAL(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
#define memcpy MEMCPY /* don't hide glibc's memcpy() */
#define altinstr_replacement text
#define globl p2align 4; .globl
diff --git a/tools/perf/bench/mem-memset-x86-64-asm.S b/tools/perf/bench/mem-memset-x86-64-asm.S
index d550bd526162..6f093c483842 100644
--- a/tools/perf/bench/mem-memset-x86-64-asm.S
+++ b/tools/perf/bench/mem-memset-x86-64-asm.S
@@ -1,4 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
+// memset_orig and memset_erms are being defined as SYM_L_LOCAL but we need it
+#define SYM_FUNC_START_LOCAL(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
#define memset MEMSET /* don't hide glibc's memset() */
#define altinstr_replacement text
#define globl p2align 4; .globl
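Both benchmark wrappers redefine SYM_FUNC_START_LOCAL before including the kernel .S files, so memcpy_orig/memcpy_erms and memset_orig/memset_erms stay global and remain callable from 'perf bench mem'. A rough expansion sketch follows, assuming SYM_START and SYM_L_GLOBAL behave as in tools/perf/util/include/linux/linkage.h; the ASM_NL/alignment plumbing is elided.

/* expansion_sketch.h - assumed expansion, for illustration only. */

/* The override from the wrapper: force "local" kernel symbols global. */
#define SYM_FUNC_START_LOCAL(name) \
	SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)

/*
 * With SYM_L_GLOBAL(name) expanding to ".globl name", the kernel line
 *
 *	SYM_FUNC_START_LOCAL(memcpy_orig)
 *
 * now emits roughly
 *
 *	.globl memcpy_orig
 *	<alignment directive>
 *	memcpy_orig:
 *
 * so the benchmark can keep referencing memcpy_orig and memcpy_erms
 * even though the kernel proper marks them local.
 */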
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 452a75fe68e5..0462dc8db2e3 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -779,25 +779,15 @@ static int __cmd_inject(struct perf_inject *inject)
dsos__hit_all(session);
/*
* The AUX areas have been removed and replaced with
- * synthesized hardware events, so clear the feature flag and
- * remove the evsel.
+ * synthesized hardware events, so clear the feature flag.
*/
if (inject->itrace_synth_opts.set) {
- struct evsel *evsel;
-
perf_header__clear_feat(&session->header,
HEADER_AUXTRACE);
if (inject->itrace_synth_opts.last_branch ||
inject->itrace_synth_opts.add_last_branch)
perf_header__set_feat(&session->header,
HEADER_BRANCH_STACK);
- evsel = perf_evlist__id2evsel_strict(session->evlist,
- inject->aux_id);
- if (evsel) {
- pr_debug("Deleting %s\n", evsel__name(evsel));
- evlist__remove(session->evlist, evsel);
- evsel__delete(evsel);
- }
}
session->header.data_offset = output_data_offset;
session->header.data_size = inject->bytes_written;
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index f0a1dbacb46c..a2f1e53f37a7 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -406,7 +406,7 @@ static int report_lock_acquire_event(struct evsel *evsel,
struct lock_seq_stat *seq;
const char *name = evsel__strval(evsel, sample, "name");
u64 tmp = evsel__intval(evsel, sample, "lockdep_addr");
- int flag = evsel__intval(evsel, sample, "flag");
+ int flag = evsel__intval(evsel, sample, "flags");
memcpy(&addr, &tmp, sizeof(void *));
@@ -621,7 +621,7 @@ static int report_lock_release_event(struct evsel *evsel,
case SEQ_STATE_READ_ACQUIRED:
seq->read_count--;
BUG_ON(seq->read_count < 0);
- if (!seq->read_count) {
+ if (seq->read_count) {
ls->nr_release++;
goto end;
}
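The one-character change above (from "!seq->read_count" to "seq->read_count") restores reference-count semantics for read locks: the per-sequence state must survive until the last reader releases, and may only be torn down once read_count drops to zero. A hypothetical, simplified sketch of that rule; types and names are invented for illustration.

/* read_release_sketch.c - hypothetical, simplified illustration. */
#include <assert.h>
#include <stdlib.h>

struct read_seq {
	int read_count;		/* outstanding read acquisitions */
};

/* Release one reader; free the tracking state only once the last
 * reader is gone, mirroring the corrected condition in the patch. */
void release_read(struct read_seq *seq)
{
	seq->read_count--;
	assert(seq->read_count >= 0);

	if (seq->read_count)	/* readers remain: keep the state alive */
		return;

	free(seq);		/* last reader gone: safe to tear down */
}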
diff --git a/tools/perf/tests/shell/test_arm_coresight.sh b/tools/perf/tests/shell/test_arm_coresight.sh
index 8d84fdbed6a6..18fde2f179cd 100755
--- a/tools/perf/tests/shell/test_arm_coresight.sh
+++ b/tools/perf/tests/shell/test_arm_coresight.sh
@@ -44,7 +44,7 @@ perf_script_branch_samples() {
# touch 6512 1 branches:u: ffffb22082e0 strcmp+0xa0 (/lib/aarch64-linux-gnu/ld-2.27.so)
# touch 6512 1 branches:u: ffffb2208320 strcmp+0xe0 (/lib/aarch64-linux-gnu/ld-2.27.so)
perf script -F,-time -i ${perfdata} | \
- egrep " +$1 +[0-9]+ .* +branches:([u|k]:)? +"
+ egrep " +$1 +[0-9]+ .* +branches:(.*:)? +"
}
perf_report_branch_samples() {
@@ -105,7 +105,7 @@ arm_cs_iterate_devices() {
# `> device_name = 'tmc_etf0'
device_name=$(basename $path)
- if is_device_sink $path $devce_name; then
+ if is_device_sink $path $device_name; then
record_touch_file $device_name $2 &&
perf_script_branch_samples touch &&
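The relaxed pattern accepts any modifier string after "branches:", which the old "[u|k]:" alternation could not. Below is a small stand-alone comparison of the two patterns using POSIX extended regexps; the sample line and its "uH" modifier are invented here to reflect the "excluding guest in userspace counting" case described in the merge log.

/* branch_pattern_check.c - illustrative only; the sample line is made up. */
#include <regex.h>
#include <stdio.h>

static int matches(const char *pattern, const char *line)
{
	regex_t re;
	int hit;

	if (regcomp(&re, pattern, REG_EXTENDED))
		return 0;
	hit = (regexec(&re, line, 0, NULL, 0) == 0);
	regfree(&re);
	return hit;
}

int main(void)
{
	/* A perf script line with a multi-character event modifier. */
	const char *line =
		"   touch  6512  1  branches:uH:  ffffb2208320 strcmp+0xe0 (/lib/ld-2.27.so)";

	printf("old pattern: %d\n", matches(" +branches:([u|k]:)? +", line)); /* 0: no match */
	printf("new pattern: %d\n", matches(" +branches:(.*:)? +", line));    /* 1: match    */
	return 0;
}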
diff --git a/tools/perf/util/include/linux/linkage.h b/tools/perf/util/include/linux/linkage.h
index b8a5159361b4..5acf053fca7d 100644
--- a/tools/perf/util/include/linux/linkage.h
+++ b/tools/perf/util/include/linux/linkage.h
@@ -25,6 +25,7 @@
/* SYM_L_* -- linkage of symbols */
#define SYM_L_GLOBAL(name) .globl name
+#define SYM_L_WEAK(name) .weak name
#define SYM_L_LOCAL(name) /* nothing */
#define ALIGN __ALIGN
@@ -84,6 +85,12 @@
SYM_END(name, SYM_T_FUNC)
#endif
+/* SYM_FUNC_START_WEAK -- use for weak functions */
+#ifndef SYM_FUNC_START_WEAK
+#define SYM_FUNC_START_WEAK(name) \
+ SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN)
+#endif
+
/*
* SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START,
* SYM_FUNC_START_WEAK, ...