author     Paolo Bonzini <pbonzini@redhat.com>   2023-04-26 15:53:36 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>   2023-04-26 15:53:36 -0400
commit     48b1893ae38bd6d46a9dcfc7b85c70a143fb8cab
tree       d69a46c054bf8a8558b17cdb9510ae5285296ba6 /tools/testing
parent     807b758496e42ada4ba3f3defcfbac88afcd64f8
parent     457bd7af1a17182e7f1f97eeb5d9107f8699e99d
Merge tag 'kvm-x86-pmu-6.4' of https://github.com/kvm-x86/linux into HEAD
KVM x86 PMU changes for 6.4:

 - Disallow virtualizing legacy LBRs if architectural LBRs are available,
   the two are mutually exclusive in hardware

 - Disallow writes to immutable feature MSRs (notably PERF_CAPABILITIES)
   after KVM_RUN, and overhaul the vmx_pmu_caps selftest to better
   validate PERF_CAPABILITIES

 - Apply PMU filters to emulated events and add test coverage to the
   pmu_event_filter selftest

 - Misc cleanups and fixes
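To make the second bullet concrete, here is a minimal sketch of the post-KVM_RUN immutability check, built from the selftest helpers that appear in the diff below (vm_create_with_one_vcpu(), kvm_get_feature_msr(), _vcpu_set_msr()). guest_main() and the choice of flipped bit are illustrative; the real coverage lives in test_guest_wrmsr_perf_capabilities() in vmx_pmu_caps_test.c.

#include "kvm_util.h"
#include "processor.h"

static void guest_main(void)
{
	GUEST_DONE();	/* the guest only needs to run once for this sketch */
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct ucall uc;
	uint64_t caps;
	int r;

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM));
	caps = kvm_get_feature_msr(MSR_IA32_PERF_CAPABILITIES);

	vm = vm_create_with_one_vcpu(&vcpu, guest_main);

	/* Before the first KVM_RUN, userspace can set the feature MSR freely. */
	vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, caps);

	vcpu_run(vcpu);
	TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_DONE, "Expected UCALL_DONE");

	/* After KVM_RUN, any write that changes the value must be rejected. */
	r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, caps ^ 1);
	TEST_ASSERT(!r, "Changing PERF_CAPABILITIES after KVM_RUN didn't fail");

	kvm_vm_free(vm);
	return 0;
}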
Diffstat (limited to 'tools/testing')
-rw-r--r--  tools/testing/selftests/kvm/include/x86_64/processor.h        45
-rw-r--r--  tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c   252
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c       230

3 files changed, 373 insertions(+), 154 deletions(-)
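Likewise for the PMU filter bullet: from userspace, installing a filter is a single VM ioctl. The helper below is a hypothetical distillation of the test's create_pmu_event_filter()/test_with_filter() pair, assuming the usual selftest headers; it is not code from the patch.

#include <stdlib.h>

#include "kvm_util.h"

/* Deny-list a single raw event, e.g. the test's EVENT(0xc2, 0). */
static void deny_one_event(struct kvm_vm *vm, uint64_t event)
{
	struct kvm_pmu_event_filter *f;

	f = calloc(1, sizeof(*f) + sizeof(f->events[0]));
	TEST_ASSERT(f, "calloc() failed");

	f->action = KVM_PMU_EVENT_DENY;
	f->nevents = 1;
	f->events[0] = event;

	/* With this series, the filter also applies to emulated events. */
	vm_ioctl(vm, KVM_SET_PMU_EVENT_FILTER, f);
	free(f);
}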
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 90387ddcb2a9..e1d65d933310 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -928,14 +928,45 @@ static inline void vcpu_clear_cpuid_feature(struct kvm_vcpu *vcpu,
uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index);
int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value);
-static inline void vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index,
- uint64_t msr_value)
-{
- int r = _vcpu_set_msr(vcpu, msr_index, msr_value);
-
- TEST_ASSERT(r == 1, KVM_IOCTL_ERROR(KVM_SET_MSRS, r));
-}
+/*
+ * Assert on MSR access(es) and pretty print the MSR name when possible.
+ * Note, the caller provides the stringified name so that the name of the macro
+ * is printed, not the value the macro resolves to (due to macro expansion).
+ */
+#define TEST_ASSERT_MSR(cond, fmt, msr, str, args...) \
+do { \
+ if (__builtin_constant_p(msr)) { \
+ TEST_ASSERT(cond, fmt, str, args); \
+ } else if (!(cond)) { \
+ char buf[16]; \
+ \
+ snprintf(buf, sizeof(buf), "MSR 0x%x", msr); \
+ TEST_ASSERT(cond, fmt, buf, args); \
+ } \
+} while (0)
+/*
+ * Returns true if KVM should return the last written value when reading an MSR
+ * from userspace, e.g. the MSR isn't a command MSR, doesn't emulate state that
+ * is changing, etc. This is NOT an exhaustive list! The intent is to filter
+ * out MSRs that are not durable _and_ that a selftest wants to write.
+ */
+static inline bool is_durable_msr(uint32_t msr)
+{
+ return msr != MSR_IA32_TSC;
+}
+
+#define vcpu_set_msr(vcpu, msr, val) \
+do { \
+ uint64_t r, v = val; \
+ \
+ TEST_ASSERT_MSR(_vcpu_set_msr(vcpu, msr, v) == 1, \
+ "KVM_SET_MSRS failed on %s, value = 0x%lx", msr, #msr, v); \
+ if (!is_durable_msr(msr)) \
+ break; \
+ r = vcpu_get_msr(vcpu, msr); \
+ TEST_ASSERT_MSR(r == v, "Set %s to '0x%lx', got back '0x%lx'", msr, #msr, v, r);\
+} while (0)
void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
bool vm_is_unrestricted_guest(struct kvm_vm *vm);
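Usage-wise, the reworked vcpu_set_msr() above keeps its old call sites working, but now pretty-prints known MSR names on assertion failure and, for "durable" MSRs, reads the value back and asserts the round trip. A minimal usage sketch, assuming an Intel host with PDCM; demo_msr_writes() is a hypothetical name, not part of the patch.

#include "kvm_util.h"
#include "processor.h"

static void demo_msr_writes(struct kvm_vcpu *vcpu)
{
	/*
	 * Literal index: a failed assert prints "MSR_IA32_PERF_CAPABILITIES";
	 * the MSR is durable, so the value is also read back via KVM_GET_MSRS
	 * and compared against what was written.
	 */
	vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0);

	/*
	 * MSR_IA32_TSC is excluded by is_durable_msr(), so only the
	 * KVM_SET_MSRS return value is checked, with no read-back.
	 */
	vcpu_set_msr(vcpu, MSR_IA32_TSC, 0);
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM));

	vm = vm_create_with_one_vcpu(&vcpu, NULL);
	demo_msr_writes(vcpu);
	kvm_vm_free(vm);
	return 0;
}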
diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 2feef25ba691..8cec5c8aca8a 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -54,6 +54,21 @@
#define AMD_ZEN_BR_RETIRED EVENT(0xc2, 0)
+
+/*
+ * "Retired instructions", from Processor Programming Reference
+ * (PPR) for AMD Family 17h Model 01h, Revision B1 Processors,
+ * Preliminary Processor Programming Reference (PPR) for AMD Family
+ * 17h Model 31h, Revision B0 Processors, and Preliminary Processor
+ * Programming Reference (PPR) for AMD Family 19h Model 01h, Revision
+ * B1 Processors Volume 1 of 2.
+ * --- and ---
+ * "Instructions retired", from the Intel SDM, volume 3,
+ * "Pre-defined Architectural Performance Events."
+ */
+
+#define INST_RETIRED EVENT(0xc0, 0)
+
/*
* This event list comprises Intel's eight architectural events plus
* AMD's "retired branch instructions" for Zen[123] (and possibly
@@ -61,7 +76,7 @@
*/
static const uint64_t event_list[] = {
EVENT(0x3c, 0),
- EVENT(0xc0, 0),
+ INST_RETIRED,
EVENT(0x3c, 1),
EVENT(0x2e, 0x4f),
EVENT(0x2e, 0x41),
@@ -71,13 +86,21 @@ static const uint64_t event_list[] = {
AMD_ZEN_BR_RETIRED,
};
+struct {
+ uint64_t loads;
+ uint64_t stores;
+ uint64_t loads_stores;
+ uint64_t branches_retired;
+ uint64_t instructions_retired;
+} pmc_results;
+
/*
* If we encounter a #GP during the guest PMU sanity check, then the guest
* PMU is not functional. Inform the hypervisor via GUEST_SYNC(0).
*/
static void guest_gp_handler(struct ex_regs *regs)
{
- GUEST_SYNC(0);
+ GUEST_SYNC(-EFAULT);
}
/*
@@ -92,12 +115,23 @@ static void check_msr(uint32_t msr, uint64_t bits_to_flip)
wrmsr(msr, v);
if (rdmsr(msr) != v)
- GUEST_SYNC(0);
+ GUEST_SYNC(-EIO);
v ^= bits_to_flip;
wrmsr(msr, v);
if (rdmsr(msr) != v)
- GUEST_SYNC(0);
+ GUEST_SYNC(-EIO);
+}
+
+static void run_and_measure_loop(uint32_t msr_base)
+{
+ const uint64_t branches_retired = rdmsr(msr_base + 0);
+ const uint64_t insn_retired = rdmsr(msr_base + 1);
+
+ __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+
+ pmc_results.branches_retired = rdmsr(msr_base + 0) - branches_retired;
+ pmc_results.instructions_retired = rdmsr(msr_base + 1) - insn_retired;
}
static void intel_guest_code(void)
@@ -105,19 +139,18 @@ static void intel_guest_code(void)
check_msr(MSR_CORE_PERF_GLOBAL_CTRL, 1);
check_msr(MSR_P6_EVNTSEL0, 0xffff);
check_msr(MSR_IA32_PMC0, 0xffff);
- GUEST_SYNC(1);
+ GUEST_SYNC(0);
for (;;) {
- uint64_t br0, br1;
-
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED);
- wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 1);
- br0 = rdmsr(MSR_IA32_PMC0);
- __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
- br1 = rdmsr(MSR_IA32_PMC0);
- GUEST_SYNC(br1 - br0);
+ wrmsr(MSR_P6_EVNTSEL1, ARCH_PERFMON_EVENTSEL_ENABLE |
+ ARCH_PERFMON_EVENTSEL_OS | INST_RETIRED);
+ wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
+
+ run_and_measure_loop(MSR_IA32_PMC0);
+ GUEST_SYNC(0);
}
}
@@ -130,18 +163,17 @@ static void amd_guest_code(void)
{
check_msr(MSR_K7_EVNTSEL0, 0xffff);
check_msr(MSR_K7_PERFCTR0, 0xffff);
- GUEST_SYNC(1);
+ GUEST_SYNC(0);
for (;;) {
- uint64_t br0, br1;
-
wrmsr(MSR_K7_EVNTSEL0, 0);
wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED);
- br0 = rdmsr(MSR_K7_PERFCTR0);
- __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
- br1 = rdmsr(MSR_K7_PERFCTR0);
- GUEST_SYNC(br1 - br0);
+ wrmsr(MSR_K7_EVNTSEL1, ARCH_PERFMON_EVENTSEL_ENABLE |
+ ARCH_PERFMON_EVENTSEL_OS | INST_RETIRED);
+
+ run_and_measure_loop(MSR_K7_PERFCTR0);
+ GUEST_SYNC(0);
}
}
@@ -161,6 +193,19 @@ static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu)
return uc.args[1];
}
+static void run_vcpu_and_sync_pmc_results(struct kvm_vcpu *vcpu)
+{
+ uint64_t r;
+
+ memset(&pmc_results, 0, sizeof(pmc_results));
+ sync_global_to_guest(vcpu->vm, pmc_results);
+
+ r = run_vcpu_to_sync(vcpu);
+ TEST_ASSERT(!r, "Unexpected sync value: 0x%lx", r);
+
+ sync_global_from_guest(vcpu->vm, pmc_results);
+}
+
/*
* In a nested environment or if the vPMU is disabled, the guest PMU
* might not work as architected (accessing the PMU MSRs may raise
@@ -171,13 +216,13 @@ static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu)
*/
static bool sanity_check_pmu(struct kvm_vcpu *vcpu)
{
- bool success;
+ uint64_t r;
vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler);
- success = run_vcpu_to_sync(vcpu);
+ r = run_vcpu_to_sync(vcpu);
vm_install_exception_handler(vcpu->vm, GP_VECTOR, NULL);
- return success;
+ return !r;
}
static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents)
@@ -237,91 +282,101 @@ static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f,
return f;
}
+#define ASSERT_PMC_COUNTING_INSTRUCTIONS() \
+do { \
+ uint64_t br = pmc_results.branches_retired; \
+ uint64_t ir = pmc_results.instructions_retired; \
+ \
+ if (br && br != NUM_BRANCHES) \
+ pr_info("%s: Branch instructions retired = %lu (expected %u)\n", \
+ __func__, br, NUM_BRANCHES); \
+ TEST_ASSERT(br, "%s: Branch instructions retired = %lu (expected > 0)", \
+ __func__, br); \
+ TEST_ASSERT(ir, "%s: Instructions retired = %lu (expected > 0)", \
+ __func__, ir); \
+} while (0)
+
+#define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS() \
+do { \
+ uint64_t br = pmc_results.branches_retired; \
+ uint64_t ir = pmc_results.instructions_retired; \
+ \
+ TEST_ASSERT(!br, "%s: Branch instructions retired = %lu (expected 0)", \
+ __func__, br); \
+ TEST_ASSERT(!ir, "%s: Instructions retired = %lu (expected 0)", \
+ __func__, ir); \
+} while (0)
+
static void test_without_filter(struct kvm_vcpu *vcpu)
{
- uint64_t count = run_vcpu_to_sync(vcpu);
+ run_vcpu_and_sync_pmc_results(vcpu);
- if (count != NUM_BRANCHES)
- pr_info("%s: Branch instructions retired = %lu (expected %u)\n",
- __func__, count, NUM_BRANCHES);
- TEST_ASSERT(count, "Allowed PMU event is not counting");
+ ASSERT_PMC_COUNTING_INSTRUCTIONS();
}
-static uint64_t test_with_filter(struct kvm_vcpu *vcpu,
- struct kvm_pmu_event_filter *f)
+static void test_with_filter(struct kvm_vcpu *vcpu,
+ struct kvm_pmu_event_filter *f)
{
vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
- return run_vcpu_to_sync(vcpu);
+ run_vcpu_and_sync_pmc_results(vcpu);
}
static void test_amd_deny_list(struct kvm_vcpu *vcpu)
{
uint64_t event = EVENT(0x1C2, 0);
struct kvm_pmu_event_filter *f;
- uint64_t count;
f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0);
- count = test_with_filter(vcpu, f);
-
+ test_with_filter(vcpu, f);
free(f);
- if (count != NUM_BRANCHES)
- pr_info("%s: Branch instructions retired = %lu (expected %u)\n",
- __func__, count, NUM_BRANCHES);
- TEST_ASSERT(count, "Allowed PMU event is not counting");
+
+ ASSERT_PMC_COUNTING_INSTRUCTIONS();
}
static void test_member_deny_list(struct kvm_vcpu *vcpu)
{
struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
- uint64_t count = test_with_filter(vcpu, f);
+ test_with_filter(vcpu, f);
free(f);
- if (count)
- pr_info("%s: Branch instructions retired = %lu (expected 0)\n",
- __func__, count);
- TEST_ASSERT(!count, "Disallowed PMU Event is counting");
+
+ ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
}
static void test_member_allow_list(struct kvm_vcpu *vcpu)
{
struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
- uint64_t count = test_with_filter(vcpu, f);
+ test_with_filter(vcpu, f);
free(f);
- if (count != NUM_BRANCHES)
- pr_info("%s: Branch instructions retired = %lu (expected %u)\n",
- __func__, count, NUM_BRANCHES);
- TEST_ASSERT(count, "Allowed PMU event is not counting");
+
+ ASSERT_PMC_COUNTING_INSTRUCTIONS();
}
static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
{
struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
- uint64_t count;
+ remove_event(f, INST_RETIRED);
remove_event(f, INTEL_BR_RETIRED);
remove_event(f, AMD_ZEN_BR_RETIRED);
- count = test_with_filter(vcpu, f);
+ test_with_filter(vcpu, f);
free(f);
- if (count != NUM_BRANCHES)
- pr_info("%s: Branch instructions retired = %lu (expected %u)\n",
- __func__, count, NUM_BRANCHES);
- TEST_ASSERT(count, "Allowed PMU event is not counting");
+
+ ASSERT_PMC_COUNTING_INSTRUCTIONS();
}
static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
{
struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
- uint64_t count;
+ remove_event(f, INST_RETIRED);
remove_event(f, INTEL_BR_RETIRED);
remove_event(f, AMD_ZEN_BR_RETIRED);
- count = test_with_filter(vcpu, f);
+ test_with_filter(vcpu, f);
free(f);
- if (count)
- pr_info("%s: Branch instructions retired = %lu (expected 0)\n",
- __func__, count);
- TEST_ASSERT(!count, "Disallowed PMU Event is counting");
+
+ ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
}
/*
@@ -450,51 +505,30 @@ static bool supports_event_mem_inst_retired(void)
#define EXCLUDE_MASKED_ENTRY(event_select, mask, match) \
KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, true)
-struct perf_counter {
- union {
- uint64_t raw;
- struct {
- uint64_t loads:22;
- uint64_t stores:22;
- uint64_t loads_stores:20;
- };
- };
-};
-
-static uint64_t masked_events_guest_test(uint32_t msr_base)
+static void masked_events_guest_test(uint32_t msr_base)
{
- uint64_t ld0, ld1, st0, st1, ls0, ls1;
- struct perf_counter c;
- int val;
-
/*
- * The acutal value of the counters don't determine the outcome of
+ * The actual values of the counters don't determine the outcome of
* the test. Only that they are zero or non-zero.
*/
- ld0 = rdmsr(msr_base + 0);
- st0 = rdmsr(msr_base + 1);
- ls0 = rdmsr(msr_base + 2);
+ const uint64_t loads = rdmsr(msr_base + 0);
+ const uint64_t stores = rdmsr(msr_base + 1);
+ const uint64_t loads_stores = rdmsr(msr_base + 2);
+ int val;
+
__asm__ __volatile__("movl $0, %[v];"
"movl %[v], %%eax;"
"incl %[v];"
: [v]"+m"(val) :: "eax");
- ld1 = rdmsr(msr_base + 0);
- st1 = rdmsr(msr_base + 1);
- ls1 = rdmsr(msr_base + 2);
-
- c.loads = ld1 - ld0;
- c.stores = st1 - st0;
- c.loads_stores = ls1 - ls0;
-
- return c.raw;
+ pmc_results.loads = rdmsr(msr_base + 0) - loads;
+ pmc_results.stores = rdmsr(msr_base + 1) - stores;
+ pmc_results.loads_stores = rdmsr(msr_base + 2) - loads_stores;
}
static void intel_masked_events_guest_code(void)
{
- uint64_t r;
-
for (;;) {
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
@@ -507,16 +541,13 @@ static void intel_masked_events_guest_code(void)
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x7);
- r = masked_events_guest_test(MSR_IA32_PMC0);
-
- GUEST_SYNC(r);
+ masked_events_guest_test(MSR_IA32_PMC0);
+ GUEST_SYNC(0);
}
}
static void amd_masked_events_guest_code(void)
{
- uint64_t r;
-
for (;;) {
wrmsr(MSR_K7_EVNTSEL0, 0);
wrmsr(MSR_K7_EVNTSEL1, 0);
@@ -529,26 +560,22 @@ static void amd_masked_events_guest_code(void)
wrmsr(MSR_K7_EVNTSEL2, ARCH_PERFMON_EVENTSEL_ENABLE |
ARCH_PERFMON_EVENTSEL_OS | LS_DISPATCH_LOAD_STORE);
- r = masked_events_guest_test(MSR_K7_PERFCTR0);
-
- GUEST_SYNC(r);
+ masked_events_guest_test(MSR_K7_PERFCTR0);
+ GUEST_SYNC(0);
}
}
-static struct perf_counter run_masked_events_test(struct kvm_vcpu *vcpu,
- const uint64_t masked_events[],
- const int nmasked_events)
+static void run_masked_events_test(struct kvm_vcpu *vcpu,
+ const uint64_t masked_events[],
+ const int nmasked_events)
{
struct kvm_pmu_event_filter *f;
- struct perf_counter r;
f = create_pmu_event_filter(masked_events, nmasked_events,
KVM_PMU_EVENT_ALLOW,
KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
- r.raw = test_with_filter(vcpu, f);
+ test_with_filter(vcpu, f);
free(f);
-
- return r;
}
/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
@@ -673,7 +700,6 @@ static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events,
int nevents)
{
int ntests = ARRAY_SIZE(test_cases);
- struct perf_counter c;
int i, n;
for (i = 0; i < ntests; i++) {
@@ -685,13 +711,15 @@ static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events,
n = append_test_events(test, events, nevents);
- c = run_masked_events_test(vcpu, events, n);
- TEST_ASSERT(bool_eq(c.loads, test->flags & ALLOW_LOADS) &&
- bool_eq(c.stores, test->flags & ALLOW_STORES) &&
- bool_eq(c.loads_stores,
+ run_masked_events_test(vcpu, events, n);
+
+ TEST_ASSERT(bool_eq(pmc_results.loads, test->flags & ALLOW_LOADS) &&
+ bool_eq(pmc_results.stores, test->flags & ALLOW_STORES) &&
+ bool_eq(pmc_results.loads_stores,
test->flags & ALLOW_LOADS_STORES),
- "%s loads: %u, stores: %u, loads + stores: %u",
- test->msg, c.loads, c.stores, c.loads_stores);
+ "%s loads: %lu, stores: %lu, loads + stores: %lu",
+ test->msg, pmc_results.loads, pmc_results.stores,
+ pmc_results.loads_stores);
}
}
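The pmu_event_filter_test.c changes above stop funneling a single counter value through GUEST_SYNC() and instead publish all measurements via the global pmc_results, which is synced into and out of the guest. The sketch below isolates that host/guest round trip; the results struct, guest_main(), and the constants the guest stores are illustrative stand-ins for the test's real counters.

#include <string.h>

#include "kvm_util.h"

/* Shared host/guest globals; illustrative counters only. */
static struct {
	uint64_t branches_retired;
	uint64_t instructions_retired;
} results;

static void guest_main(void)
{
	/* A real guest would compute these from rdmsr() deltas. */
	results.branches_retired = 1;
	results.instructions_retired = 2;
	GUEST_SYNC(0);		/* the sync value now only reports pass/fail */
}

static void run_and_collect(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	memset(&results, 0, sizeof(results));
	sync_global_to_guest(vcpu->vm, results);	/* push zeroed copy in */

	vcpu_run(vcpu);
	TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_SYNC, "Expected GUEST_SYNC");
	TEST_ASSERT(!uc.args[1], "Guest reported failure: 0x%lx", uc.args[1]);

	sync_global_from_guest(vcpu->vm, results);	/* pull measurements out */
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_main);

	run_and_collect(vcpu);
	TEST_ASSERT(results.branches_retired && results.instructions_retired,
		    "Globals were not synced back from the guest");

	kvm_vm_free(vm);
	return 0;
}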
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
index c280ba1e6572..3009b3e5254d 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
@@ -14,12 +14,11 @@
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <sys/ioctl.h>
+#include <linux/bitmap.h>
+
#include "kvm_util.h"
#include "vmx.h"
-#define PMU_CAP_FW_WRITES (1ULL << 13)
-#define PMU_CAP_LBR_FMT 0x3f
-
union perf_capabilities {
struct {
u64 lbr_format:6;
@@ -36,59 +35,220 @@ union perf_capabilities {
u64 capabilities;
};
-static void guest_code(void)
+/*
+ * The LBR format and most PEBS features are immutable, all other features are
+ * fungible (if supported by the host and KVM).
+ */
+static const union perf_capabilities immutable_caps = {
+ .lbr_format = -1,
+ .pebs_trap = 1,
+ .pebs_arch_reg = 1,
+ .pebs_format = -1,
+ .pebs_baseline = 1,
+};
+
+static const union perf_capabilities format_caps = {
+ .lbr_format = -1,
+ .pebs_format = -1,
+};
+
+static void guest_code(uint64_t current_val)
{
- wrmsr(MSR_IA32_PERF_CAPABILITIES, PMU_CAP_LBR_FMT);
+ uint8_t vector;
+ int i;
+
+ vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, current_val);
+ GUEST_ASSERT_2(vector == GP_VECTOR, current_val, vector);
+
+ vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, 0);
+ GUEST_ASSERT_2(vector == GP_VECTOR, 0, vector);
+
+ for (i = 0; i < 64; i++) {
+ vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES,
+ current_val ^ BIT_ULL(i));
+ GUEST_ASSERT_2(vector == GP_VECTOR,
+ current_val ^ BIT_ULL(i), vector);
+ }
+
+ GUEST_DONE();
}
-int main(int argc, char *argv[])
+/*
+ * Verify that guest WRMSRs to PERF_CAPABILITIES #GP regardless of the value
+ * written, that the guest always sees the userspace controlled value, and that
+ * PERF_CAPABILITIES is immutable after KVM_RUN.
+ */
+static void test_guest_wrmsr_perf_capabilities(union perf_capabilities host_cap)
{
- struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
- int ret;
- union perf_capabilities host_cap;
- uint64_t val;
+ struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+ struct ucall uc;
+ int r, i;
- host_cap.capabilities = kvm_get_feature_msr(MSR_IA32_PERF_CAPABILITIES);
- host_cap.capabilities &= (PMU_CAP_FW_WRITES | PMU_CAP_LBR_FMT);
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(vcpu);
- /* Create VM */
- vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+ vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
- TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM));
+ vcpu_args_set(vcpu, 1, host_cap.capabilities);
+ vcpu_run(vcpu);
- TEST_REQUIRE(kvm_cpu_has_p(X86_PROPERTY_PMU_VERSION));
- TEST_REQUIRE(kvm_cpu_property(X86_PROPERTY_PMU_VERSION) > 0);
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT_2(uc, "val = 0x%lx, vector = %lu");
+ break;
+ case UCALL_DONE:
+ break;
+ default:
+ TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
+ }
- /* testcase 1, set capabilities when we have PDCM bit */
- vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES);
+ ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), host_cap.capabilities);
- /* check capabilities can be retrieved with KVM_GET_MSR */
- ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES);
+ vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
- /* check whatever we write with KVM_SET_MSR is _not_ modified */
- vcpu_run(vcpu);
- ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES);
+ r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0);
+ TEST_ASSERT(!r, "Post-KVM_RUN write '0' didn't fail");
+
+ for (i = 0; i < 64; i++) {
+ r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES,
+ host_cap.capabilities ^ BIT_ULL(i));
+ TEST_ASSERT(!r, "Post-KVM_RUN write '0x%llx'didn't fail",
+ host_cap.capabilities ^ BIT_ULL(i));
+ }
+
+ kvm_vm_free(vm);
+}
+
+/*
+ * Verify KVM allows writing PERF_CAPABILITIES with all KVM-supported features
+ * enabled, as well as '0' (to disable all features).
+ */
+static void test_basic_perf_capabilities(union perf_capabilities host_cap)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, NULL);
- /* testcase 2, check valid LBR formats are accepted */
vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0);
- ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), 0);
+ vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
+
+ kvm_vm_free(vm);
+}
- vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.lbr_format);
- ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), (u64)host_cap.lbr_format);
+static void test_fungible_perf_capabilities(union perf_capabilities host_cap)
+{
+ const uint64_t fungible_caps = host_cap.capabilities & ~immutable_caps.capabilities;
+
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, NULL);
+ int bit;
+
+ for_each_set_bit(bit, &fungible_caps, 64) {
+ vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, BIT_ULL(bit));
+ vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES,
+ host_cap.capabilities & ~BIT_ULL(bit));
+ }
+ vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
+
+ kvm_vm_free(vm);
+}
+
+/*
+ * Verify KVM rejects attempts to set unsupported and/or immutable features in
+ * PERF_CAPABILITIES. Note, LBR format and PEBS format need to be validated
+ * separately as they are multi-bit values, e.g. toggling or setting a single
+ * bit can generate a false positive without dedicated safeguards.
+ */
+static void test_immutable_perf_capabilities(union perf_capabilities host_cap)
+{
+ const uint64_t reserved_caps = (~host_cap.capabilities |
+ immutable_caps.capabilities) &
+ ~format_caps.capabilities;
+
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, NULL);
+ union perf_capabilities val = host_cap;
+ int r, bit;
+
+ for_each_set_bit(bit, &reserved_caps, 64) {
+ r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES,
+ host_cap.capabilities ^ BIT_ULL(bit));
+ TEST_ASSERT(!r, "%s immutable feature 0x%llx (bit %d) didn't fail",
+ host_cap.capabilities & BIT_ULL(bit) ? "Setting" : "Clearing",
+ BIT_ULL(bit), bit);
+ }
/*
- * Testcase 3, check that an "invalid" LBR format is rejected. Only an
- * exact match of the host's format (and 0/disabled) is allowed.
+ * KVM only supports the host's native LBR format, as well as '0' (to
+ * disable LBR support). Verify KVM rejects all other LBR formats.
*/
- for (val = 1; val <= PMU_CAP_LBR_FMT; val++) {
- if (val == (host_cap.capabilities & PMU_CAP_LBR_FMT))
+ for (val.lbr_format = 1; val.lbr_format; val.lbr_format++) {
+ if (val.lbr_format == host_cap.lbr_format)
continue;
- ret = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, val);
- TEST_ASSERT(!ret, "Bad LBR FMT = 0x%lx didn't fail", val);
+ r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, val.capabilities);
+ TEST_ASSERT(!r, "Bad LBR FMT = 0x%x didn't fail, host = 0x%x",
+ val.lbr_format, host_cap.lbr_format);
}
- printf("Completed perf capability tests.\n");
+ /* Ditto for the PEBS format. */
+ for (val.pebs_format = 1; val.pebs_format; val.pebs_format++) {
+ if (val.pebs_format == host_cap.pebs_format)
+ continue;
+
+ r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, val.capabilities);
+ TEST_ASSERT(!r, "Bad PEBS FMT = 0x%x didn't fail, host = 0x%x",
+ val.pebs_format, host_cap.pebs_format);
+ }
+
+ kvm_vm_free(vm);
+}
+
+/*
+ * Test that LBR MSRs are writable when LBRs are enabled, and then verify that
+ * disabling the vPMU via CPUID also disables LBR support. Set bits 2:0 of
+ * LBR_TOS as those bits are writable across all uarch implementations (arch
+ * LBRs will need to poke a different MSR).
+ */
+static void test_lbr_perf_capabilities(union perf_capabilities host_cap)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ int r;
+
+ if (!host_cap.lbr_format)
+ return;
+
+ vm = vm_create_with_one_vcpu(&vcpu, NULL);
+
+ vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
+ vcpu_set_msr(vcpu, MSR_LBR_TOS, 7);
+
+ vcpu_clear_cpuid_entry(vcpu, X86_PROPERTY_PMU_VERSION.function);
+
+ r = _vcpu_set_msr(vcpu, MSR_LBR_TOS, 7);
+ TEST_ASSERT(!r, "Writing LBR_TOS should fail after disabling vPMU");
+
kvm_vm_free(vm);
}
+
+int main(int argc, char *argv[])
+{
+ union perf_capabilities host_cap;
+
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM));
+
+ TEST_REQUIRE(kvm_cpu_has_p(X86_PROPERTY_PMU_VERSION));
+ TEST_REQUIRE(kvm_cpu_property(X86_PROPERTY_PMU_VERSION) > 0);
+
+ host_cap.capabilities = kvm_get_feature_msr(MSR_IA32_PERF_CAPABILITIES);
+
+ TEST_ASSERT(host_cap.full_width_write,
+ "Full-width writes should always be supported");
+
+ test_basic_perf_capabilities(host_cap);
+ test_fungible_perf_capabilities(host_cap);
+ test_immutable_perf_capabilities(host_cap);
+ test_guest_wrmsr_perf_capabilities(host_cap);
+ test_lbr_perf_capabilities(host_cap);
+}
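For reference, the mask swept by test_immutable_perf_capabilities() can be worked through with concrete numbers. The standalone sketch below assumes the PERF_CAPABILITIES bit layout from the SDM (LBR format in bits 5:0, PEBS trap and arch-regs in bits 6 and 7, PEBS record format in bits 11:8, PEBS baseline in bit 14) and a hypothetical host value; both constants are illustrative, not taken from the patch.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)	(1ULL << (n))

int main(void)
{
	const uint64_t lbr_format  = 0x3fULL;		/* bits 5:0  */
	const uint64_t pebs_format = 0xf00ULL;		/* bits 11:8 */
	const uint64_t immutable   = lbr_format | BIT_ULL(6) | BIT_ULL(7) |
				     pebs_format | BIT_ULL(14);
	const uint64_t format_fields = lbr_format | pebs_format;

	/* Hypothetical host: LBR format 0x5 plus full-width writes (bit 13). */
	const uint64_t host_caps = 0x5ULL | BIT_ULL(13);

	/*
	 * Single bits the test toggles and expects KVM to reject: anything
	 * unsupported or immutable, minus the multi-bit LBR/PEBS format
	 * fields, which are instead swept over every possible value.
	 */
	const uint64_t reserved = (~host_caps | immutable) & ~format_fields;

	printf("reserved single-bit toggles = 0x%016" PRIx64 "\n", reserved);
	return 0;
}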