author | Brijesh Singh <brijesh.singh@amd.com> | 2024-01-25 22:11:07 -0600
committer | Borislav Petkov (AMD) <bp@alien8.de> | 2024-01-29 17:26:30 +0100
commit | 1f568d36361b4891696280b719ca4b142db872ba (patch)
tree | 6642d7119bd4c48bf12f17febec63fef2765671d /arch/x86/virt
parent | 94b36bc244bb134ec616dd3f2d37343cd8c1be54 (diff)
x86/fault: Add helper for dumping RMP entries
This information will be useful for debugging things like page faults
due to RMP access violations and RMPUPDATE failures.
[ mdr: move helper to standalone patch, rework dump logic as suggested
by Boris. ]
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20240126041126.1927228-8-michael.roth@amd.com
Diffstat (limited to 'arch/x86/virt')
-rw-r--r-- | arch/x86/virt/svm/sev.c | 99
1 file changed, 89 insertions, 10 deletions
diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c
index 7669b2ff0ec7..c74266e039b2 100644
--- a/arch/x86/virt/svm/sev.c
+++ b/arch/x86/virt/svm/sev.c
@@ -35,16 +35,21 @@
  * Family 19h Model 01h, Rev B1 processor.
  */
 struct rmpentry {
-	u64	assigned	: 1,
-		pagesize	: 1,
-		immutable	: 1,
-		rsvd1		: 9,
-		gpa		: 39,
-		asid		: 10,
-		vmsa		: 1,
-		validated	: 1,
-		rsvd2		: 1;
-	u64 rsvd3;
+	union {
+		struct {
+			u64	assigned	: 1,
+				pagesize	: 1,
+				immutable	: 1,
+				rsvd1		: 9,
+				gpa		: 39,
+				asid		: 10,
+				vmsa		: 1,
+				validated	: 1,
+				rsvd2		: 1;
+		};
+		u64 lo;
+	};
+	u64 hi;
 } __packed;
 
 /*
@@ -263,3 +268,77 @@ int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(snp_lookup_rmpentry);
+
+/*
+ * Dump the raw RMP entry for a particular PFN. These bits are documented in the
+ * PPR for a particular CPU model and provide useful information about how a
+ * particular PFN is being utilized by the kernel/firmware at the time certain
+ * unexpected events occur, such as RMP faults.
+ */
+static void dump_rmpentry(u64 pfn)
+{
+	u64 pfn_i, pfn_end;
+	struct rmpentry *e;
+	int level;
+
+	e = __snp_lookup_rmpentry(pfn, &level);
+	if (IS_ERR(e)) {
+		pr_err("Failed to read RMP entry for PFN 0x%llx, error %ld\n",
+		       pfn, PTR_ERR(e));
+		return;
+	}
+
+	if (e->assigned) {
+		pr_info("PFN 0x%llx, RMP entry: [0x%016llx - 0x%016llx]\n",
+			pfn, e->lo, e->hi);
+		return;
+	}
+
+	/*
+	 * If the RMP entry for a particular PFN is not in an assigned state,
+	 * then it is sometimes useful to get an idea of whether or not any RMP
+	 * entries for other PFNs within the same 2MB region are assigned, since
+	 * those too can affect the ability to access a particular PFN in
+	 * certain situations, such as when the PFN is being accessed via a 2MB
+	 * mapping in the host page table.
+	 */
+	pfn_i = ALIGN_DOWN(pfn, PTRS_PER_PMD);
+	pfn_end = pfn_i + PTRS_PER_PMD;
+
+	pr_info("PFN 0x%llx unassigned, dumping non-zero entries in 2M PFN region: [0x%llx - 0x%llx]\n",
+		pfn, pfn_i, pfn_end);
+
+	while (pfn_i < pfn_end) {
+		e = __snp_lookup_rmpentry(pfn_i, &level);
+		if (IS_ERR(e)) {
+			pr_err("Error %ld reading RMP entry for PFN 0x%llx\n",
+			       PTR_ERR(e), pfn_i);
+			pfn_i++;
+			continue;
+		}
+
+		if (e->lo || e->hi)
+			pr_info("PFN: 0x%llx, [0x%016llx - 0x%016llx]\n", pfn_i, e->lo, e->hi);
+		pfn_i++;
+	}
+}
+
+void snp_dump_hva_rmpentry(unsigned long hva)
+{
+	unsigned long paddr;
+	unsigned int level;
+	pgd_t *pgd;
+	pte_t *pte;
+
+	pgd = __va(read_cr3_pa());
+	pgd += pgd_index(hva);
+	pte = lookup_address_in_pgd(pgd, hva, &level);
+
+	if (!pte) {
+		pr_err("Can't dump RMP entry for HVA %lx: no PTE/PFN found\n", hva);
+		return;
+	}
+
+	paddr = PFN_PHYS(pte_pfn(*pte)) | (hva & ~page_level_mask(level));
+	dump_rmpentry(PHYS_PFN(paddr));
+}
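
For context on how this helper is intended to be consumed: a page-fault path would resolve the faulting virtual address and pass it to snp_dump_hva_rmpentry(). The sketch below is not part of this patch; it assumes an RMP-violation bit in the page fault error code (X86_PF_RMP) and a hypothetical hook function, and only illustrates the call pattern.

/*
 * Illustrative sketch only (not part of this patch): how a #PF handler
 * might invoke snp_dump_hva_rmpentry() when the error code indicates an
 * RMP violation. X86_PF_RMP and the hook point are assumptions here.
 */
static void show_rmp_fault_info(unsigned long error_code, unsigned long address)
{
	/* Only RMP violations have useful RMP-table state to report. */
	if (!(error_code & X86_PF_RMP))
		return;

	/* Dump the raw RMP entry (or its 2M neighborhood) for the faulting HVA. */
	snp_dump_hva_rmpentry(address);
}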
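
Reading the dumped output: dump_rmpentry() prints the raw low and high 64-bit words of the entry, so decoding them against the bitfield layout above is left to the reader. A minimal standalone decoder for the low word might look like the following sketch; the field positions mirror struct rmpentry for the documented Family 19h Model 01h layout, and the decode_rmp_lo()/main() names and the example value are purely illustrative.

/* Minimal standalone sketch: decode the low word printed by dump_rmpentry(). */
#include <stdint.h>
#include <stdio.h>

static void decode_rmp_lo(uint64_t lo)
{
	unsigned int assigned  = lo & 0x1;                        /* bit 0 */
	unsigned int pagesize  = (lo >> 1) & 0x1;                 /* bit 1 */
	unsigned int immutable = (lo >> 2) & 0x1;                 /* bit 2 */
	uint64_t gpa           = (lo >> 12) & ((1ULL << 39) - 1); /* bits 12-50 */
	unsigned int asid      = (lo >> 51) & 0x3ff;              /* bits 51-60 */
	unsigned int vmsa      = (lo >> 61) & 0x1;                /* bit 61 */
	unsigned int validated = (lo >> 62) & 0x1;                /* bit 62 */

	printf("assigned=%u pagesize=%u immutable=%u gpa=0x%llx asid=%u vmsa=%u validated=%u\n",
	       assigned, pagesize, immutable, (unsigned long long)gpa, asid, vmsa, validated);
}

int main(void)
{
	/* Arbitrary example: paste the first value from a "RMP entry: [lo - hi]" log line. */
	decode_rmp_lo(0x0000000000000001ULL);
	return 0;
}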