Diffstat (limited to 'arch/x86/coco/sev/core.c')
-rw-r--r-- | arch/x86/coco/sev/core.c | 269 |
1 file changed, 166 insertions, 103 deletions
diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index de1df0cb45da..c5b0148b8c0a 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -92,6 +92,9 @@ static struct ghcb *boot_ghcb __section(".data");
 /* Bitmap of SEV features supported by the hypervisor */
 static u64 sev_hv_features __ro_after_init;
 
+/* Secrets page physical address from the CC blob */
+static u64 secrets_pa __ro_after_init;
+
 /* #VC handler runtime per-CPU data */
 struct sev_es_runtime_data {
 	struct ghcb ghcb_page;
@@ -141,33 +144,6 @@ static DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
 static DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
 static DEFINE_PER_CPU(u64, svsm_caa_pa);
 
-struct sev_config {
-	__u64 debug		: 1,
-
-	      /*
-	       * Indicates when the per-CPU GHCB has been created and registered
-	       * and thus can be used by the BSP instead of the early boot GHCB.
-	       *
-	       * For APs, the per-CPU GHCB is created before they are started
-	       * and registered upon startup, so this flag can be used globally
-	       * for the BSP and APs.
-	       */
-	      ghcbs_initialized	: 1,
-
-	      /*
-	       * Indicates when the per-CPU SVSM CA is to be used instead of the
-	       * boot SVSM CA.
-	       *
-	       * For APs, the per-CPU SVSM CA is created as part of the AP
-	       * bringup, so this flag can be used globally for the BSP and APs.
-	       */
-	      use_cas		: 1,
-
-	      __reserved	: 61;
-};
-
-static struct sev_config sev_cfg __read_mostly;
-
 static __always_inline bool on_vc_stack(struct pt_regs *regs)
 {
 	unsigned long sp = regs->sp;
@@ -722,45 +698,13 @@ void noinstr __sev_es_nmi_complete(void)
 	__sev_put_ghcb(&state);
 }
 
-static u64 __init get_secrets_page(void)
-{
-	u64 pa_data = boot_params.cc_blob_address;
-	struct cc_blob_sev_info info;
-	void *map;
-
-	/*
-	 * The CC blob contains the address of the secrets page, check if the
-	 * blob is present.
-	 */
-	if (!pa_data)
-		return 0;
-
-	map = early_memremap(pa_data, sizeof(info));
-	if (!map) {
-		pr_err("Unable to locate SNP secrets page: failed to map the Confidential Computing blob.\n");
-		return 0;
-	}
-	memcpy(&info, map, sizeof(info));
-	early_memunmap(map, sizeof(info));
-
-	/* smoke-test the secrets page passed */
-	if (!info.secrets_phys || info.secrets_len != PAGE_SIZE)
-		return 0;
-
-	return info.secrets_phys;
-}
-
 static u64 __init get_snp_jump_table_addr(void)
 {
 	struct snp_secrets_page *secrets;
 	void __iomem *mem;
-	u64 pa, addr;
+	u64 addr;
 
-	pa = get_secrets_page();
-	if (!pa)
-		return 0;
-
-	mem = ioremap_encrypted(pa, PAGE_SIZE);
+	mem = ioremap_encrypted(secrets_pa, PAGE_SIZE);
 	if (!mem) {
 		pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n");
 		return 0;
 	}
@@ -1010,6 +954,137 @@ void snp_accept_memory(phys_addr_t start, phys_addr_t end)
 	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
 }
 
+static void set_pte_enc(pte_t *kpte, int level, void *va)
+{
+	struct pte_enc_desc d = {
+		.kpte = kpte,
+		.pte_level = level,
+		.va = va,
+		.encrypt = true
+	};
+
+	prepare_pte_enc(&d);
+	set_pte_enc_mask(kpte, d.pfn, d.new_pgprot);
+}
+
+static void unshare_all_memory(void)
+{
+	unsigned long addr, end, size, ghcb;
+	struct sev_es_runtime_data *data;
+	unsigned int npages, level;
+	bool skipped_addr;
+	pte_t *pte;
+	int cpu;
+
+	/* Unshare the direct mapping. */
+	addr = PAGE_OFFSET;
+	end = PAGE_OFFSET + get_max_mapped();
+
+	while (addr < end) {
+		pte = lookup_address(addr, &level);
+		size = page_level_size(level);
+		npages = size / PAGE_SIZE;
+		skipped_addr = false;
+
+		if (!pte || !pte_decrypted(*pte) || pte_none(*pte)) {
+			addr += size;
+			continue;
+		}
+
+		/*
+		 * Ensure that all the per-CPU GHCBs are made private at the
+		 * end of the unsharing loop so that the switch to the slower
+		 * MSR protocol happens last.
+		 */
+		for_each_possible_cpu(cpu) {
+			data = per_cpu(runtime_data, cpu);
+			ghcb = (unsigned long)&data->ghcb_page;
+
+			if (addr <= ghcb && ghcb <= addr + size) {
+				skipped_addr = true;
+				break;
+			}
+		}
+
+		if (!skipped_addr) {
+			set_pte_enc(pte, level, (void *)addr);
+			snp_set_memory_private(addr, npages);
+		}
+		addr += size;
+	}
+
+	/* Unshare all bss decrypted memory. */
+	addr = (unsigned long)__start_bss_decrypted;
+	end = (unsigned long)__start_bss_decrypted_unused;
+	npages = (end - addr) >> PAGE_SHIFT;
+
+	for (; addr < end; addr += PAGE_SIZE) {
+		pte = lookup_address(addr, &level);
+		if (!pte || !pte_decrypted(*pte) || pte_none(*pte))
+			continue;
+
+		set_pte_enc(pte, level, (void *)addr);
+	}
+	addr = (unsigned long)__start_bss_decrypted;
+	snp_set_memory_private(addr, npages);
+
+	__flush_tlb_all();
+}
+
+/* Stop new private<->shared conversions */
+void snp_kexec_begin(void)
+{
+	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+		return;
+
+	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
+		return;
+
+	/*
+	 * Crash kernel ends up here with interrupts disabled: can't wait for
+	 * conversions to finish.
+	 *
+	 * If race happened, just report and proceed.
+	 */
+	if (!set_memory_enc_stop_conversion())
+		pr_warn("Failed to stop shared<->private conversions\n");
+}
+
+void snp_kexec_finish(void)
+{
+	struct sev_es_runtime_data *data;
+	unsigned int level, cpu;
+	unsigned long size;
+	struct ghcb *ghcb;
+	pte_t *pte;
+
+	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+		return;
+
+	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
+		return;
+
+	unshare_all_memory();
+
+	/*
+	 * Switch to using the MSR protocol to change per-CPU GHCBs to
+	 * private. All the per-CPU GHCBs have been switched back to private,
+	 * so can't do any more GHCB calls to the hypervisor beyond this point
+	 * until the kexec'ed kernel starts running.
+	 */
+	boot_ghcb = NULL;
+	sev_cfg.ghcbs_initialized = false;
+
+	for_each_possible_cpu(cpu) {
+		data = per_cpu(runtime_data, cpu);
+		ghcb = &data->ghcb_page;
+		pte = lookup_address((unsigned long)ghcb, &level);
+		size = page_level_size(level);
+		set_pte_enc(pte, level, (void *)ghcb);
+		snp_set_memory_private((unsigned long)ghcb, (size / PAGE_SIZE));
+	}
+}
+
 static int snp_set_vmsa(void *va, void *caa, int apic_id, bool make_vmsa)
 {
 	int ret;
@@ -1331,35 +1406,39 @@ int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
 	return 0;
 }
 
+/* Writes to the SVSM CAA MSR are ignored */
+static enum es_result __vc_handle_msr_caa(struct pt_regs *regs, bool write)
+{
+	if (write)
+		return ES_OK;
+
+	regs->ax = lower_32_bits(this_cpu_read(svsm_caa_pa));
+	regs->dx = upper_32_bits(this_cpu_read(svsm_caa_pa));
+
+	return ES_OK;
+}
+
 static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
 {
 	struct pt_regs *regs = ctxt->regs;
 	enum es_result ret;
-	u64 exit_info_1;
+	bool write;
 
 	/* Is it a WRMSR? */
-	exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;
+	write = ctxt->insn.opcode.bytes[1] == 0x30;
 
-	if (regs->cx == MSR_SVSM_CAA) {
-		/* Writes to the SVSM CAA msr are ignored */
-		if (exit_info_1)
-			return ES_OK;
-
-		regs->ax = lower_32_bits(this_cpu_read(svsm_caa_pa));
-		regs->dx = upper_32_bits(this_cpu_read(svsm_caa_pa));
-
-		return ES_OK;
-	}
+	if (regs->cx == MSR_SVSM_CAA)
+		return __vc_handle_msr_caa(regs, write);
 
 	ghcb_set_rcx(ghcb, regs->cx);
-	if (exit_info_1) {
+	if (write) {
 		ghcb_set_rax(ghcb, regs->ax);
 		ghcb_set_rdx(ghcb, regs->dx);
 	}
 
-	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0);
+	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, write, 0);
 
-	if ((ret == ES_OK) && (!exit_info_1)) {
+	if ((ret == ES_OK) && !write) {
 		regs->ax = ghcb->save.rax;
 		regs->dx = ghcb->save.rdx;
 	}
@@ -2300,6 +2379,11 @@ bool __head snp_init(struct boot_params *bp)
 	if (!cc_info)
 		return false;
 
+	if (cc_info->secrets_phys && cc_info->secrets_len == PAGE_SIZE)
+		secrets_pa = cc_info->secrets_phys;
+	else
+		return false;
+
 	setup_cpuid_table(cc_info);
 
 	svsm_setup(cc_info);
@@ -2374,23 +2458,6 @@ static int __init report_snp_info(void)
 }
 arch_initcall(report_snp_info);
 
-static int __init init_sev_config(char *str)
-{
-	char *s;
-
-	while ((s = strsep(&str, ","))) {
-		if (!strcmp(s, "debug")) {
-			sev_cfg.debug = true;
-			continue;
-		}
-
-		pr_info("SEV command-line option '%s' was not recognized\n", s);
-	}
-
-	return 1;
-}
-__setup("sev=", init_sev_config);
-
 static void update_attest_input(struct svsm_call *call, struct svsm_attest_call *input)
 {
 	/* If (new) lengths have been returned, propagate them up */
@@ -2441,7 +2508,8 @@ int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call,
 }
 EXPORT_SYMBOL_GPL(snp_issue_svsm_attest_req);
 
-int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
+int snp_issue_guest_request(struct snp_guest_req *req, struct snp_req_data *input,
+			    struct snp_guest_request_ioctl *rio)
 {
 	struct ghcb_state state;
 	struct es_em_ctxt ctxt;
@@ -2465,12 +2533,12 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct sn
 
 	vc_ghcb_invalidate(ghcb);
 
-	if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
+	if (req->exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
 		ghcb_set_rax(ghcb, input->data_gpa);
 		ghcb_set_rbx(ghcb, input->data_npages);
 	}
 
-	ret = sev_es_ghcb_hv_call(ghcb, &ctxt, exit_code, input->req_gpa, input->resp_gpa);
+	ret = sev_es_ghcb_hv_call(ghcb, &ctxt, req->exit_code, input->req_gpa, input->resp_gpa);
 
 	if (ret)
 		goto e_put;
@@ -2485,7 +2553,7 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct sn
 
 	case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN):
 		/* Number of expected pages are returned in RBX */
-		if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
+		if (req->exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
 			input->data_npages = ghcb_get_rbx(ghcb);
 			ret = -ENOSPC;
 			break;
@@ -2513,16 +2581,11 @@ static struct platform_device sev_guest_device = {
 static int __init snp_init_platform_device(void)
 {
 	struct sev_guest_platform_data data;
-	u64 gpa;
 
 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
 		return -ENODEV;
 
-	gpa = get_secrets_page();
-	if (!gpa)
-		return -ENODEV;
-
-	data.secrets_gpa = gpa;
+	data.secrets_gpa = secrets_pa;
 
 	if (platform_device_add_data(&sev_guest_device, &data, sizeof(data)))
 		return -ENODEV;
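
For context on the vc_handle_msr() refactor above: the #VC handler tells WRMSR apart from RDMSR by the second opcode byte (both are two-byte 0F-prefixed instructions; 0F 30 is WRMSR, 0F 32 is RDMSR), and SVM's MSR intercept reports the same direction in exit_info_1 (0 = read, 1 = write), which is why the new bool can be handed to sev_es_ghcb_hv_call() unchanged. A minimal standalone sketch of that decode follows; it is illustrative only, and the helper name is hypothetical, not kernel code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical standalone illustration of the decode in vc_handle_msr():
 * byte 1 of the two-byte 0x0F opcode distinguishes the instructions,
 * 0x30 being WRMSR and 0x32 being RDMSR. SVM's MSR intercept encodes
 * the same direction in exit_info_1 (0 = read, 1 = write), so the
 * boolean maps onto the GHCB hypervisor call as-is.
 */
static bool msr_insn_is_write(const uint8_t opcode[2])
{
	return opcode[0] == 0x0f && opcode[1] == 0x30; /* WRMSR */
}

int main(void)
{
	const uint8_t wrmsr[2] = { 0x0f, 0x30 };
	const uint8_t rdmsr[2] = { 0x0f, 0x32 };

	/* Expected output: wrmsr write=1, rdmsr write=0 */
	printf("wrmsr write=%d\n", msr_insn_is_write(wrmsr));
	printf("rdmsr write=%d\n", msr_insn_is_write(rdmsr));
	return 0;
}

The same direction flag also explains the ordering constraint spelled out in the patch's own comments: unshare_all_memory() flips shared pages back to private over the fast GHCB page-state-change interface but deliberately skips the per-CPU GHCB pages, and snp_kexec_finish() converts those last, since converting a GHCB tears down that channel and leaves only the slower MSR protocol.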