Diffstat (limited to 'arch/x86/platform/pvh/head.S')
-rw-r--r-- | arch/x86/platform/pvh/head.S | 50 | ++++++++++++++++++++++++++++----------------------
1 file changed, 28 insertions(+), 22 deletions(-)
diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S
index 64fca49cd88f..4733a5f467b8 100644
--- a/arch/x86/platform/pvh/head.S
+++ b/arch/x86/platform/pvh/head.S
@@ -6,7 +6,9 @@
 	.code32
 	.text
+#ifdef CONFIG_X86_32
 #define _pa(x) ((x) - __START_KERNEL_map)
+#endif
 #define rva(x) ((x) - pvh_start_xen)
 
 #include <linux/elfnote.h>
 #include <linux/init.h>
@@ -52,7 +54,7 @@
 #define PVH_CS_SEL (PVH_GDT_ENTRY_CS * 8)
 #define PVH_DS_SEL (PVH_GDT_ENTRY_DS * 8)
 
-SYM_CODE_START_LOCAL(pvh_start_xen)
+SYM_CODE_START(pvh_start_xen)
 	UNWIND_HINT_END_OF_STACK
 	cld
 
@@ -72,8 +74,7 @@ SYM_CODE_START_LOCAL(pvh_start_xen)
 	movl $0, %esp
 
 	leal rva(gdt)(%ebp), %eax
-	leal rva(gdt_start)(%ebp), %ecx
-	movl %ecx, 2(%eax)
+	addl %eax, 2(%eax)
 	lgdt (%eax)
 
 	mov $PVH_DS_SEL,%eax
@@ -103,10 +104,23 @@ SYM_CODE_START_LOCAL(pvh_start_xen)
 	btsl $_EFER_LME, %eax
 	wrmsr
 
+	/*
+	 * Reuse the non-relocatable symbol emitted for the ELF note to
+	 * subtract the build time physical address of pvh_start_xen() from
+	 * its actual runtime address, without relying on absolute 32-bit ELF
+	 * relocations, as these are not supported by the linker when running
+	 * in -pie mode, and should be avoided in .head.text in general.
+	 */
 	mov %ebp, %ebx
-	subl $_pa(pvh_start_xen), %ebx /* offset */
+	subl rva(xen_elfnote_phys32_entry)(%ebp), %ebx
 	jz .Lpagetable_done
 
+	/*
+	 * Store the resulting load offset in phys_base. __pa() needs
+	 * phys_base set to calculate the hypercall page in xen_pvh_init().
+	 */
+	movl %ebx, rva(phys_base)(%ebp)
+
 	/* Fixup page-tables for relocation. */
 	leal rva(pvh_init_top_pgt)(%ebp), %edi
 	movl $PTRS_PER_PGD, %ecx
@@ -165,20 +179,12 @@ SYM_CODE_START_LOCAL(pvh_start_xen)
 	xor %edx, %edx
 	wrmsr
 
-	/*
-	 * Calculate load offset and store in phys_base. __pa() needs
-	 * phys_base set to calculate the hypercall page in xen_pvh_init().
-	 */
-	movq %rbp, %rbx
-	subq $_pa(pvh_start_xen), %rbx
-	movq %rbx, phys_base(%rip)
-	call xen_prepare_pvh
-	/*
-	 * Clear phys_base. __startup_64 will *add* to its value,
-	 * so reset to 0.
-	 */
-	xor %rbx, %rbx
-	movq %rbx, phys_base(%rip)
+	/* Call xen_prepare_pvh() via the kernel virtual mapping */
+	leaq xen_prepare_pvh(%rip), %rax
+	subq phys_base(%rip), %rax
+	addq $__START_KERNEL_map, %rax
+	ANNOTATE_RETPOLINE_SAFE
+	call *%rax
 
 	/* startup_64 expects boot_params in %rsi. */
 	lea pvh_bootparams(%rip), %rsi
@@ -217,8 +223,8 @@ SYM_CODE_END(pvh_start_xen)
 	.section ".init.data","aw"
 	.balign 8
 SYM_DATA_START_LOCAL(gdt)
-	.word gdt_end - gdt_start
-	.long _pa(gdt_start) /* x86-64 will overwrite if relocated. */
+	.word gdt_end - gdt_start - 1
+	.long gdt_start - gdt
 	.word 0
 SYM_DATA_END(gdt)
 SYM_DATA_START_LOCAL(gdt_start)
@@ -300,5 +306,5 @@ SYM_DATA_END(pvh_level2_kernel_pgt)
 	.long KERNEL_IMAGE_SIZE - 1)
 #endif
 
-	ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
-		     _ASM_PTR (pvh_start_xen - __START_KERNEL_map))
+	ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY, .global xen_elfnote_phys32_entry;
+		xen_elfnote_phys32_entry: _ASM_PTR xen_elfnote_phys32_entry_value - .)
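
Note on the relocation arithmetic above: the following is a minimal user-space
C sketch of the two steps, not kernel code. The constants (START_KERNEL_MAP,
LINK_PHYS_ENTRY) and the 2 MiB slide are made-up illustrative values; only the
arithmetic mirrors the assembly, i.e. the load offset that ends up in
phys_base, and the leaq/subq/addq sequence that rebases a RIP-relative
physical address into the kernel virtual mapping before the indirect call to
xen_prepare_pvh().

#include <stdint.h>
#include <stdio.h>

/* Illustrative link-time constants; not the kernel's real values. */
#define START_KERNEL_MAP 0xffffffff80000000ULL /* stands in for __START_KERNEL_map */
#define LINK_PHYS_ENTRY  0x1000000ULL          /* build-time phys addr of the entry point */

int main(void)
{
	/* Pretend the loader placed the image 2 MiB above its link address. */
	uint64_t runtime_entry = LINK_PHYS_ENTRY + 0x200000;

	/*
	 * Step 1: load offset = runtime address - build-time address.
	 * The patch obtains the build-time address via the ELF-note
	 * symbol rather than an absolute relocation, but the arithmetic
	 * is the same; the result is stored in phys_base.
	 */
	uint64_t phys_base = runtime_entry - LINK_PHYS_ENTRY;

	/*
	 * Step 2: rebase a runtime physical address into the kernel
	 * virtual mapping, mirroring the leaq/subq/addq sequence:
	 *   virt = phys - phys_base + __START_KERNEL_map
	 */
	uint64_t phys_func = runtime_entry + 0x4321; /* hypothetical function in the image */
	uint64_t virt_func = phys_func - phys_base + START_KERNEL_MAP;

	printf("phys_base = %#llx\n", (unsigned long long)phys_base);
	printf("virt_func = %#llx\n", (unsigned long long)virt_func);
	return 0;
}

Calling through the virtual alias also explains why the removed hunk's "clear
phys_base" step is gone: phys_base now stays set to the load offset rather
than being zeroed again after xen_prepare_pvh() returns.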
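
The gdt descriptor change follows the same position-independent pattern: the
base field is emitted as the link-time offset gdt_start - gdt and turned into
an absolute address at runtime by "addl %eax, 2(%eax)", while the limit
becomes gdt_end - gdt_start - 1 because LGDT expects the table size minus one.
A hedged C sketch of that fixup; the struct matches the 32-bit LGDT
pseudo-descriptor layout, but the addresses and entry count are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* LGDT pseudo-descriptor: 16-bit limit followed by a 32-bit base. */
struct gdt_desc {
	uint16_t limit; /* size of the GDT in bytes, minus one */
	uint32_t base;  /* linear address of the first GDT entry */
} __attribute__((packed));

int main(void)
{
	/*
	 * Build time: store the offset of the table from the descriptor
	 * itself (gdt_start - gdt), which needs no relocation. Assuming
	 * three 8-byte entries and an 8-byte descriptor in front of them.
	 */
	struct gdt_desc desc = { .limit = 3 * 8 - 1, .base = 8 };

	/* Hypothetical runtime address the descriptor was loaded at. */
	uint32_t desc_runtime_addr = 0x00212340;

	/*
	 * Runtime fixup, the C analogue of "addl %eax, 2(%eax)": add the
	 * descriptor's own address to the stored offset, yielding the
	 * absolute base to hand to LGDT.
	 */
	desc.base += desc_runtime_addr;

	printf("GDT base = %#x, limit = %#x\n",
	       (unsigned)desc.base, (unsigned)desc.limit);
	return 0;
}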