From 480e5926ce3bb61ec229be2dab08bdce8abb8d2e Mon Sep 17 00:00:00 2001
From: Christian Borntraeger
Date: Tue, 20 Sep 2011 17:07:28 +0200
Subject: [S390] kvm: fix address mode switching

598841ca9919d008b520114d8a4378c4ce4e40a1 ([S390] use gmap address spaces for
kvm guest images) changed kvm to use a separate address space for kvm guests.
This address space was switched in __vcpu_run. In some cases (preemption,
page fault) there is the possibility that this address space switch is lost.
The typical symptom was a huge number of validity intercepts or random guest
addressing exceptions.

Fix this by doing the switch in sie_loop and sie_exit and by saving the
address space in the gmap structure itself. Also use the preempt notifier.

Signed-off-by: Christian Borntraeger
Acked-by: Avi Kivity
Signed-off-by: Heiko Carstens
---
 arch/s390/include/asm/pgtable.h |  2 ++
 arch/s390/kernel/asm-offsets.c  |  3 +++
 arch/s390/kernel/entry64.S      |  6 ++++++
 arch/s390/kvm/kvm-s390.c        |  4 ++--
 arch/s390/mm/pgtable.c          | 10 ++--------
 5 files changed, 15 insertions(+), 10 deletions(-)

(limited to 'arch')

diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 519eb5f187ef..c0cb794bb365 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -658,12 +658,14 @@ static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
  * struct gmap_struct - guest address space
  * @mm: pointer to the parent mm_struct
  * @table: pointer to the page directory
+ * @asce: address space control element for gmap page table
  * @crst_list: list of all crst tables used in the guest address space
  */
 struct gmap {
 	struct list_head list;
 	struct mm_struct *mm;
 	unsigned long *table;
+	unsigned long asce;
 	struct list_head crst_list;
 };
 
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 532fd4322156..2b45591e1582 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -10,6 +10,7 @@
 #include <linux/sched.h>
 #include <asm/vdso.h>
 #include <asm/sigp.h>
+#include <asm/pgtable.h>
 
 /*
  * Make sure that the compiler is new enough. We want a compiler that
@@ -126,6 +127,7 @@ int main(void)
 	DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
 	DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
 	DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
+	DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
 	DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
 	DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
 	DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
@@ -151,6 +153,7 @@ int main(void)
 	DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
 	DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
 	DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
+	DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
 #endif /* CONFIG_32BIT */
 	return 0;
 }
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 5f729d627cef..713da0760538 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -1076,6 +1076,11 @@ sie_loop:
 	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
 	tm	__TI_flags+7(%r14),_TIF_EXIT_SIE
 	jnz	sie_exit
+	lg	%r14,__LC_GMAP			# get gmap pointer
+	ltgr	%r14,%r14
+	jz	sie_gmap
+	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
+sie_gmap:
 	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
 	SPP	__SF_EMPTY(%r15)		# set guest id
 	sie	0(%r14)
@@ -1083,6 +1088,7 @@ sie_done:
 	SPP	__LC_CMF_HPP			# set host id
 	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
 sie_exit:
+	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 	ni	__TI_flags+6(%r14),255-(_TIF_SIE>>8)
 	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
 	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f17296e4fc89..b4eced131e5c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -263,10 +263,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
 	restore_fp_regs(&vcpu->arch.guest_fpregs);
 	restore_access_regs(vcpu->arch.guest_acrs);
+	gmap_enable(vcpu->arch.gmap);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	gmap_disable(vcpu->arch.gmap);
 	save_fp_regs(&vcpu->arch.guest_fpregs);
 	save_access_regs(vcpu->arch.guest_acrs);
 	restore_fp_regs(&vcpu->arch.host_fpregs);
@@ -461,7 +463,6 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
 	local_irq_disable();
 	kvm_guest_enter();
 	local_irq_enable();
-	gmap_enable(vcpu->arch.gmap);
 	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
 		   atomic_read(&vcpu->arch.sie_block->cpuflags));
 	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
@@ -470,7 +471,6 @@
 	}
 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
 		   vcpu->arch.sie_block->icptcode);
-	gmap_disable(vcpu->arch.gmap);
 	local_irq_disable();
 	kvm_guest_exit();
 	local_irq_enable();
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 4d1f2bce87b3..f69ff3c13496 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -160,6 +160,8 @@ struct gmap *gmap_alloc(struct mm_struct *mm)
 	table = (unsigned long *) page_to_phys(page);
 	crst_table_init(table, _REGION1_ENTRY_EMPTY);
 	gmap->table = table;
+	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
+		_ASCE_USER_BITS | __pa(table);
 	list_add(&gmap->list, &mm->context.gmap_list);
 	return gmap;
 
@@ -240,10 +242,6 @@ EXPORT_SYMBOL_GPL(gmap_free);
  */
 void gmap_enable(struct gmap *gmap)
 {
-	/* Load primary space page table origin. */
-	S390_lowcore.user_asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
-		_ASCE_USER_BITS | __pa(gmap->table);
-	asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
 	S390_lowcore.gmap = (unsigned long) gmap;
 }
 EXPORT_SYMBOL_GPL(gmap_enable);
@@ -254,10 +252,6 @@ EXPORT_SYMBOL_GPL(gmap_enable);
  */
 void gmap_disable(struct gmap *gmap)
 {
-	/* Load primary space page table origin. */
-	S390_lowcore.user_asce =
-		gmap->mm->context.asce_bits | __pa(gmap->mm->pgd);
-	asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
 	S390_lowcore.gmap = 0UL;
 }
 EXPORT_SYMBOL_GPL(gmap_disable);
--
cgit v1.2.3-70-g09d2

From b6cf8788a3382c2000743a0e393bcc8aeb0601cb Mon Sep 17 00:00:00 2001
From: Christian Borntraeger
Date: Tue, 20 Sep 2011 17:07:29 +0200
Subject: [S390] kvm: extension capability for new address space layout

598841ca9919d008b520114d8a4378c4ce4e40a1 ([S390] use gmap address spaces for
kvm guest images) changed kvm on s390 to use a separate address space for kvm
guests. We can now put KVM guests anywhere in the user address space, with a
size of up to 8PB, as long as the memory is 1MB-aligned.

This change was done without a KVM extension capability bit. The change was
added after 3.0, but we still have a chance to add a feature bit before 3.1
(keeping the releases in a sane state). We use number 71 to avoid collisions
with other pending kvm patches, as requested by Alexander Graf.

Signed-off-by: Christian Borntraeger
Acked-by: Avi Kivity
Cc: Alexander Graf
Signed-off-by: Heiko Carstens
---
 arch/s390/kvm/kvm-s390.c | 1 +
 include/linux/kvm.h      | 1 +
 2 files changed, 2 insertions(+)

(limited to 'arch')

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index b4eced131e5c..dc2b580e27bc 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -123,6 +123,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 
 	switch (ext) {
 	case KVM_CAP_S390_PSW:
+	case KVM_CAP_S390_GMAP:
 		r = 1;
 		break;
 	default:
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 2c366b52f505..aace6b8691a2 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -553,6 +553,7 @@ struct kvm_ppc_pvinfo {
 #define KVM_CAP_SPAPR_TCE 63
 #define KVM_CAP_PPC_SMT 64
 #define KVM_CAP_PPC_RMA 65
+#define KVM_CAP_S390_GMAP 71
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
--
cgit v1.2.3-70-g09d2
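
A note on the entry64.S hunks in the first patch, for readers who do not read
s390 assembler: the sketch below is a rough, self-contained C rendering of the
new control-register handling, not kernel code. The type names and the
load_control_reg1() helper are invented stand-ins for the real lowcore,
struct gmap and the lctlg instruction.

/*
 * Illustrative C rendering of the CR1 (primary ASCE) handling added
 * around SIE entry and exit.  NOT kernel code: the structs and
 * load_control_reg1() are simplified stand-ins.
 */
struct gmap_sketch {
	unsigned long asce;		/* filled in once by gmap_alloc() in the patch */
};

struct lowcore_sketch {
	unsigned long user_asce;	/* host (user process) ASCE */
	unsigned long gmap;		/* set by gmap_enable(), cleared by gmap_disable() */
};

/* stand-in for "lctlg %c1,%c1,<operand>" */
static void load_control_reg1(unsigned long asce) { (void) asce; }

/* what sie_loop now does just before the sie instruction */
static void sie_entry_switch(struct lowcore_sketch *lc)
{
	struct gmap_sketch *gmap = (struct gmap_sketch *) lc->gmap;

	if (gmap)				/* lg/ltgr/jz sie_gmap */
		load_control_reg1(gmap->asce);	/* lctlg %c1,%c1,__GMAP_ASCE(%r14) */
}

/* what sie_exit now does on every exit from the guest */
static void sie_exit_switch(struct lowcore_sketch *lc)
{
	load_control_reg1(lc->user_asce);	/* lctlg %c1,%c1,__LC_USER_ASCE */
}

Because the CR1 switch now happens on every pass through sie_loop/sie_exit,
and gmap_enable()/gmap_disable() (which merely publish the gmap pointer in the
lowcore) moved into kvm_arch_vcpu_load()/kvm_arch_vcpu_put() invoked via the
preempt notifier, a preemption or host page fault can no longer leave the vcpu
running on a stale address space.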
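
The second patch only advertises the new address space layout, so the natural
consumer is userspace. A minimal, hypothetical probe might look like the
sketch below; it assumes the standard /dev/kvm plus KVM_CHECK_EXTENSION flow
and defines KVM_CAP_S390_GMAP itself (value 71, as introduced above) when
built against pre-3.1 headers.

/* Minimal sketch: probe KVM_CAP_S390_GMAP from userspace. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#ifndef KVM_CAP_S390_GMAP
#define KVM_CAP_S390_GMAP 71	/* value introduced by this patch */
#endif

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int ret;

	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}
	/* KVM_CHECK_EXTENSION returns > 0 when the capability is present. */
	ret = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_S390_GMAP);
	printf("KVM_CAP_S390_GMAP %ssupported\n", ret > 0 ? "" : "not ");
	close(kvm);
	return 0;
}

A return value greater than 0 means the kernel uses the gmap layout, i.e.
guest memory may be backed by any 1MB-aligned range of the process address
space (up to 8PB); older kernels report 0 here.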