-rw-r--r--  arch/riscv/include/asm/kvm_vcpu_timer.h |   7
-rw-r--r--  arch/riscv/include/uapi/asm/kvm.h       |   1
-rw-r--r--  arch/riscv/kvm/vcpu.c                   |   8
-rw-r--r--  arch/riscv/kvm/vcpu_timer.c             | 144
4 files changed, 153 insertions(+), 7 deletions(-)
diff --git a/arch/riscv/include/asm/kvm_vcpu_timer.h b/arch/riscv/include/asm/kvm_vcpu_timer.h
index 50138e2eb91b..0d8fdb8ec63a 100644
--- a/arch/riscv/include/asm/kvm_vcpu_timer.h
+++ b/arch/riscv/include/asm/kvm_vcpu_timer.h
@@ -28,6 +28,11 @@ struct kvm_vcpu_timer {
 	u64 next_cycles;
 	/* Underlying hrtimer instance */
 	struct hrtimer hrt;
+
+	/* Flag to check if sstc is enabled or not */
+	bool sstc_enabled;
+	/* A function pointer to switch between stimecmp or hrtimer at runtime */
+	int (*timer_next_event)(struct kvm_vcpu *vcpu, u64 ncycles);
 };
 
 int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles);
@@ -40,5 +45,7 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu);
 int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu);
 void kvm_riscv_guest_timer_init(struct kvm *kvm);
+void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu);
+bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu);
 
 #endif
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index 24b2a6e27698..7351417afd62 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -97,6 +97,7 @@ enum KVM_RISCV_ISA_EXT_ID {
 	KVM_RISCV_ISA_EXT_I,
 	KVM_RISCV_ISA_EXT_M,
 	KVM_RISCV_ISA_EXT_SVPBMT,
+	KVM_RISCV_ISA_EXT_SSTC,
 	KVM_RISCV_ISA_EXT_MAX,
 };
 
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 5d271b597613..d0f08d5b4282 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -52,6 +52,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
 	RISCV_ISA_EXT_i,
 	RISCV_ISA_EXT_m,
 	RISCV_ISA_EXT_SVPBMT,
+	RISCV_ISA_EXT_SSTC,
 };
 
 static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
@@ -85,6 +86,7 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
 	case KVM_RISCV_ISA_EXT_C:
 	case KVM_RISCV_ISA_EXT_I:
 	case KVM_RISCV_ISA_EXT_M:
+	case KVM_RISCV_ISA_EXT_SSTC:
 		return false;
 	default:
 		break;
@@ -203,7 +205,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-	return kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER);
+	return kvm_riscv_vcpu_timer_pending(vcpu);
 }
 
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
@@ -785,6 +787,8 @@ static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
 	if (__riscv_isa_extension_available(isa, RISCV_ISA_EXT_SVPBMT))
 		henvcfg |= ENVCFG_PBMTE;
 
+	if (__riscv_isa_extension_available(isa, RISCV_ISA_EXT_SSTC))
+		henvcfg |= ENVCFG_STCE;
 	csr_write(CSR_HENVCFG, henvcfg);
 #ifdef CONFIG_32BIT
 	csr_write(CSR_HENVCFGH, henvcfg >> 32);
@@ -828,6 +832,8 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 				     vcpu->arch.isa);
 	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
 
+	kvm_riscv_vcpu_timer_save(vcpu);
+
 	csr->vsstatus = csr_read(CSR_VSSTATUS);
 	csr->vsie = csr_read(CSR_VSIE);
 	csr->vstvec = csr_read(CSR_VSTVEC);
diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c
index 595043857049..16f50c46ba39 100644
--- a/arch/riscv/kvm/vcpu_timer.c
+++ b/arch/riscv/kvm/vcpu_timer.c
@@ -69,7 +69,18 @@ static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
 	return 0;
 }
 
-int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
+static int kvm_riscv_vcpu_update_vstimecmp(struct kvm_vcpu *vcpu, u64 ncycles)
+{
+#if defined(CONFIG_32BIT)
+	csr_write(CSR_VSTIMECMP, ncycles & 0xFFFFFFFF);
+	csr_write(CSR_VSTIMECMPH, ncycles >> 32);
+#else
+	csr_write(CSR_VSTIMECMP, ncycles);
+#endif
+	return 0;
+}
+
+static int kvm_riscv_vcpu_update_hrtimer(struct kvm_vcpu *vcpu, u64 ncycles)
 {
 	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
 	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
@@ -88,6 +99,65 @@ int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
 	return 0;
 }
 
+int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
+{
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	return t->timer_next_event(vcpu, ncycles);
+}
+
+static enum hrtimer_restart kvm_riscv_vcpu_vstimer_expired(struct hrtimer *h)
+{
+	u64 delta_ns;
+	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
+	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
+	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
+
+	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
+		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
+		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
+		return HRTIMER_RESTART;
+	}
+
+	t->next_set = false;
+	kvm_vcpu_kick(vcpu);
+
+	return HRTIMER_NORESTART;
+}
+
+bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
+
+	if (!kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) ||
+	    kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER))
+		return true;
+	else
+		return false;
+}
+
+static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
+	u64 delta_ns;
+
+	if (!t->init_done)
+		return;
+
+	delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
+	if (delta_ns) {
+		hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
+		t->next_set = true;
+	}
+}
+
+static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)
+{
+	kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
+}
+
 int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
 				 const struct kvm_one_reg *reg)
 {
@@ -180,10 +250,20 @@ int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
 		return -EINVAL;
 
 	hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
 	t->init_done = true;
 	t->next_set = false;
 
+	/* Enable sstc for every vcpu if available in hardware */
+	if (riscv_isa_extension_available(NULL, SSTC)) {
+		t->sstc_enabled = true;
+		t->hrt.function = kvm_riscv_vcpu_vstimer_expired;
+		t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;
+	} else {
+		t->sstc_enabled = false;
+		t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
+		t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;
+	}
+
 	return 0;
 }
 
@@ -199,21 +279,73 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
 
 int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
 {
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	t->next_cycles = -1ULL;
 	return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
 }
 
-void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
+static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu)
 {
 	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
 
-#ifdef CONFIG_64BIT
-	csr_write(CSR_HTIMEDELTA, gt->time_delta);
-#else
+#if defined(CONFIG_32BIT)
 	csr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
 	csr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
+#else
+	csr_write(CSR_HTIMEDELTA, gt->time_delta);
 #endif
 }
 
+void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_csr *csr;
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	kvm_riscv_vcpu_update_timedelta(vcpu);
+
+	if (!t->sstc_enabled)
+		return;
+
+	csr = &vcpu->arch.guest_csr;
+#if defined(CONFIG_32BIT)
+	csr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
+	csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
+#else
+	csr_write(CSR_VSTIMECMP, t->next_cycles);
+#endif
+
+	/* timer should be enabled for the remaining operations */
+	if (unlikely(!t->init_done))
+		return;
+
+	kvm_riscv_vcpu_timer_unblocking(vcpu);
+}
+
+void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_csr *csr;
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	if (!t->sstc_enabled)
+		return;
+
+	csr = &vcpu->arch.guest_csr;
+	t = &vcpu->arch.timer;
+#if defined(CONFIG_32BIT)
+	t->next_cycles = csr_read(CSR_VSTIMECMP);
+	t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
+#else
+	t->next_cycles = csr_read(CSR_VSTIMECMP);
+#endif
+	/* timer should be enabled for the remaining operations */
+	if (unlikely(!t->init_done))
+		return;
+
+	if (kvm_vcpu_is_blocking(vcpu))
+		kvm_riscv_vcpu_timer_blocking(vcpu);
+}
+
 void kvm_riscv_guest_timer_init(struct kvm *kvm)
 {
 	struct kvm_guest_timer *gt = &kvm->arch.timer;
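
A note on the structure of the vcpu_timer.c change: kvm_riscv_vcpu_timer_init() probes the host ISA once with riscv_isa_extension_available(NULL, SSTC) and installs the matching backend in t->timer_next_event, either kvm_riscv_vcpu_update_vstimecmp() (program the comparand CSR directly) or kvm_riscv_vcpu_update_hrtimer() (emulate with a host hrtimer). Every later call through kvm_riscv_vcpu_timer_next_event() is then a single indirect call with no per-event feature check. The following stand-alone user-space sketch illustrates the same dispatch pattern; the demo_* names are hypothetical stand-ins for the kernel helpers, not kernel API:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_vcpu_timer {
	bool sstc_enabled;
	/* Backend chosen once at init, invoked for every guest timer event. */
	int (*timer_next_event)(uint64_t ncycles);
};

/* Stands in for kvm_riscv_vcpu_update_vstimecmp(): program the CSR directly. */
static int demo_update_vstimecmp(uint64_t ncycles)
{
	printf("vstimecmp <- %" PRIu64 "\n", ncycles);
	return 0;
}

/* Stands in for kvm_riscv_vcpu_update_hrtimer(): arm a host-side timer. */
static int demo_update_hrtimer(uint64_t ncycles)
{
	printf("hrtimer armed for cycle %" PRIu64 "\n", ncycles);
	return 0;
}

/* Mirrors the if/else block the patch adds to kvm_riscv_vcpu_timer_init(). */
static void demo_timer_init(struct demo_vcpu_timer *t, bool hw_has_sstc)
{
	t->sstc_enabled = hw_has_sstc;
	t->timer_next_event = hw_has_sstc ? demo_update_vstimecmp
					  : demo_update_hrtimer;
}

int main(void)
{
	struct demo_vcpu_timer t;

	demo_timer_init(&t, true);	/* pretend the host implements Sstc */
	t.timer_next_event(123456);	/* kvm_riscv_vcpu_timer_next_event() path */
	return 0;
}

Choosing the backend once at init also lets the hrtimer callback differ: with Sstc the hrtimer handler (kvm_riscv_vcpu_vstimer_expired()) only kicks the vCPU awake, because the timer interrupt itself is raised by hardware comparing time against vstimecmp.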
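On the vcpu.c side, the extension reaches a guest only when both pieces line up: KVM_RISCV_ISA_EXT_SSTC advertises it through the ISA-extension interface (and, once present, it cannot be disabled), and kvm_riscv_vcpu_update_config() sets ENVCFG_STCE in henvcfg so the guest's stimecmp accesses land in vstimecmp instead of trapping. The guest-visible payoff is that arming a timer becomes a plain CSR write rather than an SBI set_timer ecall. Below is a hedged guest-side sketch, not part of the patch: demo_guest_set_timer() is a hypothetical helper, the numeric CSR addresses 0x14D/0x15D are stimecmp/stimecmph per the ratified Sstc specification, and the code compiles only for RISC-V targets and must run in (V)S-mode with STCE enabled:

#include <stdint.h>

/* Hypothetical guest helper: arm the next timer event via Sstc rather than
 * an SBI TIME ecall. Numeric CSR addresses avoid requiring an Sstc-aware
 * assembler: 0x14D = stimecmp, 0x15D = stimecmph. */
static inline void demo_guest_set_timer(uint64_t next_cycles)
{
#if __riscv_xlen == 32
	/* Park the low half at all-ones first so the 64-bit comparand is
	 * never transiently in the past, the same idiom the privileged
	 * spec recommends for updating mtimecmp on RV32. */
	__asm__ volatile("csrw 0x14d, %0" : : "r"(-1));
	__asm__ volatile("csrw 0x15d, %0" : : "r"((uint32_t)(next_cycles >> 32)));
	__asm__ volatile("csrw 0x14d, %0" : : "r"((uint32_t)next_cycles));
#else
	__asm__ volatile("csrw 0x14d, %0" : : "r"(next_cycles));
#endif
}

Across host context switches, kvm_riscv_vcpu_timer_save() stashes vstimecmp into next_cycles on kvm_arch_vcpu_put() and kvm_riscv_vcpu_timer_restore() writes it back on load; while the vCPU is blocked, the hrtimer is re-armed purely as a wakeup source, since no hardware comparison runs on the guest's behalf once vstimecmp is swapped out.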