author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2017-12-18 09:08:05 +0100
---|---|---
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2017-12-18 09:08:05 +0100
commit | d9e3d899bc7a852d44b3305ed49799fbf090e756 (patch) |
tree | f3a75cb5f60be908a4384bd9e7add08486d7d338 /virt |
parent | cc2e60dfa6ba4ff2c054bca932b9afc8702a2f9a (diff) |
parent | 1291a0d5049dbc06baaaf66a9ff3f53db493b19b (diff) |
Merge 4.15-rc4 into usb-next
We want the USB fixes in here as well.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'virt')
-rw-r--r-- | virt/kvm/arm/arch_timer.c | 11
-rw-r--r-- | virt/kvm/arm/arm.c | 15
-rw-r--r-- | virt/kvm/arm/hyp/timer-sr.c | 48
-rw-r--r-- | virt/kvm/arm/hyp/vgic-v2-sr.c | 4
-rw-r--r-- | virt/kvm/arm/vgic/vgic-irqfd.c | 3
-rw-r--r-- | virt/kvm/arm/vgic/vgic-its.c | 4
-rw-r--r-- | virt/kvm/arm/vgic/vgic-v3.c | 2
-rw-r--r-- | virt/kvm/arm/vgic/vgic-v4.c | 6
-rw-r--r-- | virt/kvm/arm/vgic/vgic.c | 8
-rw-r--r-- | virt/kvm/kvm_main.c | 31
10 files changed, 77 insertions, 55 deletions
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 4151250ce8da..f9555b1e7f15 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -479,9 +479,6 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 
 	vtimer_restore_state(vcpu);
 
-	if (has_vhe())
-		disable_el1_phys_timer_access();
-
 	/* Set the background timer for the physical timer emulation. */
 	phys_timer_emulate(vcpu);
 }
@@ -510,9 +507,6 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
 	if (unlikely(!timer->enabled))
 		return;
 
-	if (has_vhe())
-		enable_el1_phys_timer_access();
-
 	vtimer_save_state(vcpu);
 
 	/*
@@ -841,7 +835,10 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 no_vgic:
 	preempt_disable();
 	timer->enabled = 1;
-	kvm_timer_vcpu_load_vgic(vcpu);
+	if (!irqchip_in_kernel(vcpu->kvm))
+		kvm_timer_vcpu_load_user(vcpu);
+	else
+		kvm_timer_vcpu_load_vgic(vcpu);
 	preempt_enable();
 
 	return 0;
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index a6524ff27de4..6b60c98a6e22 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -188,6 +188,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 			kvm->vcpus[i] = NULL;
 		}
 	}
+	atomic_set(&kvm->online_vcpus, 0);
 }
 
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -296,7 +297,6 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
 	kvm_mmu_free_memory_caches(vcpu);
 	kvm_timer_vcpu_terminate(vcpu);
-	kvm_vgic_vcpu_destroy(vcpu);
 	kvm_pmu_vcpu_destroy(vcpu);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vcpu);
@@ -615,7 +615,6 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	int ret;
-	sigset_t sigsaved;
 
 	if (unlikely(!kvm_vcpu_initialized(vcpu)))
 		return -ENOEXEC;
@@ -628,13 +627,15 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		ret = kvm_handle_mmio_return(vcpu, vcpu->run);
 		if (ret)
 			return ret;
+		if (kvm_arm_handle_step_debug(vcpu, vcpu->run))
+			return 0;
+
 	}
 
 	if (run->immediate_exit)
 		return -EINTR;
 
-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+	kvm_sigset_activate(vcpu);
 
 	ret = 1;
 	run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -769,8 +770,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		kvm_pmu_update_run(vcpu);
 	}
 
-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+	kvm_sigset_deactivate(vcpu);
+
 	return ret;
 }
 
@@ -1504,7 +1505,7 @@ int kvm_arch_init(void *opaque)
 	bool in_hyp_mode;
 
 	if (!is_hyp_mode_available()) {
-		kvm_err("HYP mode not available\n");
+		kvm_info("HYP mode not available\n");
 		return -ENODEV;
 	}
 
diff --git a/virt/kvm/arm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c
index f39861639f08..f24404b3c8df 100644
--- a/virt/kvm/arm/hyp/timer-sr.c
+++ b/virt/kvm/arm/hyp/timer-sr.c
@@ -27,42 +27,34 @@ void __hyp_text __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high)
 	write_sysreg(cntvoff, cntvoff_el2);
 }
 
-void __hyp_text enable_el1_phys_timer_access(void)
-{
-	u64 val;
-
-	/* Allow physical timer/counter access for the host */
-	val = read_sysreg(cnthctl_el2);
-	val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
-	write_sysreg(val, cnthctl_el2);
-}
-
-void __hyp_text disable_el1_phys_timer_access(void)
-{
-	u64 val;
-
-	/*
-	 * Disallow physical timer access for the guest
-	 * Physical counter access is allowed
-	 */
-	val = read_sysreg(cnthctl_el2);
-	val &= ~CNTHCTL_EL1PCEN;
-	val |= CNTHCTL_EL1PCTEN;
-	write_sysreg(val, cnthctl_el2);
-}
-
 void __hyp_text __timer_disable_traps(struct kvm_vcpu *vcpu)
 {
 	/*
 	 * We don't need to do this for VHE since the host kernel runs in EL2
 	 * with HCR_EL2.TGE == 1, which makes those bits have no impact.
 	 */
-	if (!has_vhe())
-		enable_el1_phys_timer_access();
+	if (!has_vhe()) {
+		u64 val;
+
+		/* Allow physical timer/counter access for the host */
+		val = read_sysreg(cnthctl_el2);
+		val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+		write_sysreg(val, cnthctl_el2);
+	}
 }
 
 void __hyp_text __timer_enable_traps(struct kvm_vcpu *vcpu)
 {
-	if (!has_vhe())
-		disable_el1_phys_timer_access();
+	if (!has_vhe()) {
+		u64 val;
+
+		/*
+		 * Disallow physical timer access for the guest
+		 * Physical counter access is allowed
+		 */
+		val = read_sysreg(cnthctl_el2);
+		val &= ~CNTHCTL_EL1PCEN;
+		val |= CNTHCTL_EL1PCTEN;
+		write_sysreg(val, cnthctl_el2);
+	}
 }
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
index a3f18d362366..d7fd46fe9efb 100644
--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -34,11 +34,7 @@ static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
 	else
 		elrsr1 = 0;
 
-#ifdef CONFIG_CPU_BIG_ENDIAN
-	cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
-#else
 	cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
-#endif
 }
 
 static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c
index b7baf581611a..99e026d2dade 100644
--- a/virt/kvm/arm/vgic/vgic-irqfd.c
+++ b/virt/kvm/arm/vgic/vgic-irqfd.c
@@ -112,8 +112,7 @@ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm)
 	u32 nr = dist->nr_spis;
 	int i, ret;
 
-	entries = kcalloc(nr, sizeof(struct kvm_kernel_irq_routing_entry),
-			  GFP_KERNEL);
+	entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
 	if (!entries)
 		return -ENOMEM;
 
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 1f761a9991e7..8e633bd9cc1e 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -421,6 +421,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 	u32 *intids;
 	int nr_irqs, i;
 	unsigned long flags;
+	u8 pendmask;
 
 	nr_irqs = vgic_copy_lpi_list(vcpu, &intids);
 	if (nr_irqs < 0)
@@ -428,7 +429,6 @@
 
 	for (i = 0; i < nr_irqs; i++) {
 		int byte_offset, bit_nr;
-		u8 pendmask;
 
 		byte_offset = intids[i] / BITS_PER_BYTE;
 		bit_nr = intids[i] % BITS_PER_BYTE;
@@ -821,6 +821,8 @@ static int vgic_its_alloc_collection(struct vgic_its *its,
 		return E_ITS_MAPC_COLLECTION_OOR;
 
 	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
+	if (!collection)
+		return -ENOMEM;
 
 	collection->collection_id = coll_id;
 	collection->target_addr = COLLECTION_NOT_MAPPED;
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 2f05f732d3fd..f47e8481fa45 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -327,13 +327,13 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
 	int last_byte_offset = -1;
 	struct vgic_irq *irq;
 	int ret;
+	u8 val;
 
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		int byte_offset, bit_nr;
 		struct kvm_vcpu *vcpu;
 		gpa_t pendbase, ptr;
 		bool stored;
-		u8 val;
 
 		vcpu = irq->target_vcpu;
 		if (!vcpu)
diff --git a/virt/kvm/arm/vgic/vgic-v4.c b/virt/kvm/arm/vgic/vgic-v4.c
index 53c324aa44ef..4a37292855bc 100644
--- a/virt/kvm/arm/vgic/vgic-v4.c
+++ b/virt/kvm/arm/vgic/vgic-v4.c
@@ -337,8 +337,10 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
 		goto out;
 
 	WARN_ON(!(irq->hw && irq->host_irq == virq));
-	irq->hw = false;
-	ret = its_unmap_vlpi(virq);
+	if (irq->hw) {
+		irq->hw = false;
+		ret = its_unmap_vlpi(virq);
+	}
 
 out:
 	mutex_unlock(&its->its_lock);
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index b168a328a9e0..ecb8e25f5fe5 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -492,6 +492,7 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
 int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
 {
 	struct vgic_irq *irq;
+	unsigned long flags;
 	int ret = 0;
 
 	if (!vgic_initialized(vcpu->kvm))
@@ -502,12 +503,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
 		return -EINVAL;
 
 	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
-	spin_lock(&irq->irq_lock);
+	spin_lock_irqsave(&irq->irq_lock, flags);
 	if (irq->owner && irq->owner != owner)
 		ret = -EEXIST;
 	else
 		irq->owner = owner;
-	spin_unlock(&irq->irq_lock);
+	spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 	return ret;
 }
@@ -823,13 +824,14 @@ void vgic_kick_vcpus(struct kvm *kvm)
 
 bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
 {
-	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
+	struct vgic_irq *irq;
 	bool map_is_active;
 	unsigned long flags;
 
 	if (!vgic_initialized(vcpu->kvm))
 		return false;
 
+	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
 	spin_lock_irqsave(&irq->irq_lock, flags);
 	map_is_active = irq->hw && irq->active;
 	spin_unlock_irqrestore(&irq->irq_lock, flags);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f169ecc4f2e8..210bf820385a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -135,6 +135,11 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
 static unsigned long long kvm_createvm_count;
 static unsigned long long kvm_active_vms;
 
+__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+		unsigned long start, unsigned long end)
+{
+}
+
 bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
 {
 	if (pfn_valid(pfn))
@@ -360,6 +365,9 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 		kvm_flush_remote_tlbs(kvm);
 
 	spin_unlock(&kvm->mmu_lock);
+
+	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
+
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -2065,6 +2073,29 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
 
+void kvm_sigset_activate(struct kvm_vcpu *vcpu)
+{
+	if (!vcpu->sigset_active)
+		return;
+
+	/*
+	 * This does a lockless modification of ->real_blocked, which is fine
+	 * because, only current can change ->real_blocked and all readers of
+	 * ->real_blocked don't care as long ->real_blocked is always a subset
+	 * of ->blocked.
+	 */
+	sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
+}
+
+void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
+{
+	if (!vcpu->sigset_active)
+		return;
+
+	sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
+	sigemptyset(&current->real_blocked);
+}
+
 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
 {
 	unsigned int old, val, grow;