diff options
Diffstat (limited to 'arch/x86/kvm/x86.c')
| -rw-r--r-- | arch/x86/kvm/x86.c | 52 | 
1 file changed, 27 insertions, 25 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 66d66d77caee..d02937760c3b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1665,8 +1665,7 @@ EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);  static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)  { -	kvm_x86_ops->write_tsc_offset(vcpu, offset); -	vcpu->arch.tsc_offset = offset; +	vcpu->arch.tsc_offset = kvm_x86_ops->write_l1_tsc_offset(vcpu, offset);  }  static inline bool kvm_check_tsc_unstable(void) @@ -1794,7 +1793,8 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);  static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,  					   s64 adjustment)  { -	kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment); +	u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu); +	kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);  }  static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) @@ -2924,7 +2924,7 @@ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,  	unsigned size;  	r = -EFAULT; -	if (copy_from_user(&msrs, user_msrs, sizeof msrs)) +	if (copy_from_user(&msrs, user_msrs, sizeof(msrs)))  		goto out;  	r = -E2BIG; @@ -3091,11 +3091,11 @@ long kvm_arch_dev_ioctl(struct file *filp,  		unsigned n;  		r = -EFAULT; -		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list)) +		if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))  			goto out;  		n = msr_list.nmsrs;  		msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs; -		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list)) +		if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))  			goto out;  		r = -E2BIG;  		if (n < msr_list.nmsrs) @@ -3117,7 +3117,7 @@ long kvm_arch_dev_ioctl(struct file *filp,  		struct kvm_cpuid2 cpuid;  		r = -EFAULT; -		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) +		if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))  			goto out;  		r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, @@ -3126,7 +3126,7 
@@ long kvm_arch_dev_ioctl(struct file *filp,  			goto out;  		r = -EFAULT; -		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) +		if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))  			goto out;  		r = 0;  		break; @@ -3894,7 +3894,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,  		struct kvm_interrupt irq;  		r = -EFAULT; -		if (copy_from_user(&irq, argp, sizeof irq)) +		if (copy_from_user(&irq, argp, sizeof(irq)))  			goto out;  		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);  		break; @@ -3912,7 +3912,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,  		struct kvm_cpuid cpuid;  		r = -EFAULT; -		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) +		if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))  			goto out;  		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);  		break; @@ -3922,7 +3922,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,  		struct kvm_cpuid2 cpuid;  		r = -EFAULT; -		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) +		if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))  			goto out;  		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,  					      cpuid_arg->entries); @@ -3933,14 +3933,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,  		struct kvm_cpuid2 cpuid;  		r = -EFAULT; -		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) +		if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))  			goto out;  		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,  					      cpuid_arg->entries);  		if (r)  			goto out;  		r = -EFAULT; -		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) +		if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))  			goto out;  		r = 0;  		break; @@ -3961,13 +3961,13 @@ long kvm_arch_vcpu_ioctl(struct file *filp,  		struct kvm_tpr_access_ctl tac;  		r = -EFAULT; -		if (copy_from_user(&tac, argp, sizeof tac)) +		if (copy_from_user(&tac, argp, sizeof(tac)))  			goto out;  		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);  		if (r)  			goto out;  		r = -EFAULT; -		if (copy_to_user(argp, &tac, sizeof tac)) +		if 
(copy_to_user(argp, &tac, sizeof(tac)))  			goto out;  		r = 0;  		break; @@ -3980,7 +3980,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,  		if (!lapic_in_kernel(vcpu))  			goto out;  		r = -EFAULT; -		if (copy_from_user(&va, argp, sizeof va)) +		if (copy_from_user(&va, argp, sizeof(va)))  			goto out;  		idx = srcu_read_lock(&vcpu->kvm->srcu);  		r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); @@ -3991,7 +3991,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,  		u64 mcg_cap;  		r = -EFAULT; -		if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap)) +		if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap)))  			goto out;  		r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);  		break; @@ -4000,7 +4000,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,  		struct kvm_x86_mce mce;  		r = -EFAULT; -		if (copy_from_user(&mce, argp, sizeof mce)) +		if (copy_from_user(&mce, argp, sizeof(mce)))  			goto out;  		r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);  		break; @@ -4536,7 +4536,7 @@ long kvm_arch_vm_ioctl(struct file *filp,  		if (kvm->created_vcpus)  			goto set_identity_unlock;  		r = -EFAULT; -		if (copy_from_user(&ident_addr, argp, sizeof ident_addr)) +		if (copy_from_user(&ident_addr, argp, sizeof(ident_addr)))  			goto set_identity_unlock;  		r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);  set_identity_unlock: @@ -4620,7 +4620,7 @@ set_identity_unlock:  		if (r)  			goto get_irqchip_out;  		r = -EFAULT; -		if (copy_to_user(argp, chip, sizeof *chip)) +		if (copy_to_user(argp, chip, sizeof(*chip)))  			goto get_irqchip_out;  		r = 0;  	get_irqchip_out: @@ -4666,7 +4666,7 @@ set_identity_unlock:  	}  	case KVM_SET_PIT: {  		r = -EFAULT; -		if (copy_from_user(&u.ps, argp, sizeof u.ps)) +		if (copy_from_user(&u.ps, argp, sizeof(u.ps)))  			goto out;  		r = -ENXIO;  		if (!kvm->arch.vpit) @@ -6918,6 +6918,7 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,  	clock_pairing.nsec = ts.tv_nsec;  	clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle);  	
clock_pairing.flags = 0; +	memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad));  	ret = 0;  	if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, @@ -7455,7 +7456,8 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)  	else {  		if (vcpu->arch.apicv_active)  			kvm_x86_ops->sync_pir_to_irr(vcpu); -		kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); +		if (ioapic_in_kernel(vcpu->kvm)) +			kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);  	}  	if (is_guest_mode(vcpu)) @@ -8205,7 +8207,7 @@ static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)  	sregs->efer = vcpu->arch.efer;  	sregs->apic_base = kvm_get_apic_base(vcpu); -	memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap); +	memset(sregs->interrupt_bitmap, 0, sizeof(sregs->interrupt_bitmap));  	if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft)  		set_bit(vcpu->arch.interrupt.nr, @@ -8509,7 +8511,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)  	fpu->last_opcode = fxsave->fop;  	fpu->last_ip = fxsave->rip;  	fpu->last_dp = fxsave->rdp; -	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space); +	memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space));  	vcpu_put(vcpu);  	return 0; @@ -8530,7 +8532,7 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)  	fxsave->fop = fpu->last_opcode;  	fxsave->rip = fpu->last_ip;  	fxsave->rdp = fpu->last_dp; -	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space); +	memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space));  	vcpu_put(vcpu);  	return 0;  | 
