Diffstat (limited to 'arch/x86/kvm')
 arch/x86/kvm/lapic.c |  2 +-
 arch/x86/kvm/svm.c   |  2 +-
 arch/x86/kvm/vmx.c   |  2 +-
 arch/x86/kvm/x86.c   | 13 ++++++-------
 4 files changed, 9 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 02b51dd4e4a..f77df1c5de6 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1857,7 +1857,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
 	if (!pv_eoi_enabled(vcpu))
 		return 0;
 	return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
-					 addr);
+					 addr, sizeof(u8));
 }
 
 void kvm_lapic_init(void)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e1b1ce21bc0..7d39d70647e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -555,7 +555,7 @@ static void svm_init_erratum_383(void)
 	int err;
 	u64 val;
 
-	if (!cpu_has_amd_erratum(amd_erratum_383))
+	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
 		return;
 
 	/* Use _safe variants to not break nested virtualization */
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6667042714c..867b81037f9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2459,7 +2459,7 @@ static int hardware_enable(void *garbage)
 		ept_sync_global();
 	}
 
-	store_gdt(&__get_cpu_var(host_gdt));
+	native_store_gdt(&__get_cpu_var(host_gdt));
 
 	return 0;
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f19ac0aca60..e1721324c27 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1823,7 +1823,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 		return 0;
 	}
 
-	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
+	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
+					sizeof(u32)))
 		return 1;
 
 	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
@@ -1952,12 +1953,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 		gpa_offset = data & ~(PAGE_MASK | 1);
 
-		/* Check that the address is 32-byte aligned. */
-		if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
-			break;
-
 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
-		     &vcpu->arch.pv_time, data & ~1ULL))
+		     &vcpu->arch.pv_time, data & ~1ULL,
+		     sizeof(struct pvclock_vcpu_time_info)))
			vcpu->arch.pv_time_enabled = false;
 		else
 			vcpu->arch.pv_time_enabled = true;
@@ -1977,7 +1975,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 
 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
-							data & KVM_STEAL_VALID_BITS))
+						data & KVM_STEAL_VALID_BITS,
+						sizeof(struct kvm_steal_time)))
 			return 1;
 
 		vcpu->arch.st.msr_val = data;
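
The common thread in the lapic.c and x86.c hunks is that kvm_gfn_to_hva_cache_init() now receives the length of the cached guest region in addition to its guest physical address, so the cache covers the whole structure rather than only its starting byte; that is also why the open-coded 32-byte alignment check for pvclock_vcpu_time_info can be dropped in kvm_set_msr_common(). Below is a minimal sketch (not part of the patch) of what a caller looks like after this change; the helper name example_cache_steal_time() is hypothetical, and the prototype is assumed to gain an unsigned long len parameter, as the updated call sites suggest.

/*
 * Sketch only -- assumes the post-patch prototype is roughly:
 *
 *   int kvm_gfn_to_hva_cache_init(struct kvm *kvm,
 *                                 struct gfn_to_hva_cache *ghc,
 *                                 gpa_t gpa, unsigned long len);
 */
#include <linux/kvm_host.h>

static int example_cache_steal_time(struct kvm_vcpu *vcpu, u64 data)
{
	/*
	 * Pass the size of the guest structure being cached so the
	 * cache describes the whole region, mirroring the MSR_KVM_STEAL_TIME
	 * call site updated above.
	 */
	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
				      data & KVM_STEAL_VALID_BITS,
				      sizeof(struct kvm_steal_time)))
		return 1;

	vcpu->arch.st.msr_val = data;
	return 0;
}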