firmware/br-ext-chip-allwinner/board/v83x/kernel/patches/00000-arch_x86_kvm_svm.c.patch

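This patch removes the SSBD/VIRT_SPEC_CTRL virtualization support from
svm.c: the virt_spec_ctrl field, its MSR_AMD64_VIRT_SPEC_CTRL get/set
handlers, and the x86_spec_ctrl_set_guest()/x86_spec_ctrl_restore_host()
helpers from <asm/spec-ctrl.h>. In their place, MSR_IA32_SPEC_CTRL is
written directly with native_wrmsrl() around the VMRUN sequence, and the
has_emulated_msr callback is renamed back to the older
cpu_has_high_real_mode_segbase hook, presumably because the Allwinner BSP
kernel targeted by this buildroot tree predates those upstream interfaces.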

diff -drupN a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
--- a/arch/x86/kvm/svm.c 2018-08-06 17:23:04.000000000 +0300
+++ b/arch/x86/kvm/svm.c 2022-06-12 05:28:14.000000000 +0300
@@ -46,7 +46,7 @@
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/microcode.h>
-#include <asm/spec-ctrl.h>
+#include <asm/nospec-branch.h>
#include <asm/virtext.h>
#include "trace.h"
@@ -186,12 +186,6 @@ struct vcpu_svm {
} host;
u64 spec_ctrl;
- /*
- * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
- * translated into the appropriate L2_CFG bits on the host to
- * perform speculative control.
- */
- u64 virt_spec_ctrl;
u32 *msrpm;
@@ -1568,7 +1562,6 @@ static void svm_vcpu_reset(struct kvm_vc
u32 eax = 1;
svm->spec_ctrl = 0;
- svm->virt_spec_ctrl = 0;
if (!init_event) {
svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
@@ -3558,13 +3551,6 @@ static int svm_get_msr(struct kvm_vcpu *
msr_info->data = svm->spec_ctrl;
break;
- case MSR_AMD64_VIRT_SPEC_CTRL:
- if (!msr_info->host_initiated &&
- !guest_cpuid_has_virt_ssbd(vcpu))
- return 1;
-
- msr_info->data = svm->virt_spec_ctrl;
- break;
case MSR_IA32_UCODE_REV:
msr_info->data = 0x01000065;
break;
@@ -3699,16 +3685,6 @@ static int svm_set_msr(struct kvm_vcpu *
break;
set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
break;
- case MSR_AMD64_VIRT_SPEC_CTRL:
- if (!msr->host_initiated &&
- !guest_cpuid_has_virt_ssbd(vcpu))
- return 1;
-
- if (data & ~SPEC_CTRL_SSBD)
- return 1;
-
- svm->virt_spec_ctrl = data;
- break;
case MSR_STAR:
svm->vmcb->save.star = data;
break;
@@ -4942,7 +4918,8 @@ static void svm_vcpu_run(struct kvm_vcpu
* is no need to worry about the conditional branch over the wrmsr
* being speculatively taken.
*/
- x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
+ if (svm->spec_ctrl)
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
asm volatile (
"push %%" _ASM_BP "; \n\t"
@@ -5066,7 +5043,8 @@ static void svm_vcpu_run(struct kvm_vcpu
if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
- x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
+ if (svm->spec_ctrl)
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
reload_tss(vcpu);
@@ -5169,7 +5147,7 @@ static bool svm_cpu_has_accelerated_tpr(
return false;
}
-static bool svm_has_emulated_msr(int index)
+static bool svm_has_high_real_mode_segbase(void)
{
return true;
}
@@ -5486,7 +5464,7 @@ static struct kvm_x86_ops svm_x86_ops __
.hardware_enable = svm_hardware_enable,
.hardware_disable = svm_hardware_disable,
.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
- .has_emulated_msr = svm_has_emulated_msr,
+ .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
.vcpu_create = svm_create_vcpu,
.vcpu_free = svm_free_vcpu,