Subject: [PATCH 6.18 348/430] KVM: TDX: Explicitly set user-return MSRs that *may* be clobbered by the TDX-Module
From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
To: stable@vger.kernel.org

--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
-void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_vt *vt = to_vt(vcpu);
-
-	if (vt->guest_state_loaded)
-		return;
-
-	if (likely(is_64bit_mm(current->mm)))
-		vt->msr_host_kernel_gs_base = current->thread.gsbase;
-	else
-		vt->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
-
-	vt->guest_state_loaded = true;
-}
-
 struct tdx_uret_msr {
 	u32 msr;
 	unsigned int slot;
@@ -795,19 +776,38 @@ static struct tdx_uret_msr tdx_uret_msrs
 	{.msr = MSR_TSC_AUX,},
 };
 
-static void tdx_user_return_msr_update_cache(void)
+void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_vt *vt = to_vt(vcpu);
 	int i;
 
+	if (vt->guest_state_loaded)
+		return;
+
+	if (likely(is_64bit_mm(current->mm)))
+		vt->msr_host_kernel_gs_base = current->thread.gsbase;
+	else
+		vt->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
+
+	vt->guest_state_loaded = true;
+
+	/*
+	 * Explicitly set user-return MSRs that are clobbered by the TDX-Module
+	 * if VP.ENTER succeeds, i.e. on TD-Exit, with the values that would be
+	 * written by the TDX-Module.  Don't rely on the TDX-Module to actually
+	 * clobber the MSRs, as the contract is poorly defined and not upheld.
+	 * E.g. the TDX-Module will synthesize an EPT Violation without doing
+	 * VM-Enter if it suspects a zero-step attack, and never "restore" VMM
+	 * state.
+	 */
 	for (i = 0; i < ARRAY_SIZE(tdx_uret_msrs); i++)
-		kvm_user_return_msr_update_cache(tdx_uret_msrs[i].slot,
-						 tdx_uret_msrs[i].defval);
+		kvm_set_user_return_msr(tdx_uret_msrs[i].slot,
+					tdx_uret_msrs[i].defval, -1ull);
 }
 
 static void tdx_prepare_switch_to_host(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vt *vt = to_vt(vcpu);
-	struct vcpu_tdx *tdx = to_tdx(vcpu);
 
 	if (!vt->guest_state_loaded)
 		return;
@@ -815,11 +815,6 @@ static void tdx_prepare_switch_to_host(s
 	++vcpu->stat.host_state_reload;
 	wrmsrl(MSR_KERNEL_GS_BASE, vt->msr_host_kernel_gs_base);
 
-	if (tdx->guest_entered) {
-		tdx_user_return_msr_update_cache();
-		tdx->guest_entered = false;
-	}
-
 	vt->guest_state_loaded = false;
 }
 
@@ -1059,7 +1054,6 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu
 	update_debugctlmsr(vcpu->arch.host_debugctl);
 
 	tdx_load_host_xsave_state(vcpu);
-	tdx->guest_entered = true;
 
 	vcpu->arch.regs_avail &= TDX_REGS_AVAIL_SET;
 
@@ -3447,10 +3441,6 @@ static int __init __tdx_bringup(void)
 		/*
 		 * Check if MSRs (tdx_uret_msrs) can be saved/restored
 		 * before returning to user space.
-		 *
-		 * this_cpu_ptr(user_return_msrs)->registered isn't checked
-		 * because the registration is done at vcpu runtime by
-		 * tdx_user_return_msr_update_cache().
 		 */
 		tdx_uret_msrs[i].slot = kvm_find_user_return_msr(tdx_uret_msrs[i].msr);
 		if (tdx_uret_msrs[i].slot == -1) {
--- a/arch/x86/kvm/vmx/tdx.h
+++ b/arch/x86/kvm/vmx/tdx.h
@@ -67,7 +67,6 @@ struct vcpu_tdx {
 	u64 vp_enter_ret;
 
 	enum vcpu_tdx_state state;
-	bool guest_entered;
 
 	u64 map_gpa_next;
 	u64 map_gpa_end;
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -681,15 +681,6 @@ int kvm_set_user_return_msr(unsigned slo
 }
 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_user_return_msr);
 
-void kvm_user_return_msr_update_cache(unsigned int slot, u64 value)
-{
-	struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
-
-	msrs->values[slot].curr = value;
-	kvm_user_return_register_notifier(msrs);
-}
-EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_user_return_msr_update_cache);
-
 u64 kvm_get_user_return_msr(unsigned int slot)
 {
 	return this_cpu_ptr(user_return_msrs)->values[slot].curr;
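
As background: the hunks above replace a cache-only update with an explicit
kvm_set_user_return_msr() call before entering the TD.  The program below is
a minimal user-space model of why the cache-only approach is fragile, not
kernel code; hw_msr, set_user_return_msr() and update_cache_only() are
hypothetical stand-ins for the physical MSR, kvm_set_user_return_msr() and
the removed kvm_user_return_msr_update_cache().

#include <stdint.h>
#include <stdio.h>

static uint64_t hw_msr;	/* stand-in for the physical MSR */

struct uret_msr {
	uint64_t host;	/* value to restore before returning to userspace */
	uint64_t curr;	/* value KVM believes is currently in hardware */
};

/*
 * Models kvm_set_user_return_msr(): performs the write itself, skipping
 * it only when the cache already matches, so the cache cannot diverge
 * from hardware.
 */
static void set_user_return_msr(struct uret_msr *m, uint64_t value)
{
	if (value != m->curr) {
		hw_msr = value;		/* wrmsrl() in the real code */
		m->curr = value;
	}
}

/*
 * Models the removed kvm_user_return_msr_update_cache(): updates only the
 * cache, trusting that the TDX-Module already wrote @value to hardware.
 */
static void update_cache_only(struct uret_msr *m, uint64_t value)
{
	m->curr = value;
}

int main(void)
{
	struct uret_msr m = { .host = 0x1234, .curr = 0x1234 };

	hw_msr = m.host;

	/*
	 * Old TD-Exit path: assume the TDX-Module clobbered the MSR to
	 * -1ull.  If it instead synthesized an EPT Violation without doing
	 * VM-Enter, hardware is untouched and the cache is now stale...
	 */
	update_cache_only(&m, -1ull);

	/* ...so a later write of -1ull is wrongly elided. */
	set_user_return_msr(&m, -1ull);
	printf("cache=%#llx hw=%#llx (%s)\n",
	       (unsigned long long)m.curr, (unsigned long long)hw_msr,
	       m.curr == hw_msr ? "in sync" : "DESYNCED");

	/* New behavior: write explicitly before entering the guest. */
	m.curr = hw_msr = m.host;
	set_user_return_msr(&m, -1ull);
	printf("cache=%#llx hw=%#llx (%s)\n",
	       (unsigned long long)m.curr, (unsigned long long)hw_msr,
	       m.curr == hw_msr ? "in sync" : "DESYNCED");
	return 0;
}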