Skip to content

Commit 76b0fef

Browse files
yosrym93 authored and gregkh committed
KVM: nSVM: Fix and simplify LBR virtualization handling with nested
commit 8a48214 upstream. The current scheme for handling LBRV when nested is used is very complicated, especially when L1 does not enable LBRV (i.e. does not set LBR_CTL_ENABLE_MASK). To avoid copying LBRs between VMCB01 and VMCB02 on every nested transition, the current implementation switches between using VMCB01 or VMCB02 as the source of truth for the LBRs while L2 is running. If L2 enables LBR, VMCB02 is used as the source of truth. When L2 disables LBR, the LBRs are copied to VMCB01 and VMCB01 is used as the source of truth. This introduces significant complexity, and incorrect behavior in some cases. For example, on a nested #VMEXIT, the LBRs are only copied from VMCB02 to VMCB01 if LBRV is enabled in VMCB01. This is because L2's writes to MSR_IA32_DEBUGCTLMSR to enable LBR are intercepted and propagated to VMCB01 instead of VMCB02. However, LBRV is only enabled in VMCB02 when L2 is running. This means that if L2 enables LBR and exits to L1, the LBRs will not be propagated from VMCB02 to VMCB01, because LBRV is disabled in VMCB01. There is no meaningful difference in CPUID rate in L2 when copying LBRs on every nested transition vs. the current approach, so do the simple and correct thing and always copy LBRs between VMCB01 and VMCB02 on nested transitions (when LBRV is disabled by L1). Drop the conditional LBR copying in __svm_{enable/disable}_lbrv() as it is now unnecessary. VMCB02 becomes the only source of truth for LBRs when L2 is running, regardless of LBRV being enabled by L1, so drop svm_get_lbr_vmcb() and use svm->vmcb directly in its place. Fixes: 1d5a1b5 ("KVM: x86: nSVM: correctly virtualize LBR msrs when L2 is running") Cc: stable@vger.kernel.org Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev> Link: https://patch.msgid.link/20251108004524.1600006-4-yosry.ahmed@linux.dev Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent c931ea9 commit 76b0fef

2 files changed

Lines changed: 17 additions & 50 deletions

File tree

arch/x86/kvm/svm/nested.c

Lines changed: 7 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -602,11 +602,10 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
602602
*/
603603
svm_copy_lbrs(vmcb02, vmcb12);
604604
vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
605-
svm_update_lbrv(&svm->vcpu);
606-
607-
} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
605+
} else {
608606
svm_copy_lbrs(vmcb02, vmcb01);
609607
}
608+
svm_update_lbrv(&svm->vcpu);
610609
}
611610

612611
static inline bool is_evtinj_soft(u32 evtinj)
@@ -731,11 +730,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
731730
svm->soft_int_next_rip = vmcb12_rip;
732731
}
733732

734-
vmcb02->control.virt_ext = vmcb01->control.virt_ext &
735-
LBR_CTL_ENABLE_MASK;
736-
if (guest_can_use(vcpu, X86_FEATURE_LBRV))
737-
vmcb02->control.virt_ext |=
738-
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
733+
/* LBR_CTL_ENABLE_MASK is controlled by svm_update_lbrv() */
739734

740735
if (!nested_vmcb_needs_vls_intercept(svm))
741736
vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
@@ -1066,13 +1061,12 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
10661061
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
10671062

10681063
if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
1069-
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
1064+
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK)))
10701065
svm_copy_lbrs(vmcb12, vmcb02);
1071-
svm_update_lbrv(vcpu);
1072-
} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
1066+
else
10731067
svm_copy_lbrs(vmcb01, vmcb02);
1074-
svm_update_lbrv(vcpu);
1075-
}
1068+
1069+
svm_update_lbrv(vcpu);
10761070

10771071
if (vnmi) {
10781072
if (vmcb02->control.int_ctl & V_NMI_BLOCKING_MASK)

arch/x86/kvm/svm/svm.c

Lines changed: 10 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -1016,13 +1016,7 @@ static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
10161016

10171017
static void __svm_enable_lbrv(struct kvm_vcpu *vcpu)
10181018
{
1019-
struct vcpu_svm *svm = to_svm(vcpu);
1020-
1021-
svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
1022-
1023-
/* Move the LBR msrs to the vmcb02 so that the guest can see them. */
1024-
if (is_guest_mode(vcpu))
1025-
svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
1019+
to_svm(vcpu)->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
10261020
}
10271021

10281022
void svm_enable_lbrv(struct kvm_vcpu *vcpu)
@@ -1033,36 +1027,15 @@ void svm_enable_lbrv(struct kvm_vcpu *vcpu)
10331027

10341028
static void __svm_disable_lbrv(struct kvm_vcpu *vcpu)
10351029
{
1036-
struct vcpu_svm *svm = to_svm(vcpu);
1037-
10381030
KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);
1039-
1040-
svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
1041-
1042-
/*
1043-
* Move the LBR msrs back to the vmcb01 to avoid copying them
1044-
* on nested guest entries.
1045-
*/
1046-
if (is_guest_mode(vcpu))
1047-
svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb);
1048-
}
1049-
1050-
static struct vmcb *svm_get_lbr_vmcb(struct vcpu_svm *svm)
1051-
{
1052-
/*
1053-
* If LBR virtualization is disabled, the LBR MSRs are always kept in
1054-
* vmcb01. If LBR virtualization is enabled and L1 is running VMs of
1055-
* its own, the MSRs are moved between vmcb01 and vmcb02 as needed.
1056-
*/
1057-
return svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK ? svm->vmcb :
1058-
svm->vmcb01.ptr;
1031+
to_svm(vcpu)->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
10591032
}
10601033

10611034
void svm_update_lbrv(struct kvm_vcpu *vcpu)
10621035
{
10631036
struct vcpu_svm *svm = to_svm(vcpu);
10641037
bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK;
1065-
bool enable_lbrv = (svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR) ||
1038+
bool enable_lbrv = (svm->vmcb->save.dbgctl & DEBUGCTLMSR_LBR) ||
10661039
(is_guest_mode(vcpu) && guest_can_use(vcpu, X86_FEATURE_LBRV) &&
10671040
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK));
10681041

@@ -2991,19 +2964,19 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
29912964
msr_info->data = svm->tsc_aux;
29922965
break;
29932966
case MSR_IA32_DEBUGCTLMSR:
2994-
msr_info->data = svm_get_lbr_vmcb(svm)->save.dbgctl;
2967+
msr_info->data = svm->vmcb->save.dbgctl;
29952968
break;
29962969
case MSR_IA32_LASTBRANCHFROMIP:
2997-
msr_info->data = svm_get_lbr_vmcb(svm)->save.br_from;
2970+
msr_info->data = svm->vmcb->save.br_from;
29982971
break;
29992972
case MSR_IA32_LASTBRANCHTOIP:
3000-
msr_info->data = svm_get_lbr_vmcb(svm)->save.br_to;
2973+
msr_info->data = svm->vmcb->save.br_to;
30012974
break;
30022975
case MSR_IA32_LASTINTFROMIP:
3003-
msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_from;
2976+
msr_info->data = svm->vmcb->save.last_excp_from;
30042977
break;
30052978
case MSR_IA32_LASTINTTOIP:
3006-
msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_to;
2979+
msr_info->data = svm->vmcb->save.last_excp_to;
30072980
break;
30082981
case MSR_VM_HSAVE_PA:
30092982
msr_info->data = svm->nested.hsave_msr;
@@ -3276,10 +3249,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
32763249
if (data & DEBUGCTL_RESERVED_BITS)
32773250
return 1;
32783251

3279-
if (svm_get_lbr_vmcb(svm)->save.dbgctl == data)
3252+
if (svm->vmcb->save.dbgctl == data)
32803253
break;
32813254

3282-
svm_get_lbr_vmcb(svm)->save.dbgctl = data;
3255+
svm->vmcb->save.dbgctl = data;
32833256
vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
32843257
svm_update_lbrv(vcpu);
32853258
break;

0 commit comments

Comments (0)