@@ -7589,6 +7589,11 @@ static __init int hardware_setup(void)
 	else
 		kvm_disable_tdp();
 
+	if (!nested) {
+		kvm_x86_ops->get_nested_state = NULL;
+		kvm_x86_ops->set_nested_state = NULL;
+	}
+
 	/*
 	 * Only enable PML when hardware supports PML feature, and both EPT
 	 * and EPT A/D bit features are enabled -- PML depends on them to work.
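When the module is loaded with nested=0, the two callbacks above stay NULL. Assuming the x86.c side of this series gates the capability on them, KVM_CHECK_EXTENSION then reports KVM_CAP_NESTED_STATE as 0. A minimal userspace probe, sketched under the assumption that this series' uapi additions are present in <linux/kvm.h>:

/*
 * Hedged sketch (not part of this patch): probe for nested-state support.
 * Assumes KVM_CHECK_EXTENSION(KVM_CAP_NESTED_STATE) returns 0 when the
 * callbacks above are NULL, and the maximum state size in bytes otherwise.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int r;

	if (kvm < 0)
		return 1;

	r = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE);
	printf("KVM_CAP_NESTED_STATE: %d\n", r);
	close(kvm);
	return 0;
}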
@@ -11775,8 +11780,8 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 }
 
 /*
- * If exit_qual is NULL, this is being called from RSM.
- * Otherwise it's called from vmlaunch/vmresume.
+ * If exit_qual is NULL, this is being called from state restore (either RSM
+ * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
  */
 static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
 {
@@ -13016,6 +13021,170 @@ static int enable_smi_window(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
+				struct kvm_nested_state __user *user_kvm_nested_state,
+				u32 user_data_size)
+{
+	struct vcpu_vmx *vmx;
+	struct vmcs12 *vmcs12;
+	struct kvm_nested_state kvm_state = {
+		.flags = 0,
+		.format = 0,
+		.size = sizeof(kvm_state),
+		.vmx.vmxon_pa = -1ull,
+		.vmx.vmcs_pa = -1ull,
+	};
+
+	if (!vcpu)
+		return kvm_state.size + 2 * VMCS12_SIZE;
+
+	vmx = to_vmx(vcpu);
+	vmcs12 = get_vmcs12(vcpu);
+	if (nested_vmx_allowed(vcpu) &&
+	    (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
+		kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
+		kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
+
+		if (vmx->nested.current_vmptr != -1ull)
+			kvm_state.size += VMCS12_SIZE;
+
+		if (vmx->nested.smm.vmxon)
+			kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
+
+		if (vmx->nested.smm.guest_mode)
+			kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
+
+		if (is_guest_mode(vcpu)) {
+			kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
+
+			if (vmx->nested.nested_run_pending)
+				kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
+		}
+	}
+
+	if (user_data_size < kvm_state.size)
+		goto out;
+
+	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
+		return -EFAULT;
+
+	if (vmx->nested.current_vmptr == -1ull)
+		goto out;
+
+	/*
+	 * When running L2, the authoritative vmcs12 state is in the
+	 * vmcs02. When running L1, the authoritative vmcs12 state is
+	 * in the shadow vmcs linked to vmcs01, unless
+	 * sync_shadow_vmcs is set, in which case, the authoritative
+	 * vmcs12 state is in the vmcs12 already.
+	 */
+	if (is_guest_mode(vcpu))
+		sync_vmcs12(vcpu, vmcs12);
+	else if (enable_shadow_vmcs && !vmx->nested.sync_shadow_vmcs)
+		copy_shadow_to_vmcs12(vmx);
+
+	if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
+		return -EFAULT;
+
+out:
+	return kvm_state.size;
+}
+
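Two details of vmx_get_nested_state() above are worth calling out: with a NULL vcpu it reports the worst-case size (the header plus room for a vmcs12 and a shadow vmcs12), which is what KVM_CHECK_EXTENSION is expected to return, and otherwise it always returns the size it needs, copying data out only when user_data_size is large enough. A hedged save-side sketch follows; that userspace advertises its buffer size in the size field, and that an undersized buffer yields -E2BIG with the required size written back, are assumptions about the x86.c wrapper rather than anything shown in this hunk:

/* Hedged sketch of the save side, assuming the uapi from this series. */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_nested_state *save_nested_state(int kvm_fd, int vcpu_fd)
{
	struct kvm_nested_state *state;
	int max_size;

	/* Worst-case size, as reported via the !vcpu path above. */
	max_size = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE);
	if (max_size <= 0)
		return NULL;		/* nested state not supported */

	state = calloc(1, max_size);
	if (!state)
		return NULL;
	state->size = max_size;		/* buffer size advertised to KVM */

	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0) {
		free(state);
		return NULL;
	}
	return state;			/* header, plus vmcs12 in data[] */
}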
+static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
+				struct kvm_nested_state __user *user_kvm_nested_state,
+				struct kvm_nested_state *kvm_state)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct vmcs12 *vmcs12;
+	u32 exit_qual;
+	int ret;
+
+	if (kvm_state->format != 0)
+		return -EINVAL;
+
+	if (!nested_vmx_allowed(vcpu))
+		return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
+
+	if (kvm_state->vmx.vmxon_pa == -1ull) {
+		if (kvm_state->vmx.smm.flags)
+			return -EINVAL;
+
+		if (kvm_state->vmx.vmcs_pa != -1ull)
+			return -EINVAL;
+
+		vmx_leave_nested(vcpu);
+		return 0;
+	}
+
+	if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
+		return -EINVAL;
+
+	if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
+		return -EINVAL;
+
+	if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
+	    !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
+		return -EINVAL;
+
+	if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
+	    (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
+		return -EINVAL;
+
+	if (kvm_state->vmx.smm.flags &
+	    ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
+		return -EINVAL;
+
+	if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
+	    !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
+		return -EINVAL;
+
+	vmx_leave_nested(vcpu);
+	if (kvm_state->vmx.vmxon_pa == -1ull)
+		return 0;
+
+	vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa;
+	ret = enter_vmx_operation(vcpu);
+	if (ret)
+		return ret;
+
+	set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
+
+	if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
+		vmx->nested.smm.vmxon = true;
+		vmx->nested.vmxon = false;
+
+		if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
+			vmx->nested.smm.guest_mode = true;
+	}
+
+	vmcs12 = get_vmcs12(vcpu);
+	if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12)))
+		return -EFAULT;
+
+	if (vmcs12->revision_id != VMCS12_REVISION)
+		return -EINVAL;
+
+	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
+		return 0;
+
+	vmx->nested.nested_run_pending =
+		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
+
+	if (check_vmentry_prereqs(vcpu, vmcs12) ||
+	    check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
+		return -EINVAL;
+
+	vmx->nested.dirty_vmcs12 = true;
+	ret = enter_vmx_non_root_mode(vcpu, NULL);
+	if (ret)
+		return -EINVAL;
+
+	return 0;
+}
+
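For restore, the ordering above matters: a vmxon_pa of -1ull just clears any nested state, otherwise the blob is validated, VMX operation is re-entered, the vmcs12 is read back from data[], and only when KVM_STATE_NESTED_GUEST_MODE is set does the vCPU re-enter non-root mode via enter_vmx_non_root_mode(vcpu, NULL), the state-restore case noted in the comment change earlier. A hedged userspace counterpart to save_nested_state() above:

/* Hedged sketch of the restore side; pairs with save_nested_state(). */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int restore_nested_state(int vcpu_fd, struct kvm_nested_state *state)
{
	/*
	 * -EINVAL from this ioctl is assumed to mean the blob failed one
	 * of the consistency checks in vmx_set_nested_state() above.
	 */
	return ioctl(vcpu_fd, KVM_SET_NESTED_STATE, state);
}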
 static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = cpu_has_kvm_support,
 	.disabled_by_bios = vmx_disabled_by_bios,
@@ -13150,6 +13319,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 
 	.setup_mce = vmx_setup_mce,
 
+	.get_nested_state = vmx_get_nested_state,
+	.set_nested_state = vmx_set_nested_state,
 	.get_vmcs12_pages = nested_get_vmcs12_pages,
 
 	.smi_allowed = vmx_smi_allowed,