Skip to content

Commit

Permalink
KVM: X86: Run higher VMPL if events are pending
Browse files Browse the repository at this point in the history
Each vCPU VMPL maintains its own set of pending events. When a vCPU is
kicked or exits back to the host, check using an X86 op to see if events
are pending for a higher priority VMPL and allow switching to that VMPL.

This has been implemented for SEV-SNP where VMPL0 is checked when
running a lower VMPL and the current_vmpl is switched to VMPL0 if there
are pending events.

Signed-off-by: Roy Hopkins <[email protected]>
  • Loading branch information
roy-hopkins committed Oct 8, 2024
1 parent d39aa91 commit 5c78db7
Show file tree
Hide file tree
Showing 6 changed files with 37 additions and 2 deletions.
1 change: 1 addition & 0 deletions arch/x86/include/asm/kvm-x86-ops.h
Original file line number Diff line number Diff line change
Expand Up @@ -140,6 +140,7 @@ KVM_X86_OP_OPTIONAL(alloc_apic_backing_page)
KVM_X86_OP_OPTIONAL_RET0(gmem_prepare)
KVM_X86_OP_OPTIONAL_RET0(private_max_mapping_level)
KVM_X86_OP_OPTIONAL(gmem_invalidate)
KVM_X86_OP_OPTIONAL_RET0(pending_event_higher_vmpl)

#undef KVM_X86_OP
#undef KVM_X86_OP_OPTIONAL
Expand Down
1 change: 1 addition & 0 deletions arch/x86/include/asm/kvm_host.h
Original file line number Diff line number Diff line change
Expand Up @@ -1839,6 +1839,7 @@ struct kvm_x86_ops {
int (*gmem_prepare)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
void (*gmem_invalidate)(kvm_pfn_t start, kvm_pfn_t end);
int (*private_max_mapping_level)(struct kvm *kvm, kvm_pfn_t pfn);
int (*pending_event_higher_vmpl)(struct kvm_vcpu *vcpu);
};

struct kvm_x86_nested_ops {
Expand Down
17 changes: 17 additions & 0 deletions arch/x86/kvm/svm/sev.c
Original file line number Diff line number Diff line change
Expand Up @@ -5574,3 +5574,20 @@ bool sev_snp_blocked(enum inject_type type, struct kvm_vcpu *vcpu)

return blocked;
}

/*
 * Report whether a higher-privilege VMPL has a pending event that should
 * preempt the currently running VMPL on this vCPU.
 *
 * Returns 1 and retargets the vCPU to VMPL0 when the caller is running a
 * lower VMPL and VMPL0 has a KVM_REQ_EVENT request pending; returns 0
 * otherwise.
 */
int sev_pending_event_higher_vmpl(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_vmpl_state *parent = vcpu->vcpu_parent;

	/* VMPL0 is the highest priority; nothing can preempt it. */
	if (vcpu->vmpl == 0)
		return 0;

	/* No pending event request on the VMPL0 vCPU: stay put. */
	if (!kvm_test_request(KVM_REQ_EVENT, parent->vcpu_vmpl[0]))
		return 0;

	/* Redirect execution back to VMPL0 to service its events. */
	parent->target_vmpl = 0;
	return 1;
}
1 change: 1 addition & 0 deletions arch/x86/kvm/svm/svm.c
Original file line number Diff line number Diff line change
Expand Up @@ -5172,6 +5172,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {

.vm_copy_enc_context_from = sev_vm_copy_enc_context_from,
.vm_move_enc_context_from = sev_vm_move_enc_context_from,
.pending_event_higher_vmpl = sev_pending_event_higher_vmpl,
#endif
.check_emulate_instruction = svm_check_emulate_instruction,

Expand Down
1 change: 1 addition & 0 deletions arch/x86/kvm/svm/svm.h
Original file line number Diff line number Diff line change
Expand Up @@ -800,6 +800,7 @@ static inline bool sev_snp_is_rinj_active(struct kvm_vcpu *vcpu)
return sev_snp_guest(vcpu->kvm) &&
(sev->vmsa_features[vcpu->vmpl] & SVM_SEV_FEAT_RESTRICTED_INJECTION);
};
int sev_pending_event_higher_vmpl(struct kvm_vcpu *vcpu);
#else
static inline struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
{
Expand Down
18 changes: 16 additions & 2 deletions arch/x86/kvm/x86.c
Original file line number Diff line number Diff line change
Expand Up @@ -11310,6 +11310,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
{
int r;
struct kvm_vcpu_vmpl_state *vcpu_parent = vcpu->vcpu_parent;
int vmpl;

vcpu->common->run->exit_reason = KVM_EXIT_UNKNOWN;

Expand All @@ -11334,8 +11335,15 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
if (kvm_xen_has_pending_events(vcpu))
kvm_xen_inject_pending_events(vcpu);

if (kvm_cpu_has_pending_timer(vcpu))
kvm_inject_pending_timer_irqs(vcpu);
/*
* Check for pending timer interrupts on all vCPU priorities. We
* will switch to the higher priority VMPL if a pending interrupt
* is ready.
*/
for (vmpl = 0; vmpl <= vcpu_parent->max_vmpl; ++vmpl) {
if (kvm_cpu_has_pending_timer(vcpu_parent->vcpu_vmpl[vmpl]))
kvm_inject_pending_timer_irqs(vcpu_parent->vcpu_vmpl[vmpl]);
}

if (dm_request_for_irq_injection(vcpu) &&
kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
Expand All @@ -11356,6 +11364,12 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
/* If the exit code results in a VTL switch then let the caller handle it */
if (vcpu_parent->target_vmpl != vcpu_parent->current_vmpl)
break;

/* Exit on pending interrupts/events for a higher priority VMPL */
if (kvm_x86_call(pending_event_higher_vmpl)(vcpu)) {
r = 0;
break;
}
}

return r;
Expand Down

0 comments on commit 5c78db7

Please sign in to comment.