Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 0 additions & 11 deletions arch/x86/include/asm/kvm_host.h
Original file line number Diff line number Diff line change
Expand Up @@ -1016,17 +1016,6 @@ struct kvm_vcpu_arch {
#if IS_ENABLED(CONFIG_HYPERV)
hpa_t hv_root_tdp;
#endif

/*
* Zhaoxin/Centaur extended software managed vcpu states.
* - pauseopt_interrupted: set when pauseopt optimized state interrupted
* by some vmexit.
* - pauseopt_rip: stores the guest RIP at the time of vmexit if the vmexit
* occurred during pauseopt optimized state.
* We will move these definitions to zhaoxin specific arch in the future.
*/
bool pauseopt_interrupted;
unsigned long pauseopt_rip;
};

struct kvm_lpage_info {
Expand Down
2 changes: 1 addition & 1 deletion arch/x86/include/asm/msr-index.h
Original file line number Diff line number Diff line change
Expand Up @@ -813,7 +813,7 @@
* bit 0: exec-cntl3 VMCS field.
*/
#define MSR_ZX_EXT_VMCS_CAPS 0x1675
#define MSR_ZX_VMCS_EXEC_CTL3 BIT(0)
#define MSR_ZX_VMCS_EXEC_CTL3_EN BIT(0)

/* Transmeta defined MSRs */
#define MSR_TMTA_LONGRUN_CTRL 0x80868010
Expand Down
2 changes: 1 addition & 1 deletion arch/x86/kernel/cpu/feat_ctl.c
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ static void init_zhaoxin_ext_capabilities(struct cpuinfo_x86 *c)

err = rdmsr_safe(MSR_ZX_EXT_VMCS_CAPS, &ext_vmcs_cap, &ign);

if (!(ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3))
if (!(ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3_EN))
return;

err = rdmsr_safe(MSR_ZX_VMX_PROCBASED_CTLS3, &ign, &msr_high);
Expand Down
45 changes: 14 additions & 31 deletions arch/x86/kvm/vmx/vmx.c
Original file line number Diff line number Diff line change
Expand Up @@ -2664,7 +2664,7 @@ static int setup_zhaoxin_vmcs_controls(struct vmcs_config *vmcs_conf)
* control, rather than a bit in the 2nd CPU-based control.
*/
rdmsr_safe(MSR_ZX_EXT_VMCS_CAPS, &zx_ext_vmcs_cap, &ign);
if (!(zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3))
if (!(zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3_EN))
return 0;

ret = rdmsr_safe(MSR_ZX_VMX_PROCBASED_CTLS3, &ign, &msr_high);
Expand Down Expand Up @@ -5023,6 +5023,9 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)

vmx->rmode.vm86_active = 0;
vmx->spec_ctrl = 0;
vmx->msr_pauseopt_control = 0;
vmx->pauseopt_in_progress = false;
vmx->pauseopt_rip = 0;
Copy link

Copilot AI Apr 9, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

vmx_vcpu_reset() now resets the software PAUSEOPT tracking fields, but it never clears the VMCS PAUSEOPT_TARGET_TSC field. Because __vmx_vcpu_reset()/init_vmcs() is skipped when init_event==true, a stale non-zero PAUSEOPT_TARGET_TSC value can persist across INIT/reset and be misinterpreted as “PAUSEOPT in progress” on subsequent VM-exits. Consider explicitly vmcs_write64(PAUSEOPT_TARGET_TSC, 0) here (guarded by cpu_has_vmx_pauseopt()/is_zhaoxin_cpu()) to guarantee a clean state after reset paths.

Suggested change
vmx->pauseopt_rip = 0;
vmx->pauseopt_rip = 0;
if (cpu_has_vmx_pauseopt() || is_zhaoxin_cpu())
vmcs_write64(PAUSEOPT_TARGET_TSC, 0);

Copilot uses AI. Check for mistakes.

vmx->msr_ia32_umwait_control = 0;

Expand Down Expand Up @@ -7508,54 +7511,34 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
guest_state_exit_irqoff();
}

/*
 * is_vmexit_during_pauseopt - heuristically detect whether this VM-exit
 * interrupted the guest mid-PAUSEOPT.
 *
 * Reads the 4 bytes of guest code at the current guest RIP and compares
 * them against the PAUSEOPT instruction encoding (bytes F2 0F A6 D0,
 * which read as the little-endian u32 0xD0A60FF2).
 *
 * Returns true only when the guest RIP translates to a valid GPA, the
 * guest memory read succeeds, and the opcode matches; any translation
 * or read failure is treated as "not in PAUSEOPT" (false).
 *
 * NOTE(review): this assumes RIP still points AT the PAUSEOPT instruction
 * after the exit (i.e. the exit did not advance RIP past it) — confirm
 * against the Zhaoxin VM-exit semantics.
 */
static bool is_vmexit_during_pauseopt(struct kvm_vcpu *vcpu)
{
	uint8_t opcode[4];
	gpa_t gpa;
	unsigned long rip;
	/* PAUSEOPT encoding F2 0F A6 D0 as a little-endian u32. */
	const u32 pauseopt_opcode = 0xD0A60FF2;
	u32 code;

	rip = kvm_rip_read(vcpu);
	/* Translate guest-virtual RIP to a guest-physical address. */
	gpa = kvm_mmu_gva_to_gpa_read(vcpu, (gva_t)rip, NULL);
	if (gpa == INVALID_GPA)
		return false;

	/* Fetch the instruction bytes from guest memory; bail on failure. */
	if (kvm_vcpu_read_guest(vcpu, gpa, opcode, 4) != 0)
		return false;

	/* Normalize the raw bytes to host endianness before comparing. */
	code = le32_to_cpu(*(u32 *)opcode);
	if (code == pauseopt_opcode)
		return true;

	return false;
}

static void zx_vmx_vcpu_run_pre(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long new_rip;

if (vcpu->arch.pauseopt_interrupted) {
if (vmx->pauseopt_in_progress) {
new_rip = kvm_rip_read(vcpu);
if (new_rip != vcpu->arch.pauseopt_rip) {
if (new_rip != vmx->pauseopt_rip) {
/*
* When the execution of PAUSEOPT in the guest is interrupted by
* other events, causing a vmexit, the pauseopt target tsc should be
* cleared to zero before the next vmentry if guest rip changed,
* avoiding re-enter pauseopt optimized state after enter guest.
*/
vmcs_write64(PAUSEOPT_TARGET_TSC, 0);
vcpu->arch.pauseopt_interrupted = false;
vcpu->arch.pauseopt_rip = 0;
vmx->pauseopt_in_progress = false;
vmx->pauseopt_rip = 0;
}
}
}

static void zx_vmx_vcpu_run_post(struct kvm_vcpu *vcpu)
{
if (cpu_has_vmx_pauseopt() && is_vmexit_during_pauseopt(vcpu)) {
vcpu->arch.pauseopt_interrupted = true;
vcpu->arch.pauseopt_rip = kvm_rip_read(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);

if (cpu_has_vmx_pauseopt() && vmcs_read64(PAUSEOPT_TARGET_TSC)) {
vmx->pauseopt_in_progress = true;
vmx->pauseopt_rip = kvm_rip_read(vcpu);
}
}

Expand Down
4 changes: 3 additions & 1 deletion arch/x86/kvm/vmx/vmx.h
Original file line number Diff line number Diff line change
Expand Up @@ -281,7 +281,9 @@ struct vcpu_vmx {

u64 spec_ctrl;
u32 msr_ia32_umwait_control;
u32 msr_pauseopt_control;
u32 msr_pauseopt_control;
bool pauseopt_in_progress;
unsigned long pauseopt_rip;
Comment on lines +284 to +286
Copy link

Copilot AI Apr 9, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In struct vcpu_vmx, the newly added fields are not aligned/indented consistently with the surrounding members (most fields in this block use tab-aligned spacing). Please adjust formatting to match the existing style in this struct to keep diffs/readability consistent.

Suggested change
u32 msr_pauseopt_control;
bool pauseopt_in_progress;
unsigned long pauseopt_rip;
u32 msr_pauseopt_control;
bool pauseopt_in_progress;
unsigned long pauseopt_rip;

Copilot uses AI. Check for mistakes.

/*
* loaded_vmcs points to the VMCS currently used in this vcpu. For a
Expand Down
Loading