dect/linux-2.6

KVM: x86: Add x86 callback for intercept check

This patch adds a callback into kvm_x86_ops so that svm and
vmx code can do intercept checks on emulated instructions.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Joerg Roedel 2011-04-04 12:39:27 +02:00 committed by Avi Kivity
parent 8ea7d6aef8
commit 8a76d7f25f
6 changed files with 74 additions and 11 deletions
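Before the file diffs, a quick orientation: the patch wires the instruction emulator to the vendor modules through two ops tables. emulate.c packs decoder state into the new struct x86_instruction_info and calls ctxt->ops->intercept(); that lands in emulator_intercept() in x86.c, which forwards to the new kvm_x86_ops->check_intercept(); svm.c and vmx.c supply stub implementations for now. The snippet below is a small self-contained userspace model of that plumbing, for illustration only; its type and function names are simplified stand-ins, not KVM's.

/* Toy userspace model of the callback chain added by this patch.
 * None of these names are KVM's; they only mirror its structure. */
#include <stdio.h>
#include <stdint.h>

enum icpt_stage { PRE_EXCEPT, POST_EXCEPT, POST_MEMACCESS };

struct insn_info {                  /* stands in for x86_instruction_info */
	uint8_t  intercept;
	uint8_t  rep_prefix;
	uint64_t next_rip;
};

struct vendor_ops {                 /* stands in for kvm_x86_ops */
	int (*check_intercept)(struct insn_info *info, enum icpt_stage stage);
};

/* stands in for the svm/vmx stubs: always "continue emulation" */
static int stub_check_intercept(struct insn_info *info, enum icpt_stage stage)
{
	(void)info;
	(void)stage;
	return 0;                   /* X86EMUL_CONTINUE in the kernel */
}

static struct vendor_ops vendor = { .check_intercept = stub_check_intercept };

/* stands in for emulator_intercept() in x86.c: forward to the vendor table */
static int emulator_forward(struct insn_info *info, enum icpt_stage stage)
{
	return vendor.check_intercept(info, stage);
}

/* stands in for emulator_check_intercept() in emulate.c: pack decoder
 * state into the info struct and hand it on */
static int emulator_check(uint8_t intercept, enum icpt_stage stage)
{
	struct insn_info info = {
		.intercept  = intercept,
		.rep_prefix = 0,
		.next_rip   = 0x1234,
	};
	return emulator_forward(&info, stage);
}

int main(void)
{
	printf("pre-exception:   %d\n", emulator_check(7, PRE_EXCEPT));
	printf("post-exception:  %d\n", emulator_check(7, POST_EXCEPT));
	printf("post-mem-access: %d\n", emulator_check(7, POST_MEMACCESS));
	return 0;
}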

arch/x86/include/asm/kvm_emulate.h

@@ -25,6 +25,24 @@ struct x86_exception {
 	u64 address; /* cr2 or nested page fault gpa */
 };
 
+/*
+ * This struct is used to carry enough information from the instruction
+ * decoder to main KVM so that a decision can be made whether the
+ * instruction needs to be intercepted or not.
+ */
+struct x86_instruction_info {
+	u8  intercept;    /* which intercept                */
+	u8  rep_prefix;   /* rep prefix?                    */
+	u8  modrm_mod;    /* mod part of modrm              */
+	u8  modrm_reg;    /* index of register used         */
+	u8  modrm_rm;     /* rm part of modrm               */
+	u64 src_val;      /* value of source operand        */
+	u8  src_bytes;    /* size of source operand         */
+	u8  dst_bytes;    /* size of destination operand    */
+	u8  ad_bytes;     /* size of src/dst address        */
+	u64 next_rip;     /* rip following the instruction  */
+};
+
 /*
  * x86_emulate_ops:
  *
@@ -163,8 +181,8 @@ struct x86_emulate_ops {
 	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
 	void (*get_fpu)(struct x86_emulate_ctxt *ctxt); /* disables preempt */
 	void (*put_fpu)(struct x86_emulate_ctxt *ctxt); /* reenables preempt */
-	int (*intercept)(struct x86_emulate_ctxt *ctxt,
-			 enum x86_intercept intercept,
+	int (*intercept)(struct kvm_vcpu *vcpu,
+			 struct x86_instruction_info *info,
 			 enum x86_intercept_stage stage);
 };

arch/x86/include/asm/kvm_host.h

@@ -505,6 +505,8 @@ struct kvm_vcpu_stat {
 	u32 nmi_injections;
 };
 
+struct x86_instruction_info;
+
 struct kvm_x86_ops {
 	int (*cpu_has_kvm_support)(void);          /* __init */
 	int (*disabled_by_bios)(void);             /* __init */
@@ -592,6 +594,11 @@ struct kvm_x86_ops {
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
 	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
+
+	int (*check_intercept)(struct kvm_vcpu *vcpu,
+			       struct x86_instruction_info *info,
+			       enum x86_intercept_stage stage);
+
 	const struct trace_print_flags *exit_reasons_str;
 };

arch/x86/kvm/emulate.c

@@ -408,6 +408,26 @@ struct gprefix {
 	(_eip) += (_size);					\
 })
 
+static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
+				    enum x86_intercept intercept,
+				    enum x86_intercept_stage stage)
+{
+	struct x86_instruction_info info = {
+		.intercept  = intercept,
+		.rep_prefix = ctxt->decode.rep_prefix,
+		.modrm_mod  = ctxt->decode.modrm_mod,
+		.modrm_reg  = ctxt->decode.modrm_reg,
+		.modrm_rm   = ctxt->decode.modrm_rm,
+		.src_val    = ctxt->decode.src.val64,
+		.src_bytes  = ctxt->decode.src.bytes,
+		.dst_bytes  = ctxt->decode.dst.bytes,
+		.ad_bytes   = ctxt->decode.ad_bytes,
+		.next_rip   = ctxt->eip,
+	};
+
+	return ctxt->ops->intercept(ctxt->vcpu, &info, stage);
+}
+
 static inline unsigned long ad_mask(struct decode_cache *c)
 {
 	return (1UL << (c->ad_bytes << 3)) - 1;
@@ -3132,8 +3152,8 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 	}
 
 	if (unlikely(ctxt->guest_mode) && c->intercept) {
-		rc = ops->intercept(ctxt, c->intercept,
-				    X86_ICPT_PRE_EXCEPT);
+		rc = emulator_check_intercept(ctxt, c->intercept,
+					      X86_ICPT_PRE_EXCEPT);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
 	}
@@ -3158,8 +3178,8 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 	}
 
 	if (unlikely(ctxt->guest_mode) && c->intercept) {
-		rc = ops->intercept(ctxt, c->intercept,
-				    X86_ICPT_POST_EXCEPT);
+		rc = emulator_check_intercept(ctxt, c->intercept,
+					      X86_ICPT_POST_EXCEPT);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
 	}
@@ -3203,8 +3223,8 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 special_insn:
 
 	if (unlikely(ctxt->guest_mode) && c->intercept) {
-		rc = ops->intercept(ctxt, c->intercept,
-				    X86_ICPT_POST_MEMACCESS);
+		rc = emulator_check_intercept(ctxt, c->intercept,
+					      X86_ICPT_POST_MEMACCESS);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
 	}

arch/x86/kvm/svm.c

@@ -3868,6 +3868,13 @@ static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
 	update_cr0_intercept(svm);
 }
 
+static int svm_check_intercept(struct kvm_vcpu *vcpu,
+			       struct x86_instruction_info *info,
+			       enum x86_intercept_stage stage)
+{
+	return X86EMUL_CONTINUE;
+}
+
 static struct kvm_x86_ops svm_x86_ops = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -3953,6 +3960,8 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.adjust_tsc_offset = svm_adjust_tsc_offset,
 
 	.set_tdp_cr3 = set_tdp_cr3,
+
+	.check_intercept = svm_check_intercept,
 };
 
 static int __init svm_init(void)

arch/x86/kvm/vmx.c

@@ -4409,6 +4409,13 @@ static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
 {
 }
 
+static int vmx_check_intercept(struct kvm_vcpu *vcpu,
+			       struct x86_instruction_info *info,
+			       enum x86_intercept_stage stage)
+{
+	return X86EMUL_CONTINUE;
+}
+
 static struct kvm_x86_ops vmx_x86_ops = {
 	.cpu_has_kvm_support = cpu_has_kvm_support,
 	.disabled_by_bios = vmx_disabled_by_bios,
@@ -4494,6 +4501,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.adjust_tsc_offset = vmx_adjust_tsc_offset,
 
 	.set_tdp_cr3 = vmx_set_cr3,
+
+	.check_intercept = vmx_check_intercept,
 };
 
 static int __init vmx_init(void)

arch/x86/kvm/x86.c

@@ -4297,11 +4297,11 @@ static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
 	preempt_enable();
 }
 
-static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
-			      enum x86_intercept intercept,
+static int emulator_intercept(struct kvm_vcpu *vcpu,
+			      struct x86_instruction_info *info,
 			      enum x86_intercept_stage stage)
 {
-	return X86EMUL_CONTINUE;
+	return kvm_x86_ops->check_intercept(vcpu, info, stage);
 }
 
 static struct x86_emulate_ops emulate_ops = {