KVM: x86: Breakpoints do not consider CS.base
x86 debug registers hold a linear address. Breakpoint detection should therefore consider CS.base and check whether the instruction's linear address equals (CS.base + RIP). This patch introduces a function that evaluates the linear address of RIP and uses it for breakpoint detection.

Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in: parent 7305eb5d8c, commit 82b32774c2
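The core of the change is the linear-address computation itself. Below is a minimal, standalone sketch of that computation; the function name linear_rip and the CS.base/RIP values are purely illustrative, not kernel code. It only mirrors the rule the patch relies on: outside 64-bit mode the linear instruction address is CS.base + RIP truncated to 32 bits, while in 64-bit mode CS.base is treated as zero, so RIP already is the linear address.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the new helper: compute the linear address
 * of the current instruction from CS.base and RIP. */
static uint64_t linear_rip(bool long_mode, uint64_t cs_base, uint64_t rip)
{
        if (long_mode)
                return rip;                       /* CS.base is treated as 0 */
        return (uint32_t)(cs_base + rip);         /* wraps at 32 bits */
}

int main(void)
{
        /* Hypothetical protected-mode guest: CS.base 0x10000, RIP 0x2345.
         * RIP alone (0x2345) would never match a breakpoint at 0x12345. */
        printf("%#llx\n", (unsigned long long)linear_rip(false, 0x10000, 0x2345));
        /* Same registers in 64-bit mode: CS.base is ignored. */
        printf("%#llx\n", (unsigned long long)linear_rip(true, 0x10000, 0x2345));
        return 0;
}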
@@ -1067,6 +1067,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 void kvm_define_shared_msr(unsigned index, u32 msr);
 int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
 
+unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
 
 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
@@ -5118,9 +5118,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
         if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
                 vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
                 vcpu->run->debug.arch.dr7 = dr7;
-                vcpu->run->debug.arch.pc =
-                        vmcs_readl(GUEST_CS_BASE) +
-                        vmcs_readl(GUEST_RIP);
+                vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
                 vcpu->run->debug.arch.exception = DB_VECTOR;
                 vcpu->run->exit_reason = KVM_EXIT_DEBUG;
                 return 0;
@@ -5207,21 +5207,17 @@ static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflag
 
 static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
 {
-        struct kvm_run *kvm_run = vcpu->run;
-        unsigned long eip = vcpu->arch.emulate_ctxt.eip;
-        u32 dr6 = 0;
-
         if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
             (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
-                dr6 = kvm_vcpu_check_hw_bp(eip, 0,
+                struct kvm_run *kvm_run = vcpu->run;
+                unsigned long eip = kvm_get_linear_rip(vcpu);
+                u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
                                            vcpu->arch.guest_debug_dr7,
                                            vcpu->arch.eff_db);
 
                 if (dr6 != 0) {
                         kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
-                        kvm_run->debug.arch.pc = kvm_rip_read(vcpu) +
-                                get_segment_base(vcpu, VCPU_SREG_CS);
+                        kvm_run->debug.arch.pc = eip;
                         kvm_run->debug.arch.exception = DB_VECTOR;
                         kvm_run->exit_reason = KVM_EXIT_DEBUG;
                         *r = EMULATE_USER_EXIT;
@@ -5231,7 +5227,8 @@ static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
 
         if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
             !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) {
-                dr6 = kvm_vcpu_check_hw_bp(eip, 0,
+                unsigned long eip = kvm_get_linear_rip(vcpu);
+                u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
                                            vcpu->arch.dr7,
                                            vcpu->arch.db);
 
@@ -7538,12 +7535,18 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
         return kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
+unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
+{
+        if (is_64_bit_mode(vcpu))
+                return kvm_rip_read(vcpu);
+        return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
+                     kvm_rip_read(vcpu));
+}
+EXPORT_SYMBOL_GPL(kvm_get_linear_rip);
+
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
 {
-        unsigned long current_rip = kvm_rip_read(vcpu) +
-                get_segment_base(vcpu, VCPU_SREG_CS);
-
-        return current_rip == linear_rip;
+        return kvm_get_linear_rip(vcpu) == linear_rip;
 }
 EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
 
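As a companion illustration (not the kernel's kvm_vcpu_check_hw_bp()), the sketch below shows why the comparison must use the linear address: each enabled code breakpoint in DR0-DR3 holds a linear address, and a hit is reported through the B0-B3 bits that land in DR6. The DR7 bit layout used here is architectural; the function name and sample values are assumed for the example.

#include <stdint.h>
#include <stdio.h>

/* Simplified code-breakpoint match: compare a linear instruction address
 * against DR0-DR3, honouring the per-breakpoint enable bits (DR7[2i] and
 * DR7[2i+1]) and type field (DR7[16+4i..17+4i]; type 0 = execution). */
static uint32_t check_code_bp(uint64_t linear_addr, uint32_t dr7,
                              const uint64_t db[4])
{
        uint32_t dr6_bits = 0;
        int i;

        for (i = 0; i < 4; i++) {
                int enabled = (dr7 >> (i * 2)) & 0x3;        /* L/G bits */
                int type = (dr7 >> (16 + i * 4)) & 0x3;      /* R/W field */

                if (enabled && type == 0 && db[i] == linear_addr)
                        dr6_bits |= 1u << i;                 /* B0..B3 */
        }
        return dr6_bits;
}

int main(void)
{
        /* Hypothetical guest state: breakpoint 0 armed on linear 0x12345. */
        uint64_t db[4] = { 0x12345, 0, 0, 0 };
        uint32_t dr7 = 0x1;                     /* L0 set, type 0 (execute) */
        uint64_t linear = 0x10000 + 0x2345;     /* CS.base + RIP */

        printf("DR6 hit bits: %#x\n", (unsigned)check_code_bp(linear, dr7, db));
        return 0;
}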