KVM: x86: fix vcpu->mmio_fragments overflow
After commit b3356bf0db (KVM: emulator: optimize "rep ins" handling), the
pieces of I/O data can be collected and written to guest memory or MMIO
together.
Unfortunately, KVM splits the MMIO access into 8-byte pieces and stores each
of them in vcpu->mmio_fragments. If the guest uses "rep ins" to move large
data, vcpu->mmio_fragments overflows.
The bug can be exposed by isapc (-M isapc):
[23154.818733] general protection fault: 0000 [#1] SMP DEBUG_PAGEALLOC
[ ......]
[23154.858083] Call Trace:
[23154.859874] [<ffffffffa04f0e17>] kvm_get_cr8+0x1d/0x28 [kvm]
[23154.861677] [<ffffffffa04fa6d4>] kvm_arch_vcpu_ioctl_run+0xcda/0xe45 [kvm]
[23154.863604] [<ffffffffa04f5a1a>] ? kvm_arch_vcpu_load+0x17b/0x180 [kvm]
Instead, we can use a single mmio_fragment to store a large MMIO access and
split it only when the MMIO exit info is passed to userspace. After that, only
two entries are needed to store MMIO info for an access that crosses MMIO
pages.
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
commit 87da7e66a4
parent 35fd3dc58d
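For illustration, a minimal standalone sketch (simplified struct and names,
not the kernel code) of the new scheme: a single fragment records the whole
access, and it is replayed to userspace in pieces of at most 8 bytes,
advancing gpa, data and len after each exit, as complete_emulated_mmio() does
in the diff below.

/* Standalone sketch only -- simplified stand-in for the kernel structures. */
#include <stdio.h>

struct frag_sketch {
        unsigned long gpa;      /* guest physical address of the access */
        unsigned char *data;    /* emulator-side buffer backing the access */
        unsigned len;           /* bytes still to be transferred */
};

/* Hand one large fragment to "userspace" in pieces of at most 8 bytes. */
static void replay_fragment(struct frag_sketch *frag)
{
        while (frag->len) {
                unsigned piece = frag->len < 8 ? frag->len : 8;

                printf("KVM_EXIT_MMIO: gpa=0x%lx len=%u\n", frag->gpa, piece);

                /* go forward to the next mmio piece */
                frag->data += piece;
                frag->gpa  += piece;
                frag->len  -= piece;
        }
}

int main(void)
{
        unsigned char buf[100] = { 0 };
        struct frag_sketch frag = { 0xfebf0000UL, buf, sizeof(buf) };

        replay_fragment(&frag); /* 13 exits: 12 x 8 bytes + 1 x 4 bytes */
        return 0;
}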
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3779,7 +3779,7 @@ static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
 {
        struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];

-       memcpy(vcpu->run->mmio.data, frag->data, frag->len);
+       memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
        return X86EMUL_CONTINUE;
 }

@@ -3832,18 +3832,11 @@ mmio:
        bytes -= handled;
        val += handled;

-       while (bytes) {
-               unsigned now = min(bytes, 8U);
-
-               frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
-               frag->gpa = gpa;
-               frag->data = val;
-               frag->len = now;
-
-               gpa += now;
-               val += now;
-               bytes -= now;
-       }
+       WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
+       frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
+       frag->gpa = gpa;
+       frag->data = val;
+       frag->len = bytes;
        return X86EMUL_CONTINUE;
 }

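For context, a back-of-the-envelope sketch (the transfer size below is an
illustrative assumption) of why the removed loop overflowed: every 8-byte
piece used to take its own slot in the fixed-size vcpu->mmio_fragments[]
array.

/* Sketch: fragment slots needed under the old per-8-byte scheme. */
#include <stdio.h>

int main(void)
{
        unsigned bytes = 1024;                  /* a large "rep ins" transfer */
        unsigned needed = (bytes + 7) / 8;      /* one fragment per 8 bytes   */

        printf("%u-byte transfer -> %u fragments under the old scheme\n",
               bytes, needed);  /* prints: 1024-byte transfer -> 128 fragments */
        return 0;
}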
@@ -3890,7 +3883,7 @@ int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
        vcpu->mmio_needed = 1;
        vcpu->mmio_cur_fragment = 0;

-       vcpu->run->mmio.len = vcpu->mmio_fragments[0].len;
+       vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
        vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
        vcpu->run->exit_reason = KVM_EXIT_MMIO;
        vcpu->run->mmio.phys_addr = gpa;
@@ -5522,28 +5515,44 @@ static int complete_emulated_pio(struct kvm_vcpu *vcpu)
  *
  * read:
  *   for each fragment
- *     write gpa, len
- *     exit
- *     copy data
+ *     for each mmio piece in the fragment
+ *       write gpa, len
+ *       exit
+ *       copy data
  *   execute insn
  *
  * write:
  *   for each fragment
- *      write gpa, len
- *      copy data
- *      exit
+ *     for each mmio piece in the fragment
+ *       write gpa, len
+ *       copy data
+ *       exit
  */
 static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
 {
        struct kvm_run *run = vcpu->run;
        struct kvm_mmio_fragment *frag;
+       unsigned len;

        BUG_ON(!vcpu->mmio_needed);

        /* Complete previous fragment */
-       frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment++];
+       frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
+       len = min(8u, frag->len);
        if (!vcpu->mmio_is_write)
-               memcpy(frag->data, run->mmio.data, frag->len);
+               memcpy(frag->data, run->mmio.data, len);
+
+       if (frag->len <= 8) {
+               /* Switch to the next fragment. */
+               frag++;
+               vcpu->mmio_cur_fragment++;
+       } else {
+               /* Go forward to the next mmio piece. */
+               frag->data += len;
+               frag->gpa += len;
+               frag->len -= len;
+       }
+
        if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
                vcpu->mmio_needed = 0;
                if (vcpu->mmio_is_write)
@@ -5551,13 +5560,12 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
                vcpu->mmio_read_completed = 1;
                return complete_emulated_io(vcpu);
        }
-       /* Initiate next fragment */
-       ++frag;
+
        run->exit_reason = KVM_EXIT_MMIO;
        run->mmio.phys_addr = frag->gpa;
        if (vcpu->mmio_is_write)
-               memcpy(run->mmio.data, frag->data, frag->len);
-       run->mmio.len = frag->len;
+               memcpy(run->mmio.data, frag->data, min(8u, frag->len));
+       run->mmio.len = min(8u, frag->len);
        run->mmio.is_write = vcpu->mmio_is_write;
        vcpu->arch.complete_userspace_io = complete_emulated_mmio;
        return 0;
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -42,19 +42,8 @@
  */
 #define KVM_MEMSLOT_INVALID    (1UL << 16)

-/*
- * If we support unaligned MMIO, at most one fragment will be split into two:
- */
-#ifdef KVM_UNALIGNED_MMIO
-#  define KVM_EXTRA_MMIO_FRAGMENTS 1
-#else
-#  define KVM_EXTRA_MMIO_FRAGMENTS 0
-#endif
-
-#define KVM_USER_MMIO_SIZE 8
-
-#define KVM_MAX_MMIO_FRAGMENTS \
-       (KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)
+/* Two fragments for cross MMIO pages. */
+#define KVM_MAX_MMIO_FRAGMENTS 2

 /*
  * For the normal pfn, the highest 12 bits should be zero,
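Per the commit message, two fragment slots cover an access that crosses MMIO
pages. A small sketch of that single split at the page boundary (the 4 KiB
page size and the sample address are assumptions for illustration):

/* Sketch: one access split at a page boundary yields at most two pieces. */
#include <stdio.h>

#define PAGE_SIZE_SKETCH 4096UL

int main(void)
{
        unsigned long gpa = 0xfebf0ff8UL;       /* sample access near a page end */
        unsigned long len = 16;

        unsigned long first = PAGE_SIZE_SKETCH - (gpa & (PAGE_SIZE_SKETCH - 1));
        if (first > len)
                first = len;

        printf("fragment 0: gpa=0x%lx len=%lu\n", gpa, first);
        if (len > first)
                printf("fragment 1: gpa=0x%lx len=%lu\n",
                       gpa + first, len - first);
        /* fragment 0: gpa=0xfebf0ff8 len=8
         * fragment 1: gpa=0xfebf1000 len=8 */
        return 0;
}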