A small set of x86 fixes. The most serious is an SRCU lockdep fix.

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2.0.22 (GNU/Linux)
 
 iQEcBAABAgAGBQJUVd9KAAoJEL/70l94x66Dc1AH/0jdb8DsewyAuJzLKaJ/qJwK
 9JMqglpDQ+Sm0f2puPyJkR8NQd2AMPK7J5aJjWAl/XxJjsDcn+TQur20okzUDXLJ
 21sIbqo92hCgpSNs+RHLHlj7/iMQVYnMFh7bp6JcvzmhpN8F/D793BT+oOxdjMRg
 PLCQ794ugGhFboesDkV822VWgtQ26yG2aQDWbYgL9r5xPp5OpbzSiq85KopSEfS0
 K+PPntI8yNI+EvOC9ta0FfEOMMfQoLDds+V0FXiEIRx43MV8bwAXpWzsB8ibd1F6
 eY+cVvSPzWgDSCVLn3gfYkrRl3sWGdvyfxTe/cz507ZfXcuT2uHJhtbpH2KCGto=
 =FJ6/
 -----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "A small set of x86 fixes.  The most serious is an SRCU lockdep fix.

  A bit late - needed some time to test the SRCU fix, which only came in
  on Friday"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: vmx: defer load of APIC access page address during reset
  KVM: nVMX: Disable preemption while reading from shadow VMCS
  KVM: x86: Fix far-jump to non-canonical check
  KVM: emulator: fix execution close to the segment limit
  KVM: emulator: fix error code for __linearize
Linus Torvalds 2014-11-02 12:31:02 -08:00
commit 7501a53329
2 changed files with 45 additions and 16 deletions

arch/x86/kvm/emulate.c

@@ -574,12 +574,14 @@ static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
 	case 4:
 		ctxt->_eip = (u32)dst;
 		break;
+#ifdef CONFIG_X86_64
 	case 8:
 		if ((cs_l && is_noncanonical_address(dst)) ||
-		    (!cs_l && (dst & ~(u32)-1)))
+		    (!cs_l && (dst >> 32) != 0))
 			return emulate_gp(ctxt, 0);
 		ctxt->_eip = dst;
 		break;
+#endif
 	default:
 		WARN(1, "unsupported eip assignment size\n");
 	}
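
A note on the check rewritten above: dst is a ulong, and the old mask ~(u32)-1 can never catch the upper 32 bits. (u32)-1 is 0xffffffff, the ~ is applied within the 32-bit type and yields 0, so dst & ~(u32)-1 is constantly false and a far jump with garbage in the high half sailed through. The replacement (dst >> 32) != 0 inspects the high half directly. A standalone sketch (plain userspace C, not kernel code) demonstrating the promotion:

    /* Standalone demonstration of the masking bug fixed above;
     * plain userspace C, not kernel code. */
    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t u32;

    int main(void)
    {
        unsigned long dst = 0x1234deadbeefUL;   /* high 32 bits nonzero */

        /* (u32)-1 is 0xffffffff; '~' is applied in the 32-bit type,
         * giving 0, so the old mask can never see the high bits. */
        printf("old check: %lu\n", dst & ~(u32)-1);     /* prints 0 */

        /* the replacement inspects the high half directly */
        printf("new check: %d\n", (dst >> 32) != 0);    /* prints 1 */
        return 0;
    }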
@@ -641,7 +643,8 @@ static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
 
 static int __linearize(struct x86_emulate_ctxt *ctxt,
 		     struct segmented_address addr,
-		     unsigned size, bool write, bool fetch,
+		     unsigned *max_size, unsigned size,
+		     bool write, bool fetch,
 		     ulong *linear)
 {
 	struct desc_struct desc;
@@ -652,10 +655,15 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 	unsigned cpl;
 
 	la = seg_base(ctxt, addr.seg) + addr.ea;
+	*max_size = 0;
 	switch (ctxt->mode) {
 	case X86EMUL_MODE_PROT64:
 		if (((signed long)la << 16) >> 16 != la)
 			return emulate_gp(ctxt, 0);
+
+		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
+		if (size > *max_size)
+			goto bad;
 		break;
 	default:
 		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
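
The test ((signed long)la << 16) >> 16 != la above is the emulator's canonical-address check: x86_64 virtual addresses are 48 bits wide, and bits 63:48 must be copies of bit 47, which is exactly what surviving a 16-bit sign-extending shift round trip means. A standalone sketch of the same predicate (it relies on gcc's arithmetic right shift of signed values, as the kernel does):

    /* The canonical-address predicate used above, standalone.
     * Returns 1 when bits 63:48 are copies of bit 47. */
    #include <stdio.h>

    static int is_canonical(unsigned long la)
    {
        return ((signed long)la << 16) >> 16 == (signed long)la;
    }

    int main(void)
    {
        printf("%d\n", is_canonical(0x00007fffffffffffUL)); /* 1: top of lower half */
        printf("%d\n", is_canonical(0xffff800000000000UL)); /* 1: base of upper half */
        printf("%d\n", is_canonical(0x0000800000000000UL)); /* 0: inside the hole */
        return 0;
    }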
@@ -673,20 +681,25 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 		if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
 		    (ctxt->d & NoBigReal)) {
 			/* la is between zero and 0xffff */
-			if (la > 0xffff || (u32)(la + size - 1) > 0xffff)
+			if (la > 0xffff)
 				goto bad;
+			*max_size = 0x10000 - la;
 		} else if ((desc.type & 8) || !(desc.type & 4)) {
 			/* expand-up segment */
-			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
+			if (addr.ea > lim)
 				goto bad;
+			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
 		} else {
 			/* expand-down segment */
-			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
+			if (addr.ea <= lim)
 				goto bad;
 			lim = desc.d ? 0xffffffff : 0xffff;
-			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
+			if (addr.ea > lim)
 				goto bad;
+			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
 		}
+		if (size > *max_size)
+			goto bad;
 		cpl = ctxt->ops->cpl(ctxt);
 		if (!(desc.type & 8)) {
 			/* data segment */
@@ -711,9 +724,9 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 	return X86EMUL_CONTINUE;
 bad:
 	if (addr.seg == VCPU_SREG_SS)
-		return emulate_ss(ctxt, sel);
+		return emulate_ss(ctxt, 0);
 	else
-		return emulate_gp(ctxt, sel);
+		return emulate_gp(ctxt, 0);
 }
 
 static int linearize(struct x86_emulate_ctxt *ctxt,
@@ -721,7 +734,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
 		     unsigned size, bool write,
 		     ulong *linear)
 {
-	return __linearize(ctxt, addr, size, write, false, linear);
+	unsigned max_size;
+	return __linearize(ctxt, addr, &max_size, size, write, false, linear);
 }
 
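
To make the *max_size arithmetic above concrete: for an expand-up segment, valid offsets run from 0 to lim, so lim + 1 - addr.ea bytes remain; for an expand-down segment, valid offsets run from lim + 1 up to 0xffff (or 0xffffffff when desc.d is set), which is why lim is reassigned before the second check. A worked sketch with made-up segment values (none of this is a KVM interface):

    /* Worked example of the *max_size computation above, using
     * made-up segment values; not a KVM interface. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* expand-up: offsets 0..lim are valid */
        uint32_t lim = 0xffff, ea = 0xfff0;
        printf("expand-up max_size: 0x%llx\n",
               (unsigned long long)lim + 1 - ea);        /* 0x10 */

        /* expand-down, desc.d = 0: offsets lim+1..0xffff are valid */
        uint32_t lim_down = 0x1000, ea_down = 0x2000;
        if (ea_down > lim_down)                          /* passes the check */
            printf("expand-down max_size: 0x%llx\n",
                   0xffffULL + 1 - ea_down);             /* 0xe000 */
        return 0;
    }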
@@ -746,17 +760,27 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 {
 	int rc;
-	unsigned size;
+	unsigned size, max_size;
 	unsigned long linear;
 	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
 	struct segmented_address addr = { .seg = VCPU_SREG_CS,
 					   .ea = ctxt->eip + cur_size };
 
-	size = 15UL ^ cur_size;
-	rc = __linearize(ctxt, addr, size, false, true, &linear);
+	/*
+	 * We do not know exactly how many bytes will be needed, and
+	 * __linearize is expensive, so fetch as much as possible. We
+	 * just have to avoid going beyond the 15 byte limit, the end
+	 * of the segment, or the end of the page.
+	 *
+	 * __linearize is called with size 0 so that it does not do any
+	 * boundary check itself. Instead, we use max_size to check
+	 * against op_size.
+	 */
+	rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
 	if (unlikely(rc != X86EMUL_CONTINUE))
 		return rc;
 
+	size = min_t(unsigned, 15UL ^ cur_size, max_size);
 	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
 
 	/*
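
One subtlety worth calling out in the hunk above: 15UL ^ cur_size is a branch-free spelling of 15 - cur_size. It works because an x86 instruction is at most 15 bytes, so cur_size stays in 0..15, and XOR against 0b1111 cannot borrow. A quick standalone check:

    /* Verify 15 ^ n == 15 - n for n in 0..15, the identity behind
     * the remaining-fetch-budget computation above. */
    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        for (unsigned n = 0; n <= 15; n++)
            assert((15UL ^ n) == 15 - n);
        puts("15 ^ n == 15 - n for all n in 0..15");
        return 0;
    }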
@@ -766,7 +790,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 	 * still, we must have hit the 15-byte boundary.
 	 */
 	if (unlikely(size < op_size))
-		return X86EMUL_UNHANDLEABLE;
+		return emulate_gp(ctxt, 0);
+
 	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
 			      size, &ctxt->exception);
 	if (unlikely(rc != X86EMUL_CONTINUE))
@@ -2012,7 +2037,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
 
 	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
 	if (rc != X86EMUL_CONTINUE) {
-		WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64);
+		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
 		/* assigning eip failed; restore the old cs */
 		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
 		return rc;
@@ -2109,7 +2134,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
 		return rc;
 	rc = assign_eip_far(ctxt, eip, new_desc.l);
 	if (rc != X86EMUL_CONTINUE) {
-		WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64);
+		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
 		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
 	}
 	return rc;
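
The WARN_ON fixes in the last two hunks drop a stray '!': in !ctxt->mode != X86EMUL_MODE_PROT64, the negation binds to ctxt->mode first, so the comparison is between 0 or 1 and the enum value, and the warning fires in exactly the 64-bit case it was meant to exclude. A minimal illustration (the enum values here are made up, not the kernel's):

    /* How the stray '!' inverted the WARN_ON condition; the enum
     * values are illustrative, not the kernel's. */
    #include <stdio.h>

    enum { X86EMUL_MODE_PROT32 = 4, X86EMUL_MODE_PROT64 = 8 };

    int main(void)
    {
        int mode = X86EMUL_MODE_PROT64;

        /* buggy: !mode is 0, and 0 != 8 is true, so the warning
         * fires precisely in the 64-bit case it should skip */
        printf("buggy: %d\n", !mode != X86EMUL_MODE_PROT64);  /* 1 */

        /* fixed: compares the mode itself */
        printf("fixed: %d\n", mode != X86EMUL_MODE_PROT64);   /* 0 */
        return 0;
    }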

arch/x86/kvm/vmx.c

@@ -4579,7 +4579,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		vmcs_write32(TPR_THRESHOLD, 0);
 	}
 
-	kvm_vcpu_reload_apic_access_page(vcpu);
+	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
 
 	if (vmx_vm_has_apicv(vcpu->kvm))
 		memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
@@ -6426,6 +6426,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
 	const unsigned long *fields = shadow_read_write_fields;
 	const int num_fields = max_shadow_read_write_fields;
 
+	preempt_disable();
+
 	vmcs_load(shadow_vmcs);
 
 	for (i = 0; i < num_fields; i++) {
@@ -6449,6 +6451,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
 
 	vmcs_clear(shadow_vmcs);
 	vmcs_load(vmx->loaded_vmcs->vmcs);
+
+	preempt_enable();
 }
 
 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
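
The preempt_disable()/preempt_enable() pair added above keeps the whole vmcs_load ... vmcs_clear sequence on one CPU: the current-VMCS pointer is per-processor state, so a preemption-driven migration mid-copy would load the shadow VMCS on one CPU and restore loaded_vmcs on another. A sketch of the general discipline follows; the hw_*() helpers are hypothetical stand-ins for vmcs_load()/vmread(), only the preempt calls are real, and the snippet compiles only against kernel headers:

    /* Shape of the fix above: keep every access to per-CPU hardware
     * state on one CPU. The hw_*() helpers are hypothetical stand-ins
     * for vmcs_load()/vmread(); only the preempt calls are real. */
    #include <linux/preempt.h>

    static void hw_load(void *vmcs) { }        /* stand-in: vmcs_load() */
    static void hw_read_fields(void *vmcs) { } /* stand-in: vmread loop */

    static void copy_fields_safely(void *shadow, void *loaded)
    {
        preempt_disable();       /* pin to this CPU; no migration below */
        hw_load(shadow);         /* 'shadow' becomes current on this CPU */
        hw_read_fields(shadow);  /* must see the same CPU's state */
        hw_load(loaded);         /* restore the original current VMCS */
        preempt_enable();        /* migration allowed again */
    }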