powerpc: Revert "Use the POWER8 Micro Partition Prefetch Engine in KVM HV on POWER8"
This reverts commit 9678cdaae9 ("Use the POWER8 Micro Partition Prefetch Engine in KVM HV on POWER8") because the original commit had multiple, partly self-cancelling bugs that could cause occasional memory corruption.

In fact the logmpp instruction was incorrectly using register r0 as the source of the buffer address and operation code, and depending on what was in r0, it would either do nothing or corrupt the 64k page pointed to by r0.

The logmpp instruction encoding and the operation code definitions could be corrected, but then there is the problem that there is no clearly defined way to know when the hardware has finished writing to the buffer. The original commit attempted to work around this by aborting the write-out before starting the prefetch, but this is ineffective in the case where the virtual core is now executing on a different physical core from the one where the write-out was initiated.

These problems, plus advice from the hardware designers not to use the feature (since the measured performance improvement from using it was actually mostly negative), mean that reverting the code is the best option.

Fixes: 9678cdaae9 ("Use the POWER8 Micro Partition Prefetch Engine in KVM HV on POWER8")
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
This commit is contained in:
parent abb39bc792
commit 23316316c1
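For readers who want the reverted mechanism in one place, here is a minimal, self-contained C sketch of the flow that this revert removes, reconstructed from the hunks below. The names vcore_sketch, start_saving_l2, start_restoring_l2, run_core_sketch and mtspr_mppr are illustrative stand-ins rather than kernel symbols, and the stub functions only mimic the shape of mtspr()/logmpp()/virt_to_phys(); the constants and the ordering of operations are taken from the removed code. The comments mark the two problems the commit message describes.

#include <stdint.h>
#include <stdbool.h>

#define PPC_MPPE_ADDRESS_MASK      0xffffffffc000ULL
#define PPC_MPPR_FETCH_ABORT       (0x0ULL << 60)
#define PPC_MPPR_FETCH_WHOLE_TABLE (0x2ULL << 60)
#define PPC_LOGMPP_LOG_L2          (0x02ULL << 54)
#define PPC_LOGMPP_LOG_ABORT       (0x03ULL << 54)

/* Stubs standing in for the kernel's mtspr(SPRN_MPPR, ...), logmpp() and
 * virt_to_phys(); they do nothing here and exist only so the sketch compiles. */
static void mtspr_mppr(uint64_t val) { (void)val; }
static void logmpp(uint64_t x) { (void)x; }
static uint64_t virt_to_phys(void *p) { return (uint64_t)(uintptr_t)p; }

struct vcore_sketch {
	void *mpp_buffer;          /* log buffer (MPP_BUFFER_ORDER pages in the real code) */
	bool mpp_buffer_is_valid;  /* set once a save has been started */
};

/* Bug 1 (encoding): the real logmpp() wrapper was
 *   asm volatile(PPC_LOGMPP(R1) : : "r" (x));
 * a hand-encoded opcode naming a fixed register while the "r" constraint let
 * the compiler place x anywhere; per the commit message the instruction
 * actually took its buffer address and operation code from r0, so it either
 * did nothing or corrupted the 64k page r0 happened to point at. */

static void start_saving_l2(struct vcore_sketch *vc)
{
	uint64_t mpp_addr = virt_to_phys(vc->mpp_buffer) & PPC_MPPE_ADDRESS_MASK;

	mtspr_mppr(mpp_addr | PPC_MPPR_FETCH_ABORT);  /* stop any prefetch in flight */
	logmpp(mpp_addr | PPC_LOGMPP_LOG_L2);         /* start logging L2 contents */
	vc->mpp_buffer_is_valid = true;
}

static void start_restoring_l2(struct vcore_sketch *vc)
{
	uint64_t mpp_addr = virt_to_phys(vc->mpp_buffer) & PPC_MPPE_ADDRESS_MASK;

	/* Bug 2 (sequencing): this abort only reaches the MPP engine of the
	 * physical core it runs on.  If the write-out was started on a
	 * different physical core, that engine may still be writing the
	 * buffer while the prefetch below reads it, and there is no
	 * architected way to tell when the write-out has finished. */
	logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT);
	mtspr_mppr(mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
}

static void run_core_sketch(struct vcore_sketch *vc)
{
	if (vc->mpp_buffer_is_valid)
		start_restoring_l2(vc);   /* prefetch what was saved last time */

	/* guest runs here (__kvmppc_vcore_entry() in the real code) */

	if (vc->mpp_buffer)
		start_saving_l2(vc);      /* log L2 for the next entry */
}

int main(void)
{
	/* Dummy buffer address; the stubs never dereference it. */
	struct vcore_sketch vc = { .mpp_buffer = (void *)0x10000, .mpp_buffer_is_valid = false };

	run_core_sketch(&vc);
	return 0;
}

In the removed code these correspond to kvmppc_start_saving_l2_cache(), kvmppc_start_restoring_l2_cache() and the calls bracketing __kvmppc_vcore_entry() in kvmppc_run_core(), as shown in the book3s_hv.c hunks further down.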
@@ -3,7 +3,6 @@
 
 #ifdef __KERNEL__
 
-#include <asm/reg.h>
 
 /* bytes per L1 cache line */
 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
@@ -40,12 +39,6 @@ struct ppc64_caches {
 };
 
 extern struct ppc64_caches ppc64_caches;
-
-static inline void logmpp(u64 x)
-{
-	asm volatile(PPC_LOGMPP(R1) : : "r" (x));
-}
-
 #endif /* __powerpc64__ && ! __ASSEMBLY__ */
 
 #if defined(__ASSEMBLY__)
@@ -297,8 +297,6 @@ struct kvmppc_vcore {
 	u32 arch_compat;
 	ulong pcr;
 	ulong dpdes;		/* doorbell state (POWER8) */
-	void *mpp_buffer;	/* Micro Partition Prefetch buffer */
-	bool mpp_buffer_is_valid;
 	ulong conferring_threads;
 };
 
@@ -141,7 +141,6 @@
 #define PPC_INST_ISEL			0x7c00001e
 #define PPC_INST_ISEL_MASK		0xfc00003e
 #define PPC_INST_LDARX			0x7c0000a8
-#define PPC_INST_LOGMPP			0x7c0007e4
 #define PPC_INST_LSWI			0x7c0004aa
 #define PPC_INST_LSWX			0x7c00042a
 #define PPC_INST_LWARX			0x7c000028
@@ -285,20 +284,6 @@
 #define __PPC_EH(eh)	0
 #endif
 
-/* POWER8 Micro Partition Prefetch (MPP) parameters */
-/* Address mask is common for LOGMPP instruction and MPPR SPR */
-#define PPC_MPPE_ADDRESS_MASK	0xffffffffc000ULL
-
-/* Bits 60 and 61 of MPP SPR should be set to one of the following */
-/* Aborting the fetch is indeed setting 00 in the table size bits */
-#define PPC_MPPR_FETCH_ABORT		(0x0ULL << 60)
-#define PPC_MPPR_FETCH_WHOLE_TABLE	(0x2ULL << 60)
-
-/* Bits 54 and 55 of register for LOGMPP instruction should be set to: */
-#define PPC_LOGMPP_LOG_L2	(0x02ULL << 54)
-#define PPC_LOGMPP_LOG_L2L3	(0x01ULL << 54)
-#define PPC_LOGMPP_LOG_ABORT	(0x03ULL << 54)
-
 /* Deal with instructions that older assemblers aren't aware of */
 #define	PPC_DCBAL(a, b)		stringify_in_c(.long PPC_INST_DCBAL | \
 					__PPC_RA(a) | __PPC_RB(b))
@@ -307,8 +292,6 @@
 #define PPC_LDARX(t, a, b, eh)	stringify_in_c(.long PPC_INST_LDARX | \
 					___PPC_RT(t) | ___PPC_RA(a) | \
 					___PPC_RB(b) | __PPC_EH(eh))
-#define PPC_LOGMPP(b)		stringify_in_c(.long PPC_INST_LOGMPP | \
-					__PPC_RB(b))
 #define PPC_LWARX(t, a, b, eh)	stringify_in_c(.long PPC_INST_LWARX | \
 					___PPC_RT(t) | ___PPC_RA(a) | \
 					___PPC_RB(b) | __PPC_EH(eh))
@@ -226,7 +226,6 @@
 #define   CTRL_TE		0x00c00000	/* thread enable */
 #define   CTRL_RUNLATCH		0x1
 #define SPRN_DAWR	0xB4
-#define SPRN_MPPR	0xB8	/* Micro Partition Prefetch Register */
 #define SPRN_RPR	0xBA	/* Relative Priority Register */
 #define SPRN_CIABR	0xBB
 #define   CIABR_PRIV		0x3
@@ -36,7 +36,6 @@
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
-#include <asm/cache.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
@@ -75,12 +74,6 @@
 
 static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
 
-#if defined(CONFIG_PPC_64K_PAGES)
-#define MPP_BUFFER_ORDER	0
-#elif defined(CONFIG_PPC_4K_PAGES)
-#define MPP_BUFFER_ORDER	3
-#endif
-
 static int dynamic_mt_modes = 6;
 module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
@@ -1455,13 +1448,6 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
 	vcore->kvm = kvm;
 	INIT_LIST_HEAD(&vcore->preempt_list);
 
-	vcore->mpp_buffer_is_valid = false;
-
-	if (cpu_has_feature(CPU_FTR_ARCH_207S))
-		vcore->mpp_buffer = (void *)__get_free_pages(
-			GFP_KERNEL|__GFP_ZERO,
-			MPP_BUFFER_ORDER);
-
 	return vcore;
 }
 
@@ -1894,33 +1880,6 @@ static int on_primary_thread(void)
 	return 1;
 }
 
-static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc)
-{
-	phys_addr_t phy_addr, mpp_addr;
-
-	phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer);
-	mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
-
-	mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT);
-	logmpp(mpp_addr | PPC_LOGMPP_LOG_L2);
-
-	vc->mpp_buffer_is_valid = true;
-}
-
-static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
-{
-	phys_addr_t phy_addr, mpp_addr;
-
-	phy_addr = virt_to_phys(vc->mpp_buffer);
-	mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
-
-	/* We must abort any in-progress save operations to ensure
-	 * the table is valid so that prefetch engine knows when to
-	 * stop prefetching. */
-	logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT);
-	mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
-}
-
 /*
  * A list of virtual cores for each physical CPU.
  * These are vcores that could run but their runner VCPU tasks are
@@ -2471,14 +2430,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 
 	srcu_idx = srcu_read_lock(&vc->kvm->srcu);
 
-	if (vc->mpp_buffer_is_valid)
-		kvmppc_start_restoring_l2_cache(vc);
-
 	__kvmppc_vcore_entry();
 
-	if (vc->mpp_buffer)
-		kvmppc_start_saving_l2_cache(vc);
-
 	srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
 
 	spin_lock(&vc->lock);
@@ -3073,14 +3026,8 @@ static void kvmppc_free_vcores(struct kvm *kvm)
 {
 	long int i;
 
-	for (i = 0; i < KVM_MAX_VCORES; ++i) {
-		if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) {
-			struct kvmppc_vcore *vc = kvm->arch.vcores[i];
-			free_pages((unsigned long)vc->mpp_buffer,
-				   MPP_BUFFER_ORDER);
-		}
+	for (i = 0; i < KVM_MAX_VCORES; ++i)
 		kfree(kvm->arch.vcores[i]);
-	}
 	kvm->arch.online_vcores = 0;
 }
 