KVM: MMU: Make pte_list_desc fit cache lines well
We have PTE_LIST_EXT + 1 pointers in this structure and these 40/20 bytes
do not fit cache lines well.  Furthermore, some allocators may use
64/32-byte objects for the pte_list_desc cache.

This patch solves this problem by changing PTE_LIST_EXT from 4 to 3.

For shadow paging, the new size is still large enough to hold both the
kernel and process mappings for usual anonymous pages.  For file mappings,
there may be a slight change in the cache usage.

Note: with EPT/NPT we almost always have a single spte in each reverse
mapping and we will not see any change by this.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
commit 220f773a00
parent c36fc04ef5
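The size arithmetic behind the change can be checked with a small stand-alone
program.  The following is a minimal user-space sketch, not kernel code: the
struct and macro names (old_pte_list_desc, new_pte_list_desc) are invented for
illustration, and it assumes an LP64 build where a pointer is 8 bytes, matching
the 40-byte figure quoted for 64-bit kernels (the 20-byte figure corresponds to
32-bit, 4-byte pointers).

/*
 * User-space mock of the descriptor layout before and after the patch.
 * With PTE_LIST_EXT == 4 the node holds 4 + 1 = 5 pointers (40 bytes on
 * 64-bit), which neither packs into a 32-byte boundary nor fills a
 * 64-byte slab object; with PTE_LIST_EXT == 3 it holds 4 pointers
 * (32 bytes), two of which fit exactly in a 64-byte object/cache line.
 */
#include <stdio.h>

typedef unsigned long long u64;

#define OLD_PTE_LIST_EXT 4
#define NEW_PTE_LIST_EXT 3

struct old_pte_list_desc {
	u64 *sptes[OLD_PTE_LIST_EXT];
	struct old_pte_list_desc *more;
};

struct new_pte_list_desc {
	u64 *sptes[NEW_PTE_LIST_EXT];
	struct new_pte_list_desc *more;
};

int main(void)
{
	/* Expected output on a 64-bit build: 40 and 32 bytes. */
	printf("old pte_list_desc: %zu bytes\n", sizeof(struct old_pte_list_desc));
	printf("new pte_list_desc: %zu bytes\n", sizeof(struct new_pte_list_desc));
	return 0;
}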
@@ -135,8 +135,6 @@ module_param(dbg, bool, 0644);
 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
 			| PT64_NX_MASK)
 
-#define PTE_LIST_EXT 4
-
 #define ACC_EXEC_MASK 1
 #define ACC_WRITE_MASK PT_WRITABLE_MASK
 #define ACC_USER_MASK PT_USER_MASK
@@ -151,6 +149,9 @@ module_param(dbg, bool, 0644);
 
 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
 
+/* make pte_list_desc fit well in cache line */
+#define PTE_LIST_EXT 3
+
 struct pte_list_desc {
 	u64 *sptes[PTE_LIST_EXT];
 	struct pte_list_desc *more;
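For context, pte_list_desc is the overflow node of KVM's reverse-mapping
lists: once more than one spte points back at the same page, the spte
pointers are chained through these descriptors, which is why the commit
message talks about how many mappings the new size can still hold.  The
sketch below is a simplified, hypothetical user-space illustration of that
chaining; add_spte and the bare head pointer are invented here and are not
the kernel's actual pte_list_add() or rmap encoding.

/*
 * Illustrative only: each descriptor stores up to PTE_LIST_EXT spte
 * pointers; when the last descriptor fills up, a new one is linked in
 * through ->more.
 */
#include <stdio.h>
#include <stdlib.h>

typedef unsigned long long u64;

#define PTE_LIST_EXT 3

struct pte_list_desc {
	u64 *sptes[PTE_LIST_EXT];
	struct pte_list_desc *more;
};

/* Append one spte pointer, allocating a new descriptor when needed. */
static void add_spte(struct pte_list_desc **head, u64 *spte)
{
	struct pte_list_desc *desc;

	if (!*head)
		*head = calloc(1, sizeof(**head));

	/* Walk to the last descriptor in the chain. */
	for (desc = *head; desc->more; desc = desc->more)
		;

	for (int i = 0; i < PTE_LIST_EXT; i++) {
		if (!desc->sptes[i]) {
			desc->sptes[i] = spte;
			return;
		}
	}

	/* Current descriptor is full: chain a fresh one. */
	desc->more = calloc(1, sizeof(*desc));
	desc->more->sptes[0] = spte;
}

int main(void)
{
	struct pte_list_desc *head = NULL;
	u64 fake_sptes[5];
	int nodes = 0;

	for (int i = 0; i < 5; i++)
		add_spte(&head, &fake_sptes[i]);

	/* With PTE_LIST_EXT == 3, five sptes occupy two descriptors. */
	for (struct pte_list_desc *d = head; d; d = d->more)
		nodes++;
	printf("descriptors used: %d\n", nodes);
	return 0;
}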