commit 7dd2157cb6
Merge tag 'kvm-ppc-next-4.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into HEAD

PPC KVM update for 4.20.

The major new feature here is nested HV KVM support. This allows the HV KVM
module to load inside a radix guest on POWER9 and run radix guests underneath
it. These nested guests can run in supervisor mode and don't require any
additional instructions to be emulated, unlike with PR KVM, so performance is
much better than with PR KVM and is very close to that of a non-nested guest.
A nested hypervisor (a guest with nested guests) can be migrated to another
host and will bring all its nested guests along with it. A nested guest can
also itself run guests, and so on down to any desired depth of nesting.

Apart from that there is a series of updates for IOMMU handling from Alexey
Kardashevskiy, a "one VM per core" mode for HV KVM for security-paranoid
applications, and a small fix for PR KVM.
@@ -1922,6 +1922,7 @@ registers, find a list below:
   PPC   | KVM_REG_PPC_TIDR              | 64
   PPC   | KVM_REG_PPC_PSSCR             | 64
   PPC   | KVM_REG_PPC_DEC_EXPIRY        | 64
+  PPC   | KVM_REG_PPC_PTCR              | 64
   PPC   | KVM_REG_PPC_TM_GPR0           | 64
   ...
   PPC   | KVM_REG_PPC_TM_GPR31          | 64
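For illustration only, the new partition-table register can be accessed from
userspace through the usual one-reg interface; the sketch below assumes a vCPU
file descriptor obtained elsewhere (the helper name and error handling are not
part of this patch).

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

/* Sketch: read the guest partition-table control register (PTCR) from a
 * vCPU fd with KVM_GET_ONE_REG, e.g. when saving state for migration. */
static int get_ptcr(int vcpu_fd, uint64_t *ptcr)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_PTCR,
		.addr = (uintptr_t)ptcr,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}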
@@ -2269,6 +2270,10 @@ The supported flags are:
     The emulated MMU supports 1T segments in addition to the
     standard 256M ones.

+  - KVM_PPC_NO_HASH
+	This flag indicates that HPT guests are not supported by KVM,
+	thus all guests must use radix MMU mode.
+
 The "slb_size" field indicates how many SLB entries are supported

 The "sps" array contains 8 entries indicating the supported base
@@ -4531,6 +4536,20 @@ With this capability, a guest may read the MSR_PLATFORM_INFO MSR. Otherwise,
 a #GP would be raised when the guest tries to access. Currently, this
 capability does not enable write permissions of this MSR for the guest.

+7.16 KVM_CAP_PPC_NESTED_HV
+
+Architectures: ppc
+Parameters: none
+Returns: 0 on success, -EINVAL when the implementation doesn't support
+	nested-HV virtualization.
+
+HV-KVM on POWER9 and later systems allows for "nested-HV"
+virtualization, which provides a way for a guest VM to run guests that
+can run using the CPU's supervisor mode (privileged non-hypervisor
+state). Enabling this capability on a VM depends on the CPU having
+the necessary functionality and on the facility being enabled with a
+kvm-hv module parameter.
+
 8. Other capabilities.
 ----------------------
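As a rough illustration of how the capability documented above is consumed, the
sketch below shows a userspace VMM probing for and enabling nested-HV on a VM
file descriptor. It is only a sketch under the usual KVM ioctl conventions; the
vm_fd handling and error reporting are assumptions, not part of this patch.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch: enable nested-HV on an existing VM fd (vm_fd is assumed to come
 * from a prior KVM_CREATE_VM call). Returns 0 on success, negative on error. */
static int enable_nested_hv(int vm_fd)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_NESTED_HV };

	/* The capability is only advertised when the CPU and the kvm-hv
	 * module parameter allow it, as described above. */
	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_NESTED_HV) <= 0)
		return -1;

	/* Returns -EINVAL if nested-HV virtualization is unsupported. */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}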
@@ -150,4 +150,25 @@ extern s32 patch__memset_nocache, patch__memcpy_nocache;

 extern long flush_count_cache;

+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
+void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
+#else
+static inline void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
+				     bool preserve_nv) { }
+static inline void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
+					bool preserve_nv) { }
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
+void kvmhv_save_host_pmu(void);
+void kvmhv_load_host_pmu(void);
+void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use);
+void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu);
+
+int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu);
+
+long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);
+long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr,
+			unsigned long dabrx);
+
 #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
@@ -203,6 +203,18 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
 	BUG();
 }

+static inline unsigned int ap_to_shift(unsigned long ap)
+{
+	int psize;
+
+	for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
+		if (mmu_psize_defs[psize].ap == ap)
+			return mmu_psize_defs[psize].shift;
+	}
+
+	return -1;
+}
+
 static inline unsigned long get_sllp_encoding(int psize)
 {
 	unsigned long sllp;
@@ -53,6 +53,7 @@ extern void radix__flush_tlb_lpid_page(unsigned int lpid,
					unsigned long addr,
					unsigned long page_size);
 extern void radix__flush_pwc_lpid(unsigned int lpid);
 extern void radix__flush_tlb_lpid(unsigned int lpid);
 extern void radix__local_flush_tlb_lpid(unsigned int lpid);
 extern void radix__local_flush_tlb_lpid_guest(unsigned int lpid);
@@ -322,6 +322,11 @@
 #define H_GET_24X7_DATA		0xF07C
 #define H_GET_PERF_COUNTER_INFO	0xF080

+/* Platform-specific hcalls used for nested HV KVM */
+#define H_SET_PARTITION_TABLE	0xF800
+#define H_ENTER_NESTED		0xF804
+#define H_TLB_INVALIDATE	0xF808
+
 /* Values for 2nd argument to H_SET_MODE */
 #define H_SET_MODE_RESOURCE_SET_CIABR	1
 #define H_SET_MODE_RESOURCE_SET_DAWR	2
@@ -461,6 +466,42 @@ struct h_cpu_char_result {
 	u64 behaviour;
 };

+/* Register state for entering a nested guest with H_ENTER_NESTED */
+struct hv_guest_state {
+	u64 version;		/* version of this structure layout */
+	u32 lpid;
+	u32 vcpu_token;
+	/* These registers are hypervisor privileged (at least for writing) */
+	u64 lpcr;
+	u64 pcr;
+	u64 amor;
+	u64 dpdes;
+	u64 hfscr;
+	s64 tb_offset;
+	u64 dawr0;
+	u64 dawrx0;
+	u64 ciabr;
+	u64 hdec_expiry;
+	u64 purr;
+	u64 spurr;
+	u64 ic;
+	u64 vtb;
+	u64 hdar;
+	u64 hdsisr;
+	u64 heir;
+	u64 asdr;
+	/* These are OS privileged but need to be set late in guest entry */
+	u64 srr0;
+	u64 srr1;
+	u64 sprg[4];
+	u64 pidr;
+	u64 cfar;
+	u64 ppr;
+};
+
+/* Latest version of hv_guest_state structure */
+#define HV_GUEST_STATE_VERSION	1
+
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_HVCALL_H */
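To make the role of this structure and the H_ENTER_NESTED hcall number defined
above more concrete, here is a rough sketch of how an L1 hypervisor hands a
nested (L2) vCPU to the L0 hypervisor. The real register-filling and exit
handling live in the new book3s_hv code; treat the helper below, including its
name and exact calling sequence, as an illustrative outline written in kvm-hv
kernel context, not as the actual entry path.

/* Illustrative sketch only: an L1 hypervisor asks L0 to run a nested (L2)
 * vCPU by passing guest-real addresses of the privileged register state
 * (struct hv_guest_state) and the general register state (struct pt_regs). */
static long l1_enter_nested(struct kvm_vcpu *vcpu, struct hv_guest_state *hvregs)
{
	hvregs->version = HV_GUEST_STATE_VERSION;
	/* ... L1 fills in lpid, lpcr, hdec_expiry, srr0/1 and friends here ... */

	/* H_ENTER_NESTED returns the interrupt vector on which the nested
	 * guest exited, so L1 can synthesize that exit for its own guest. */
	return plpar_hcall_norets(H_ENTER_NESTED,
				  __pa(hvregs),
				  __pa(&vcpu->arch.regs));
}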
@@ -84,7 +84,6 @@
 #define BOOK3S_INTERRUPT_INST_STORAGE	0x400
 #define BOOK3S_INTERRUPT_INST_SEGMENT	0x480
 #define BOOK3S_INTERRUPT_EXTERNAL	0x500
-#define BOOK3S_INTERRUPT_EXTERNAL_LEVEL	0x501
 #define BOOK3S_INTERRUPT_EXTERNAL_HV	0x502
 #define BOOK3S_INTERRUPT_ALIGNMENT	0x600
 #define BOOK3S_INTERRUPT_PROGRAM	0x700
@@ -134,8 +133,7 @@
 #define BOOK3S_IRQPRIO_EXTERNAL			14
 #define BOOK3S_IRQPRIO_DECREMENTER		15
 #define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR	16
-#define BOOK3S_IRQPRIO_EXTERNAL_LEVEL		17
-#define BOOK3S_IRQPRIO_MAX			18
+#define BOOK3S_IRQPRIO_MAX			17

 #define BOOK3S_HFLAG_DCBZ32			0x1
 #define BOOK3S_HFLAG_SLB			0x2
@@ -188,14 +188,37 @@ extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run,
			struct kvm_vcpu *vcpu,
			unsigned long ea, unsigned long dsisr);
extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, u64 root,
			u64 *pte_ret_p);
extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, u64 table,
			int table_index, u64 *pte_ret_p);
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data, bool iswrite);
extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
			unsigned int shift, struct kvm_memory_slot *memslot,
			unsigned int lpid);
extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable,
			bool writing, unsigned long gpa,
			unsigned int lpid);
extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
			unsigned long gpa,
			struct kvm_memory_slot *memslot,
			bool writing, bool kvm_ro,
			pte_t *inserted_pte, unsigned int *levelp);
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
			unsigned int lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte,
			unsigned long gpa, unsigned int shift,
			struct kvm_memory_slot *memslot,
			unsigned int lpid);
extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,

@@ -271,6 +294,21 @@ static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif

long kvmhv_nested_init(void);
void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu,
			  u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
				   struct hv_guest_state *hr);
long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);

void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

extern int kvm_irq_bypass;

@@ -301,12 +339,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)

 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
 {
-	vcpu->arch.cr = val;
+	vcpu->arch.regs.ccr = val;
 }

 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.cr;
+	return vcpu->arch.regs.ccr;
 }

 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)

@@ -384,9 +422,6 @@ extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);
 /* TO = 31 for unconditional trap */
 #define INS_TW				0x7fe00008

-/* LPIDs we support with this build -- runtime limit may be lower */
-#define KVMPPC_NR_LPIDS			(LPID_RSVD + 1)
-
 #define SPLIT_HACK_MASK			0xff000000
 #define SPLIT_HACK_OFFS			0xfb000000
@@ -23,6 +23,108 @@
#include <linux/string.h>
#include <asm/bitops.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/cpu_has_feature.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC_PSERIES
static inline bool kvmhv_on_pseries(void)
{
	return !cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool kvmhv_on_pseries(void)
{
	return false;
}
#endif

/*
 * Structure for a nested guest, that is, for a guest that is managed by
 * one of our guests.
 */
struct kvm_nested_guest {
	struct kvm *l1_host;		/* L1 VM that owns this nested guest */
	int l1_lpid;			/* lpid L1 guest thinks this guest is */
	int shadow_lpid;		/* real lpid of this nested guest */
	pgd_t *shadow_pgtable;		/* our page table for this guest */
	u64 l1_gr_to_hr;		/* L1's addr of part'n-scoped table */
	u64 process_table;		/* process table entry for this guest */
	long refcnt;			/* number of pointers to this struct */
	struct mutex tlb_lock;		/* serialize page faults and tlbies */
	struct kvm_nested_guest *next;
	cpumask_t need_tlb_flush;
	cpumask_t cpu_in_guest;
	short prev_cpu[NR_CPUS];
};
/*
 * We define a nested rmap entry as a single 64-bit quantity
 * 0xFFF0000000000000	12-bit lpid field
 * 0x000FFFFFFFFFF000	40-bit guest 4k page frame number
 * 0x0000000000000001	1-bit  single entry flag
 */
#define RMAP_NESTED_LPID_MASK		0xFFF0000000000000UL
#define RMAP_NESTED_LPID_SHIFT		(52)
#define RMAP_NESTED_GPA_MASK		0x000FFFFFFFFFF000UL
#define RMAP_NESTED_IS_SINGLE_ENTRY	0x0000000000000001UL

/* Structure for a nested guest rmap entry */
struct rmap_nested {
	struct llist_node list;
	u64 rmap;
};
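Since the layout above packs an LPID and a guest page frame number into one
64-bit word, a pair of small helpers makes the encoding easier to read. These
are illustrative only and the names are invented here; the actual code in this
series open-codes the masks where it needs them.

/* Illustrative helpers for the nested rmap encoding described above. */
static inline u64 nested_rmap_encode(unsigned int lpid, unsigned long gpa)
{
	return (((u64)lpid << RMAP_NESTED_LPID_SHIFT) & RMAP_NESTED_LPID_MASK) |
	       (gpa & RMAP_NESTED_GPA_MASK);
}

static inline unsigned int nested_rmap_lpid(u64 rmap)
{
	return (rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
}

static inline unsigned long nested_rmap_gpa(u64 rmap)
{
	return rmap & RMAP_NESTED_GPA_MASK;
}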
/*
 * for_each_nest_rmap_safe - iterate over the list of nested rmap entries
 *			     safe against removal of the list entry or NULL list
 * @pos:	a (struct rmap_nested *) to use as a loop cursor
 * @node:	pointer to the first entry
 *		NOTE: this can be NULL
 * @rmapp:	an (unsigned long *) in which to return the rmap entries on each
 *		iteration
 *		NOTE: this must point to already allocated memory
 *
 * The nested_rmap is a llist of (struct rmap_nested) entries pointed to by the
 * rmap entry in the memslot. The list is always terminated by a "single entry"
 * stored in the list element of the final entry of the llist. If there is ONLY
 * a single entry then this is itself in the rmap entry of the memslot, not a
 * llist head pointer.
 *
 * Note that the iterator below assumes that a nested rmap entry is always
 * non-zero. This is true for our usage because the LPID field is always
 * non-zero (zero is reserved for the host).
 *
 * This should be used to iterate over the list of rmap_nested entries with
 * processing done on the u64 rmap value given by each iteration. This is safe
 * against removal of list entries and it is always safe to call free on (pos).
 *
 * e.g.
 * struct rmap_nested *cursor;
 * struct llist_node *first;
 * unsigned long rmap;
 * for_each_nest_rmap_safe(cursor, first, &rmap) {
 *	do_something(rmap);
 *	free(cursor);
 * }
 */
#define for_each_nest_rmap_safe(pos, node, rmapp)			       \
	for ((pos) = llist_entry((node), typeof(*(pos)), list);	       \
	     (node) &&							       \
	     (*(rmapp) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?    \
			  ((u64) (node)) : ((pos)->rmap))) &&		       \
	     (((node) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?     \
			 ((struct llist_node *) ((pos) = NULL)) :	       \
			 (pos)->list.next)), true);			       \
	     (pos) = llist_entry((node), typeof(*(pos)), list))

struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
					  bool create);
void kvmhv_put_nested(struct kvm_nested_guest *gp);
int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid);

/* Encoding of first parameter for H_TLB_INVALIDATE */
#define H_TLBIE_P1_ENC(ric, prs, r)	(___PPC_RIC(ric) | ___PPC_PRS(prs) | \
					 ___PPC_R(r))

/* Power architecture requires HPT is at least 256kiB, at most 64TiB */
#define PPC_MIN_HPT_ORDER	18
@ -435,6 +537,7 @@ static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
|
||||
}
|
||||
|
||||
extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
|
||||
extern void kvmhv_radix_debugfs_init(struct kvm *kvm);
|
||||
|
||||
extern void kvmhv_rm_send_ipi(int cpu);
|
||||
|
||||
@ -482,7 +585,7 @@ static inline u64 sanitize_msr(u64 msr)
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vcpu->arch.cr = vcpu->arch.cr_tm;
|
||||
vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
|
||||
vcpu->arch.regs.xer = vcpu->arch.xer_tm;
|
||||
vcpu->arch.regs.link = vcpu->arch.lr_tm;
|
||||
vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
|
||||
@ -499,7 +602,7 @@ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
|
||||
|
||||
static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vcpu->arch.cr_tm = vcpu->arch.cr;
|
||||
vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
|
||||
vcpu->arch.xer_tm = vcpu->arch.regs.xer;
|
||||
vcpu->arch.lr_tm = vcpu->arch.regs.link;
|
||||
vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
|
||||
@ -515,6 +618,17 @@ static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
|
||||
}
|
||||
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
|
||||
|
||||
extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
|
||||
unsigned long gpa, unsigned int level,
|
||||
unsigned long mmu_seq, unsigned int lpid,
|
||||
unsigned long *rmapp, struct rmap_nested **n_rmap);
|
||||
extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
|
||||
struct rmap_nested **n_rmap);
|
||||
extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
|
||||
struct kvm_memory_slot *memslot,
|
||||
unsigned long gpa, unsigned long hpa,
|
||||
unsigned long nbytes);
|
||||
|
||||
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
|
||||
|
||||
#endif /* __ASM_KVM_BOOK3S_64_H__ */
|
||||
|
@ -25,6 +25,9 @@
|
||||
#define XICS_MFRR 0xc
|
||||
#define XICS_IPI 2 /* interrupt source # for IPIs */
|
||||
|
||||
/* LPIDs we support with this build -- runtime limit may be lower */
|
||||
#define KVMPPC_NR_LPIDS (LPID_RSVD + 1)
|
||||
|
||||
/* Maximum number of threads per physical core */
|
||||
#define MAX_SMT_THREADS 8
|
||||
|
||||
|
@ -46,12 +46,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
|
||||
|
||||
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
|
||||
{
|
||||
vcpu->arch.cr = val;
|
||||
vcpu->arch.regs.ccr = val;
|
||||
}
|
||||
|
||||
static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return vcpu->arch.cr;
|
||||
return vcpu->arch.regs.ccr;
|
||||
}
|
||||
|
||||
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
|
||||
|
@ -46,6 +46,7 @@
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
#include <asm/kvm_book3s_asm.h> /* for MAX_SMT_THREADS */
|
||||
#define KVM_MAX_VCPU_ID (MAX_SMT_THREADS * KVM_MAX_VCORES)
|
||||
#define KVM_MAX_NESTED_GUESTS KVMPPC_NR_LPIDS
|
||||
|
||||
#else
|
||||
#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
|
||||
@ -94,6 +95,7 @@ struct dtl_entry;
|
||||
|
||||
struct kvmppc_vcpu_book3s;
|
||||
struct kvmppc_book3s_shadow_vcpu;
|
||||
struct kvm_nested_guest;
|
||||
|
||||
struct kvm_vm_stat {
|
||||
ulong remote_tlb_flush;
|
||||
@ -287,10 +289,12 @@ struct kvm_arch {
|
||||
u8 radix;
|
||||
u8 fwnmi_enabled;
|
||||
bool threads_indep;
|
||||
bool nested_enable;
|
||||
pgd_t *pgtable;
|
||||
u64 process_table;
|
||||
struct dentry *debugfs_dir;
|
||||
struct dentry *htab_dentry;
|
||||
struct dentry *radix_dentry;
|
||||
struct kvm_resize_hpt *resize_hpt; /* protected by kvm->lock */
|
||||
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
|
||||
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
|
||||
@ -311,6 +315,9 @@ struct kvm_arch {
|
||||
#endif
|
||||
struct kvmppc_ops *kvm_ops;
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
u64 l1_ptcr;
|
||||
int max_nested_lpid;
|
||||
struct kvm_nested_guest *nested_guests[KVM_MAX_NESTED_GUESTS];
|
||||
/* This array can grow quite large, keep it at the end */
|
||||
struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
|
||||
#endif
|
||||
@ -360,7 +367,9 @@ struct kvmppc_pte {
|
||||
bool may_write : 1;
|
||||
bool may_execute : 1;
|
||||
unsigned long wimg;
|
||||
unsigned long rc;
|
||||
u8 page_size; /* MMU_PAGE_xxx */
|
||||
u8 page_shift;
|
||||
};
|
||||
|
||||
struct kvmppc_mmu {
|
||||
@ -537,8 +546,6 @@ struct kvm_vcpu_arch {
|
||||
ulong tar;
|
||||
#endif
|
||||
|
||||
u32 cr;
|
||||
|
||||
#ifdef CONFIG_PPC_BOOK3S
|
||||
ulong hflags;
|
||||
ulong guest_owned_ext;
|
||||
@ -707,6 +714,7 @@ struct kvm_vcpu_arch {
|
||||
u8 hcall_needed;
|
||||
u8 epr_flags; /* KVMPPC_EPR_xxx */
|
||||
u8 epr_needed;
|
||||
u8 external_oneshot; /* clear external irq after delivery */
|
||||
|
||||
u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
|
||||
|
||||
@ -781,6 +789,10 @@ struct kvm_vcpu_arch {
|
||||
u32 emul_inst;
|
||||
|
||||
u32 online;
|
||||
|
||||
/* For support of nested guests */
|
||||
struct kvm_nested_guest *nested;
|
||||
u32 nested_vcpu_id;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
|
||||
|
@ -194,9 +194,7 @@ extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
|
||||
(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
|
||||
(stt)->size, (ioba), (npages)) ? \
|
||||
H_PARAMETER : H_SUCCESS)
|
||||
extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
|
||||
unsigned long tce);
|
||||
extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
|
||||
extern long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
|
||||
unsigned long *ua, unsigned long **prmap);
|
||||
extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
|
||||
unsigned long idx, unsigned long tce);
|
||||
@ -327,6 +325,7 @@ struct kvmppc_ops {
|
||||
int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
|
||||
unsigned long flags);
|
||||
void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
|
||||
int (*enable_nested)(struct kvm *kvm);
|
||||
};
|
||||
|
||||
extern struct kvmppc_ops *kvmppc_hv_ops;
|
||||
@ -585,6 +584,7 @@ extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
|
||||
|
||||
extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
|
||||
int level, bool line_status);
|
||||
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
|
||||
#else
|
||||
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
|
||||
u32 priority) { return -1; }
|
||||
@ -607,6 +607,7 @@ static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { retur
|
||||
|
||||
static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
|
||||
int level, bool line_status) { return -ENODEV; }
|
||||
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
|
||||
#endif /* CONFIG_KVM_XIVE */
|
||||
|
||||
/*
|
||||
@ -652,6 +653,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
|
||||
unsigned long mfrr);
|
||||
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
|
||||
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
|
||||
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);
|
||||
|
||||
/*
|
||||
* Host-side operations we want to set up while running in real
|
||||
|
@ -104,6 +104,7 @@
|
||||
#define OP_31_XOP_LHZUX 311
|
||||
#define OP_31_XOP_MSGSNDP 142
|
||||
#define OP_31_XOP_MSGCLRP 174
|
||||
#define OP_31_XOP_TLBIE 306
|
||||
#define OP_31_XOP_MFSPR 339
|
||||
#define OP_31_XOP_LWAX 341
|
||||
#define OP_31_XOP_LHAX 343
|
||||
|
@ -415,6 +415,7 @@
|
||||
#define HFSCR_DSCR __MASK(FSCR_DSCR_LG)
|
||||
#define HFSCR_VECVSX __MASK(FSCR_VECVSX_LG)
|
||||
#define HFSCR_FP __MASK(FSCR_FP_LG)
|
||||
#define HFSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56) /* interrupt cause */
|
||||
#define SPRN_TAR 0x32f /* Target Address Register */
|
||||
#define SPRN_LPCR 0x13E /* LPAR Control Register */
|
||||
#define LPCR_VPM0 ASM_CONST(0x8000000000000000)
|
||||
@ -766,6 +767,7 @@
|
||||
#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */
|
||||
#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */
|
||||
#define HSRR1_DENORM 0x00100000 /* Denorm exception */
|
||||
#define HSRR1_HISI_WRITE 0x00010000 /* HISI bcs couldn't update mem */
|
||||
|
||||
#define SPRN_TBCTL 0x35f /* PA6T Timebase control register */
|
||||
#define TBCTL_FREEZE 0x0000000000000000ull /* Freeze all tbs */
|
||||
|
@ -634,6 +634,7 @@ struct kvm_ppc_cpu_char {
|
||||
|
||||
#define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
|
||||
#define KVM_REG_PPC_ONLINE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf)
|
||||
#define KVM_REG_PPC_PTCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc0)
|
||||
|
||||
/* Transactional Memory checkpointed state:
|
||||
* This is all GPRs, all VSX regs and a subset of SPRs
|
||||
|
@ -438,7 +438,7 @@ int main(void)
|
||||
#ifdef CONFIG_PPC_BOOK3S
|
||||
OFFSET(VCPU_TAR, kvm_vcpu, arch.tar);
|
||||
#endif
|
||||
OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
|
||||
OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr);
|
||||
OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr);
|
||||
@ -503,6 +503,7 @@ int main(void)
|
||||
OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr);
|
||||
OFFSET(VCPU_VPA_DIRTY, kvm_vcpu, arch.vpa.dirty);
|
||||
OFFSET(VCPU_HEIR, kvm_vcpu, arch.emul_inst);
|
||||
OFFSET(VCPU_NESTED, kvm_vcpu, arch.nested);
|
||||
OFFSET(VCPU_CPU, kvm_vcpu, cpu);
|
||||
OFFSET(VCPU_THREAD_CPU, kvm_vcpu, arch.thread_cpu);
|
||||
#endif
|
||||
@ -695,7 +696,7 @@ int main(void)
|
||||
#endif /* CONFIG_PPC_BOOK3S_64 */
|
||||
|
||||
#else /* CONFIG_PPC_BOOK3S */
|
||||
OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
|
||||
OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr);
|
||||
OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
|
||||
OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
|
||||
OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
|
||||
|
@ -147,8 +147,8 @@ __init_hvmode_206:
|
||||
rldicl. r0,r3,4,63
|
||||
bnelr
|
||||
ld r5,CPU_SPEC_FEATURES(r4)
|
||||
LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE)
|
||||
xor r5,r5,r6
|
||||
LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE | CPU_FTR_P9_TM_HV_ASSIST)
|
||||
andc r5,r5,r6
|
||||
std r5,CPU_SPEC_FEATURES(r4)
|
||||
blr
|
||||
|
||||
|
@ -75,7 +75,8 @@ kvm-hv-y += \
|
||||
book3s_hv.o \
|
||||
book3s_hv_interrupts.o \
|
||||
book3s_64_mmu_hv.o \
|
||||
book3s_64_mmu_radix.o
|
||||
book3s_64_mmu_radix.o \
|
||||
book3s_hv_nested.o
|
||||
|
||||
kvm-hv-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \
|
||||
book3s_hv_tm.o
|
||||
|
@ -78,8 +78,11 @@ void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
|
||||
ulong pc = kvmppc_get_pc(vcpu);
|
||||
ulong lr = kvmppc_get_lr(vcpu);
|
||||
if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
|
||||
kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
|
||||
if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
|
||||
kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
|
||||
vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
|
||||
}
|
||||
}
|
||||
@ -150,7 +153,6 @@ static int kvmppc_book3s_vec2irqprio(unsigned int vec)
|
||||
case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break;
|
||||
case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break;
|
||||
case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break;
|
||||
case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break;
|
||||
case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break;
|
||||
case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break;
|
||||
case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break;
|
||||
@@ -236,18 +238,35 @@ EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);
 void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
 {
-	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;
-
+	/*
+	 * This case (KVM_INTERRUPT_SET) should never actually arise for
+	 * a pseries guest (because pseries guests expect their interrupt
+	 * controllers to continue asserting an external interrupt request
+	 * until it is acknowledged at the interrupt controller), but is
+	 * included to avoid ABI breakage and potentially for other
+	 * sorts of guest.
+	 *
+	 * There is a subtlety here: HV KVM does not test the
+	 * external_oneshot flag in the code that synthesizes
+	 * external interrupts for the guest just before entering
+	 * the guest.  That is OK even if userspace did do a
+	 * KVM_INTERRUPT_SET on a pseries guest vcpu, because the
+	 * caller (kvm_vcpu_ioctl_interrupt) does a kvm_vcpu_kick()
+	 * which ends up doing a smp_send_reschedule(), which will
+	 * pull the guest all the way out to the host, meaning that
+	 * we will call kvmppc_core_prepare_to_enter() before entering
+	 * the guest again, and that will handle the external_oneshot
+	 * flag correctly.
+	 */
+	if (irq->irq == KVM_INTERRUPT_SET)
+		vcpu->arch.external_oneshot = 1;

-	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
-		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;
-
-	kvmppc_book3s_queue_irqprio(vcpu, vec);
+	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
 }

 void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
 {
 	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
-	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
 }

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
|
||||
@ -278,7 +297,6 @@ static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
|
||||
vec = BOOK3S_INTERRUPT_DECREMENTER;
|
||||
break;
|
||||
case BOOK3S_IRQPRIO_EXTERNAL:
|
||||
case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
|
||||
deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
|
||||
vec = BOOK3S_INTERRUPT_EXTERNAL;
|
||||
break;
|
||||
@ -352,8 +370,16 @@ static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
|
||||
case BOOK3S_IRQPRIO_DECREMENTER:
|
||||
/* DEC interrupts get cleared by mtdec */
|
||||
return false;
|
||||
case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
|
||||
/* External interrupts get cleared by userspace */
|
||||
case BOOK3S_IRQPRIO_EXTERNAL:
|
||||
/*
|
||||
* External interrupts get cleared by userspace
|
||||
* except when set by the KVM_INTERRUPT ioctl with
|
||||
* KVM_INTERRUPT_SET (not KVM_INTERRUPT_SET_LEVEL).
|
||||
*/
|
||||
if (vcpu->arch.external_oneshot) {
|
||||
vcpu->arch.external_oneshot = 0;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -268,14 +268,13 @@ int kvmppc_mmu_hv_init(void)
|
||||
{
|
||||
unsigned long host_lpid, rsvd_lpid;
|
||||
|
||||
if (!cpu_has_feature(CPU_FTR_HVMODE))
|
||||
return -EINVAL;
|
||||
|
||||
if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE))
|
||||
return -EINVAL;
|
||||
|
||||
/* POWER7 has 10-bit LPIDs (12-bit in POWER8) */
|
||||
host_lpid = mfspr(SPRN_LPID);
|
||||
host_lpid = 0;
|
||||
if (cpu_has_feature(CPU_FTR_HVMODE))
|
||||
host_lpid = mfspr(SPRN_LPID);
|
||||
rsvd_lpid = LPID_RSVD;
|
||||
|
||||
kvmppc_init_lpid(rsvd_lpid + 1);
|
||||
|
@ -363,6 +363,40 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
|
||||
unsigned long tce)
|
||||
{
|
||||
unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
|
||||
enum dma_data_direction dir = iommu_tce_direction(tce);
|
||||
struct kvmppc_spapr_tce_iommu_table *stit;
|
||||
unsigned long ua = 0;
|
||||
|
||||
/* Allow userspace to poison TCE table */
|
||||
if (dir == DMA_NONE)
|
||||
return H_SUCCESS;
|
||||
|
||||
if (iommu_tce_check_gpa(stt->page_shift, gpa))
|
||||
return H_TOO_HARD;
|
||||
|
||||
if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
|
||||
return H_TOO_HARD;
|
||||
|
||||
list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
|
||||
unsigned long hpa = 0;
|
||||
struct mm_iommu_table_group_mem_t *mem;
|
||||
long shift = stit->tbl->it_page_shift;
|
||||
|
||||
mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
|
||||
if (!mem)
|
||||
return H_TOO_HARD;
|
||||
|
||||
if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa))
|
||||
return H_TOO_HARD;
|
||||
}
|
||||
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
static void kvmppc_clear_tce(struct iommu_table *tbl, unsigned long entry)
|
||||
{
|
||||
unsigned long hpa = 0;
|
||||
@ -401,7 +435,7 @@ static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
|
||||
long ret;
|
||||
|
||||
if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir)))
|
||||
return H_HARDWARE;
|
||||
return H_TOO_HARD;
|
||||
|
||||
if (dir == DMA_NONE)
|
||||
return H_SUCCESS;
|
||||
@ -449,15 +483,15 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
|
||||
return H_TOO_HARD;
|
||||
|
||||
if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
|
||||
return H_HARDWARE;
|
||||
return H_TOO_HARD;
|
||||
|
||||
if (mm_iommu_mapped_inc(mem))
|
||||
return H_CLOSED;
|
||||
return H_TOO_HARD;
|
||||
|
||||
ret = iommu_tce_xchg(tbl, entry, &hpa, &dir);
|
||||
if (WARN_ON_ONCE(ret)) {
|
||||
mm_iommu_mapped_dec(mem);
|
||||
return H_HARDWARE;
|
||||
return H_TOO_HARD;
|
||||
}
|
||||
|
||||
if (dir != DMA_NONE)
|
||||
@ -517,8 +551,7 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
|
||||
|
||||
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
|
||||
if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
|
||||
tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
|
||||
if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
|
||||
ret = H_PARAMETER;
|
||||
goto unlock_exit;
|
||||
}
|
||||
@ -533,14 +566,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
|
||||
ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
|
||||
entry, ua, dir);
|
||||
|
||||
if (ret == H_SUCCESS)
|
||||
continue;
|
||||
|
||||
if (ret == H_TOO_HARD)
|
||||
if (ret != H_SUCCESS) {
|
||||
kvmppc_clear_tce(stit->tbl, entry);
|
||||
goto unlock_exit;
|
||||
|
||||
WARN_ON_ONCE(1);
|
||||
kvmppc_clear_tce(stit->tbl, entry);
|
||||
}
|
||||
}
|
||||
|
||||
kvmppc_tce_put(stt, entry, tce);
|
||||
@ -583,7 +612,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
|
||||
return ret;
|
||||
|
||||
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
|
||||
if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
|
||||
ret = H_TOO_HARD;
|
||||
goto unlock_exit;
|
||||
}
|
||||
@ -599,10 +628,26 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
|
||||
ret = kvmppc_tce_validate(stt, tce);
|
||||
if (ret != H_SUCCESS)
|
||||
goto unlock_exit;
|
||||
}
|
||||
|
||||
if (kvmppc_gpa_to_ua(vcpu->kvm,
|
||||
tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
|
||||
&ua, NULL))
|
||||
	for (i = 0; i < npages; ++i) {
		/*
		 * This looks unsafe, because we validate, then regrab
		 * the TCE from userspace which could have been changed by
		 * another thread.
		 *
		 * But it actually is safe, because the relevant checks will be
		 * re-executed in the following code.  If userspace tries to
		 * change this dodgily it will result in a messier failure mode
		 * but won't threaten the host.
		 */
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
|
||||
return H_PARAMETER;
|
||||
|
||||
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
|
||||
@ -610,14 +655,10 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
|
||||
stit->tbl, entry + i, ua,
|
||||
iommu_tce_direction(tce));
|
||||
|
||||
if (ret == H_SUCCESS)
|
||||
continue;
|
||||
|
||||
if (ret == H_TOO_HARD)
|
||||
if (ret != H_SUCCESS) {
|
||||
kvmppc_clear_tce(stit->tbl, entry);
|
||||
goto unlock_exit;
|
||||
|
||||
WARN_ON_ONCE(1);
|
||||
kvmppc_clear_tce(stit->tbl, entry);
|
||||
}
|
||||
}
|
||||
|
||||
kvmppc_tce_put(stt, entry + i, tce);
|
||||
|
@ -87,6 +87,7 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvmppc_find_table);
|
||||
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
/*
|
||||
* Validates TCE address.
|
||||
* At the moment flags and page mask are validated.
|
||||
@ -94,14 +95,14 @@ EXPORT_SYMBOL_GPL(kvmppc_find_table);
|
||||
* to the table and user space is supposed to process them), we can skip
|
||||
* checking other things (such as TCE is a guest RAM address or the page
|
||||
* was actually allocated).
|
||||
*
|
||||
* WARNING: This will be called in real-mode on HV KVM and virtual
|
||||
* mode on PR KVM
|
||||
*/
|
||||
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
|
||||
static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
|
||||
unsigned long tce)
|
||||
{
|
||||
unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
|
||||
enum dma_data_direction dir = iommu_tce_direction(tce);
|
||||
struct kvmppc_spapr_tce_iommu_table *stit;
|
||||
unsigned long ua = 0;
|
||||
|
||||
/* Allow userspace to poison TCE table */
|
||||
if (dir == DMA_NONE)
|
||||
@ -110,9 +111,25 @@ long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
|
||||
if (iommu_tce_check_gpa(stt->page_shift, gpa))
|
||||
return H_PARAMETER;
|
||||
|
||||
if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
|
||||
return H_TOO_HARD;
|
||||
|
||||
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
|
||||
unsigned long hpa = 0;
|
||||
struct mm_iommu_table_group_mem_t *mem;
|
||||
long shift = stit->tbl->it_page_shift;
|
||||
|
||||
mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift);
|
||||
if (!mem)
|
||||
return H_TOO_HARD;
|
||||
|
||||
if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa))
|
||||
return H_TOO_HARD;
|
||||
}
|
||||
|
||||
return H_SUCCESS;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvmppc_tce_validate);
|
||||
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
|
||||
|
||||
/* Note on the use of page_address() in real mode,
|
||||
*
|
||||
@ -164,10 +181,10 @@ void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvmppc_tce_put);
|
||||
|
||||
long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
|
||||
long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
|
||||
unsigned long *ua, unsigned long **prmap)
|
||||
{
|
||||
unsigned long gfn = gpa >> PAGE_SHIFT;
|
||||
unsigned long gfn = tce >> PAGE_SHIFT;
|
||||
struct kvm_memory_slot *memslot;
|
||||
|
||||
memslot = search_memslots(kvm_memslots(kvm), gfn);
|
||||
@ -175,7 +192,7 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
|
||||
return -EINVAL;
|
||||
|
||||
*ua = __gfn_to_hva_memslot(memslot, gfn) |
|
||||
(gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
|
||||
(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
|
||||
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
if (prmap)
|
||||
@ -184,7 +201,7 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
|
||||
EXPORT_SYMBOL_GPL(kvmppc_tce_to_ua);
|
||||
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
|
||||
@ -300,10 +317,10 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
|
||||
|
||||
if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
|
||||
&hpa)))
|
||||
return H_HARDWARE;
|
||||
return H_TOO_HARD;
|
||||
|
||||
if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
|
||||
return H_CLOSED;
|
||||
return H_TOO_HARD;
|
||||
|
||||
ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
|
||||
if (ret) {
|
||||
@ -368,13 +385,12 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
|
||||
if (ret != H_SUCCESS)
|
||||
return ret;
|
||||
|
||||
ret = kvmppc_tce_validate(stt, tce);
|
||||
ret = kvmppc_rm_tce_validate(stt, tce);
|
||||
if (ret != H_SUCCESS)
|
||||
return ret;
|
||||
|
||||
dir = iommu_tce_direction(tce);
|
||||
if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
|
||||
tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
|
||||
if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
|
||||
return H_PARAMETER;
|
||||
|
||||
entry = ioba >> stt->page_shift;
|
||||
@ -387,14 +403,10 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
|
||||
ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
|
||||
stit->tbl, entry, ua, dir);
|
||||
|
||||
if (ret == H_SUCCESS)
|
||||
continue;
|
||||
|
||||
if (ret == H_TOO_HARD)
|
||||
if (ret != H_SUCCESS) {
|
||||
kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
|
||||
return ret;
|
||||
|
||||
WARN_ON_ONCE_RM(1);
|
||||
kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
|
||||
}
|
||||
}
|
||||
|
||||
kvmppc_tce_put(stt, entry, tce);
|
||||
@ -480,7 +492,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
|
||||
*/
|
||||
struct mm_iommu_table_group_mem_t *mem;
|
||||
|
||||
if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
|
||||
if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
|
||||
return H_TOO_HARD;
|
||||
|
||||
mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
|
||||
@ -496,12 +508,12 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
|
||||
* We do not require memory to be preregistered in this case
|
||||
* so lock rmap and do __find_linux_pte_or_hugepte().
|
||||
*/
|
||||
if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
|
||||
if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
|
||||
return H_TOO_HARD;
|
||||
|
||||
rmap = (void *) vmalloc_to_phys(rmap);
|
||||
if (WARN_ON_ONCE_RM(!rmap))
|
||||
return H_HARDWARE;
|
||||
return H_TOO_HARD;
|
||||
|
||||
/*
|
||||
* Synchronize with the MMU notifier callbacks in
|
||||
@ -521,14 +533,16 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
|
||||
for (i = 0; i < npages; ++i) {
|
||||
unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
|
||||
|
||||
ret = kvmppc_tce_validate(stt, tce);
|
||||
ret = kvmppc_rm_tce_validate(stt, tce);
|
||||
if (ret != H_SUCCESS)
|
||||
goto unlock_exit;
|
||||
}
|
||||
|
||||
for (i = 0; i < npages; ++i) {
|
||||
unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
|
||||
|
||||
ua = 0;
|
||||
if (kvmppc_gpa_to_ua(vcpu->kvm,
|
||||
tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
|
||||
&ua, NULL))
|
||||
if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
|
||||
return H_PARAMETER;
|
||||
|
||||
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
|
||||
@ -536,14 +550,11 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
|
||||
stit->tbl, entry + i, ua,
|
||||
iommu_tce_direction(tce));
|
||||
|
||||
if (ret == H_SUCCESS)
|
||||
continue;
|
||||
|
||||
if (ret == H_TOO_HARD)
|
||||
if (ret != H_SUCCESS) {
|
||||
kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
|
||||
entry);
|
||||
goto unlock_exit;
|
||||
|
||||
WARN_ON_ONCE_RM(1);
|
||||
kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
|
||||
}
|
||||
}
|
||||
|
||||
kvmppc_tce_put(stt, entry + i, tce);
|
||||
|
@ -36,7 +36,6 @@
|
||||
#define OP_31_XOP_MTSR 210
|
||||
#define OP_31_XOP_MTSRIN 242
|
||||
#define OP_31_XOP_TLBIEL 274
|
||||
#define OP_31_XOP_TLBIE 306
|
||||
/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
|
||||
#define OP_31_XOP_FAKE_SC1 308
|
||||
#define OP_31_XOP_SLBMTE 402
|
||||
@ -110,7 +109,7 @@ static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
|
||||
vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
|
||||
vcpu->arch.tar_tm = vcpu->arch.tar;
|
||||
vcpu->arch.lr_tm = vcpu->arch.regs.link;
|
||||
vcpu->arch.cr_tm = vcpu->arch.cr;
|
||||
vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
|
||||
vcpu->arch.xer_tm = vcpu->arch.regs.xer;
|
||||
vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
|
||||
}
|
||||
@ -129,7 +128,7 @@ static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
|
||||
vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
|
||||
vcpu->arch.tar = vcpu->arch.tar_tm;
|
||||
vcpu->arch.regs.link = vcpu->arch.lr_tm;
|
||||
vcpu->arch.cr = vcpu->arch.cr_tm;
|
||||
vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
|
||||
vcpu->arch.regs.xer = vcpu->arch.xer_tm;
|
||||
vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
|
||||
}
|
||||
@ -141,7 +140,7 @@ static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
|
||||
uint64_t texasr;
|
||||
|
||||
/* CR0 = 0 | MSR[TS] | 0 */
|
||||
vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) |
|
||||
vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
|
||||
(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
|
||||
<< CR0_SHIFT);
|
||||
|
||||
@ -220,7 +219,7 @@ void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
|
||||
tm_abort(ra_val);
|
||||
|
||||
/* CR0 = 0 | MSR[TS] | 0 */
|
||||
vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) |
|
||||
vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
|
||||
(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
|
||||
<< CR0_SHIFT);
|
||||
|
||||
@ -494,8 +493,8 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
|
||||
if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
|
||||
preempt_disable();
|
||||
vcpu->arch.cr = (CR0_TBEGIN_FAILURE |
|
||||
(vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)));
|
||||
vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE |
|
||||
(vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)));
|
||||
|
||||
vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
|
||||
(((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
|
||||
|
@ -231,6 +231,15 @@ void kvmhv_rm_send_ipi(int cpu)
|
||||
void __iomem *xics_phys;
|
||||
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
|
||||
|
||||
/* For a nested hypervisor, use the XICS via hcall */
|
||||
if (kvmhv_on_pseries()) {
|
||||
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
|
||||
|
||||
plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
|
||||
IPI_PRIORITY);
|
||||
return;
|
||||
}
|
||||
|
||||
/* On POWER9 we can use msgsnd for any destination cpu. */
|
||||
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
|
||||
msg |= get_hard_smp_processor_id(cpu);
|
||||
@ -460,12 +469,19 @@ static long kvmppc_read_one_intr(bool *again)
|
||||
return 1;
|
||||
|
||||
/* Now read the interrupt from the ICP */
|
||||
xics_phys = local_paca->kvm_hstate.xics_phys;
|
||||
rc = 0;
|
||||
if (!xics_phys)
|
||||
rc = opal_int_get_xirr(&xirr, false);
|
||||
else
|
||||
xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
|
||||
if (kvmhv_on_pseries()) {
|
||||
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
|
||||
|
||||
rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
|
||||
xirr = cpu_to_be32(retbuf[0]);
|
||||
} else {
|
||||
xics_phys = local_paca->kvm_hstate.xics_phys;
|
||||
rc = 0;
|
||||
if (!xics_phys)
|
||||
rc = opal_int_get_xirr(&xirr, false);
|
||||
else
|
||||
xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
|
||||
}
|
||||
if (rc < 0)
|
||||
return 1;
|
||||
|
||||
@ -494,7 +510,13 @@ static long kvmppc_read_one_intr(bool *again)
|
||||
*/
|
||||
if (xisr == XICS_IPI) {
|
||||
rc = 0;
|
||||
if (xics_phys) {
|
||||
if (kvmhv_on_pseries()) {
|
||||
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
|
||||
|
||||
plpar_hcall_raw(H_IPI, retbuf,
|
||||
hard_smp_processor_id(), 0xff);
|
||||
plpar_hcall_raw(H_EOI, retbuf, h_xirr);
|
||||
} else if (xics_phys) {
|
||||
__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
|
||||
__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
|
||||
} else {
|
||||
@ -520,7 +542,13 @@ static long kvmppc_read_one_intr(bool *again)
|
||||
/* We raced with the host,
|
||||
* we need to resend that IPI, bummer
|
||||
*/
|
||||
if (xics_phys)
|
||||
if (kvmhv_on_pseries()) {
|
||||
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
|
||||
|
||||
plpar_hcall_raw(H_IPI, retbuf,
|
||||
hard_smp_processor_id(),
|
||||
IPI_PRIORITY);
|
||||
} else if (xics_phys)
|
||||
__raw_rm_writeb(IPI_PRIORITY,
|
||||
xics_phys + XICS_MFRR);
|
||||
else
|
||||
@ -729,3 +757,51 @@ void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
|
||||
smp_mb();
|
||||
local_paca->kvm_hstate.kvm_split_mode = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
|
||||
* Can we inject a Decrementer or a External interrupt?
|
||||
*/
|
||||
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int ext;
|
||||
unsigned long vec = 0;
|
||||
unsigned long lpcr;
|
||||
|
||||
/* Insert EXTERNAL bit into LPCR at the MER bit position */
|
||||
ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
|
||||
lpcr = mfspr(SPRN_LPCR);
|
||||
lpcr |= ext << LPCR_MER_SH;
|
||||
mtspr(SPRN_LPCR, lpcr);
|
||||
isync();
|
||||
|
||||
if (vcpu->arch.shregs.msr & MSR_EE) {
|
||||
if (ext) {
|
||||
vec = BOOK3S_INTERRUPT_EXTERNAL;
|
||||
} else {
|
||||
long int dec = mfspr(SPRN_DEC);
|
||||
if (!(lpcr & LPCR_LD))
|
||||
dec = (int) dec;
|
||||
if (dec < 0)
|
||||
vec = BOOK3S_INTERRUPT_DECREMENTER;
|
||||
}
|
||||
}
|
||||
if (vec) {
|
||||
unsigned long msr, old_msr = vcpu->arch.shregs.msr;
|
||||
|
||||
kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
|
||||
kvmppc_set_srr1(vcpu, old_msr);
|
||||
kvmppc_set_pc(vcpu, vec);
|
||||
msr = vcpu->arch.intr_msr;
|
||||
if (MSR_TM_ACTIVE(old_msr))
|
||||
msr |= MSR_TS_S;
|
||||
vcpu->arch.shregs.msr = msr;
|
||||
}
|
||||
|
||||
if (vcpu->arch.doorbell_request) {
|
||||
mtspr(SPRN_DPDES, 1);
|
||||
vcpu->arch.vcore->dpdes = 1;
|
||||
smp_wmb();
|
||||
vcpu->arch.doorbell_request = 0;
|
||||
}
|
||||
}
|
||||
|
@ -64,52 +64,7 @@ BEGIN_FTR_SECTION
|
||||
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
|
||||
|
||||
/* Save host PMU registers */
|
||||
BEGIN_FTR_SECTION
|
||||
/* Work around P8 PMAE bug */
|
||||
li r3, -1
|
||||
clrrdi r3, r3, 10
|
||||
mfspr r8, SPRN_MMCR2
|
||||
mtspr SPRN_MMCR2, r3 /* freeze all counters using MMCR2 */
|
||||
isync
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
|
||||
li r3, 1
|
||||
sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
|
||||
mfspr r7, SPRN_MMCR0 /* save MMCR0 */
|
||||
mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */
|
||||
mfspr r6, SPRN_MMCRA
|
||||
/* Clear MMCRA in order to disable SDAR updates */
|
||||
li r5, 0
|
||||
mtspr SPRN_MMCRA, r5
|
||||
isync
|
||||
lbz r5, PACA_PMCINUSE(r13) /* is the host using the PMU? */
|
||||
cmpwi r5, 0
|
||||
beq 31f /* skip if not */
|
||||
mfspr r5, SPRN_MMCR1
|
||||
mfspr r9, SPRN_SIAR
|
||||
mfspr r10, SPRN_SDAR
|
||||
std r7, HSTATE_MMCR0(r13)
|
||||
std r5, HSTATE_MMCR1(r13)
|
||||
std r6, HSTATE_MMCRA(r13)
|
||||
std r9, HSTATE_SIAR(r13)
|
||||
std r10, HSTATE_SDAR(r13)
|
||||
BEGIN_FTR_SECTION
|
||||
mfspr r9, SPRN_SIER
|
||||
std r8, HSTATE_MMCR2(r13)
|
||||
std r9, HSTATE_SIER(r13)
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
|
||||
mfspr r3, SPRN_PMC1
|
||||
mfspr r5, SPRN_PMC2
|
||||
mfspr r6, SPRN_PMC3
|
||||
mfspr r7, SPRN_PMC4
|
||||
mfspr r8, SPRN_PMC5
|
||||
mfspr r9, SPRN_PMC6
|
||||
stw r3, HSTATE_PMC1(r13)
|
||||
stw r5, HSTATE_PMC2(r13)
|
||||
stw r6, HSTATE_PMC3(r13)
|
||||
stw r7, HSTATE_PMC4(r13)
|
||||
stw r8, HSTATE_PMC5(r13)
|
||||
stw r9, HSTATE_PMC6(r13)
|
||||
31:
|
||||
bl kvmhv_save_host_pmu
|
||||
|
||||
/*
|
||||
* Put whatever is in the decrementer into the
|
||||
@ -161,3 +116,51 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
|
||||
ld r0, PPC_LR_STKOFF(r1)
|
||||
mtlr r0
|
||||
blr
|
||||
|
||||
_GLOBAL(kvmhv_save_host_pmu)
|
||||
BEGIN_FTR_SECTION
|
||||
/* Work around P8 PMAE bug */
|
||||
li r3, -1
|
||||
clrrdi r3, r3, 10
|
||||
mfspr r8, SPRN_MMCR2
|
||||
mtspr SPRN_MMCR2, r3 /* freeze all counters using MMCR2 */
|
||||
isync
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
|
||||
li r3, 1
|
||||
sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
|
||||
mfspr r7, SPRN_MMCR0 /* save MMCR0 */
|
||||
mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */
|
||||
mfspr r6, SPRN_MMCRA
|
||||
/* Clear MMCRA in order to disable SDAR updates */
|
||||
li r5, 0
|
||||
mtspr SPRN_MMCRA, r5
|
||||
isync
|
||||
lbz r5, PACA_PMCINUSE(r13) /* is the host using the PMU? */
|
||||
cmpwi r5, 0
|
||||
beq 31f /* skip if not */
|
||||
mfspr r5, SPRN_MMCR1
|
||||
mfspr r9, SPRN_SIAR
|
||||
mfspr r10, SPRN_SDAR
|
||||
std r7, HSTATE_MMCR0(r13)
|
||||
std r5, HSTATE_MMCR1(r13)
|
||||
std r6, HSTATE_MMCRA(r13)
|
||||
std r9, HSTATE_SIAR(r13)
|
||||
std r10, HSTATE_SDAR(r13)
|
||||
BEGIN_FTR_SECTION
|
||||
mfspr r9, SPRN_SIER
|
||||
std r8, HSTATE_MMCR2(r13)
|
||||
std r9, HSTATE_SIER(r13)
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
|
||||
mfspr r3, SPRN_PMC1
|
||||
mfspr r5, SPRN_PMC2
|
||||
mfspr r6, SPRN_PMC3
|
||||
mfspr r7, SPRN_PMC4
|
||||
mfspr r8, SPRN_PMC5
|
||||
mfspr r9, SPRN_PMC6
|
||||
stw r3, HSTATE_PMC1(r13)
|
||||
stw r5, HSTATE_PMC2(r13)
|
||||
stw r6, HSTATE_PMC3(r13)
|
||||
stw r7, HSTATE_PMC4(r13)
|
||||
stw r8, HSTATE_PMC5(r13)
|
||||
stw r9, HSTATE_PMC6(r13)
|
||||
31: blr
|
||||
|
arch/powerpc/kvm/book3s_hv_nested.c: new file, 1291 lines (diff suppressed because it is too large)
@ -177,6 +177,7 @@ void kvmppc_subcore_enter_guest(void)
|
||||
|
||||
local_paca->sibling_subcore_state->in_guest[subcore_id] = 1;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvmppc_subcore_enter_guest);
|
||||
|
||||
void kvmppc_subcore_exit_guest(void)
|
||||
{
|
||||
@ -187,6 +188,7 @@ void kvmppc_subcore_exit_guest(void)
|
||||
|
||||
local_paca->sibling_subcore_state->in_guest[subcore_id] = 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvmppc_subcore_exit_guest);
|
||||
|
||||
static bool kvmppc_tb_resync_required(void)
|
||||
{
|
||||
@ -331,5 +333,13 @@ long kvmppc_realmode_hmi_handler(void)
|
||||
} else {
|
||||
wait_for_tb_resync();
|
||||
}
|
||||
|
||||
/*
|
||||
* Reset tb_offset_applied so the guest exit code won't try
|
||||
* to subtract the previous timebase offset from the timebase.
|
||||
*/
|
||||
if (local_paca->kvm_hstate.kvm_vcore)
|
||||
local_paca->kvm_hstate.kvm_vcore->tb_offset_applied = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -136,7 +136,7 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
|
||||
|
||||
/* Mark the target VCPU as having an interrupt pending */
|
||||
vcpu->stat.queue_intr++;
|
||||
set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
|
||||
set_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
|
||||
|
||||
/* Kick self ? Just set MER and return */
|
||||
if (vcpu == this_vcpu) {
|
||||
@ -170,8 +170,7 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
|
||||
static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
/* Note: Only called on self ! */
|
||||
clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
|
||||
&vcpu->arch.pending_exceptions);
|
||||
clear_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
|
||||
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER);
|
||||
}
|
||||
|
||||
@ -768,6 +767,14 @@ static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again)
|
||||
void __iomem *xics_phys;
|
||||
int64_t rc;
|
||||
|
||||
if (kvmhv_on_pseries()) {
|
||||
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
|
||||
|
||||
iosync();
|
||||
plpar_hcall_raw(H_EOI, retbuf, hwirq);
|
||||
return;
|
||||
}
|
||||
|
||||
rc = pnv_opal_pci_msi_eoi(c, hwirq);
|
||||
|
||||
if (rc)
|
||||
|
@ -130,7 +130,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
/* Set CR0 to indicate previous transactional state */
vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
/* L=1 => tresume, L=0 => tsuspend */
if (instr & (1 << 21)) {
@ -174,7 +174,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
copy_from_checkpoint(vcpu);

/* Set CR0 to indicate previous transactional state */
vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
return RESUME_GUEST;
@ -204,7 +204,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
copy_to_checkpoint(vcpu);

/* Set CR0 to indicate previous transactional state */
vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
vcpu->arch.shregs.msr = msr | MSR_TS_S;
return RESUME_GUEST;

@ -89,7 +89,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
if (instr & (1 << 21))
vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
/* Set CR0 to 0b0010 */
vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0x20000000;
vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
0x20000000;
return 1;
}

@ -105,5 +106,5 @@ void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu)
vcpu->arch.shregs.msr &= ~MSR_TS_MASK; /* go to N state */
vcpu->arch.regs.nip = vcpu->arch.tfhar;
copy_from_checkpoint(vcpu);
vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000;
vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 0xa0000000;
}

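The TM emulation hunks above all derive a CR0 image from the MSR's transaction-state (TS) field; the functional change is only that the result now goes into vcpu->arch.regs.ccr instead of the old vcpu->arch.cr field. As a quick reference, here is a minimal, standalone C sketch of that computation. It is not code from this patch: the MSR_TS_* values are assumed to match arch/powerpc/include/asm/reg.h and the helper name is made up for illustration.

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed to match the kernel's definitions: TS is the two-bit MSR field
     * at bits 34:33 (T = transactional, S = suspended). */
    #define MSR_TS_S_LG 33
    #define MSR_TS_MASK (3ULL << MSR_TS_S_LG)

    /* Hypothetical helper mirroring the emulation hunks above: clear CR0 (the
     * top nibble of the 32-bit CR image) and refill it from the TS field. */
    static uint32_t set_cr0_from_ts(uint32_t ccr, uint64_t msr)
    {
        return (ccr & 0x0fffffff) |
               (uint32_t)(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
    }

    int main(void)
    {
        uint64_t msr_suspended = 1ULL << MSR_TS_S_LG; /* TS = 0b01 (suspended) */

        printf("ccr = 0x%08x\n", set_cr0_from_ts(0, msr_suspended));
        return 0;
    }
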
@ -167,7 +167,7 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
svcpu->cr = vcpu->arch.cr;
svcpu->cr = vcpu->arch.regs.ccr;
svcpu->xer = vcpu->arch.regs.xer;
svcpu->ctr = vcpu->arch.regs.ctr;
svcpu->lr = vcpu->arch.regs.link;
@ -249,7 +249,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
vcpu->arch.cr = svcpu->cr;
vcpu->arch.regs.ccr = svcpu->cr;
vcpu->arch.regs.xer = svcpu->xer;
vcpu->arch.regs.ctr = svcpu->ctr;
vcpu->arch.regs.link = svcpu->lr;
@ -1246,7 +1246,6 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_GUEST;
break;
case BOOK3S_INTERRUPT_EXTERNAL:
case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
case BOOK3S_INTERRUPT_EXTERNAL_HV:
case BOOK3S_INTERRUPT_H_VIRT:
vcpu->stat.ext_intr_exits++;

@ -310,7 +310,7 @@ static inline bool icp_try_update(struct kvmppc_icp *icp,
*/
if (new.out_ee) {
kvmppc_book3s_queue_irqprio(icp->vcpu,
BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
BOOK3S_INTERRUPT_EXTERNAL);
if (!change_self)
kvmppc_fast_vcpu_kick(icp->vcpu);
}
@ -593,8 +593,7 @@ static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
u32 xirr;

/* First, remove EE from the processor */
kvmppc_book3s_dequeue_irqprio(icp->vcpu,
BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

/*
* ICP State: Accept_Interrupt
@ -754,8 +753,7 @@ static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
* We can remove EE from the current processor, the update
* transaction will set it again if needed
*/
kvmppc_book3s_dequeue_irqprio(icp->vcpu,
BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

do {
old_state = new_state = READ_ONCE(icp->state);
@ -1167,8 +1165,7 @@ int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
* Deassert the CPU interrupt request.
* icp_try_update will reassert it if necessary.
*/
kvmppc_book3s_dequeue_irqprio(icp->vcpu,
BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

/*
* Note that if we displace an interrupt from old_state.xisr,
@ -1393,7 +1390,8 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
if (cpu_has_feature(CPU_FTR_ARCH_206)) {
if (cpu_has_feature(CPU_FTR_ARCH_206) &&
cpu_has_feature(CPU_FTR_HVMODE)) {
/* Enable real mode support */
xics->real_mode = ENABLE_REALMODE;
xics->real_mode_dbg = DEBUG_REALMODE;

@ -61,6 +61,69 @@
*/
#define XIVE_Q_GAP 2

/*
* Push a vcpu's context to the XIVE on guest entry.
* This assumes we are in virtual mode (MMU on)
*/
void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
{
void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
u64 pq;

if (!tima)
return;
eieio();
__raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
__raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
vcpu->arch.xive_pushed = 1;
eieio();

/*
* We clear the irq_pending flag. There is a small chance of a
* race vs. the escalation interrupt happening on another
* processor setting it again, but the only consequence is to
* cause a spurious wakeup on the next H_CEDE, which is not an
* issue.
*/
vcpu->arch.irq_pending = 0;

/*
* In single escalation mode, if the escalation interrupt is
* on, we mask it.
*/
if (vcpu->arch.xive_esc_on) {
pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
XIVE_ESB_SET_PQ_01));
mb();

/*
* We have a possible subtle race here: The escalation
* interrupt might have fired and be on its way to the
* host queue while we mask it, and if we unmask it
* early enough (re-cede right away), there is a
* theorical possibility that it fires again, thus
* landing in the target queue more than once which is
* a big no-no.
*
* Fortunately, solving this is rather easy. If the
* above load setting PQ to 01 returns a previous
* value where P is set, then we know the escalation
* interrupt is somewhere on its way to the host. In
* that case we simply don't clear the xive_esc_on
* flag below. It will be eventually cleared by the
* handler for the escalation interrupt.
*
* Then, when doing a cede, we check that flag again
* before re-enabling the escalation interrupt, and if
* set, we abort the cede.
*/
if (!(pq & XIVE_ESB_VAL_P))
/* Now P is 0, we can clear the flag */
vcpu->arch.xive_esc_on = 0;
}
}
EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);

/*
* This is a simple trigger for a generic XIVE IRQ. This must
* only be called for interrupts that support a trigger page

@ -280,14 +280,6 @@ X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
/* First collect pending bits from HW */
GLUE(X_PFX,ack_pending)(xc);

/*
* Cleanup the old-style bits if needed (they may have been
* set by pull or an escalation interrupts).
*/
if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions))
clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
&vcpu->arch.pending_exceptions);

pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
xc->pending, xc->hw_cppr, xc->cppr);

@ -182,7 +182,7 @@
*/
PPC_LL r4, PACACURRENT(r13)
PPC_LL r4, (THREAD + THREAD_KVM_VCPU)(r4)
stw r10, VCPU_CR(r4)
PPC_STL r10, VCPU_CR(r4)
PPC_STL r11, VCPU_GPR(R4)(r4)
PPC_STL r5, VCPU_GPR(R5)(r4)
PPC_STL r6, VCPU_GPR(R6)(r4)
@ -292,7 +292,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
PPC_STL r4, VCPU_GPR(R4)(r11)
PPC_LL r4, THREAD_NORMSAVE(0)(r10)
PPC_STL r5, VCPU_GPR(R5)(r11)
stw r13, VCPU_CR(r11)
PPC_STL r13, VCPU_CR(r11)
mfspr r5, \srr0
PPC_STL r3, VCPU_GPR(R10)(r11)
PPC_LL r3, THREAD_NORMSAVE(2)(r10)
@ -319,7 +319,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
PPC_STL r4, VCPU_GPR(R4)(r11)
PPC_LL r4, GPR9(r8)
PPC_STL r5, VCPU_GPR(R5)(r11)
stw r9, VCPU_CR(r11)
PPC_STL r9, VCPU_CR(r11)
mfspr r5, \srr0
PPC_STL r3, VCPU_GPR(R8)(r11)
PPC_LL r3, GPR10(r8)
@ -643,7 +643,7 @@ lightweight_exit:
PPC_LL r3, VCPU_LR(r4)
PPC_LL r5, VCPU_XER(r4)
PPC_LL r6, VCPU_CTR(r4)
lwz r7, VCPU_CR(r4)
PPC_LL r7, VCPU_CR(r4)
PPC_LL r8, VCPU_PC(r4)
PPC_LD(r9, VCPU_SHARED_MSR, r11)
PPC_LL r0, VCPU_GPR(R0)(r4)

@ -117,7 +117,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)

emulated = EMULATE_FAIL;
vcpu->arch.regs.msr = vcpu->arch.shared->msr;
vcpu->arch.regs.ccr = vcpu->arch.cr;
if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
int type = op.type & INSTR_TYPE_MASK;
int size = GETSIZE(op.type);

@ -594,7 +594,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = !!(hv_enabled && radix_enabled());
break;
case KVM_CAP_PPC_MMU_HASH_V3:
r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300));
r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
cpu_has_feature(CPU_FTR_HVMODE));
break;
case KVM_CAP_PPC_NESTED_HV:
r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
!kvmppc_hv_ops->enable_nested(NULL));
break;
#endif
case KVM_CAP_SYNC_MMU:
@ -2114,6 +2119,14 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
break;
}

case KVM_CAP_PPC_NESTED_HV:
r = -EINVAL;
if (!is_kvmppc_hv_enabled(kvm) ||
!kvm->arch.kvm_ops->enable_nested)
break;
r = kvm->arch.kvm_ops->enable_nested(kvm);
break;
#endif
default:
r = -EINVAL;

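Userspace probes and turns on the nested-HV capability through the generic KVM ioctls that the hunks above wire up. A hedged sketch of that sequence, not taken from this patch: error handling is trimmed, and it assumes a <linux/kvm.h> recent enough to define KVM_CAP_PPC_NESTED_HV.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Returns 0 if nested HV was enabled for the new VM, negative otherwise. */
    int enable_nested_hv(void)
    {
        int kvm = open("/dev/kvm", O_RDWR);
        int vm;
        struct kvm_enable_cap cap;

        if (kvm < 0)
            return -1;

        if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_PPC_NESTED_HV) <= 0) {
            fprintf(stderr, "nested HV not supported on this host\n");
            return -1;
        }

        vm = ioctl(kvm, KVM_CREATE_VM, 0);
        if (vm < 0)
            return -1;

        memset(&cap, 0, sizeof(cap));
        cap.cap = KVM_CAP_PPC_NESTED_HV;
        /* VM-wide capability, no arguments; the ioctl fails if the HV module
         * cannot act as a nested hypervisor on this system. */
        return ioctl(vm, KVM_ENABLE_CAP, &cap);
    }
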
@ -28,17 +28,25 @@
* Save transactional state and TM-related registers.
* Called with:
* - r3 pointing to the vcpu struct
* - r4 points to the MSR with current TS bits:
* - r4 containing the MSR with current TS bits:
* (For HV KVM, it is VCPU_MSR ; For PR KVM, it is host MSR).
* This can modify all checkpointed registers, but
* restores r1, r2 before exit.
* - r5 containing a flag indicating that non-volatile registers
* must be preserved.
* If r5 == 0, this can modify all checkpointed registers, but
* restores r1, r2 before exit. If r5 != 0, this restores the
* MSR TM/FP/VEC/VSX bits to their state on entry.
*/
_GLOBAL(__kvmppc_save_tm)
mflr r0
std r0, PPC_LR_STKOFF(r1)
stdu r1, -SWITCH_FRAME_SIZE(r1)

mr r9, r3
cmpdi cr7, r5, 0

/* Turn on TM. */
mfmsr r8
mr r10, r8
li r0, 1
rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
ori r8, r8, MSR_FP
@ -51,6 +59,27 @@ _GLOBAL(__kvmppc_save_tm)
std r1, HSTATE_SCRATCH2(r13)
std r3, HSTATE_SCRATCH1(r13)

/* Save CR on the stack - even if r5 == 0 we need to get cr7 back. */
mfcr r6
SAVE_GPR(6, r1)

/* Save DSCR so we can restore it to avoid running with user value */
mfspr r7, SPRN_DSCR
SAVE_GPR(7, r1)

/*
* We are going to do treclaim., which will modify all checkpointed
* registers. Save the non-volatile registers on the stack if
* preservation of non-volatile state has been requested.
*/
beq cr7, 3f
SAVE_NVGPRS(r1)

/* MSR[TS] will be 0 (non-transactional) once we do treclaim. */
li r0, 0
rldimi r10, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
SAVE_GPR(10, r1) /* final MSR value */
3:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
BEGIN_FTR_SECTION
/* Emulation of the treclaim instruction needs TEXASR before treclaim */
@ -74,22 +103,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
std r9, PACATMSCRATCH(r13)
ld r9, HSTATE_SCRATCH1(r13)

/* Get a few more GPRs free. */
std r29, VCPU_GPRS_TM(29)(r9)
std r30, VCPU_GPRS_TM(30)(r9)
std r31, VCPU_GPRS_TM(31)(r9)

/* Save away PPR and DSCR soon so don't run with user values. */
mfspr r31, SPRN_PPR
/* Save away PPR soon so we don't run with user value. */
std r0, VCPU_GPRS_TM(0)(r9)
mfspr r0, SPRN_PPR
HMT_MEDIUM
mfspr r30, SPRN_DSCR
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
ld r29, HSTATE_DSCR(r13)
mtspr SPRN_DSCR, r29
#endif

/* Save all but r9, r13 & r29-r31 */
reg = 0
/* Reload stack pointer. */
std r1, VCPU_GPRS_TM(1)(r9)
ld r1, HSTATE_SCRATCH2(r13)

/* Set MSR RI now we have r1 and r13 back. */
std r2, VCPU_GPRS_TM(2)(r9)
li r2, MSR_RI
mtmsrd r2, 1

/* Reload TOC pointer. */
ld r2, PACATOC(r13)

/* Save all but r0-r2, r9 & r13 */
reg = 3
.rept 29
.if (reg != 9) && (reg != 13)
std reg, VCPU_GPRS_TM(reg)(r9)
@ -103,33 +135,29 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
ld r4, PACATMSCRATCH(r13)
std r4, VCPU_GPRS_TM(9)(r9)

/* Reload stack pointer and TOC. */
ld r1, HSTATE_SCRATCH2(r13)
ld r2, PACATOC(r13)

/* Set MSR RI now we have r1 and r13 back. */
li r5, MSR_RI
mtmsrd r5, 1

/* Save away checkpinted SPRs. */
std r31, VCPU_PPR_TM(r9)
std r30, VCPU_DSCR_TM(r9)
mflr r5
/* Restore host DSCR and CR values, after saving guest values */
mfcr r6
mfspr r7, SPRN_DSCR
stw r6, VCPU_CR_TM(r9)
std r7, VCPU_DSCR_TM(r9)
REST_GPR(6, r1)
REST_GPR(7, r1)
mtcr r6
mtspr SPRN_DSCR, r7

/* Save away checkpointed SPRs. */
std r0, VCPU_PPR_TM(r9)
mflr r5
mfctr r7
mfspr r8, SPRN_AMR
mfspr r10, SPRN_TAR
mfxer r11
std r5, VCPU_LR_TM(r9)
stw r6, VCPU_CR_TM(r9)
std r7, VCPU_CTR_TM(r9)
std r8, VCPU_AMR_TM(r9)
std r10, VCPU_TAR_TM(r9)
std r11, VCPU_XER_TM(r9)

/* Restore r12 as trap number. */
lwz r12, VCPU_TRAP(r9)

/* Save FP/VSX. */
addi r3, r9, VCPU_FPRS_TM
bl store_fp_state
@ -137,6 +165,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
bl store_vr_state
mfspr r6, SPRN_VRSAVE
stw r6, VCPU_VRSAVE_TM(r9)

/* Restore non-volatile registers if requested to */
beq cr7, 1f
REST_NVGPRS(r1)
REST_GPR(10, r1)
1:
/*
* We need to save these SPRs after the treclaim so that the software
@ -146,12 +179,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
*/
mfspr r7, SPRN_TEXASR
std r7, VCPU_TEXASR(r9)
11:
mfspr r5, SPRN_TFHAR
mfspr r6, SPRN_TFIAR
std r5, VCPU_TFHAR(r9)
std r6, VCPU_TFIAR(r9)

/* Restore MSR state if requested */
beq cr7, 2f
mtmsrd r10, 0
2:
addi r1, r1, SWITCH_FRAME_SIZE
ld r0, PPC_LR_STKOFF(r1)
mtlr r0
blr
@ -161,49 +198,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
* be invoked from C function by PR KVM only.
*/
_GLOBAL(_kvmppc_save_tm_pr)
mflr r5
std r5, PPC_LR_STKOFF(r1)
stdu r1, -SWITCH_FRAME_SIZE(r1)
SAVE_NVGPRS(r1)

/* save MSR since TM/math bits might be impacted
* by __kvmppc_save_tm().
*/
mfmsr r5
SAVE_GPR(5, r1)

/* also save DSCR/CR/TAR so that it can be recovered later */
mfspr r6, SPRN_DSCR
SAVE_GPR(6, r1)

mfcr r7
stw r7, _CCR(r1)
mflr r0
std r0, PPC_LR_STKOFF(r1)
stdu r1, -PPC_MIN_STKFRM(r1)

mfspr r8, SPRN_TAR
SAVE_GPR(8, r1)
std r8, PPC_MIN_STKFRM-8(r1)

li r5, 1 /* preserve non-volatile registers */
bl __kvmppc_save_tm

REST_GPR(8, r1)
ld r8, PPC_MIN_STKFRM-8(r1)
mtspr SPRN_TAR, r8

ld r7, _CCR(r1)
mtcr r7

REST_GPR(6, r1)
mtspr SPRN_DSCR, r6

/* need preserve current MSR's MSR_TS bits */
REST_GPR(5, r1)
mfmsr r6
rldicl r6, r6, 64 - MSR_TS_S_LG, 62
rldimi r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG
mtmsrd r5

REST_NVGPRS(r1)
addi r1, r1, SWITCH_FRAME_SIZE
ld r5, PPC_LR_STKOFF(r1)
mtlr r5
addi r1, r1, PPC_MIN_STKFRM
ld r0, PPC_LR_STKOFF(r1)
mtlr r0
blr

EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr);
@ -215,15 +225,21 @@ EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr);
* - r4 is the guest MSR with desired TS bits:
* For HV KVM, it is VCPU_MSR
* For PR KVM, it is provided by caller
* This potentially modifies all checkpointed registers.
* It restores r1, r2 from the PACA.
* - r5 containing a flag indicating that non-volatile registers
* must be preserved.
* If r5 == 0, this potentially modifies all checkpointed registers, but
* restores r1, r2 from the PACA before exit.
* If r5 != 0, this restores the MSR TM/FP/VEC/VSX bits to their state on entry.
*/
_GLOBAL(__kvmppc_restore_tm)
mflr r0
std r0, PPC_LR_STKOFF(r1)

cmpdi cr7, r5, 0

/* Turn on TM/FP/VSX/VMX so we can restore them. */
mfmsr r5
mr r10, r5
li r6, MSR_TM >> 32
sldi r6, r6, 32
or r5, r5, r6
@ -244,8 +260,7 @@ _GLOBAL(__kvmppc_restore_tm)

mr r5, r4
rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
beqlr /* TM not active in guest */
std r1, HSTATE_SCRATCH2(r13)
beq 9f /* TM not active in guest */

/* Make sure the failure summary is set, otherwise we'll program check
* when we trechkpt. It's possible that this might have been not set
@ -255,6 +270,26 @@ _GLOBAL(__kvmppc_restore_tm)
oris r7, r7, (TEXASR_FS)@h
mtspr SPRN_TEXASR, r7

/*
* Make a stack frame and save non-volatile registers if requested.
*/
stdu r1, -SWITCH_FRAME_SIZE(r1)
std r1, HSTATE_SCRATCH2(r13)

mfcr r6
mfspr r7, SPRN_DSCR
SAVE_GPR(2, r1)
SAVE_GPR(6, r1)
SAVE_GPR(7, r1)

beq cr7, 4f
SAVE_NVGPRS(r1)

/* MSR[TS] will be 1 (suspended) once we do trechkpt */
li r0, 1
rldimi r10, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
SAVE_GPR(10, r1) /* final MSR value */
4:
/*
* We need to load up the checkpointed state for the guest.
* We need to do this early as it will blow away any GPRs, VSRs and
@ -291,8 +326,6 @@ _GLOBAL(__kvmppc_restore_tm)
ld r29, VCPU_DSCR_TM(r3)
ld r30, VCPU_PPR_TM(r3)

std r2, PACATMSCRATCH(r13) /* Save TOC */

/* Clear the MSR RI since r1, r13 are all going to be foobar. */
li r5, 0
mtmsrd r5, 1
@ -318,18 +351,31 @@ _GLOBAL(__kvmppc_restore_tm)
/* Now let's get back the state we need. */
HMT_MEDIUM
GET_PACA(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
ld r29, HSTATE_DSCR(r13)
mtspr SPRN_DSCR, r29
#endif
ld r1, HSTATE_SCRATCH2(r13)
ld r2, PACATMSCRATCH(r13)
REST_GPR(7, r1)
mtspr SPRN_DSCR, r7

/* Set the MSR RI since we have our registers back. */
li r5, MSR_RI
mtmsrd r5, 1

/* Restore TOC pointer and CR */
REST_GPR(2, r1)
REST_GPR(6, r1)
mtcr r6

/* Restore non-volatile registers if requested to. */
beq cr7, 5f
REST_GPR(10, r1)
REST_NVGPRS(r1)

5: addi r1, r1, SWITCH_FRAME_SIZE
ld r0, PPC_LR_STKOFF(r1)
mtlr r0

9: /* Restore MSR bits if requested */
beqlr cr7
mtmsrd r10, 0
blr

/*
@ -337,47 +383,23 @@ _GLOBAL(__kvmppc_restore_tm)
* can be invoked from C function by PR KVM only.
*/
_GLOBAL(_kvmppc_restore_tm_pr)
mflr r5
std r5, PPC_LR_STKOFF(r1)
stdu r1, -SWITCH_FRAME_SIZE(r1)
SAVE_NVGPRS(r1)

/* save MSR to avoid TM/math bits change */
mfmsr r5
SAVE_GPR(5, r1)

/* also save DSCR/CR/TAR so that it can be recovered later */
mfspr r6, SPRN_DSCR
SAVE_GPR(6, r1)

mfcr r7
stw r7, _CCR(r1)
mflr r0
std r0, PPC_LR_STKOFF(r1)
stdu r1, -PPC_MIN_STKFRM(r1)

/* save TAR so that it can be recovered later */
mfspr r8, SPRN_TAR
SAVE_GPR(8, r1)
std r8, PPC_MIN_STKFRM-8(r1)

li r5, 1
bl __kvmppc_restore_tm

REST_GPR(8, r1)
ld r8, PPC_MIN_STKFRM-8(r1)
mtspr SPRN_TAR, r8

ld r7, _CCR(r1)
mtcr r7

REST_GPR(6, r1)
mtspr SPRN_DSCR, r6

/* need preserve current MSR's MSR_TS bits */
REST_GPR(5, r1)
mfmsr r6
rldicl r6, r6, 64 - MSR_TS_S_LG, 62
rldimi r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG
mtmsrd r5

REST_NVGPRS(r1)
addi r1, r1, SWITCH_FRAME_SIZE
ld r5, PPC_LR_STKOFF(r1)
mtlr r5
addi r1, r1, PPC_MIN_STKFRM
ld r0, PPC_LR_STKOFF(r1)
mtlr r0
blr

EXPORT_SYMBOL_GPL(_kvmppc_restore_tm_pr);

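One detail worth calling out from the wrapper code quoted above: the rldicl/rldimi pair isolates the current MSR's two TS (transaction state) bits and folds them into a previously saved MSR image before the mtmsrd. For reference, a standalone C rendering of that bit manipulation; the constants are assumed to match arch/powerpc/include/asm/reg.h and the helper name is invented for illustration.

    #include <stdint.h>

    /* Assumed to match arch/powerpc/include/asm/reg.h. */
    #define MSR_TS_S_LG 33
    #define MSR_TS_MASK (3ULL << MSR_TS_S_LG)

    /* Hypothetical equivalent of:
     *     rldicl  r6, r6, 64 - MSR_TS_S_LG, 62
     *     rldimi  r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG
     * i.e. take only the two TS bits from cur_msr and insert them into
     * saved_msr, leaving every other bit of saved_msr untouched. */
    static inline uint64_t msr_merge_ts(uint64_t saved_msr, uint64_t cur_msr)
    {
        uint64_t ts = (cur_msr >> MSR_TS_S_LG) & 3;               /* rldicl */

        return (saved_msr & ~MSR_TS_MASK) | (ts << MSR_TS_S_LG);  /* rldimi */
    }
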
@ -14,7 +14,6 @@
{0x400, "INST_STORAGE"}, \
{0x480, "INST_SEGMENT"}, \
{0x500, "EXTERNAL"}, \
{0x501, "EXTERNAL_LEVEL"}, \
{0x502, "EXTERNAL_HV"}, \
{0x600, "ALIGNMENT"}, \
{0x700, "PROGRAM"}, \

@ -830,6 +830,15 @@ void radix__flush_pwc_lpid(unsigned int lpid)
}
EXPORT_SYMBOL_GPL(radix__flush_pwc_lpid);

/*
* Flush partition scoped translations from LPID (=LPIDR)
*/
void radix__flush_tlb_lpid(unsigned int lpid)
{
_tlbie_lpid(lpid, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL_GPL(radix__flush_tlb_lpid);

/*
* Flush partition scoped translations from LPID (=LPIDR)
*/

@ -719,6 +719,7 @@ struct kvm_ppc_one_seg_page_size {

#define KVM_PPC_PAGE_SIZES_REAL 0x00000001
#define KVM_PPC_1T_SEGMENTS 0x00000002
#define KVM_PPC_NO_HASH 0x00000004

struct kvm_ppc_smmu_info {
__u64 flags;
@ -953,6 +954,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_NESTED_STATE 157
#define KVM_CAP_ARM_INJECT_SERROR_ESR 158
#define KVM_CAP_MSR_PLATFORM_INFO 159
#define KVM_CAP_PPC_NESTED_HV 160

#ifdef KVM_CAP_IRQ_ROUTING

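For completeness, a hedged sketch of how userspace would typically consume the new KVM_PPC_NO_HASH flag. KVM_PPC_GET_SMMU_INFO is the pre-existing vm ioctl that fills struct kvm_ppc_smmu_info; the surrounding control flow here is illustrative only and not part of this patch.

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* vm_fd is a VM file descriptor obtained from KVM_CREATE_VM.
     * Returns 1 if the guest has to be set up with a radix MMU, 0 if hash
     * guests are still possible, negative on error. */
    int guest_must_use_radix(int vm_fd)
    {
        struct kvm_ppc_smmu_info info;

        if (ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info) < 0) {
            perror("KVM_PPC_GET_SMMU_INFO");
            return -1;
        }

        /* KVM_PPC_NO_HASH means HPT guests cannot be created on this setup,
         * for example when KVM itself runs as a nested hypervisor. */
        return !!(info.flags & KVM_PPC_NO_HASH);
    }
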
@ -15,7 +15,6 @@
{0x400, "INST_STORAGE"}, \
{0x480, "INST_SEGMENT"}, \
{0x500, "EXTERNAL"}, \
{0x501, "EXTERNAL_LEVEL"}, \
{0x502, "EXTERNAL_HV"}, \
{0x600, "ALIGNMENT"}, \
{0x700, "PROGRAM"}, \