Mirror of https://github.com/torvalds/linux.git
Merge remote-tracking branch 'airlied/drm-next' into drm-misc-next-fixes
Backmerge drm-next with rc7
commit c048c984de
@@ -22,7 +22,8 @@ Required properties :
- #clock-cells : must contain 1
- #reset-cells : must contain 1

For the PRCM CCUs on H3/A64, one more clock is needed:
For the PRCM CCUs on H3/A64, two more clocks are needed:
- "pll-periph": the SoC's peripheral PLL from the main CCU
- "iosc": the SoC's internal frequency oscillator

Example for generic CCU:
@@ -39,8 +40,8 @@ Example for PRCM CCU:
r_ccu: clock@01f01400 {
compatible = "allwinner,sun50i-a64-r-ccu";
reg = <0x01f01400 0x100>;
clocks = <&osc24M>, <&osc32k>, <&iosc>;
clock-names = "hosc", "losc", "iosc";
clocks = <&osc24M>, <&osc32k>, <&iosc>, <&ccu CLK_PLL_PERIPH0>;
clock-names = "hosc", "losc", "iosc", "pll-periph";
#clock-cells = <1>;
#reset-cells = <1>;
};
@@ -41,9 +41,9 @@ Required properties:
Optional properties:

In order to use the GPIO lines in PWM mode, some additional optional
properties are required. Only Armada 370 and XP support these properties.
properties are required.

- compatible: Must contain "marvell,armada-370-xp-gpio"
- compatible: Must contain "marvell,armada-370-gpio"

- reg: an additional register set is needed, for the GPIO Blink
Counter on/off registers.
@@ -71,7 +71,7 @@ Example:
};

gpio1: gpio@18140 {
compatible = "marvell,armada-370-xp-gpio";
compatible = "marvell,armada-370-gpio";
reg = <0x18140 0x40>, <0x181c8 0x08>;
reg-names = "gpio", "pwm";
ngpios = <17>;
@@ -31,7 +31,7 @@ Example:
compatible = "st,stm32-timers";
reg = <0x40010000 0x400>;
clocks = <&rcc 0 160>;
clock-names = "clk_int";
clock-names = "int";

pwm {
compatible = "st,stm32-pwm";
@@ -34,7 +34,7 @@ Required properties:
"brcm,bcm6328-switch"
"brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch"

See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional
See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
required and optional properties.

Examples:
@@ -27,6 +27,7 @@ Optional properties:
of the device. On many systems this is wired high so the device goes
out of reset at power-on, but if it is under program control, this
optional GPIO can wake up in response to it.
- vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies

Examples:
Makefile | 4
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 12
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Fearless Coyote

# *DOCUMENTATION*
@@ -1437,7 +1437,7 @@ help:
@echo ' make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'
@echo ' make V=2 [targets] 2 => give reason for rebuild of target'
@echo ' make O=dir [targets] Locate all output files in "dir", including .config'
@echo ' make C=1 [targets] Check all c source with $$CHECK (sparse by default)'
@echo ' make C=1 [targets] Check re-compiled c source with $$CHECK (sparse by default)'
@echo ' make C=2 [targets] Force check of all c source with $$CHECK'
@echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections'
@echo ' make W=n [targets] Enable extra gcc checks, n=1,2,3 where'
@@ -221,10 +221,11 @@ void update_vsyscall(struct timekeeper *tk)
/* tkr_mono.cycle_last == tkr_raw.cycle_last */
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
vdso_data->raw_time_sec = tk->raw_time.tv_sec;
vdso_data->raw_time_nsec = tk->raw_time.tv_nsec;
vdso_data->raw_time_nsec = (tk->raw_time.tv_nsec <<
tk->tkr_raw.shift) +
tk->tkr_raw.xtime_nsec;
vdso_data->xtime_clock_sec = tk->xtime_sec;
vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
/* tkr_raw.xtime_nsec == 0 */
vdso_data->cs_mono_mult = tk->tkr_mono.mult;
vdso_data->cs_raw_mult = tk->tkr_raw.mult;
/* tkr_mono.shift == tkr_raw.shift */
@@ -256,7 +256,6 @@ monotonic_raw:
seqcnt_check fail=monotonic_raw

/* All computations are done with left-shifted nsecs. */
lsl x14, x14, x12
get_nsec_per_sec res=x9
lsl x9, x9, x12
@@ -166,7 +166,11 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
bool user, bool kernel)
{
int idx_user, idx_kernel;
/*
* Initialize idx_user and idx_kernel to workaround bogus
* maybe-initialized warning when using GCC 6.
*/
int idx_user = 0, idx_kernel = 0;
unsigned long flags, old_entryhi;

local_irq_save(flags);
@@ -103,6 +103,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_handler(struct pt_regs *regs);
extern int kprobe_post_handler(struct pt_regs *regs);
extern int is_current_kprobe_addr(unsigned long addr);
#ifdef CONFIG_KPROBES_ON_FTRACE
extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb);
@@ -1411,10 +1411,8 @@ USE_TEXT_SECTION()
.balign IFETCH_ALIGN_BYTES
do_hash_page:
#ifdef CONFIG_PPC_STD_MMU_64
andis. r0,r4,0xa410 /* weird error? */
andis. r0,r4,0xa450 /* weird error? */
bne- handle_page_fault /* if not, try to insert a HPTE */
andis. r0,r4,DSISR_DABRMATCH@h
bne- handle_dabr_fault
CURRENT_THREAD_INFO(r11, r1)
lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
@@ -1438,11 +1436,16 @@ do_hash_page:

/* Error */
blt- 13f

/* Reload DSISR into r4 for the DABR check below */
ld r4,_DSISR(r1)
#endif /* CONFIG_PPC_STD_MMU_64 */

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
11: ld r4,_DAR(r1)
11: andis. r0,r4,DSISR_DABRMATCH@h
bne- handle_dabr_fault
ld r4,_DAR(r1)
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_page_fault
@@ -43,6 +43,12 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

int is_current_kprobe_addr(unsigned long addr)
{
struct kprobe *p = kprobe_running();
return (p && (unsigned long)p->addr == addr) ? 1 : 0;
}

bool arch_within_kprobe_blacklist(unsigned long addr)
{
return (addr >= (unsigned long)__kprobes_text_start &&
@@ -617,6 +623,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#endif

/*
* jprobes use jprobe_return() which skips the normal return
* path of the function, and this messes up the accounting of the
* function graph tracer.
*
* Pause function graph tracing while performing the jprobe function.
*/
pause_graph_tracing();

return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -642,6 +657,8 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
* saved regs...
*/
memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
/* It's OK to start function graph tracing again */
unpause_graph_tracing();
preempt_enable_no_resched();
return 1;
}
@ -615,6 +615,24 @@ void __init exc_lvl_early_init(void)
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Emergency stacks are used for a range of things, from asynchronous
|
||||
* NMIs (system reset, machine check) to synchronous, process context.
|
||||
* We set preempt_count to zero, even though that isn't necessarily correct. To
|
||||
* get the right value we'd need to copy it from the previous thread_info, but
|
||||
* doing that might fault causing more problems.
|
||||
* TODO: what to do with accounting?
|
||||
*/
|
||||
static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
|
||||
{
|
||||
ti->task = NULL;
|
||||
ti->cpu = cpu;
|
||||
ti->preempt_count = 0;
|
||||
ti->local_flags = 0;
|
||||
ti->flags = 0;
|
||||
klp_init_thread_info(ti);
|
||||
}
|
||||
|
||||
/*
|
||||
* Stack space used when we detect a bad kernel stack pointer, and
|
||||
* early in SMP boots before relocation is enabled. Exclusive emergency
|
||||
@ -633,24 +651,31 @@ void __init emergency_stack_init(void)
|
||||
* Since we use these as temporary stacks during secondary CPU
|
||||
* bringup, we need to get at them in real mode. This means they
|
||||
* must also be within the RMO region.
|
||||
*
|
||||
* The IRQ stacks allocated elsewhere in this file are zeroed and
|
||||
* initialized in kernel/irq.c. These are initialized here in order
|
||||
* to have emergency stacks available as early as possible.
|
||||
*/
|
||||
limit = min(safe_stack_limit(), ppc64_rma_size);
|
||||
|
||||
for_each_possible_cpu(i) {
|
||||
struct thread_info *ti;
|
||||
ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
|
||||
klp_init_thread_info(ti);
|
||||
memset(ti, 0, THREAD_SIZE);
|
||||
emerg_stack_init_thread_info(ti, i);
|
||||
paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
|
||||
|
||||
#ifdef CONFIG_PPC_BOOK3S_64
|
||||
/* emergency stack for NMI exception handling. */
|
||||
ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
|
||||
klp_init_thread_info(ti);
|
||||
memset(ti, 0, THREAD_SIZE);
|
||||
emerg_stack_init_thread_info(ti, i);
|
||||
paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE;
|
||||
|
||||
/* emergency stack for machine check exception handling. */
|
||||
ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
|
||||
klp_init_thread_info(ti);
|
||||
memset(ti, 0, THREAD_SIZE);
|
||||
emerg_stack_init_thread_info(ti, i);
|
||||
paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
|
||||
#endif
|
||||
}
|
||||
|
@ -45,10 +45,14 @@ _GLOBAL(ftrace_caller)
|
||||
stdu r1,-SWITCH_FRAME_SIZE(r1)
|
||||
|
||||
/* Save all gprs to pt_regs */
|
||||
SAVE_8GPRS(0,r1)
|
||||
SAVE_8GPRS(8,r1)
|
||||
SAVE_8GPRS(16,r1)
|
||||
SAVE_8GPRS(24,r1)
|
||||
SAVE_GPR(0, r1)
|
||||
SAVE_10GPRS(2, r1)
|
||||
SAVE_10GPRS(12, r1)
|
||||
SAVE_10GPRS(22, r1)
|
||||
|
||||
/* Save previous stack pointer (r1) */
|
||||
addi r8, r1, SWITCH_FRAME_SIZE
|
||||
std r8, GPR1(r1)
|
||||
|
||||
/* Load special regs for save below */
|
||||
mfmsr r8
|
||||
@ -95,18 +99,44 @@ ftrace_call:
|
||||
bl ftrace_stub
|
||||
nop
|
||||
|
||||
/* Load ctr with the possibly modified NIP */
|
||||
ld r3, _NIP(r1)
|
||||
mtctr r3
|
||||
/* Load the possibly modified NIP */
|
||||
ld r15, _NIP(r1)
|
||||
|
||||
#ifdef CONFIG_LIVEPATCH
|
||||
cmpd r14,r3 /* has NIP been altered? */
|
||||
cmpd r14, r15 /* has NIP been altered? */
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
|
||||
/* NIP has not been altered, skip over further checks */
|
||||
beq 1f
|
||||
|
||||
/* Check if there is an active kprobe on us */
|
||||
subi r3, r14, 4
|
||||
bl is_current_kprobe_addr
|
||||
nop
|
||||
|
||||
/*
|
||||
* If r3 == 1, then this is a kprobe/jprobe.
|
||||
* else, this is livepatched function.
|
||||
*
|
||||
* The conditional branch for livepatch_handler below will use the
|
||||
* result of this comparison. For kprobe/jprobe, we just need to branch to
|
||||
* the new NIP, not call livepatch_handler. The branch below is bne, so we
|
||||
* want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
|
||||
* CR0[EQ] = (r3 == 1).
|
||||
*/
|
||||
cmpdi r3, 1
|
||||
1:
|
||||
#endif
|
||||
|
||||
/* Load CTR with the possibly modified NIP */
|
||||
mtctr r15
|
||||
|
||||
/* Restore gprs */
|
||||
REST_8GPRS(0,r1)
|
||||
REST_8GPRS(8,r1)
|
||||
REST_8GPRS(16,r1)
|
||||
REST_8GPRS(24,r1)
|
||||
REST_GPR(0,r1)
|
||||
REST_10GPRS(2,r1)
|
||||
REST_10GPRS(12,r1)
|
||||
REST_10GPRS(22,r1)
|
||||
|
||||
/* Restore possibly modified LR */
|
||||
ld r0, _LINK(r1)
|
||||
@ -119,7 +149,10 @@ ftrace_call:
|
||||
addi r1, r1, SWITCH_FRAME_SIZE
|
||||
|
||||
#ifdef CONFIG_LIVEPATCH
|
||||
/* Based on the cmpd above, if the NIP was altered handle livepatch */
|
||||
/*
|
||||
* Based on the cmpd or cmpdi above, if the NIP was altered and we're
|
||||
* not on a kprobe/jprobe, then handle livepatch.
|
||||
*/
|
||||
bne- livepatch_handler
|
||||
#endif
|
||||
|
||||
|
@ -1486,6 +1486,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
|
||||
r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
|
||||
break;
|
||||
case KVM_REG_PPC_TB_OFFSET:
|
||||
/*
|
||||
* POWER9 DD1 has an erratum where writing TBU40 causes
|
||||
* the timebase to lose ticks. So we don't let the
|
||||
* timebase offset be changed on P9 DD1. (It is
|
||||
* initialized to zero.)
|
||||
*/
|
||||
if (cpu_has_feature(CPU_FTR_POWER9_DD1))
|
||||
break;
|
||||
/* round up to multiple of 2^24 */
|
||||
vcpu->arch.vcore->tb_offset =
|
||||
ALIGN(set_reg_val(id, *val), 1UL << 24);
|
||||
@ -2907,12 +2915,36 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int r;
|
||||
int srcu_idx;
|
||||
unsigned long ebb_regs[3] = {}; /* shut up GCC */
|
||||
unsigned long user_tar = 0;
|
||||
unsigned int user_vrsave;
|
||||
|
||||
if (!vcpu->arch.sane) {
|
||||
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Don't allow entry with a suspended transaction, because
|
||||
* the guest entry/exit code will lose it.
|
||||
* If the guest has TM enabled, save away their TM-related SPRs
|
||||
* (they will get restored by the TM unavailable interrupt).
|
||||
*/
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
|
||||
(current->thread.regs->msr & MSR_TM)) {
|
||||
if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
|
||||
run->exit_reason = KVM_EXIT_FAIL_ENTRY;
|
||||
run->fail_entry.hardware_entry_failure_reason = 0;
|
||||
return -EINVAL;
|
||||
}
|
||||
current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
|
||||
current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
|
||||
current->thread.tm_texasr = mfspr(SPRN_TEXASR);
|
||||
current->thread.regs->msr &= ~MSR_TM;
|
||||
}
|
||||
#endif
|
||||
|
||||
kvmppc_core_prepare_to_enter(vcpu);
|
||||
|
||||
/* No need to go into the guest when all we'll do is come back out */
|
||||
@ -2934,6 +2966,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
||||
|
||||
flush_all_to_thread(current);
|
||||
|
||||
/* Save userspace EBB and other register values */
|
||||
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
|
||||
ebb_regs[0] = mfspr(SPRN_EBBHR);
|
||||
ebb_regs[1] = mfspr(SPRN_EBBRR);
|
||||
ebb_regs[2] = mfspr(SPRN_BESCR);
|
||||
user_tar = mfspr(SPRN_TAR);
|
||||
}
|
||||
user_vrsave = mfspr(SPRN_VRSAVE);
|
||||
|
||||
vcpu->arch.wqp = &vcpu->arch.vcore->wq;
|
||||
vcpu->arch.pgdir = current->mm->pgd;
|
||||
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
|
||||
@ -2960,6 +3001,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
||||
}
|
||||
} while (is_kvmppc_resume_guest(r));
|
||||
|
||||
/* Restore userspace EBB and other register values */
|
||||
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
|
||||
mtspr(SPRN_EBBHR, ebb_regs[0]);
|
||||
mtspr(SPRN_EBBRR, ebb_regs[1]);
|
||||
mtspr(SPRN_BESCR, ebb_regs[2]);
|
||||
mtspr(SPRN_TAR, user_tar);
|
||||
mtspr(SPRN_FSCR, current->thread.fscr);
|
||||
}
|
||||
mtspr(SPRN_VRSAVE, user_vrsave);
|
||||
|
||||
out:
|
||||
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
|
||||
atomic_dec(&vcpu->kvm->arch.vcpus_running);
|
||||
|
@ -121,10 +121,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
|
||||
* Put whatever is in the decrementer into the
|
||||
* hypervisor decrementer.
|
||||
*/
|
||||
BEGIN_FTR_SECTION
|
||||
ld r5, HSTATE_KVM_VCORE(r13)
|
||||
ld r6, VCORE_KVM(r5)
|
||||
ld r9, KVM_HOST_LPCR(r6)
|
||||
andis. r9, r9, LPCR_LD@h
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
|
||||
mfspr r8,SPRN_DEC
|
||||
mftb r7
|
||||
mtspr SPRN_HDEC,r8
|
||||
BEGIN_FTR_SECTION
|
||||
/* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
|
||||
bne 32f
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
|
||||
extsw r8,r8
|
||||
32: mtspr SPRN_HDEC,r8
|
||||
add r8,r8,r7
|
||||
std r8,HSTATE_DECEXP(r13)
|
||||
|
||||
|
@ -32,12 +32,29 @@
|
||||
#include <asm/opal.h>
|
||||
#include <asm/xive-regs.h>
|
||||
|
||||
/* Sign-extend HDEC if not on POWER9 */
|
||||
#define EXTEND_HDEC(reg) \
|
||||
BEGIN_FTR_SECTION; \
|
||||
extsw reg, reg; \
|
||||
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
|
||||
|
||||
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
|
||||
|
||||
/* Values in HSTATE_NAPPING(r13) */
|
||||
#define NAPPING_CEDE 1
|
||||
#define NAPPING_NOVCPU 2
|
||||
|
||||
/* Stack frame offsets for kvmppc_hv_entry */
|
||||
#define SFS 144
|
||||
#define STACK_SLOT_TRAP (SFS-4)
|
||||
#define STACK_SLOT_TID (SFS-16)
|
||||
#define STACK_SLOT_PSSCR (SFS-24)
|
||||
#define STACK_SLOT_PID (SFS-32)
|
||||
#define STACK_SLOT_IAMR (SFS-40)
|
||||
#define STACK_SLOT_CIABR (SFS-48)
|
||||
#define STACK_SLOT_DAWR (SFS-56)
|
||||
#define STACK_SLOT_DAWRX (SFS-64)
|
||||
|
||||
/*
|
||||
* Call kvmppc_hv_entry in real mode.
|
||||
* Must be called with interrupts hard-disabled.
|
||||
@ -214,6 +231,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
|
||||
kvmppc_primary_no_guest:
|
||||
/* We handle this much like a ceded vcpu */
|
||||
/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
|
||||
/* HDEC may be larger than DEC for arch >= v3.00, but since the */
|
||||
/* HDEC value came from DEC in the first place, it will fit */
|
||||
mfspr r3, SPRN_HDEC
|
||||
mtspr SPRN_DEC, r3
|
||||
/*
|
||||
@ -295,8 +314,9 @@ kvm_novcpu_wakeup:
|
||||
|
||||
/* See if our timeslice has expired (HDEC is negative) */
|
||||
mfspr r0, SPRN_HDEC
|
||||
EXTEND_HDEC(r0)
|
||||
li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
|
||||
cmpwi r0, 0
|
||||
cmpdi r0, 0
|
||||
blt kvm_novcpu_exit
|
||||
|
||||
/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
|
||||
@ -319,10 +339,10 @@ kvm_novcpu_exit:
|
||||
bl kvmhv_accumulate_time
|
||||
#endif
|
||||
13: mr r3, r12
|
||||
stw r12, 112-4(r1)
|
||||
stw r12, STACK_SLOT_TRAP(r1)
|
||||
bl kvmhv_commence_exit
|
||||
nop
|
||||
lwz r12, 112-4(r1)
|
||||
lwz r12, STACK_SLOT_TRAP(r1)
|
||||
b kvmhv_switch_to_host
|
||||
|
||||
/*
|
||||
@ -390,8 +410,8 @@ kvm_secondary_got_guest:
|
||||
lbz r4, HSTATE_PTID(r13)
|
||||
cmpwi r4, 0
|
||||
bne 63f
|
||||
lis r6, 0x7fff
|
||||
ori r6, r6, 0xffff
|
||||
LOAD_REG_ADDR(r6, decrementer_max)
|
||||
ld r6, 0(r6)
|
||||
mtspr SPRN_HDEC, r6
|
||||
/* and set per-LPAR registers, if doing dynamic micro-threading */
|
||||
ld r6, HSTATE_SPLIT_MODE(r13)
|
||||
@ -545,11 +565,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
|
||||
* *
|
||||
*****************************************************************************/
|
||||
|
||||
/* Stack frame offsets */
|
||||
#define STACK_SLOT_TID (112-16)
|
||||
#define STACK_SLOT_PSSCR (112-24)
|
||||
#define STACK_SLOT_PID (112-32)
|
||||
|
||||
.global kvmppc_hv_entry
|
||||
kvmppc_hv_entry:
|
||||
|
||||
@ -565,7 +580,7 @@ kvmppc_hv_entry:
|
||||
*/
|
||||
mflr r0
|
||||
std r0, PPC_LR_STKOFF(r1)
|
||||
stdu r1, -112(r1)
|
||||
stdu r1, -SFS(r1)
|
||||
|
||||
/* Save R1 in the PACA */
|
||||
std r1, HSTATE_HOST_R1(r13)
|
||||
@ -749,10 +764,20 @@ BEGIN_FTR_SECTION
|
||||
mfspr r5, SPRN_TIDR
|
||||
mfspr r6, SPRN_PSSCR
|
||||
mfspr r7, SPRN_PID
|
||||
mfspr r8, SPRN_IAMR
|
||||
std r5, STACK_SLOT_TID(r1)
|
||||
std r6, STACK_SLOT_PSSCR(r1)
|
||||
std r7, STACK_SLOT_PID(r1)
|
||||
std r8, STACK_SLOT_IAMR(r1)
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
|
||||
BEGIN_FTR_SECTION
|
||||
mfspr r5, SPRN_CIABR
|
||||
mfspr r6, SPRN_DAWR
|
||||
mfspr r7, SPRN_DAWRX
|
||||
std r5, STACK_SLOT_CIABR(r1)
|
||||
std r6, STACK_SLOT_DAWR(r1)
|
||||
std r7, STACK_SLOT_DAWRX(r1)
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
|
||||
|
||||
BEGIN_FTR_SECTION
|
||||
/* Set partition DABR */
|
||||
@ -968,7 +993,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
|
||||
|
||||
/* Check if HDEC expires soon */
|
||||
mfspr r3, SPRN_HDEC
|
||||
cmpwi r3, 512 /* 1 microsecond */
|
||||
EXTEND_HDEC(r3)
|
||||
cmpdi r3, 512 /* 1 microsecond */
|
||||
blt hdec_soon
|
||||
|
||||
#ifdef CONFIG_KVM_XICS
|
||||
@ -1505,11 +1531,10 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
|
||||
* set by the guest could disrupt the host.
|
||||
*/
|
||||
li r0, 0
|
||||
mtspr SPRN_IAMR, r0
|
||||
mtspr SPRN_CIABR, r0
|
||||
mtspr SPRN_DAWRX, r0
|
||||
mtspr SPRN_PSPB, r0
|
||||
mtspr SPRN_WORT, r0
|
||||
BEGIN_FTR_SECTION
|
||||
mtspr SPRN_IAMR, r0
|
||||
mtspr SPRN_TCSCR, r0
|
||||
/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
|
||||
li r0, 1
|
||||
@ -1525,6 +1550,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
|
||||
std r6,VCPU_UAMOR(r9)
|
||||
li r6,0
|
||||
mtspr SPRN_AMR,r6
|
||||
mtspr SPRN_UAMOR, r6
|
||||
|
||||
/* Switch DSCR back to host value */
|
||||
mfspr r8, SPRN_DSCR
|
||||
@ -1669,13 +1695,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
|
||||
ptesync
|
||||
|
||||
/* Restore host values of some registers */
|
||||
BEGIN_FTR_SECTION
|
||||
ld r5, STACK_SLOT_CIABR(r1)
|
||||
ld r6, STACK_SLOT_DAWR(r1)
|
||||
ld r7, STACK_SLOT_DAWRX(r1)
|
||||
mtspr SPRN_CIABR, r5
|
||||
mtspr SPRN_DAWR, r6
|
||||
mtspr SPRN_DAWRX, r7
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
|
||||
BEGIN_FTR_SECTION
|
||||
ld r5, STACK_SLOT_TID(r1)
|
||||
ld r6, STACK_SLOT_PSSCR(r1)
|
||||
ld r7, STACK_SLOT_PID(r1)
|
||||
ld r8, STACK_SLOT_IAMR(r1)
|
||||
mtspr SPRN_TIDR, r5
|
||||
mtspr SPRN_PSSCR, r6
|
||||
mtspr SPRN_PID, r7
|
||||
mtspr SPRN_IAMR, r8
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
|
||||
BEGIN_FTR_SECTION
|
||||
PPC_INVALIDATE_ERAT
|
||||
@ -1819,8 +1855,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
|
||||
li r0, KVM_GUEST_MODE_NONE
|
||||
stb r0, HSTATE_IN_GUEST(r13)
|
||||
|
||||
ld r0, 112+PPC_LR_STKOFF(r1)
|
||||
addi r1, r1, 112
|
||||
ld r0, SFS+PPC_LR_STKOFF(r1)
|
||||
addi r1, r1, SFS
|
||||
mtlr r0
|
||||
blr
|
||||
|
||||
@ -2366,12 +2402,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
|
||||
mfspr r3, SPRN_DEC
|
||||
mfspr r4, SPRN_HDEC
|
||||
mftb r5
|
||||
cmpw r3, r4
|
||||
extsw r3, r3
|
||||
EXTEND_HDEC(r4)
|
||||
cmpd r3, r4
|
||||
ble 67f
|
||||
mtspr SPRN_DEC, r4
|
||||
67:
|
||||
/* save expiry time of guest decrementer */
|
||||
extsw r3, r3
|
||||
add r3, r3, r5
|
||||
ld r4, HSTATE_KVM_VCPU(r13)
|
||||
ld r5, HSTATE_KVM_VCORE(r13)
|
||||
|
@ -101,5 +101,6 @@ void perf_get_regs_user(struct perf_regs *regs_user,
|
||||
struct pt_regs *regs_user_copy)
|
||||
{
|
||||
regs_user->regs = task_pt_regs(current);
|
||||
regs_user->abi = perf_reg_abi(current);
|
||||
regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
|
||||
PERF_SAMPLE_REGS_ABI_NONE;
|
||||
}
|
||||
|
@ -449,7 +449,7 @@ static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
|
||||
return mmio_atsd_reg;
|
||||
}
|
||||
|
||||
static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
|
||||
static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
|
||||
{
|
||||
unsigned long launch;
|
||||
|
||||
@ -465,12 +465,15 @@ static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
|
||||
/* PID */
|
||||
launch |= pid << PPC_BITLSHIFT(38);
|
||||
|
||||
/* No flush */
|
||||
launch |= !flush << PPC_BITLSHIFT(39);
|
||||
|
||||
/* Invalidating the entire process doesn't use a va */
|
||||
return mmio_launch_invalidate(npu, launch, 0);
|
||||
}
|
||||
|
||||
static int mmio_invalidate_va(struct npu *npu, unsigned long va,
|
||||
unsigned long pid)
|
||||
unsigned long pid, bool flush)
|
||||
{
|
||||
unsigned long launch;
|
||||
|
||||
@ -486,26 +489,60 @@ static int mmio_invalidate_va(struct npu *npu, unsigned long va,
|
||||
/* PID */
|
||||
launch |= pid << PPC_BITLSHIFT(38);
|
||||
|
||||
/* No flush */
|
||||
launch |= !flush << PPC_BITLSHIFT(39);
|
||||
|
||||
return mmio_launch_invalidate(npu, launch, va);
|
||||
}
|
||||
|
||||
#define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
|
||||
|
||||
struct mmio_atsd_reg {
|
||||
struct npu *npu;
|
||||
int reg;
|
||||
};
|
||||
|
||||
static void mmio_invalidate_wait(
|
||||
struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
|
||||
{
|
||||
struct npu *npu;
|
||||
int i, reg;
|
||||
|
||||
/* Wait for all invalidations to complete */
|
||||
for (i = 0; i <= max_npu2_index; i++) {
|
||||
if (mmio_atsd_reg[i].reg < 0)
|
||||
continue;
|
||||
|
||||
/* Wait for completion */
|
||||
npu = mmio_atsd_reg[i].npu;
|
||||
reg = mmio_atsd_reg[i].reg;
|
||||
while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
|
||||
cpu_relax();
|
||||
|
||||
put_mmio_atsd_reg(npu, reg);
|
||||
|
||||
/*
|
||||
* The GPU requires two flush ATSDs to ensure all entries have
|
||||
* been flushed. We use PID 0 as it will never be used for a
|
||||
* process on the GPU.
|
||||
*/
|
||||
if (flush)
|
||||
mmio_invalidate_pid(npu, 0, true);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Invalidate either a single address or an entire PID depending on
|
||||
* the value of va.
|
||||
*/
|
||||
static void mmio_invalidate(struct npu_context *npu_context, int va,
|
||||
unsigned long address)
|
||||
unsigned long address, bool flush)
|
||||
{
|
||||
int i, j, reg;
|
||||
int i, j;
|
||||
struct npu *npu;
|
||||
struct pnv_phb *nphb;
|
||||
struct pci_dev *npdev;
|
||||
struct {
|
||||
struct npu *npu;
|
||||
int reg;
|
||||
} mmio_atsd_reg[NV_MAX_NPUS];
|
||||
struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
|
||||
unsigned long pid = npu_context->mm->context.id;
|
||||
|
||||
/*
|
||||
@ -525,10 +562,11 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
|
||||
|
||||
if (va)
|
||||
mmio_atsd_reg[i].reg =
|
||||
mmio_invalidate_va(npu, address, pid);
|
||||
mmio_invalidate_va(npu, address, pid,
|
||||
flush);
|
||||
else
|
||||
mmio_atsd_reg[i].reg =
|
||||
mmio_invalidate_pid(npu, pid);
|
||||
mmio_invalidate_pid(npu, pid, flush);
|
||||
|
||||
/*
|
||||
* The NPU hardware forwards the shootdown to all GPUs
|
||||
@ -544,18 +582,10 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
|
||||
*/
|
||||
flush_tlb_mm(npu_context->mm);
|
||||
|
||||
/* Wait for all invalidations to complete */
|
||||
for (i = 0; i <= max_npu2_index; i++) {
|
||||
if (mmio_atsd_reg[i].reg < 0)
|
||||
continue;
|
||||
|
||||
/* Wait for completion */
|
||||
npu = mmio_atsd_reg[i].npu;
|
||||
reg = mmio_atsd_reg[i].reg;
|
||||
while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
|
||||
cpu_relax();
|
||||
put_mmio_atsd_reg(npu, reg);
|
||||
}
|
||||
mmio_invalidate_wait(mmio_atsd_reg, flush);
|
||||
if (flush)
|
||||
/* Wait for the flush to complete */
|
||||
mmio_invalidate_wait(mmio_atsd_reg, false);
|
||||
}
|
||||
|
||||
static void pnv_npu2_mn_release(struct mmu_notifier *mn,
|
||||
@ -571,7 +601,7 @@ static void pnv_npu2_mn_release(struct mmu_notifier *mn,
|
||||
* There should be no more translation requests for this PID, but we
|
||||
* need to ensure any entries for it are removed from the TLB.
|
||||
*/
|
||||
mmio_invalidate(npu_context, 0, 0);
|
||||
mmio_invalidate(npu_context, 0, 0, true);
|
||||
}
|
||||
|
||||
static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
|
||||
@ -581,7 +611,7 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
|
||||
{
|
||||
struct npu_context *npu_context = mn_to_npu_context(mn);
|
||||
|
||||
mmio_invalidate(npu_context, 1, address);
|
||||
mmio_invalidate(npu_context, 1, address, true);
|
||||
}
|
||||
|
||||
static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
|
||||
@ -590,7 +620,7 @@ static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
|
||||
{
|
||||
struct npu_context *npu_context = mn_to_npu_context(mn);
|
||||
|
||||
mmio_invalidate(npu_context, 1, address);
|
||||
mmio_invalidate(npu_context, 1, address, true);
|
||||
}
|
||||
|
||||
static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
|
||||
@ -600,8 +630,11 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
|
||||
struct npu_context *npu_context = mn_to_npu_context(mn);
|
||||
unsigned long address;
|
||||
|
||||
for (address = start; address <= end; address += PAGE_SIZE)
|
||||
mmio_invalidate(npu_context, 1, address);
|
||||
for (address = start; address < end; address += PAGE_SIZE)
|
||||
mmio_invalidate(npu_context, 1, address, false);
|
||||
|
||||
/* Do the flush only on the final addess == end */
|
||||
mmio_invalidate(npu_context, 1, address, true);
|
||||
}
|
||||
|
||||
static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
|
||||
@ -651,8 +684,11 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
|
||||
/* No nvlink associated with this GPU device */
|
||||
return ERR_PTR(-ENODEV);
|
||||
|
||||
if (!mm) {
|
||||
/* kernel thread contexts are not supported */
|
||||
if (!mm || mm->context.id == 0) {
|
||||
/*
|
||||
* Kernel thread contexts are not supported and context id 0 is
|
||||
* reserved on the GPU.
|
||||
*/
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
|
@ -977,11 +977,12 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
|
||||
ptr = asce.origin * 4096;
|
||||
if (asce.r) {
|
||||
*fake = 1;
|
||||
ptr = 0;
|
||||
asce.dt = ASCE_TYPE_REGION1;
|
||||
}
|
||||
switch (asce.dt) {
|
||||
case ASCE_TYPE_REGION1:
|
||||
if (vaddr.rfx01 > asce.tl && !asce.r)
|
||||
if (vaddr.rfx01 > asce.tl && !*fake)
|
||||
return PGM_REGION_FIRST_TRANS;
|
||||
break;
|
||||
case ASCE_TYPE_REGION2:
|
||||
@ -1009,8 +1010,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
|
||||
union region1_table_entry rfte;
|
||||
|
||||
if (*fake) {
|
||||
/* offset in 16EB guest memory block */
|
||||
ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
|
||||
ptr += (unsigned long) vaddr.rfx << 53;
|
||||
rfte.val = ptr;
|
||||
goto shadow_r2t;
|
||||
}
|
||||
@ -1036,8 +1036,7 @@ shadow_r2t:
|
||||
union region2_table_entry rste;
|
||||
|
||||
if (*fake) {
|
||||
/* offset in 8PB guest memory block */
|
||||
ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
|
||||
ptr += (unsigned long) vaddr.rsx << 42;
|
||||
rste.val = ptr;
|
||||
goto shadow_r3t;
|
||||
}
|
||||
@ -1064,8 +1063,7 @@ shadow_r3t:
|
||||
union region3_table_entry rtte;
|
||||
|
||||
if (*fake) {
|
||||
/* offset in 4TB guest memory block */
|
||||
ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
|
||||
ptr += (unsigned long) vaddr.rtx << 31;
|
||||
rtte.val = ptr;
|
||||
goto shadow_sgt;
|
||||
}
|
||||
@ -1101,8 +1099,7 @@ shadow_sgt:
|
||||
union segment_table_entry ste;
|
||||
|
||||
if (*fake) {
|
||||
/* offset in 2G guest memory block */
|
||||
ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
|
||||
ptr += (unsigned long) vaddr.sx << 20;
|
||||
ste.val = ptr;
|
||||
goto shadow_pgt;
|
||||
}
|
||||
|
@ -431,11 +431,11 @@ static __initconst const u64 skl_hw_cache_event_ids
|
||||
[ C(DTLB) ] = {
|
||||
[ C(OP_READ) ] = {
|
||||
[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
|
||||
[ C(RESULT_MISS) ] = 0x608, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
|
||||
[ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
|
||||
},
|
||||
[ C(OP_WRITE) ] = {
|
||||
[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
|
||||
[ C(RESULT_MISS) ] = 0x649, /* DTLB_STORE_MISSES.WALK_COMPLETED */
|
||||
[ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
|
||||
},
|
||||
[ C(OP_PREFETCH) ] = {
|
||||
[ C(RESULT_ACCESS) ] = 0x0,
|
||||
|
@ -296,6 +296,7 @@ struct x86_emulate_ctxt {
|
||||
|
||||
bool perm_ok; /* do not check permissions if true */
|
||||
bool ud; /* inject an #UD if host doesn't support insn */
|
||||
bool tf; /* TF value before instruction (after for syscall/sysret) */
|
||||
|
||||
bool have_exception;
|
||||
struct x86_exception exception;
|
||||
|
@ -2,8 +2,7 @@
|
||||
#define _ASM_X86_MSHYPER_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/clocksource.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <asm/hyperv.h>
|
||||
|
||||
/*
|
||||
|
@ -2742,6 +2742,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
|
||||
ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
|
||||
}
|
||||
|
||||
ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
|
||||
return X86EMUL_CONTINUE;
|
||||
}
|
||||
|
||||
|
@ -5313,6 +5313,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
|
||||
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
|
||||
|
||||
ctxt->eflags = kvm_get_rflags(vcpu);
|
||||
ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
|
||||
|
||||
ctxt->eip = kvm_rip_read(vcpu);
|
||||
ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
|
||||
(ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :
|
||||
@ -5528,36 +5530,25 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
|
||||
return dr6;
|
||||
}
|
||||
|
||||
static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
|
||||
static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
|
||||
{
|
||||
struct kvm_run *kvm_run = vcpu->run;
|
||||
|
||||
/*
|
||||
* rflags is the old, "raw" value of the flags. The new value has
|
||||
* not been saved yet.
|
||||
*
|
||||
* This is correct even for TF set by the guest, because "the
|
||||
* processor will not generate this exception after the instruction
|
||||
* that sets the TF flag".
|
||||
*/
|
||||
if (unlikely(rflags & X86_EFLAGS_TF)) {
|
||||
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
|
||||
kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
|
||||
DR6_RTM;
|
||||
kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
|
||||
kvm_run->debug.arch.exception = DB_VECTOR;
|
||||
kvm_run->exit_reason = KVM_EXIT_DEBUG;
|
||||
*r = EMULATE_USER_EXIT;
|
||||
} else {
|
||||
/*
|
||||
* "Certain debug exceptions may clear bit 0-3. The
|
||||
* remaining contents of the DR6 register are never
|
||||
* cleared by the processor".
|
||||
*/
|
||||
vcpu->arch.dr6 &= ~15;
|
||||
vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
|
||||
kvm_queue_exception(vcpu, DB_VECTOR);
|
||||
}
|
||||
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
|
||||
kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
|
||||
kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
|
||||
kvm_run->debug.arch.exception = DB_VECTOR;
|
||||
kvm_run->exit_reason = KVM_EXIT_DEBUG;
|
||||
*r = EMULATE_USER_EXIT;
|
||||
} else {
|
||||
/*
|
||||
* "Certain debug exceptions may clear bit 0-3. The
|
||||
* remaining contents of the DR6 register are never
|
||||
* cleared by the processor".
|
||||
*/
|
||||
vcpu->arch.dr6 &= ~15;
|
||||
vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
|
||||
kvm_queue_exception(vcpu, DB_VECTOR);
|
||||
}
|
||||
}
|
||||
|
||||
@ -5567,7 +5558,17 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
|
||||
int r = EMULATE_DONE;
|
||||
|
||||
kvm_x86_ops->skip_emulated_instruction(vcpu);
|
||||
kvm_vcpu_check_singlestep(vcpu, rflags, &r);
|
||||
|
||||
/*
|
||||
* rflags is the old, "raw" value of the flags. The new value has
|
||||
* not been saved yet.
|
||||
*
|
||||
* This is correct even for TF set by the guest, because "the
|
||||
* processor will not generate this exception after the instruction
|
||||
* that sets the TF flag".
|
||||
*/
|
||||
if (unlikely(rflags & X86_EFLAGS_TF))
|
||||
kvm_vcpu_do_singlestep(vcpu, &r);
|
||||
return r == EMULATE_DONE;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
|
||||
@ -5726,8 +5727,9 @@ restart:
|
||||
toggle_interruptibility(vcpu, ctxt->interruptibility);
|
||||
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
|
||||
kvm_rip_write(vcpu, ctxt->eip);
|
||||
if (r == EMULATE_DONE)
|
||||
kvm_vcpu_check_singlestep(vcpu, rflags, &r);
|
||||
if (r == EMULATE_DONE &&
|
||||
(ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
|
||||
kvm_vcpu_do_singlestep(vcpu, &r);
|
||||
if (!ctxt->have_exception ||
|
||||
exception_type(ctxt->exception.vector) == EXCPT_TRAP)
|
||||
__kvm_set_rflags(vcpu, ctxt->eflags);
|
||||
|
@ -68,6 +68,45 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q,
|
||||
__blk_mq_sched_assign_ioc(q, rq, bio, ioc);
|
||||
}
|
||||
|
||||
/*
|
||||
* Mark a hardware queue as needing a restart. For shared queues, maintain
|
||||
* a count of how many hardware queues are marked for restart.
|
||||
*/
|
||||
static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
|
||||
{
|
||||
if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
|
||||
return;
|
||||
|
||||
if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
|
||||
struct request_queue *q = hctx->queue;
|
||||
|
||||
if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
|
||||
atomic_inc(&q->shared_hctx_restart);
|
||||
} else
|
||||
set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
|
||||
}
|
||||
|
||||
static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
|
||||
{
|
||||
if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
|
||||
return false;
|
||||
|
||||
if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
|
||||
struct request_queue *q = hctx->queue;
|
||||
|
||||
if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
|
||||
atomic_dec(&q->shared_hctx_restart);
|
||||
} else
|
||||
clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
|
||||
|
||||
if (blk_mq_hctx_has_pending(hctx)) {
|
||||
blk_mq_run_hw_queue(hctx, true);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
struct request *blk_mq_sched_get_request(struct request_queue *q,
|
||||
struct bio *bio,
|
||||
unsigned int op,
|
||||
@ -266,18 +305,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
|
||||
{
|
||||
if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
|
||||
clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
|
||||
if (blk_mq_hctx_has_pending(hctx)) {
|
||||
blk_mq_run_hw_queue(hctx, true);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
|
||||
* @pos: loop cursor.
|
||||
@ -309,6 +336,13 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
|
||||
unsigned int i, j;
|
||||
|
||||
if (set->flags & BLK_MQ_F_TAG_SHARED) {
|
||||
/*
|
||||
* If this is 0, then we know that no hardware queues
|
||||
* have RESTART marked. We're done.
|
||||
*/
|
||||
if (!atomic_read(&queue->shared_hctx_restart))
|
||||
return;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
|
||||
tag_set_list) {
|
||||
|
@ -115,15 +115,6 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Mark a hardware queue as needing a restart.
|
||||
*/
|
||||
static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
|
||||
{
|
||||
if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
|
||||
set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
|
||||
}
|
||||
|
||||
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
|
||||
{
|
||||
return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
|
||||
|
@ -2103,20 +2103,30 @@ static void blk_mq_map_swqueue(struct request_queue *q,
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Caller needs to ensure that we're either frozen/quiesced, or that
|
||||
* the queue isn't live yet.
|
||||
*/
|
||||
static void queue_set_hctx_shared(struct request_queue *q, bool shared)
|
||||
{
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
int i;
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
if (shared)
|
||||
if (shared) {
|
||||
if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
|
||||
atomic_inc(&q->shared_hctx_restart);
|
||||
hctx->flags |= BLK_MQ_F_TAG_SHARED;
|
||||
else
|
||||
} else {
|
||||
if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
|
||||
atomic_dec(&q->shared_hctx_restart);
|
||||
hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
|
||||
static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
|
||||
bool shared)
|
||||
{
|
||||
struct request_queue *q;
|
||||
|
||||
|
@ -1428,6 +1428,37 @@ static void acpi_init_coherency(struct acpi_device *adev)
|
||||
adev->flags.coherent_dma = cca;
|
||||
}
|
||||
|
||||
static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
|
||||
{
|
||||
bool *is_spi_i2c_slave_p = data;
|
||||
|
||||
if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
|
||||
return 1;
|
||||
|
||||
/*
|
||||
* devices that are connected to UART still need to be enumerated to
|
||||
* platform bus
|
||||
*/
|
||||
if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
|
||||
*is_spi_i2c_slave_p = true;
|
||||
|
||||
/* no need to do more checking */
|
||||
return -1;
|
||||
}
|
||||
|
||||
static bool acpi_is_spi_i2c_slave(struct acpi_device *device)
|
||||
{
|
||||
struct list_head resource_list;
|
||||
bool is_spi_i2c_slave = false;
|
||||
|
||||
INIT_LIST_HEAD(&resource_list);
|
||||
acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
|
||||
&is_spi_i2c_slave);
|
||||
acpi_dev_free_resource_list(&resource_list);
|
||||
|
||||
return is_spi_i2c_slave;
|
||||
}
|
||||
|
||||
void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
|
||||
int type, unsigned long long sta)
|
||||
{
|
||||
@ -1443,6 +1474,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
|
||||
acpi_bus_get_flags(device);
|
||||
device->flags.match_driver = false;
|
||||
device->flags.initialized = true;
|
||||
device->flags.spi_i2c_slave = acpi_is_spi_i2c_slave(device);
|
||||
acpi_device_clear_enumerated(device);
|
||||
device_initialize(&device->dev);
|
||||
dev_set_uevent_suppress(&device->dev, true);
|
||||
@ -1727,38 +1759,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
|
||||
{
|
||||
bool *is_spi_i2c_slave_p = data;
|
||||
|
||||
if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
|
||||
return 1;
|
||||
|
||||
/*
|
||||
* devices that are connected to UART still need to be enumerated to
|
||||
* platform bus
|
||||
*/
|
||||
if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
|
||||
*is_spi_i2c_slave_p = true;
|
||||
|
||||
/* no need to do more checking */
|
||||
return -1;
|
||||
}
|
||||
|
||||
static void acpi_default_enumeration(struct acpi_device *device)
|
||||
{
|
||||
struct list_head resource_list;
|
||||
bool is_spi_i2c_slave = false;
|
||||
|
||||
/*
|
||||
* Do not enumerate SPI/I2C slaves as they will be enumerated by their
|
||||
* respective parents.
|
||||
*/
|
||||
INIT_LIST_HEAD(&resource_list);
|
||||
acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
|
||||
&is_spi_i2c_slave);
|
||||
acpi_dev_free_resource_list(&resource_list);
|
||||
if (!is_spi_i2c_slave) {
|
||||
if (!device->flags.spi_i2c_slave) {
|
||||
acpi_create_platform_device(device, NULL);
|
||||
acpi_device_set_enumerated(device);
|
||||
} else {
|
||||
@ -1854,7 +1861,7 @@ static void acpi_bus_attach(struct acpi_device *device)
|
||||
return;
|
||||
|
||||
device->flags.match_driver = true;
|
||||
if (ret > 0) {
|
||||
if (ret > 0 && !device->flags.spi_i2c_slave) {
|
||||
acpi_device_set_enumerated(device);
|
||||
goto ok;
|
||||
}
|
||||
@ -1863,10 +1870,10 @@ static void acpi_bus_attach(struct acpi_device *device)
|
||||
if (ret < 0)
|
||||
return;
|
||||
|
||||
if (device->pnp.type.platform_id)
|
||||
acpi_default_enumeration(device);
|
||||
else
|
||||
if (!device->pnp.type.platform_id && !device->flags.spi_i2c_slave)
|
||||
acpi_device_set_enumerated(device);
|
||||
else
|
||||
acpi_default_enumeration(device);
|
||||
|
||||
ok:
|
||||
list_for_each_entry(child, &device->children, node)
|
||||
|
@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg)
|
||||
unsigned long timeout;
|
||||
int ret;
|
||||
|
||||
xen_blkif_get(blkif);
|
||||
|
||||
set_freezable();
|
||||
while (!kthread_should_stop()) {
|
||||
if (try_to_freeze())
|
||||
@ -665,7 +663,6 @@ purge_gnt_list:
|
||||
print_stats(ring);
|
||||
|
||||
ring->xenblkd = NULL;
|
||||
xen_blkif_put(blkif);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1436,34 +1433,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
|
||||
static void make_response(struct xen_blkif_ring *ring, u64 id,
|
||||
unsigned short op, int st)
|
||||
{
|
||||
struct blkif_response resp;
|
||||
struct blkif_response *resp;
|
||||
unsigned long flags;
|
||||
union blkif_back_rings *blk_rings;
|
||||
int notify;
|
||||
|
||||
resp.id = id;
|
||||
resp.operation = op;
|
||||
resp.status = st;
|
||||
|
||||
spin_lock_irqsave(&ring->blk_ring_lock, flags);
|
||||
blk_rings = &ring->blk_rings;
|
||||
/* Place on the response ring for the relevant domain. */
|
||||
switch (ring->blkif->blk_protocol) {
|
||||
case BLKIF_PROTOCOL_NATIVE:
|
||||
memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
|
||||
&resp, sizeof(resp));
|
||||
resp = RING_GET_RESPONSE(&blk_rings->native,
|
||||
blk_rings->native.rsp_prod_pvt);
|
||||
break;
|
||||
case BLKIF_PROTOCOL_X86_32:
|
||||
memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
|
||||
&resp, sizeof(resp));
|
||||
resp = RING_GET_RESPONSE(&blk_rings->x86_32,
|
||||
blk_rings->x86_32.rsp_prod_pvt);
|
||||
break;
|
||||
case BLKIF_PROTOCOL_X86_64:
|
||||
memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
|
||||
&resp, sizeof(resp));
|
||||
resp = RING_GET_RESPONSE(&blk_rings->x86_64,
|
||||
blk_rings->x86_64.rsp_prod_pvt);
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
||||
resp->id = id;
|
||||
resp->operation = op;
|
||||
resp->status = st;
|
||||
|
||||
blk_rings->common.rsp_prod_pvt++;
|
||||
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
|
||||
spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
|
||||
|
@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues;
|
||||
struct blkif_common_request {
|
||||
char dummy;
|
||||
};
|
||||
struct blkif_common_response {
|
||||
char dummy;
|
||||
};
|
||||
|
||||
/* i386 protocol version */
|
||||
|
||||
struct blkif_x86_32_request_rw {
|
||||
uint8_t nr_segments; /* number of segments */
|
||||
@ -129,14 +128,6 @@ struct blkif_x86_32_request {
|
||||
} u;
|
||||
} __attribute__((__packed__));
|
||||
|
||||
/* i386 protocol version */
|
||||
#pragma pack(push, 4)
|
||||
struct blkif_x86_32_response {
|
||||
uint64_t id; /* copied from request */
|
||||
uint8_t operation; /* copied from request */
|
||||
int16_t status; /* BLKIF_RSP_??? */
|
||||
};
|
||||
#pragma pack(pop)
|
||||
/* x86_64 protocol version */
|
||||
|
||||
struct blkif_x86_64_request_rw {
|
||||
@ -193,18 +184,12 @@ struct blkif_x86_64_request {
|
||||
} u;
|
||||
} __attribute__((__packed__));
|
||||
|
||||
struct blkif_x86_64_response {
|
||||
uint64_t __attribute__((__aligned__(8))) id;
|
||||
uint8_t operation; /* copied from request */
|
||||
int16_t status; /* BLKIF_RSP_??? */
|
||||
};
|
||||
|
||||
DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
|
||||
struct blkif_common_response);
|
||||
struct blkif_response);
|
||||
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
|
||||
struct blkif_x86_32_response);
|
||||
struct blkif_response __packed);
|
||||
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
|
||||
struct blkif_x86_64_response);
|
||||
struct blkif_response);
|
||||
|
||||
union blkif_back_rings {
|
||||
struct blkif_back_ring native;
|
||||
@ -281,6 +266,7 @@ struct xen_blkif_ring {
|
||||
|
||||
wait_queue_head_t wq;
|
||||
atomic_t inflight;
|
||||
bool active;
|
||||
/* One thread per blkif ring. */
|
||||
struct task_struct *xenblkd;
|
||||
unsigned int waiting_reqs;
|
||||
|
@ -159,7 +159,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
|
||||
init_waitqueue_head(&ring->shutdown_wq);
|
||||
ring->blkif = blkif;
|
||||
ring->st_print = jiffies;
|
||||
xen_blkif_get(blkif);
|
||||
ring->active = true;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -249,10 +249,12 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
|
||||
struct xen_blkif_ring *ring = &blkif->rings[r];
|
||||
unsigned int i = 0;
|
||||
|
||||
if (!ring->active)
|
||||
continue;
|
||||
|
||||
if (ring->xenblkd) {
|
||||
kthread_stop(ring->xenblkd);
|
||||
wake_up(&ring->shutdown_wq);
|
||||
ring->xenblkd = NULL;
|
||||
}
|
||||
|
||||
/* The above kthread_stop() guarantees that at this point we
|
||||
@ -296,7 +298,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
|
||||
BUG_ON(ring->free_pages_num != 0);
|
||||
BUG_ON(ring->persistent_gnt_c != 0);
|
||||
WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
|
||||
xen_blkif_put(blkif);
|
||||
ring->active = false;
|
||||
}
|
||||
blkif->nr_ring_pages = 0;
|
||||
/*
|
||||
@ -312,9 +314,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
|
||||
|
||||
static void xen_blkif_free(struct xen_blkif *blkif)
|
||||
{
|
||||
|
||||
xen_blkif_disconnect(blkif);
|
||||
WARN_ON(xen_blkif_disconnect(blkif));
|
||||
xen_vbd_free(&blkif->vbd);
|
||||
kfree(blkif->be->mode);
|
||||
kfree(blkif->be);
|
||||
|
||||
/* Make sure everything is drained before shutting down */
|
||||
kmem_cache_free(xen_blkif_cachep, blkif);
|
||||
@ -511,8 +514,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
|
||||
xen_blkif_put(be->blkif);
|
||||
}
|
||||
|
||||
kfree(be->mode);
|
||||
kfree(be);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -121,7 +121,6 @@ config QCOM_EBI2
|
||||
config SIMPLE_PM_BUS
|
||||
bool "Simple Power-Managed Bus Driver"
|
||||
depends on OF && PM
|
||||
depends on ARCH_RENESAS || COMPILE_TEST
|
||||
help
|
||||
Driver for transparent busses that don't need a real driver, but
|
||||
where the bus controller is part of a PM domain, or under the control
|
||||
|
@ -803,13 +803,13 @@ static int crng_fast_load(const char *cp, size_t len)
|
||||
p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
|
||||
cp++; crng_init_cnt++; len--;
|
||||
}
|
||||
spin_unlock_irqrestore(&primary_crng.lock, flags);
|
||||
if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
|
||||
invalidate_batched_entropy();
|
||||
crng_init = 1;
|
||||
wake_up_interruptible(&crng_init_wait);
|
||||
pr_notice("random: fast init done\n");
|
||||
}
|
||||
spin_unlock_irqrestore(&primary_crng.lock, flags);
|
||||
return 1;
|
||||
}
|
||||
|
||||
@ -841,6 +841,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
|
||||
}
|
||||
memzero_explicit(&buf, sizeof(buf));
|
||||
crng->init_time = jiffies;
|
||||
spin_unlock_irqrestore(&primary_crng.lock, flags);
|
||||
if (crng == &primary_crng && crng_init < 2) {
|
||||
invalidate_batched_entropy();
|
||||
crng_init = 2;
|
||||
@ -848,7 +849,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
|
||||
wake_up_interruptible(&crng_init_wait);
|
||||
pr_notice("random: crng init done\n");
|
||||
}
|
||||
spin_unlock_irqrestore(&primary_crng.lock, flags);
|
||||
}
|
||||
|
||||
static inline void crng_wait_ready(void)
|
||||
@ -2041,8 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
|
||||
u64 get_random_u64(void)
|
||||
{
|
||||
u64 ret;
|
||||
bool use_lock = crng_init < 2;
|
||||
unsigned long flags;
|
||||
bool use_lock = READ_ONCE(crng_init) < 2;
|
||||
unsigned long flags = 0;
|
||||
struct batched_entropy *batch;
|
||||
|
||||
#if BITS_PER_LONG == 64
|
||||
@ -2073,8 +2073,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
|
||||
u32 get_random_u32(void)
|
||||
{
|
||||
u32 ret;
|
||||
bool use_lock = crng_init < 2;
|
||||
unsigned long flags;
|
||||
bool use_lock = READ_ONCE(crng_init) < 2;
|
||||
unsigned long flags = 0;
|
||||
struct batched_entropy *batch;
|
||||
|
||||
if (arch_get_random_int(&ret))
|
||||
|
@ -14,6 +14,7 @@ config COMMON_CLK_MESON8B
|
||||
config COMMON_CLK_GXBB
|
||||
bool
|
||||
depends on COMMON_CLK_AMLOGIC
|
||||
select RESET_CONTROLLER
|
||||
help
|
||||
Support for the clock controller on AmLogic S905 devices, aka gxbb.
|
||||
Say Y if you want peripherals and CPU frequency scaling to work.
|
||||
|
@ -156,6 +156,7 @@ config SUN8I_R_CCU
|
||||
bool "Support for Allwinner SoCs' PRCM CCUs"
|
||||
select SUNXI_CCU_DIV
|
||||
select SUNXI_CCU_GATE
|
||||
select SUNXI_CCU_MP
|
||||
default MACH_SUN8I || (ARCH_SUNXI && ARM64)
|
||||
|
||||
endif
|
||||
|
@ -31,7 +31,9 @@
|
||||
#define CLK_PLL_VIDEO0_2X 8
|
||||
#define CLK_PLL_VE 9
|
||||
#define CLK_PLL_DDR0 10
|
||||
#define CLK_PLL_PERIPH0 11
|
||||
|
||||
/* PLL_PERIPH0 exported for PRCM */
|
||||
|
||||
#define CLK_PLL_PERIPH0_2X 12
|
||||
#define CLK_PLL_PERIPH1 13
|
||||
#define CLK_PLL_PERIPH1_2X 14
|
||||
|
@ -243,7 +243,7 @@ static SUNXI_CCU_GATE(ahb_ss_clk, "ahb-ss", "ahb",
|
||||
static SUNXI_CCU_GATE(ahb_dma_clk, "ahb-dma", "ahb",
|
||||
0x060, BIT(6), 0);
|
||||
static SUNXI_CCU_GATE(ahb_bist_clk, "ahb-bist", "ahb",
|
||||
0x060, BIT(6), 0);
|
||||
0x060, BIT(7), 0);
|
||||
static SUNXI_CCU_GATE(ahb_mmc0_clk, "ahb-mmc0", "ahb",
|
||||
0x060, BIT(8), 0);
|
||||
static SUNXI_CCU_GATE(ahb_mmc1_clk, "ahb-mmc1", "ahb",
|
||||
|
@ -556,7 +556,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(lcd0_ch1_clk, "lcd0-ch1", lcd_ch1_parents,
|
||||
0x12c, 0, 4, 24, 3, BIT(31),
|
||||
CLK_SET_RATE_PARENT);
|
||||
static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents,
|
||||
0x12c, 0, 4, 24, 3, BIT(31),
|
||||
0x130, 0, 4, 24, 3, BIT(31),
|
||||
CLK_SET_RATE_PARENT);
|
||||
|
||||
static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1",
|
||||
|
@ -29,7 +29,9 @@
|
||||
#define CLK_PLL_VIDEO 6
|
||||
#define CLK_PLL_VE 7
|
||||
#define CLK_PLL_DDR 8
|
||||
#define CLK_PLL_PERIPH0 9
|
||||
|
||||
/* PLL_PERIPH0 exported for PRCM */
|
||||
|
||||
#define CLK_PLL_PERIPH0_2X 10
|
||||
#define CLK_PLL_GPU 11
|
||||
#define CLK_PLL_PERIPH1 12
|
||||
|
@ -537,7 +537,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
|
||||
[RST_BUS_EMAC] = { 0x2c0, BIT(17) },
|
||||
[RST_BUS_HSTIMER] = { 0x2c0, BIT(19) },
|
||||
[RST_BUS_SPI0] = { 0x2c0, BIT(20) },
|
||||
[RST_BUS_OTG] = { 0x2c0, BIT(23) },
|
||||
[RST_BUS_OTG] = { 0x2c0, BIT(24) },
|
||||
[RST_BUS_EHCI0] = { 0x2c0, BIT(26) },
|
||||
[RST_BUS_OHCI0] = { 0x2c0, BIT(29) },
|
||||
|
||||
|
@ -1209,9 +1209,9 @@ arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
|
||||
return 0;
|
||||
}
|
||||
|
||||
rate = readl_relaxed(frame + CNTFRQ);
|
||||
rate = readl_relaxed(base + CNTFRQ);
|
||||
|
||||
iounmap(frame);
|
||||
iounmap(base);
|
||||
|
||||
return rate;
|
||||
}
|
||||
|
@ -18,6 +18,7 @@
|
||||
#include <linux/clk.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/clockchips.h>
|
||||
#include <linux/clocksource.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/slab.h>
|
||||
|
@ -12,6 +12,7 @@
|
||||
|
||||
#include <linux/clk.h>
|
||||
#include <linux/clockchips.h>
|
||||
#include <linux/clocksource.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
|
@ -721,7 +721,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
        u32 set;

        if (!of_device_is_compatible(mvchip->chip.of_node,
                                     "marvell,armada-370-xp-gpio"))
                                     "marvell,armada-370-gpio"))
                return 0;

        if (IS_ERR(mvchip->clk))

@ -852,7 +852,7 @@ static const struct of_device_id mvebu_gpio_of_match[] = {
                .data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP,
        },
        {
                .compatible = "marvell,armada-370-xp-gpio",
                .compatible = "marvell,armada-370-gpio",
                .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION,
        },
        {

@ -1128,7 +1128,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
                                                    mvchip);
        }

        /* Armada 370/XP has simple PWM support for GPIO lines */
        /* Some MVEBU SoCs have simple PWM support for GPIO lines */
        if (IS_ENABLED(CONFIG_PWM))
                return mvebu_pwm_probe(pdev, mvchip, id);
@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
                        DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
                                 adev->clock.default_dispclk / 100);
                        adev->clock.default_dispclk = 60000;
                } else if (adev->clock.default_dispclk <= 60000) {
                        DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
                                 adev->clock.default_dispclk / 100);
                        adev->clock.default_dispclk = 62500;
                }
                adev->clock.dp_extclk =
                        le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
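For context on the numbers above: default_dispclk is kept in 10 kHz units, which is why 60000 corresponds to 600 MHz and the messages divide by 100. A self-contained restatement of the clamping logic; the zero check in the first branch is an assumption here, since that condition sits outside the visible hunk:

/* Stand-alone sketch of the unit handling, not driver code. */
#include <stdio.h>

static unsigned int sanitize_dispclk(unsigned int dispclk_10khz)
{
        if (dispclk_10khz == 0) {               /* condition assumed */
                printf("Changing default dispclk from %uMhz to 600Mhz\n",
                       dispclk_10khz / 100);
                dispclk_10khz = 60000;          /* 600 MHz in 10 kHz steps */
        } else if (dispclk_10khz <= 60000) {
                printf("Changing default dispclk from %uMhz to 625Mhz\n",
                       dispclk_10khz / 100);
                dispclk_10khz = 62500;          /* 625 MHz */
        }
        return dispclk_10khz;
}

int main(void)
{
        printf("-> %u (10 kHz units)\n", sanitize_dispclk(30000));
        printf("-> %u (10 kHz units)\n", sanitize_dispclk(120000));
        return 0;
}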
@ -475,6 +475,7 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        {0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        /* Vega 10 */
        {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
@ -165,7 +165,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
        int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
        ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
        ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;

        memset(&args, 0, sizeof(args));

@ -178,7 +178,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
{
        int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
        ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
        ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;

        memset(&args, 0, sizeof(args));
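The two hunks above switch the on-stack args from the versioned parameter struct to the *_PS_ALLOCATION type. The usual motivation for that pattern, stated here as an assumption rather than taken from the commit, is that the allocation type is a union sized for every layout the AtomBIOS table might use, so sizeof(args) cannot come up short. A toy illustration with invented stand-in types:

/* Illustration only; these are not the AtomBIOS definitions. */
#include <stdio.h>

struct params_v2_1 { unsigned char pipe; unsigned char enable; unsigned char pad[2]; };
struct params_v2_2 { unsigned char pipe; unsigned char enable; unsigned int extra_state; };

union ps_allocation {
        struct params_v2_1 v2_1;
        struct params_v2_2 v2_2;
};

int main(void)
{
        printf("v2_1: %zu bytes, v2_2: %zu bytes, allocation: %zu bytes\n",
               sizeof(struct params_v2_1), sizeof(struct params_v2_2),
               sizeof(union ps_allocation));
        /* Declaring args as the union keeps the zeroed scratch area large
         * enough no matter which versioned layout the table interpreter
         * actually writes. */
        return 0;
}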
@ -261,21 +261,14 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
{
        struct drm_framebuffer *fb = plane->state->fb;
        struct hdlcd_drm_private *hdlcd;
        struct drm_gem_cma_object *gem;
        u32 src_x, src_y, dest_h;
        u32 dest_h;
        dma_addr_t scanout_start;

        if (!fb)
                return;

        src_x = plane->state->src.x1 >> 16;
        src_y = plane->state->src.y1 >> 16;
        dest_h = drm_rect_height(&plane->state->dst);
        gem = drm_fb_cma_get_gem_obj(fb, 0);

        scanout_start = gem->paddr + fb->offsets[0] +
                        src_y * fb->pitches[0] +
                        src_x * fb->format->cpp[0];
        scanout_start = drm_fb_cma_get_gem_addr(fb, plane->state, 0);

        hdlcd = plane->dev->dev_private;
        hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]);
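drm_fb_cma_get_gem_addr() now stands in for the open-coded scanout arithmetic that the hunk above removes. The same arithmetic, restated as a self-contained function for clarity (the sample numbers in main() are made up):

/* Stand-alone restatement of the removed offset math. */
#include <stdio.h>
#include <stdint.h>

static uint64_t scanout_addr(uint64_t paddr, uint32_t plane_offset,
                             uint32_t pitch, uint32_t cpp,
                             uint32_t src_x, uint32_t src_y)
{
        return paddr + plane_offset +
               (uint64_t)src_y * pitch +
               (uint64_t)src_x * cpp;
}

int main(void)
{
        /* 1920x1080 XRGB8888: pitch = 1920 * 4 bytes, cpp = 4 */
        printf("0x%llx\n",
               (unsigned long long)scanout_addr(0x80000000ull, 0,
                                                1920 * 4, 4, 16, 32));
        return 0;
}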
@ -297,6 +297,9 @@ static int hdlcd_drm_bind(struct device *dev)
        if (ret)
                goto err_free;

        /* Set the CRTC's port so that the encoder component can find it */
        hdlcd->crtc.port = of_graph_get_port_by_id(dev->of_node, 0);

        ret = component_bind_all(dev, drm);
        if (ret) {
                DRM_ERROR("Failed to bind all components\n");

@ -340,11 +343,14 @@ err_register:
        }
err_fbdev:
        drm_kms_helper_poll_fini(drm);
        drm_vblank_cleanup(drm);
err_vblank:
        pm_runtime_disable(drm->dev);
err_pm_active:
        component_unbind_all(dev, drm);
err_unload:
        of_node_put(hdlcd->crtc.port);
        hdlcd->crtc.port = NULL;
        drm_irq_uninstall(drm);
        of_reserved_mem_device_release(drm->dev);
err_free:

@ -367,6 +373,9 @@ static void hdlcd_drm_unbind(struct device *dev)
        }
        drm_kms_helper_poll_fini(drm);
        component_unbind_all(dev, drm);
        of_node_put(hdlcd->crtc.port);
        hdlcd->crtc.port = NULL;
        drm_vblank_cleanup(drm);
        pm_runtime_get_sync(drm->dev);
        drm_irq_uninstall(drm);
        pm_runtime_put_sync(drm->dev);
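The bind/unbind hunks above add the missing of_node_put() for the CRTC port on both the error path and the unbind path. A generic, self-contained sketch of that acquire/unwind pairing, with malloc()/free() standing in for of_graph_get_port_by_id()/of_node_put(); all names below are stand-ins:

/* Illustration of goto-style unwind, not driver code. */
#include <stdlib.h>

struct ctx { void *port; void *irq; };

static int bind_ctx(struct ctx *c)
{
        c->port = malloc(16);           /* stands in for of_graph_get_port_by_id() */
        if (!c->port)
                goto err_free;

        c->irq = malloc(16);            /* stands in for a later setup step */
        if (!c->irq)
                goto err_unload;

        return 0;

err_unload:
        free(c->port);                  /* release the reference on the error path */
        c->port = NULL;
err_free:
        return -1;
}

static void unbind_ctx(struct ctx *c)
{
        free(c->irq);
        free(c->port);                  /* and again on the unbind path */
        c->port = NULL;
}

int main(void)
{
        struct ctx c = { 0, 0 };

        if (bind_ctx(&c) == 0)
                unbind_ctx(&c);
        return 0;
}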
@ -1293,21 +1293,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
        if (!connector)
                return -ENOENT;

        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
        encoder = drm_connector_get_encoder(connector);
        if (encoder)
                out_resp->encoder_id = encoder->base.id;
        else
                out_resp->encoder_id = 0;

        ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
                        (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
                        (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
                        &out_resp->count_props);
        drm_modeset_unlock(&dev->mode_config.connection_mutex);
        if (ret)
                goto out_unref;

        for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
                if (connector->encoder_ids[i] != 0)
                        encoders_count++;

@ -1320,7 +1305,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
                        if (put_user(connector->encoder_ids[i],
                                     encoder_ptr + copied)) {
                                ret = -EFAULT;
                                goto out_unref;
                                goto out;
                        }
                        copied++;
                }

@ -1364,15 +1349,32 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
                        if (copy_to_user(mode_ptr + copied,
                                         &u_mode, sizeof(u_mode))) {
                                ret = -EFAULT;
                                mutex_unlock(&dev->mode_config.mutex);

                                goto out;
                        }
                        copied++;
                }
        }
        out_resp->count_modes = mode_count;
out:
        mutex_unlock(&dev->mode_config.mutex);
out_unref:

        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
        encoder = drm_connector_get_encoder(connector);
        if (encoder)
                out_resp->encoder_id = encoder->base.id;
        else
                out_resp->encoder_id = 0;

        /* Only grab properties after probing, to make sure EDID and other
         * properties reflect the latest status. */
        ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
                        (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
                        (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
                        &out_resp->count_props);
        drm_modeset_unlock(&dev->mode_config.connection_mutex);

out:
        drm_connector_put(connector);

        return ret;
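The rework above defers the encoder/property snapshot until after the connector has been probed and straightens out the unlock paths. From user space an interface like this is normally driven with the two-pass pattern sketched below; the structure and function are simplified stand-ins, not the real DRM_IOCTL_MODE_GETCONNECTOR layout:

/* Two-pass "ask for counts, then fetch" pattern, self-contained sketch. */
#include <stdio.h>
#include <stdlib.h>

struct fake_get_connector { unsigned int count_modes; unsigned int *modes; };

static int fake_getconnector(struct fake_get_connector *req)
{
        static const unsigned int kernel_modes[] = { 60, 75, 144 };
        unsigned int n = sizeof(kernel_modes) / sizeof(kernel_modes[0]);
        unsigned int i, copy = req->count_modes < n ? req->count_modes : n;

        for (i = 0; i < copy; i++)      /* copy at most what the caller asked for */
                req->modes[i] = kernel_modes[i];
        req->count_modes = n;           /* always report the full count */
        return 0;
}

int main(void)
{
        struct fake_get_connector req = { 0, NULL };

        fake_getconnector(&req);                        /* pass 1: sizes only */
        req.modes = calloc(req.count_modes, sizeof(*req.modes));
        if (!req.modes)
                return 1;
        fake_getconnector(&req);                        /* pass 2: fetch */
        printf("got %u modes, first refresh %u Hz\n", req.count_modes,
               req.modes[0]);
        free(req.modes);
        return 0;
}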
@ -129,7 +129,16 @@ i915-y += i915_vgpu.o

# perf code
i915-y += i915_perf.o \
          i915_oa_hsw.o
          i915_oa_hsw.o \
          i915_oa_bdw.o \
          i915_oa_chv.o \
          i915_oa_sklgt2.o \
          i915_oa_sklgt3.o \
          i915_oa_sklgt4.o \
          i915_oa_bxt.o \
          i915_oa_kblgt2.o \
          i915_oa_kblgt3.o \
          i915_oa_glk.o

ifeq ($(CONFIG_DRM_I915_GVT),y)
i915-y += intel_gvt.o
@ -217,9 +217,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,

        name = ch7xxx_get_id(vendor);
        if (!name) {
                DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
                              "slave %d.\n",
                              vendor, adapter->name, dvo->slave_addr);
                DRM_DEBUG_KMS("ch7xxx not detected; got VID 0x%02x from %s slave %d.\n",
                              vendor, adapter->name, dvo->slave_addr);
                goto out;
        }

@ -229,9 +228,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,

        devid = ch7xxx_get_did(device);
        if (!devid) {
                DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
                              "slave %d.\n",
                              vendor, adapter->name, dvo->slave_addr);
                DRM_DEBUG_KMS("ch7xxx not detected; got DID 0x%02x from %s slave %d.\n",
                              device, adapter->name, dvo->slave_addr);
                goto out;
        }
@ -3,6 +3,6 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
        interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
        execlist.o scheduler.o sched_policy.o render.o cmd_parser.o

ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o
@ -2414,53 +2414,13 @@ static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
|
||||
hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
|
||||
}
|
||||
|
||||
#define GVT_MAX_CMD_LENGTH 20 /* In Dword */
|
||||
|
||||
static void trace_cs_command(struct parser_exec_state *s,
|
||||
cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler)
|
||||
{
|
||||
/* This buffer is used by ftrace to store all commands copied from
|
||||
* guest gma space. Sometimes commands can cross pages, this should
|
||||
* not be handled in ftrace logic. So this is just used as a
|
||||
* 'bounce buffer'
|
||||
*/
|
||||
u32 cmd_trace_buf[GVT_MAX_CMD_LENGTH];
|
||||
int i;
|
||||
u32 cmd_len = cmd_length(s);
|
||||
/* The chosen value of GVT_MAX_CMD_LENGTH are just based on
|
||||
* following two considerations:
|
||||
* 1) From observation, most common ring commands is not that long.
|
||||
* But there are execeptions. So it indeed makes sence to observe
|
||||
* longer commands.
|
||||
* 2) From the performance and debugging point of view, dumping all
|
||||
* contents of very commands is not necessary.
|
||||
* We mgith shrink GVT_MAX_CMD_LENGTH or remove this trace event in
|
||||
* future for performance considerations.
|
||||
*/
|
||||
if (unlikely(cmd_len > GVT_MAX_CMD_LENGTH)) {
|
||||
gvt_dbg_cmd("cmd length exceed tracing limitation!\n");
|
||||
cmd_len = GVT_MAX_CMD_LENGTH;
|
||||
}
|
||||
|
||||
for (i = 0; i < cmd_len; i++)
|
||||
cmd_trace_buf[i] = cmd_val(s, i);
|
||||
|
||||
trace_gvt_command(s->vgpu->id, s->ring_id, s->ip_gma, cmd_trace_buf,
|
||||
cmd_len, s->buf_type == RING_BUFFER_INSTRUCTION,
|
||||
cost_pre_cmd_handler, cost_cmd_handler);
|
||||
}
|
||||
|
||||
/* call the cmd handler, and advance ip */
|
||||
static int cmd_parser_exec(struct parser_exec_state *s)
|
||||
{
|
||||
struct intel_vgpu *vgpu = s->vgpu;
|
||||
struct cmd_info *info;
|
||||
u32 cmd;
|
||||
int ret = 0;
|
||||
cycles_t t0, t1, t2;
|
||||
struct parser_exec_state s_before_advance_custom;
|
||||
struct intel_vgpu *vgpu = s->vgpu;
|
||||
|
||||
t0 = get_cycles();
|
||||
|
||||
cmd = cmd_val(s, 0);
|
||||
|
||||
@ -2471,13 +2431,10 @@ static int cmd_parser_exec(struct parser_exec_state *s)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
gvt_dbg_cmd("%s\n", info->name);
|
||||
|
||||
s->info = info;
|
||||
|
||||
t1 = get_cycles();
|
||||
|
||||
s_before_advance_custom = *s;
|
||||
trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va,
|
||||
cmd_length(s), s->buf_type);
|
||||
|
||||
if (info->handler) {
|
||||
ret = info->handler(s);
|
||||
@ -2486,9 +2443,6 @@ static int cmd_parser_exec(struct parser_exec_state *s)
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
t2 = get_cycles();
|
||||
|
||||
trace_cs_command(&s_before_advance_custom, t1 - t0, t2 - t1);
|
||||
|
||||
if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
|
||||
ret = cmd_advance_default(s);
|
||||
@ -2522,8 +2476,6 @@ static int command_scan(struct parser_exec_state *s,
|
||||
gma_tail = rb_start + rb_tail;
|
||||
gma_bottom = rb_start + rb_len;
|
||||
|
||||
gvt_dbg_cmd("scan_start: start=%lx end=%lx\n", gma_head, gma_tail);
|
||||
|
||||
while (s->ip_gma != gma_tail) {
|
||||
if (s->buf_type == RING_BUFFER_INSTRUCTION) {
|
||||
if (!(s->ip_gma >= rb_start) ||
|
||||
@ -2552,8 +2504,6 @@ static int command_scan(struct parser_exec_state *s,
|
||||
}
|
||||
}
|
||||
|
||||
gvt_dbg_cmd("scan_end\n");
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -708,53 +708,43 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
|
||||
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
|
||||
{
|
||||
struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
|
||||
struct execlist_ctx_descriptor_format *desc[2], valid_desc[2];
|
||||
unsigned long valid_desc_bitmap = 0;
|
||||
bool emulate_schedule_in = true;
|
||||
int ret;
|
||||
int i;
|
||||
struct execlist_ctx_descriptor_format desc[2];
|
||||
int i, ret;
|
||||
|
||||
memset(valid_desc, 0, sizeof(valid_desc));
|
||||
desc[0] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
|
||||
desc[1] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
|
||||
|
||||
desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
|
||||
desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
|
||||
if (!desc[0].valid) {
|
||||
gvt_vgpu_err("invalid elsp submission, desc0 is invalid\n");
|
||||
goto inv_desc;
|
||||
}
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
if (!desc[i]->valid)
|
||||
for (i = 0; i < ARRAY_SIZE(desc); i++) {
|
||||
if (!desc[i].valid)
|
||||
continue;
|
||||
|
||||
if (!desc[i]->privilege_access) {
|
||||
if (!desc[i].privilege_access) {
|
||||
gvt_vgpu_err("unexpected GGTT elsp submission\n");
|
||||
return -EINVAL;
|
||||
goto inv_desc;
|
||||
}
|
||||
|
||||
/* TODO: add another guest context checks here. */
|
||||
set_bit(i, &valid_desc_bitmap);
|
||||
valid_desc[i] = *desc[i];
|
||||
}
|
||||
|
||||
if (!valid_desc_bitmap) {
|
||||
gvt_vgpu_err("no valid desc in a elsp submission\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!test_bit(0, (void *)&valid_desc_bitmap) &&
|
||||
test_bit(1, (void *)&valid_desc_bitmap)) {
|
||||
gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* submit workload */
|
||||
for_each_set_bit(i, (void *)&valid_desc_bitmap, 2) {
|
||||
ret = submit_context(vgpu, ring_id, &valid_desc[i],
|
||||
emulate_schedule_in);
|
||||
for (i = 0; i < ARRAY_SIZE(desc); i++) {
|
||||
if (!desc[i].valid)
|
||||
continue;
|
||||
ret = submit_context(vgpu, ring_id, &desc[i], i == 0);
|
||||
if (ret) {
|
||||
gvt_vgpu_err("fail to schedule workload\n");
|
||||
gvt_vgpu_err("failed to submit desc %d\n", i);
|
||||
return ret;
|
||||
}
|
||||
emulate_schedule_in = false;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
inv_desc:
|
||||
gvt_vgpu_err("descriptors content: desc0 %08x %08x desc1 %08x %08x\n",
|
||||
desc[0].udw, desc[0].ldw, desc[1].udw, desc[1].ldw);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
|
||||
|
@ -102,13 +102,8 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
|
||||
|
||||
p = firmware + h->mmio_offset;
|
||||
|
||||
hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
|
||||
int j;
|
||||
|
||||
for (j = 0; j < e->length; j += 4)
|
||||
*(u32 *)(p + e->offset + j) =
|
||||
I915_READ_NOTRACE(_MMIO(e->offset + j));
|
||||
}
|
||||
hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
|
||||
*(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
|
||||
|
||||
memcpy(gvt->firmware.mmio, p, info->mmio_size);
|
||||
|
||||
|
@ -244,15 +244,19 @@ static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
|
||||
return readq(addr);
|
||||
}
|
||||
|
||||
static void gtt_invalidate(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
mmio_hw_access_pre(dev_priv);
|
||||
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
|
||||
mmio_hw_access_post(dev_priv);
|
||||
}
|
||||
|
||||
static void write_pte64(struct drm_i915_private *dev_priv,
|
||||
unsigned long index, u64 pte)
|
||||
{
|
||||
void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
|
||||
|
||||
writeq(pte, addr);
|
||||
|
||||
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
|
||||
POSTING_READ(GFX_FLSH_CNTL_GEN6);
|
||||
}
|
||||
|
||||
static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
|
||||
@ -1849,6 +1853,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
|
||||
}
|
||||
|
||||
ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
|
||||
gtt_invalidate(gvt->dev_priv);
|
||||
ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
|
||||
return 0;
|
||||
}
|
||||
@ -2301,8 +2306,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
|
||||
u32 num_entries;
|
||||
struct intel_gvt_gtt_entry e;
|
||||
|
||||
intel_runtime_pm_get(dev_priv);
|
||||
|
||||
memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
|
||||
e.type = GTT_TYPE_GGTT_PTE;
|
||||
ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
|
||||
@ -2318,7 +2321,7 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
|
||||
for (offset = 0; offset < num_entries; offset++)
|
||||
ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
|
||||
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
gtt_invalidate(dev_priv);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -147,7 +147,9 @@ static int gvt_service_thread(void *data)
|
||||
mutex_unlock(&gvt->lock);
|
||||
}
|
||||
|
||||
if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
|
||||
if (test_bit(INTEL_GVT_REQUEST_SCHED,
|
||||
(void *)&gvt->service_request) ||
|
||||
test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
|
||||
(void *)&gvt->service_request)) {
|
||||
intel_gvt_schedule(gvt);
|
||||
}
|
||||
@ -244,7 +246,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
|
||||
gvt_dbg_core("init gvt device\n");
|
||||
|
||||
idr_init(&gvt->vgpu_idr);
|
||||
|
||||
spin_lock_init(&gvt->scheduler.mmio_context_lock);
|
||||
mutex_init(&gvt->lock);
|
||||
gvt->dev_priv = dev_priv;
|
||||
|
||||
|
@ -165,7 +165,6 @@ struct intel_vgpu {
|
||||
struct list_head workload_q_head[I915_NUM_ENGINES];
|
||||
struct kmem_cache *workloads;
|
||||
atomic_t running_workload_num;
|
||||
ktime_t last_ctx_submit_time;
|
||||
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
|
||||
struct i915_gem_context *shadow_ctx;
|
||||
|
||||
@ -196,11 +195,27 @@ struct intel_gvt_fence {
|
||||
unsigned long vgpu_allocated_fence_num;
|
||||
};
|
||||
|
||||
#define INTEL_GVT_MMIO_HASH_BITS 9
|
||||
#define INTEL_GVT_MMIO_HASH_BITS 11
|
||||
|
||||
struct intel_gvt_mmio {
|
||||
u32 *mmio_attribute;
|
||||
u8 *mmio_attribute;
|
||||
/* Register contains RO bits */
|
||||
#define F_RO (1 << 0)
|
||||
/* Register contains graphics address */
|
||||
#define F_GMADR (1 << 1)
|
||||
/* Mode mask registers with high 16 bits as the mask bits */
|
||||
#define F_MODE_MASK (1 << 2)
|
||||
/* This reg can be accessed by GPU commands */
|
||||
#define F_CMD_ACCESS (1 << 3)
|
||||
/* This reg has been accessed by a VM */
|
||||
#define F_ACCESSED (1 << 4)
|
||||
/* This reg has been accessed through GPU commands */
|
||||
#define F_CMD_ACCESSED (1 << 5)
|
||||
/* This reg could be accessed by unaligned address */
|
||||
#define F_UNALIGN (1 << 6)
|
||||
|
||||
DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
|
||||
unsigned int num_tracked_mmio;
|
||||
};
|
||||
|
||||
struct intel_gvt_firmware {
|
||||
@ -257,7 +272,12 @@ static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
|
||||
|
||||
enum {
|
||||
INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
|
||||
|
||||
/* Scheduling trigger by timer */
|
||||
INTEL_GVT_REQUEST_SCHED = 1,
|
||||
|
||||
/* Scheduling trigger by event */
|
||||
INTEL_GVT_REQUEST_EVENT_SCHED = 2,
|
||||
};
|
||||
|
||||
static inline void intel_gvt_request_service(struct intel_gvt *gvt,
|
||||
@ -473,6 +493,80 @@ enum {
|
||||
GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
|
||||
};
|
||||
|
||||
static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
intel_runtime_pm_get(dev_priv);
|
||||
}
|
||||
|
||||
static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_mmio_set_accessed - mark a MMIO has been accessed
|
||||
* @gvt: a GVT device
|
||||
* @offset: register offset
|
||||
*
|
||||
*/
|
||||
static inline void intel_gvt_mmio_set_accessed(
|
||||
struct intel_gvt *gvt, unsigned int offset)
|
||||
{
|
||||
gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_mmio_is_cmd_accessed - mark a MMIO could be accessed by command
|
||||
* @gvt: a GVT device
|
||||
* @offset: register offset
|
||||
*
|
||||
*/
|
||||
static inline bool intel_gvt_mmio_is_cmd_access(
|
||||
struct intel_gvt *gvt, unsigned int offset)
|
||||
{
|
||||
return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_mmio_is_unalign - mark a MMIO could be accessed unaligned
|
||||
* @gvt: a GVT device
|
||||
* @offset: register offset
|
||||
*
|
||||
*/
|
||||
static inline bool intel_gvt_mmio_is_unalign(
|
||||
struct intel_gvt *gvt, unsigned int offset)
|
||||
{
|
||||
return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_mmio_set_cmd_accessed - mark a MMIO has been accessed by command
|
||||
* @gvt: a GVT device
|
||||
* @offset: register offset
|
||||
*
|
||||
*/
|
||||
static inline void intel_gvt_mmio_set_cmd_accessed(
|
||||
struct intel_gvt *gvt, unsigned int offset)
|
||||
{
|
||||
gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_mmio_has_mode_mask - if a MMIO has a mode mask
|
||||
* @gvt: a GVT device
|
||||
* @offset: register offset
|
||||
*
|
||||
* Returns:
|
||||
* True if a MMIO has a mode mask in its higher 16 bits, false if it isn't.
|
||||
*
|
||||
*/
|
||||
static inline bool intel_gvt_mmio_has_mode_mask(
|
||||
struct intel_gvt *gvt, unsigned int offset)
|
||||
{
|
||||
return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
|
||||
}
|
||||
|
||||
#include "trace.h"
|
||||
#include "mpt.h"
|
||||
|
||||
#endif
|
||||
|
@ -47,21 +47,6 @@
|
||||
#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
|
||||
#define PCH_PP_DIVISOR _MMIO(0xc7210)
|
||||
|
||||
/* Register contains RO bits */
|
||||
#define F_RO (1 << 0)
|
||||
/* Register contains graphics address */
|
||||
#define F_GMADR (1 << 1)
|
||||
/* Mode mask registers with high 16 bits as the mask bits */
|
||||
#define F_MODE_MASK (1 << 2)
|
||||
/* This reg can be accessed by GPU commands */
|
||||
#define F_CMD_ACCESS (1 << 3)
|
||||
/* This reg has been accessed by a VM */
|
||||
#define F_ACCESSED (1 << 4)
|
||||
/* This reg has been accessed through GPU commands */
|
||||
#define F_CMD_ACCESSED (1 << 5)
|
||||
/* This reg could be accessed by unaligned address */
|
||||
#define F_UNALIGN (1 << 6)
|
||||
|
||||
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
|
||||
{
|
||||
if (IS_BROADWELL(gvt->dev_priv))
|
||||
@ -92,11 +77,22 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
|
||||
}
|
||||
|
||||
static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
|
||||
unsigned int offset)
|
||||
{
|
||||
struct intel_gvt_mmio_info *e;
|
||||
|
||||
hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
|
||||
if (e->offset == offset)
|
||||
return e;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int new_mmio_info(struct intel_gvt *gvt,
|
||||
u32 offset, u32 flags, u32 size,
|
||||
u32 offset, u8 flags, u32 size,
|
||||
u32 addr_mask, u32 ro_mask, u32 device,
|
||||
int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int),
|
||||
int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int))
|
||||
gvt_mmio_func read, gvt_mmio_func write)
|
||||
{
|
||||
struct intel_gvt_mmio_info *info, *p;
|
||||
u32 start, end, i;
|
||||
@ -116,13 +112,11 @@ static int new_mmio_info(struct intel_gvt *gvt,
|
||||
return -ENOMEM;
|
||||
|
||||
info->offset = i;
|
||||
p = intel_gvt_find_mmio_info(gvt, info->offset);
|
||||
p = find_mmio_info(gvt, info->offset);
|
||||
if (p)
|
||||
gvt_err("dup mmio definition offset %x\n",
|
||||
info->offset);
|
||||
info->size = size;
|
||||
info->length = (i + 4) < end ? 4 : (end - i);
|
||||
info->addr_mask = addr_mask;
|
||||
|
||||
info->ro_mask = ro_mask;
|
||||
info->device = device;
|
||||
info->read = read ? read : intel_vgpu_default_mmio_read;
|
||||
@ -130,6 +124,7 @@ static int new_mmio_info(struct intel_gvt *gvt,
|
||||
gvt->mmio.mmio_attribute[info->offset / 4] = flags;
|
||||
INIT_HLIST_NODE(&info->node);
|
||||
hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
|
||||
gvt->mmio.num_tracked_mmio++;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@ -209,6 +204,7 @@ static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
|
||||
static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
|
||||
void *p_data, unsigned int bytes)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
unsigned int fence_num = offset_to_fence_num(off);
|
||||
int ret;
|
||||
|
||||
@ -217,8 +213,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
|
||||
return ret;
|
||||
write_vreg(vgpu, off, p_data, bytes);
|
||||
|
||||
mmio_hw_access_pre(dev_priv);
|
||||
intel_vgpu_write_fence(vgpu, fence_num,
|
||||
vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
|
||||
mmio_hw_access_post(dev_priv);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -300,6 +298,9 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
|
||||
intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
|
||||
|
||||
/* sw will wait for the device to ack the reset request */
|
||||
vgpu_vreg(vgpu, offset) = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1265,7 +1266,10 @@ static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
}
|
||||
write_vreg(vgpu, offset, p_data, bytes);
|
||||
/* TRTTE is not per-context */
|
||||
|
||||
mmio_hw_access_pre(dev_priv);
|
||||
I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
|
||||
mmio_hw_access_post(dev_priv);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1278,7 +1282,9 @@ static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
|
||||
if (val & 1) {
|
||||
/* unblock hw logic */
|
||||
mmio_hw_access_pre(dev_priv);
|
||||
I915_WRITE(_MMIO(offset), val);
|
||||
mmio_hw_access_post(dev_priv);
|
||||
}
|
||||
write_vreg(vgpu, offset, p_data, bytes);
|
||||
return 0;
|
||||
@ -1415,7 +1421,20 @@ static int ring_timestamp_mmio_read(struct intel_vgpu *vgpu,
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
|
||||
mmio_hw_access_pre(dev_priv);
|
||||
vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
|
||||
mmio_hw_access_post(dev_priv);
|
||||
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
|
||||
}
|
||||
|
||||
static int instdone_mmio_read(struct intel_vgpu *vgpu,
|
||||
unsigned int offset, void *p_data, unsigned int bytes)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
|
||||
mmio_hw_access_pre(dev_priv);
|
||||
vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
|
||||
mmio_hw_access_post(dev_priv);
|
||||
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
|
||||
}
|
||||
|
||||
@ -1434,7 +1453,6 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
|
||||
execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
|
||||
if (execlist->elsp_dwords.index == 3) {
|
||||
vgpu->last_ctx_submit_time = ktime_get();
|
||||
ret = intel_vgpu_submit_execlist(vgpu, ring_id);
|
||||
if(ret)
|
||||
gvt_vgpu_err("fail submit workload on ring %d\n",
|
||||
@ -1603,6 +1621,12 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
|
||||
MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
|
||||
#undef RING_REG
|
||||
|
||||
#define RING_REG(base) (base + 0x6c)
|
||||
MMIO_RING_DFH(RING_REG, D_ALL, 0, instdone_mmio_read, NULL);
|
||||
MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_ALL, instdone_mmio_read, NULL);
|
||||
#undef RING_REG
|
||||
MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, instdone_mmio_read, NULL);
|
||||
|
||||
MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
|
||||
MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
|
||||
MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL);
|
||||
@ -1779,10 +1803,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
|
||||
MMIO_D(SPRSCALE(PIPE_C), D_ALL);
|
||||
MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
|
||||
|
||||
MMIO_F(LGC_PALETTE(PIPE_A, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
|
||||
MMIO_F(LGC_PALETTE(PIPE_B, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
|
||||
MMIO_F(LGC_PALETTE(PIPE_C, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
|
||||
|
||||
MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
|
||||
MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
|
||||
MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
|
||||
@ -2187,7 +2207,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
|
||||
MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
|
||||
MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
|
||||
MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
|
||||
MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL);
|
||||
MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL);
|
||||
MMIO_D(ECOBUS, D_ALL);
|
||||
MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
|
||||
MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
|
||||
@ -2219,22 +2239,19 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
|
||||
MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
|
||||
MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
|
||||
MMIO_D(GEN6_PMINTRMSK, D_ALL);
|
||||
MMIO_DH(HSW_PWR_WELL_BIOS, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
|
||||
MMIO_DH(HSW_PWR_WELL_DRIVER, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
|
||||
MMIO_DH(HSW_PWR_WELL_KVMR, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
|
||||
MMIO_DH(HSW_PWR_WELL_DEBUG, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
|
||||
MMIO_DH(HSW_PWR_WELL_CTL5, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
|
||||
MMIO_DH(HSW_PWR_WELL_CTL6, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
|
||||
MMIO_DH(HSW_PWR_WELL_BIOS, D_BDW, NULL, power_well_ctl_mmio_write);
|
||||
MMIO_DH(HSW_PWR_WELL_DRIVER, D_BDW, NULL, power_well_ctl_mmio_write);
|
||||
MMIO_DH(HSW_PWR_WELL_KVMR, D_BDW, NULL, power_well_ctl_mmio_write);
|
||||
MMIO_DH(HSW_PWR_WELL_DEBUG, D_BDW, NULL, power_well_ctl_mmio_write);
|
||||
MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
|
||||
MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
|
||||
|
||||
MMIO_D(RSTDBYCTL, D_ALL);
|
||||
|
||||
MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
|
||||
MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
|
||||
MMIO_F(VGT_PVINFO_PAGE, VGT_PVINFO_SIZE, F_UNALIGN, 0, 0, D_ALL, pvinfo_mmio_read, pvinfo_mmio_write);
|
||||
MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
|
||||
|
||||
MMIO_F(MCHBAR_MIRROR_BASE_SNB, 0x40000, 0, 0, 0, D_ALL, NULL, NULL);
|
||||
|
||||
MMIO_D(TILECTL, D_ALL);
|
||||
|
||||
MMIO_D(GEN6_UCGCTL1, D_ALL);
|
||||
@ -2242,7 +2259,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
|
||||
|
||||
MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
|
||||
|
||||
MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_BDW);
|
||||
MMIO_D(GEN6_PCODE_DATA, D_ALL);
|
||||
MMIO_D(0x13812c, D_ALL);
|
||||
MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
|
||||
@ -2321,14 +2337,13 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
|
||||
MMIO_D(0x1a054, D_ALL);
|
||||
|
||||
MMIO_D(0x44070, D_ALL);
|
||||
MMIO_DFH(0x215c, D_HSW_PLUS, F_CMD_ACCESS, NULL, NULL);
|
||||
MMIO_DFH(0x215c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
|
||||
MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
|
||||
MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
|
||||
MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
|
||||
MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
|
||||
|
||||
MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_HSW_PLUS, NULL, NULL);
|
||||
MMIO_DFH(GEN7_OACONTROL, D_HSW, F_CMD_ACCESS, NULL, NULL);
|
||||
MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
|
||||
MMIO_D(0x2b00, D_BDW_PLUS);
|
||||
MMIO_D(0x2360, D_BDW_PLUS);
|
||||
MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
|
||||
@ -2766,7 +2781,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
|
||||
MMIO_D(0x72380, D_SKL_PLUS);
|
||||
MMIO_D(0x7039c, D_SKL_PLUS);
|
||||
|
||||
MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
|
||||
MMIO_D(0x8f074, D_SKL | D_KBL);
|
||||
MMIO_D(0x8f004, D_SKL | D_KBL);
|
||||
MMIO_D(0x8f034, D_SKL | D_KBL);
|
||||
@ -2840,26 +2854,36 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_find_mmio_info - find MMIO information entry by aligned offset
|
||||
* @gvt: GVT device
|
||||
* @offset: register offset
|
||||
*
|
||||
* This function is used to find the MMIO information entry from hash table
|
||||
*
|
||||
* Returns:
|
||||
* pointer to MMIO information entry, NULL if not exists
|
||||
*/
|
||||
struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
|
||||
unsigned int offset)
|
||||
/* Special MMIO blocks. */
|
||||
static struct gvt_mmio_block {
|
||||
unsigned int device;
|
||||
i915_reg_t offset;
|
||||
unsigned int size;
|
||||
gvt_mmio_func read;
|
||||
gvt_mmio_func write;
|
||||
} gvt_mmio_blocks[] = {
|
||||
{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
|
||||
{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
|
||||
{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
|
||||
pvinfo_mmio_read, pvinfo_mmio_write},
|
||||
{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
|
||||
{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
|
||||
{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
|
||||
};
|
||||
|
||||
static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
|
||||
unsigned int offset)
|
||||
{
|
||||
struct intel_gvt_mmio_info *e;
|
||||
unsigned long device = intel_gvt_get_device_type(gvt);
|
||||
struct gvt_mmio_block *block = gvt_mmio_blocks;
|
||||
int i;
|
||||
|
||||
WARN_ON(!IS_ALIGNED(offset, 4));
|
||||
|
||||
hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
|
||||
if (e->offset == offset)
|
||||
return e;
|
||||
for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
|
||||
if (!(device & block->device))
|
||||
continue;
|
||||
if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
|
||||
offset < INTEL_GVT_MMIO_OFFSET(block->offset) + block->size)
|
||||
return block;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
@ -2899,9 +2923,10 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
|
||||
{
|
||||
struct intel_gvt_device_info *info = &gvt->device_info;
|
||||
struct drm_i915_private *dev_priv = gvt->dev_priv;
|
||||
int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
|
||||
int ret;
|
||||
|
||||
gvt->mmio.mmio_attribute = vzalloc(info->mmio_size);
|
||||
gvt->mmio.mmio_attribute = vzalloc(size);
|
||||
if (!gvt->mmio.mmio_attribute)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -2922,77 +2947,15 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
|
||||
if (ret)
|
||||
goto err;
|
||||
}
|
||||
|
||||
gvt_dbg_mmio("traced %u virtual mmio registers\n",
|
||||
gvt->mmio.num_tracked_mmio);
|
||||
return 0;
|
||||
err:
|
||||
intel_gvt_clean_mmio_info(gvt);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_mmio_set_accessed - mark a MMIO has been accessed
|
||||
* @gvt: a GVT device
|
||||
* @offset: register offset
|
||||
*
|
||||
*/
|
||||
void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset)
|
||||
{
|
||||
gvt->mmio.mmio_attribute[offset >> 2] |=
|
||||
F_ACCESSED;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_mmio_is_cmd_accessed - mark a MMIO could be accessed by command
|
||||
* @gvt: a GVT device
|
||||
* @offset: register offset
|
||||
*
|
||||
*/
|
||||
bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
|
||||
unsigned int offset)
|
||||
{
|
||||
return gvt->mmio.mmio_attribute[offset >> 2] &
|
||||
F_CMD_ACCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_mmio_is_unalign - mark a MMIO could be accessed unaligned
|
||||
* @gvt: a GVT device
|
||||
* @offset: register offset
|
||||
*
|
||||
*/
|
||||
bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt,
|
||||
unsigned int offset)
|
||||
{
|
||||
return gvt->mmio.mmio_attribute[offset >> 2] &
|
||||
F_UNALIGN;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_mmio_set_cmd_accessed - mark a MMIO has been accessed by command
|
||||
* @gvt: a GVT device
|
||||
* @offset: register offset
|
||||
*
|
||||
*/
|
||||
void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
|
||||
unsigned int offset)
|
||||
{
|
||||
gvt->mmio.mmio_attribute[offset >> 2] |=
|
||||
F_CMD_ACCESSED;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_mmio_has_mode_mask - if a MMIO has a mode mask
|
||||
* @gvt: a GVT device
|
||||
* @offset: register offset
|
||||
*
|
||||
* Returns:
|
||||
* True if a MMIO has a mode mask in its higher 16 bits, false if it isn't.
|
||||
*
|
||||
*/
|
||||
bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset)
|
||||
{
|
||||
return gvt->mmio.mmio_attribute[offset >> 2] &
|
||||
F_MODE_MASK;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_vgpu_default_mmio_read - default MMIO read handler
|
||||
@ -3044,3 +3007,91 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
|
||||
{
|
||||
return in_whitelist(offset);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_vgpu_mmio_reg_rw - emulate tracked mmio registers
|
||||
* @vgpu: a vGPU
|
||||
* @offset: register offset
|
||||
* @pdata: data buffer
|
||||
* @bytes: data length
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*/
|
||||
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
void *pdata, unsigned int bytes, bool is_read)
|
||||
{
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
struct intel_gvt_mmio_info *mmio_info;
|
||||
struct gvt_mmio_block *mmio_block;
|
||||
gvt_mmio_func func;
|
||||
int ret;
|
||||
|
||||
if (WARN_ON(bytes > 4))
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Handle special MMIO blocks.
|
||||
*/
|
||||
mmio_block = find_mmio_block(gvt, offset);
|
||||
if (mmio_block) {
|
||||
func = is_read ? mmio_block->read : mmio_block->write;
|
||||
if (func)
|
||||
return func(vgpu, offset, pdata, bytes);
|
||||
goto default_rw;
|
||||
}
|
||||
|
||||
/*
|
||||
* Normal tracked MMIOs.
|
||||
*/
|
||||
mmio_info = find_mmio_info(gvt, offset);
|
||||
if (!mmio_info) {
|
||||
if (!vgpu->mmio.disable_warn_untrack)
|
||||
gvt_vgpu_err("untracked MMIO %08x len %d\n",
|
||||
offset, bytes);
|
||||
goto default_rw;
|
||||
}
|
||||
|
||||
if (is_read)
|
||||
return mmio_info->read(vgpu, offset, pdata, bytes);
|
||||
else {
|
||||
u64 ro_mask = mmio_info->ro_mask;
|
||||
u32 old_vreg = 0, old_sreg = 0;
|
||||
u64 data = 0;
|
||||
|
||||
if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
|
||||
old_vreg = vgpu_vreg(vgpu, offset);
|
||||
old_sreg = vgpu_sreg(vgpu, offset);
|
||||
}
|
||||
|
||||
if (likely(!ro_mask))
|
||||
ret = mmio_info->write(vgpu, offset, pdata, bytes);
|
||||
else if (!~ro_mask) {
|
||||
gvt_vgpu_err("try to write RO reg %x\n", offset);
|
||||
return 0;
|
||||
} else {
|
||||
/* keep the RO bits in the virtual register */
|
||||
memcpy(&data, pdata, bytes);
|
||||
data &= ~ro_mask;
|
||||
data |= vgpu_vreg(vgpu, offset) & ro_mask;
|
||||
ret = mmio_info->write(vgpu, offset, &data, bytes);
|
||||
}
|
||||
|
||||
/* higher 16bits of mode ctl regs are mask bits for change */
|
||||
if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
|
||||
u32 mask = vgpu_vreg(vgpu, offset) >> 16;
|
||||
|
||||
vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
|
||||
| (vgpu_vreg(vgpu, offset) & mask);
|
||||
vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
|
||||
| (vgpu_sreg(vgpu, offset) & mask);
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
||||
default_rw:
|
||||
return is_read ?
|
||||
intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
|
||||
intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
|
||||
}
|
||||
|
@ -31,6 +31,7 @@
|
||||
|
||||
#include "i915_drv.h"
|
||||
#include "gvt.h"
|
||||
#include "trace.h"
|
||||
|
||||
/* common offset among interrupt control registers */
|
||||
#define regbase_to_isr(base) (base)
|
||||
@ -178,8 +179,8 @@ int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
|
||||
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
|
||||
u32 imr = *(u32 *)p_data;
|
||||
|
||||
gvt_dbg_irq("write IMR %x, new %08x, old %08x, changed %08x\n",
|
||||
reg, imr, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ imr);
|
||||
trace_write_ir(vgpu->id, "IMR", reg, imr, vgpu_vreg(vgpu, reg),
|
||||
(vgpu_vreg(vgpu, reg) ^ imr));
|
||||
|
||||
vgpu_vreg(vgpu, reg) = imr;
|
||||
|
||||
@ -209,8 +210,8 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
|
||||
u32 ier = *(u32 *)p_data;
|
||||
u32 virtual_ier = vgpu_vreg(vgpu, reg);
|
||||
|
||||
gvt_dbg_irq("write MASTER_IRQ %x, new %08x, old %08x, changed %08x\n",
|
||||
reg, ier, virtual_ier, virtual_ier ^ ier);
|
||||
trace_write_ir(vgpu->id, "MASTER_IRQ", reg, ier, virtual_ier,
|
||||
(virtual_ier ^ ier));
|
||||
|
||||
/*
|
||||
* GEN8_MASTER_IRQ is a special irq register,
|
||||
@ -248,8 +249,8 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
|
||||
struct intel_gvt_irq_info *info;
|
||||
u32 ier = *(u32 *)p_data;
|
||||
|
||||
gvt_dbg_irq("write IER %x, new %08x, old %08x, changed %08x\n",
|
||||
reg, ier, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ ier);
|
||||
trace_write_ir(vgpu->id, "IER", reg, ier, vgpu_vreg(vgpu, reg),
|
||||
(vgpu_vreg(vgpu, reg) ^ ier));
|
||||
|
||||
vgpu_vreg(vgpu, reg) = ier;
|
||||
|
||||
@ -285,8 +286,8 @@ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
|
||||
iir_to_regbase(reg));
|
||||
u32 iir = *(u32 *)p_data;
|
||||
|
||||
gvt_dbg_irq("write IIR %x, new %08x, old %08x, changed %08x\n",
|
||||
reg, iir, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ iir);
|
||||
trace_write_ir(vgpu->id, "IIR", reg, iir, vgpu_vreg(vgpu, reg),
|
||||
(vgpu_vreg(vgpu, reg) ^ iir));
|
||||
|
||||
if (WARN_ON(!info))
|
||||
return -EINVAL;
|
||||
@ -411,8 +412,7 @@ static void propagate_event(struct intel_gvt_irq *irq,
|
||||
|
||||
if (!test_bit(bit, (void *)&vgpu_vreg(vgpu,
|
||||
regbase_to_imr(reg_base)))) {
|
||||
gvt_dbg_irq("set bit (%d) for (%s) for vgpu (%d)\n",
|
||||
bit, irq_name[event], vgpu->id);
|
||||
trace_propagate_event(vgpu->id, irq_name[event], bit);
|
||||
set_bit(bit, (void *)&vgpu_vreg(vgpu,
|
||||
regbase_to_iir(reg_base)));
|
||||
}
|
||||
|
@ -123,7 +123,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
|
||||
void *p_data, unsigned int bytes)
|
||||
{
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
struct intel_gvt_mmio_info *mmio;
|
||||
unsigned int offset = 0;
|
||||
int ret = -EINVAL;
|
||||
|
||||
@ -187,32 +186,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
|
||||
goto err;
|
||||
}
|
||||
|
||||
mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
|
||||
if (mmio) {
|
||||
if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
|
||||
if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
|
||||
goto err;
|
||||
if (WARN_ON(mmio->offset != offset))
|
||||
goto err;
|
||||
}
|
||||
ret = mmio->read(vgpu, offset, p_data, bytes);
|
||||
} else {
|
||||
ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
|
||||
|
||||
if (!vgpu->mmio.disable_warn_untrack) {
|
||||
gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n",
|
||||
offset, bytes, *(u32 *)p_data);
|
||||
|
||||
if (offset == 0x206c) {
|
||||
gvt_vgpu_err("------------------------------------------\n");
|
||||
gvt_vgpu_err("likely triggers a gfx reset\n");
|
||||
gvt_vgpu_err("------------------------------------------\n");
|
||||
vgpu->mmio.disable_warn_untrack = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (ret)
|
||||
ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, true);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
|
||||
intel_gvt_mmio_set_accessed(gvt, offset);
|
||||
@ -239,9 +214,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
|
||||
void *p_data, unsigned int bytes)
|
||||
{
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
struct intel_gvt_mmio_info *mmio;
|
||||
unsigned int offset = 0;
|
||||
u32 old_vreg = 0, old_sreg = 0;
|
||||
int ret = -EINVAL;
|
||||
|
||||
if (vgpu->failsafe) {
|
||||
@ -296,66 +269,10 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
|
||||
return ret;
|
||||
}
|
||||
|
||||
mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
|
||||
if (!mmio && !vgpu->mmio.disable_warn_untrack)
|
||||
gvt_dbg_mmio("vgpu%d: write untracked MMIO %x len %d val %x\n",
|
||||
vgpu->id, offset, bytes, *(u32 *)p_data);
|
||||
|
||||
if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
|
||||
if (WARN_ON(!IS_ALIGNED(offset, bytes)))
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (mmio) {
|
||||
u64 ro_mask = mmio->ro_mask;
|
||||
|
||||
if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
|
||||
if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
|
||||
goto err;
|
||||
if (WARN_ON(mmio->offset != offset))
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
|
||||
old_vreg = vgpu_vreg(vgpu, offset);
|
||||
old_sreg = vgpu_sreg(vgpu, offset);
|
||||
}
|
||||
|
||||
if (!ro_mask) {
|
||||
ret = mmio->write(vgpu, offset, p_data, bytes);
|
||||
} else {
|
||||
/* Protect RO bits like HW */
|
||||
u64 data = 0;
|
||||
|
||||
/* all register bits are RO. */
|
||||
if (ro_mask == ~(u64)0) {
|
||||
gvt_vgpu_err("try to write RO reg %x\n",
|
||||
offset);
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
/* keep the RO bits in the virtual register */
|
||||
memcpy(&data, p_data, bytes);
|
||||
data &= ~mmio->ro_mask;
|
||||
data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
|
||||
ret = mmio->write(vgpu, offset, &data, bytes);
|
||||
}
|
||||
|
||||
/* higher 16bits of mode ctl regs are mask bits for change */
|
||||
if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
|
||||
u32 mask = vgpu_vreg(vgpu, offset) >> 16;
|
||||
|
||||
vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
|
||||
| (vgpu_vreg(vgpu, offset) & mask);
|
||||
vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
|
||||
| (vgpu_sreg(vgpu, offset) & mask);
|
||||
}
|
||||
} else
|
||||
ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
|
||||
bytes);
|
||||
if (ret)
|
||||
ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, false);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
out:
|
||||
|
||||
intel_gvt_mmio_set_accessed(gvt, offset);
|
||||
mutex_unlock(&gvt->lock);
|
||||
return 0;
|
||||
@ -372,20 +289,32 @@ err:
|
||||
* @vgpu: a vGPU
|
||||
*
|
||||
*/
|
||||
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
|
||||
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
|
||||
{
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
const struct intel_gvt_device_info *info = &gvt->device_info;
|
||||
void *mmio = gvt->firmware.mmio;
|
||||
|
||||
memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
|
||||
memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
|
||||
if (dmlr) {
|
||||
memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
|
||||
memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);
|
||||
|
||||
vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
|
||||
vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
|
||||
|
||||
/* set the bit 0:2(Core C-State ) to C0 */
|
||||
vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
|
||||
/* set the bit 0:2(Core C-State ) to C0 */
|
||||
vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
|
||||
|
||||
vgpu->mmio.disable_warn_untrack = false;
|
||||
} else {
|
||||
#define GVT_GEN8_MMIO_RESET_OFFSET (0x44200)
|
||||
/* only reset the engine related, so starting with 0x44200
|
||||
* interrupt include DE,display mmio related will not be
|
||||
* touched
|
||||
*/
|
||||
memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
|
||||
memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
|
||||
}
|
||||
|
||||
vgpu->mmio.disable_warn_untrack = false;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -405,7 +334,7 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
|
||||
|
||||
vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
|
||||
|
||||
intel_vgpu_reset_mmio(vgpu);
|
||||
intel_vgpu_reset_mmio(vgpu, true);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -39,36 +39,28 @@
|
||||
struct intel_gvt;
|
||||
struct intel_vgpu;
|
||||
|
||||
#define D_SNB (1 << 0)
|
||||
#define D_IVB (1 << 1)
|
||||
#define D_HSW (1 << 2)
|
||||
#define D_BDW (1 << 3)
|
||||
#define D_SKL (1 << 4)
|
||||
#define D_KBL (1 << 5)
|
||||
#define D_BDW (1 << 0)
|
||||
#define D_SKL (1 << 1)
|
||||
#define D_KBL (1 << 2)
|
||||
|
||||
#define D_GEN9PLUS (D_SKL | D_KBL)
|
||||
#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL)
|
||||
#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL | D_KBL)
|
||||
#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
|
||||
|
||||
#define D_SKL_PLUS (D_SKL | D_KBL)
|
||||
#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL)
|
||||
#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL | D_KBL)
|
||||
#define D_IVB_PLUS (D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
|
||||
|
||||
#define D_PRE_BDW (D_SNB | D_IVB | D_HSW)
|
||||
#define D_PRE_SKL (D_SNB | D_IVB | D_HSW | D_BDW)
|
||||
#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
|
||||
#define D_PRE_SKL (D_BDW)
|
||||
#define D_ALL (D_BDW | D_SKL | D_KBL)
|
||||
|
||||
typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *,
|
||||
unsigned int);
|
||||
|
||||
struct intel_gvt_mmio_info {
|
||||
u32 offset;
|
||||
u32 size;
|
||||
u32 length;
|
||||
u32 addr_mask;
|
||||
u64 ro_mask;
|
||||
u32 device;
|
||||
int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int);
|
||||
int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int);
|
||||
gvt_mmio_func read;
|
||||
gvt_mmio_func write;
|
||||
u32 addr_range;
|
||||
struct hlist_node node;
|
||||
};
|
||||
@ -79,8 +71,6 @@ bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
|
||||
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
|
||||
void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
|
||||
|
||||
struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
|
||||
unsigned int offset);
|
||||
#define INTEL_GVT_MMIO_OFFSET(reg) ({ \
|
||||
typeof(reg) __reg = reg; \
|
||||
u32 *offset = (u32 *)&__reg; \
|
||||
@ -88,7 +78,7 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
|
||||
})
|
||||
|
||||
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
|
||||
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu);
|
||||
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr);
|
||||
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
|
||||
|
||||
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
|
||||
@ -97,13 +87,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
|
||||
void *p_data, unsigned int bytes);
|
||||
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
|
||||
void *p_data, unsigned int bytes);
|
||||
bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
|
||||
unsigned int offset);
|
||||
bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt, unsigned int offset);
|
||||
void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset);
|
||||
void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
|
||||
unsigned int offset);
|
||||
bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset);
|
||||
|
||||
int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
void *p_data, unsigned int bytes);
|
||||
int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
@ -111,4 +95,8 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
|
||||
bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
|
||||
unsigned int offset);
|
||||
|
||||
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
void *pdata, unsigned int bytes, bool is_read);
|
||||
|
||||
#endif
|
||||
|
@ -133,8 +133,7 @@ static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
|
||||
if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
|
||||
return -EINVAL;
|
||||
|
||||
gvt_dbg_irq("vgpu%d: inject msi address %x data%x\n", vgpu->id, addr,
|
||||
data);
|
||||
trace_inject_msi(vgpu->id, addr, data);
|
||||
|
||||
ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
|
||||
if (ret)
|
||||
|
@ -35,6 +35,7 @@
|
||||
|
||||
#include "i915_drv.h"
|
||||
#include "gvt.h"
|
||||
#include "trace.h"
|
||||
|
||||
struct render_mmio {
|
||||
int ring_id;
|
||||
@ -260,7 +261,8 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
|
||||
|
||||
#define CTX_CONTEXT_CONTROL_VAL 0x03
|
||||
|
||||
void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
|
||||
/* Switch ring mmio values (context) from host to a vgpu. */
|
||||
static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
struct render_mmio *mmio;
|
||||
@ -305,14 +307,15 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
|
||||
I915_WRITE(mmio->reg, v);
|
||||
POSTING_READ(mmio->reg);
|
||||
|
||||
gvt_dbg_render("load reg %x old %x new %x\n",
|
||||
i915_mmio_reg_offset(mmio->reg),
|
||||
mmio->value, v);
|
||||
trace_render_mmio(vgpu->id, "load",
|
||||
i915_mmio_reg_offset(mmio->reg),
|
||||
mmio->value, v);
|
||||
}
|
||||
handle_tlb_pending_event(vgpu, ring_id);
|
||||
}
|
||||
|
||||
void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
|
||||
/* Switch ring mmio values (context) from vgpu to host. */
|
||||
static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
struct render_mmio *mmio;
|
||||
@ -346,8 +349,37 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
|
||||
I915_WRITE(mmio->reg, v);
|
||||
POSTING_READ(mmio->reg);
|
||||
|
||||
gvt_dbg_render("restore reg %x old %x new %x\n",
|
||||
i915_mmio_reg_offset(mmio->reg),
|
||||
mmio->value, v);
|
||||
trace_render_mmio(vgpu->id, "restore",
|
||||
i915_mmio_reg_offset(mmio->reg),
|
||||
mmio->value, v);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_switch_render_mmio - switch mmio context of specific engine
|
||||
* @pre: the last vGPU that own the engine
|
||||
* @next: the vGPU to switch to
|
||||
* @ring_id: specify the engine
|
||||
*
|
||||
* If pre is null indicates that host own the engine. If next is null
|
||||
* indicates that we are switching to host workload.
|
||||
*/
|
||||
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
|
||||
struct intel_vgpu *next, int ring_id)
|
||||
{
|
||||
if (WARN_ON(!pre && !next))
|
||||
return;
|
||||
|
||||
gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
|
||||
pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
|
||||
|
||||
/**
|
||||
* TODO: Optimize for vGPU to vGPU switch by merging
|
||||
* switch_mmio_to_host() and switch_mmio_to_vgpu().
|
||||
*/
|
||||
if (pre)
|
||||
switch_mmio_to_host(pre, ring_id);
|
||||
|
||||
if (next)
|
||||
switch_mmio_to_vgpu(next, ring_id);
|
||||
}
|
||||
|
@ -36,8 +36,8 @@
|
||||
#ifndef __GVT_RENDER_H__
|
||||
#define __GVT_RENDER_H__
|
||||
|
||||
void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id);
|
||||
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
|
||||
struct intel_vgpu *next, int ring_id);
|
||||
|
||||
void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id);
|
||||
|
||||
#endif
|
||||
|
@ -202,11 +202,6 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct vgpu_sched_data *vgpu_data;
struct intel_vgpu *vgpu = NULL;
static uint64_t timer_check;

if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
gvt_balance_timeslice(sched_data);

/* no active vgpu or has already had a target */
if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
goto out;
@ -231,9 +226,19 @@ out:
void intel_gvt_schedule(struct intel_gvt *gvt)
{
struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
static uint64_t timer_check;

mutex_lock(&gvt->lock);

if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
(void *)&gvt->service_request)) {
if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
gvt_balance_timeslice(sched_data);
}
clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);

tbs_sched_func(sched_data);

mutex_unlock(&gvt->lock);
}

@ -303,8 +308,20 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)

static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
int ring_id;

kfree(vgpu->sched_data);
vgpu->sched_data = NULL;

spin_lock_bh(&scheduler->mmio_context_lock);
for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
if (scheduler->engine_owner[ring_id] == vgpu) {
intel_gvt_switch_mmio(vgpu, NULL, ring_id);
scheduler->engine_owner[ring_id] = NULL;
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
}

static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
@ -138,21 +138,42 @@ static int shadow_context_status_change(struct notifier_block *nb,
struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
shadow_ctx_notifier_block[req->engine->id]);
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload =
scheduler->current_workload[req->engine->id];
enum intel_engine_id ring_id = req->engine->id;
struct intel_vgpu_workload *workload;

if (!is_gvt_request(req) || unlikely(!workload))
if (!is_gvt_request(req)) {
spin_lock_bh(&scheduler->mmio_context_lock);
if (action == INTEL_CONTEXT_SCHEDULE_IN &&
scheduler->engine_owner[ring_id]) {
/* Switch ring from vGPU to host. */
intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
NULL, ring_id);
scheduler->engine_owner[ring_id] = NULL;
}
spin_unlock_bh(&scheduler->mmio_context_lock);

return NOTIFY_OK;
}

workload = scheduler->current_workload[ring_id];
if (unlikely(!workload))
return NOTIFY_OK;

switch (action) {
case INTEL_CONTEXT_SCHEDULE_IN:
intel_gvt_load_render_mmio(workload->vgpu,
workload->ring_id);
spin_lock_bh(&scheduler->mmio_context_lock);
if (workload->vgpu != scheduler->engine_owner[ring_id]) {
/* Switch ring from host to vGPU or vGPU to vGPU. */
intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
workload->vgpu, ring_id);
scheduler->engine_owner[ring_id] = workload->vgpu;
} else
gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
ring_id, workload->vgpu->id);
spin_unlock_bh(&scheduler->mmio_context_lock);
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
intel_gvt_restore_render_mmio(workload->vgpu,
workload->ring_id);
/* If the status is -EINPROGRESS means this workload
* doesn't meet any issue during dispatching so when
* get the SCHEDULE_OUT set the status to be zero for
@ -431,6 +452,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)

atomic_dec(&vgpu->running_workload_num);
wake_up(&scheduler->workload_complete_wq);

if (gvt->scheduler.need_reschedule)
intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);

mutex_unlock(&gvt->lock);
}
@ -42,6 +42,10 @@ struct intel_gvt_workload_scheduler {
struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
bool need_reschedule;

spinlock_t mmio_context_lock;
/* can be null when owner is host */
struct intel_vgpu *engine_owner[I915_NUM_ENGINES];

wait_queue_head_t workload_complete_wq;
struct task_struct *thread[I915_NUM_ENGINES];
wait_queue_head_t waitq[I915_NUM_ENGINES];
@ -224,58 +224,138 @@ TRACE_EVENT(oos_sync,
TP_printk("%s", __entry->buf)
);

#define MAX_CMD_STR_LEN 256
TRACE_EVENT(gvt_command,
TP_PROTO(u8 vm_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, u32 cmd_len, bool ring_buffer_cmd, cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler),
TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, u32 cmd_len,
u32 buf_type),

TP_ARGS(vm_id, ring_id, ip_gma, cmd_va, cmd_len, ring_buffer_cmd, cost_pre_cmd_handler, cost_cmd_handler),
TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type),

TP_STRUCT__entry(
__field(u8, vm_id)
__field(u8, ring_id)
__field(int, i)
__array(char, tmp_buf, MAX_CMD_STR_LEN)
__array(char, cmd_str, MAX_CMD_STR_LEN)
),
TP_STRUCT__entry(
__field(u8, vgpu_id)
__field(u8, ring_id)
__field(u32, ip_gma)
__field(u32, buf_type)
__field(u32, cmd_len)
__dynamic_array(u32, raw_cmd, cmd_len)
),

TP_fast_assign(
__entry->vm_id = vm_id;
__entry->ring_id = ring_id;
__entry->cmd_str[0] = '\0';
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "VM(%d) Ring(%d): %s ip(%08x) pre handler cost (%llu), handler cost (%llu) ", vm_id, ring_id, ring_buffer_cmd ? "RB":"BB", ip_gma, cost_pre_cmd_handler, cost_cmd_handler);
strcat(__entry->cmd_str, __entry->tmp_buf);
entry->i = 0;
while (cmd_len > 0) {
if (cmd_len >= 8) {
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x %08x %08x %08x %08x ",
cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3],
cmd_va[__entry->i+4], cmd_va[__entry->i+5], cmd_va[__entry->i+6], cmd_va[__entry->i+7]);
__entry->i += 8;
cmd_len -= 8;
strcat(__entry->cmd_str, __entry->tmp_buf);
} else if (cmd_len >= 4) {
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x ",
cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3]);
__entry->i += 4;
cmd_len -= 4;
strcat(__entry->cmd_str, __entry->tmp_buf);
} else if (cmd_len >= 2) {
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x ", cmd_va[__entry->i], cmd_va[__entry->i+1]);
__entry->i += 2;
cmd_len -= 2;
strcat(__entry->cmd_str, __entry->tmp_buf);
} else if (cmd_len == 1) {
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x ", cmd_va[__entry->i]);
__entry->i += 1;
cmd_len -= 1;
strcat(__entry->cmd_str, __entry->tmp_buf);
}
}
strcat(__entry->cmd_str, "\n");
),
TP_fast_assign(
__entry->vgpu_id = vgpu_id;
__entry->ring_id = ring_id;
__entry->ip_gma = ip_gma;
__entry->buf_type = buf_type;
__entry->cmd_len = cmd_len;
memcpy(__get_dynamic_array(raw_cmd), cmd_va, cmd_len * sizeof(*cmd_va));
),

TP_printk("%s", __entry->cmd_str)

TP_printk("vgpu%d ring %d: buf_type %u, ip_gma %08x, raw cmd %s",
__entry->vgpu_id,
__entry->ring_id,
__entry->buf_type,
__entry->ip_gma,
__print_array(__get_dynamic_array(raw_cmd), __entry->cmd_len, 4))
);

#define GVT_TEMP_STR_LEN 10
TRACE_EVENT(write_ir,
TP_PROTO(int id, char *reg_name, unsigned int reg, unsigned int new_val,
unsigned int old_val, bool changed),

TP_ARGS(id, reg_name, reg, new_val, old_val, changed),

TP_STRUCT__entry(
__field(int, id)
__array(char, buf, GVT_TEMP_STR_LEN)
__field(unsigned int, reg)
__field(unsigned int, new_val)
__field(unsigned int, old_val)
__field(bool, changed)
),

TP_fast_assign(
__entry->id = id;
snprintf(__entry->buf, GVT_TEMP_STR_LEN, "%s", reg_name);
__entry->reg = reg;
__entry->new_val = new_val;
__entry->old_val = old_val;
__entry->changed = changed;
),

TP_printk("VM%u write [%s] %x, new %08x, old %08x, changed %08x\n",
__entry->id, __entry->buf, __entry->reg, __entry->new_val,
__entry->old_val, __entry->changed)
);

TRACE_EVENT(propagate_event,
TP_PROTO(int id, const char *irq_name, int bit),

TP_ARGS(id, irq_name, bit),

TP_STRUCT__entry(
__field(int, id)
__array(char, buf, GVT_TEMP_STR_LEN)
__field(int, bit)
),

TP_fast_assign(
__entry->id = id;
snprintf(__entry->buf, GVT_TEMP_STR_LEN, "%s", irq_name);
__entry->bit = bit;
),

TP_printk("Set bit (%d) for (%s) for vgpu (%d)\n",
__entry->bit, __entry->buf, __entry->id)
);

TRACE_EVENT(inject_msi,
TP_PROTO(int id, unsigned int address, unsigned int data),

TP_ARGS(id, address, data),

TP_STRUCT__entry(
__field(int, id)
__field(unsigned int, address)
__field(unsigned int, data)
),

TP_fast_assign(
__entry->id = id;
__entry->address = address;
__entry->data = data;
),

TP_printk("vgpu%d:inject msi address %x data %x\n",
__entry->id, __entry->address, __entry->data)
);

TRACE_EVENT(render_mmio,
TP_PROTO(int id, char *action, unsigned int reg,
unsigned int old_val, unsigned int new_val),

TP_ARGS(id, action, reg, new_val, old_val),

TP_STRUCT__entry(
__field(int, id)
__array(char, buf, GVT_TEMP_STR_LEN)
__field(unsigned int, reg)
__field(unsigned int, old_val)
__field(unsigned int, new_val)
),

TP_fast_assign(
__entry->id = id;
snprintf(__entry->buf, GVT_TEMP_STR_LEN, "%s", action);
__entry->reg = reg;
__entry->old_val = old_val;
__entry->new_val = new_val;
),

TP_printk("VM%u %s reg %x, old %08x new %08x\n",
__entry->id, __entry->buf, __entry->reg,
__entry->old_val, __entry->new_val)
);

#endif /* _GVT_TRACE_H_ */

/* This part must be out of protection */
@ -501,9 +501,14 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,

/* full GPU reset or device model level reset */
if (engine_mask == ALL_ENGINES || dmlr) {

intel_vgpu_reset_gtt(vgpu, dmlr);
intel_vgpu_reset_resource(vgpu);
intel_vgpu_reset_mmio(vgpu);

/*fence will not be reset during virtual reset */
if (dmlr)
intel_vgpu_reset_resource(vgpu);

intel_vgpu_reset_mmio(vgpu, dmlr);
populate_pvinfo_page(vgpu);
intel_vgpu_reset_display(vgpu);
@ -1670,12 +1670,22 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
|
||||
seq_printf(m, "FBC disabled: %s\n",
|
||||
dev_priv->fbc.no_fbc_reason);
|
||||
|
||||
if (intel_fbc_is_active(dev_priv) && INTEL_GEN(dev_priv) >= 7) {
|
||||
uint32_t mask = INTEL_GEN(dev_priv) >= 8 ?
|
||||
BDW_FBC_COMPRESSION_MASK :
|
||||
IVB_FBC_COMPRESSION_MASK;
|
||||
seq_printf(m, "Compressing: %s\n",
|
||||
yesno(I915_READ(FBC_STATUS2) & mask));
|
||||
if (intel_fbc_is_active(dev_priv)) {
|
||||
u32 mask;
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 8)
|
||||
mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
|
||||
else if (INTEL_GEN(dev_priv) >= 7)
|
||||
mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
|
||||
else if (INTEL_GEN(dev_priv) >= 5)
|
||||
mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
|
||||
else if (IS_G4X(dev_priv))
|
||||
mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
|
||||
else
|
||||
mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
|
||||
FBC_STAT_COMPRESSED);
|
||||
|
||||
seq_printf(m, "Compressing: %s\n", yesno(mask));
|
||||
}
|
||||
|
||||
mutex_unlock(&dev_priv->fbc.lock);
|
||||
@ -1684,7 +1694,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int i915_fbc_fc_get(void *data, u64 *val)
|
||||
static int i915_fbc_false_color_get(void *data, u64 *val)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = data;
|
||||
|
||||
@ -1696,7 +1706,7 @@ static int i915_fbc_fc_get(void *data, u64 *val)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int i915_fbc_fc_set(void *data, u64 val)
|
||||
static int i915_fbc_false_color_set(void *data, u64 val)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = data;
|
||||
u32 reg;
|
||||
@ -1717,8 +1727,8 @@ static int i915_fbc_fc_set(void *data, u64 val)
|
||||
return 0;
|
||||
}
|
||||
|
||||
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
|
||||
i915_fbc_fc_get, i915_fbc_fc_set,
|
||||
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
|
||||
i915_fbc_false_color_get, i915_fbc_false_color_set,
|
||||
"%llu\n");
|
||||
|
||||
static int i915_ips_status(struct seq_file *m, void *unused)
|
||||
@ -1988,6 +1998,12 @@ static int i915_context_status(struct seq_file *m, void *unused)
|
||||
seq_putc(m, '\n');
|
||||
}
|
||||
|
||||
seq_printf(m,
|
||||
"\tvma hashtable size=%u (actual %lu), count=%u\n",
|
||||
ctx->vma_lut.ht_size,
|
||||
BIT(ctx->vma_lut.ht_bits),
|
||||
ctx->vma_lut.ht_count);
|
||||
|
||||
seq_putc(m, '\n');
|
||||
}
|
||||
|
||||
@ -4289,26 +4305,27 @@ i915_drop_caches_set(void *data, u64 val)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = data;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
int ret;
|
||||
int ret = 0;
|
||||
|
||||
DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
|
||||
|
||||
/* No need to check and wait for gpu resets, only libdrm auto-restarts
|
||||
* on ioctls on -EAGAIN. */
|
||||
ret = mutex_lock_interruptible(&dev->struct_mutex);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (val & DROP_ACTIVE) {
|
||||
ret = i915_gem_wait_for_idle(dev_priv,
|
||||
I915_WAIT_INTERRUPTIBLE |
|
||||
I915_WAIT_LOCKED);
|
||||
if (val & (DROP_ACTIVE | DROP_RETIRE)) {
|
||||
ret = mutex_lock_interruptible(&dev->struct_mutex);
|
||||
if (ret)
|
||||
goto unlock;
|
||||
}
|
||||
return ret;
|
||||
|
||||
if (val & DROP_RETIRE)
|
||||
i915_gem_retire_requests(dev_priv);
|
||||
if (val & DROP_ACTIVE)
|
||||
ret = i915_gem_wait_for_idle(dev_priv,
|
||||
I915_WAIT_INTERRUPTIBLE |
|
||||
I915_WAIT_LOCKED);
|
||||
|
||||
if (val & DROP_RETIRE)
|
||||
i915_gem_retire_requests(dev_priv);
|
||||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
}
|
||||
|
||||
lockdep_set_current_reclaim_state(GFP_KERNEL);
|
||||
if (val & DROP_BOUND)
|
||||
@ -4321,9 +4338,6 @@ i915_drop_caches_set(void *data, u64 val)
|
||||
i915_gem_shrink_all(dev_priv);
|
||||
lockdep_clear_current_reclaim_state();
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
if (val & DROP_FREED) {
|
||||
synchronize_rcu();
|
||||
i915_gem_drain_freed_objects(dev_priv);
|
||||
@ -4861,7 +4875,7 @@ static const struct i915_debugfs_files {
|
||||
{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
|
||||
{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
|
||||
{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
|
||||
{"i915_fbc_false_color", &i915_fbc_fc_fops},
|
||||
{"i915_fbc_false_color", &i915_fbc_false_color_fops},
|
||||
{"i915_dp_test_data", &i915_displayport_test_data_fops},
|
||||
{"i915_dp_test_type", &i915_displayport_test_type_fops},
|
||||
{"i915_dp_test_active", &i915_displayport_test_active_fops},
|
||||
|
@ -139,6 +139,9 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
|
||||
} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
|
||||
ret = PCH_SPT;
|
||||
DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
|
||||
} else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
|
||||
ret = PCH_CNP;
|
||||
DRM_DEBUG_KMS("Assuming CannonPoint PCH\n");
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -170,24 +173,29 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
|
||||
while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
|
||||
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
|
||||
unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
|
||||
dev_priv->pch_id = id;
|
||||
unsigned short id_ext = pch->device &
|
||||
INTEL_PCH_DEVICE_ID_MASK_EXT;
|
||||
|
||||
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_id = id;
|
||||
dev_priv->pch_type = PCH_IBX;
|
||||
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
|
||||
WARN_ON(!IS_GEN5(dev_priv));
|
||||
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_id = id;
|
||||
dev_priv->pch_type = PCH_CPT;
|
||||
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
|
||||
WARN_ON(!(IS_GEN6(dev_priv) ||
|
||||
IS_IVYBRIDGE(dev_priv)));
|
||||
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
|
||||
/* PantherPoint is CPT compatible */
|
||||
dev_priv->pch_id = id;
|
||||
dev_priv->pch_type = PCH_CPT;
|
||||
DRM_DEBUG_KMS("Found PantherPoint PCH\n");
|
||||
WARN_ON(!(IS_GEN6(dev_priv) ||
|
||||
IS_IVYBRIDGE(dev_priv)));
|
||||
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_id = id;
|
||||
dev_priv->pch_type = PCH_LPT;
|
||||
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
|
||||
WARN_ON(!IS_HASWELL(dev_priv) &&
|
||||
@ -195,6 +203,7 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
|
||||
WARN_ON(IS_HSW_ULT(dev_priv) ||
|
||||
IS_BDW_ULT(dev_priv));
|
||||
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_id = id;
|
||||
dev_priv->pch_type = PCH_LPT;
|
||||
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
|
||||
WARN_ON(!IS_HASWELL(dev_priv) &&
|
||||
@ -202,20 +211,35 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
|
||||
WARN_ON(!IS_HSW_ULT(dev_priv) &&
|
||||
!IS_BDW_ULT(dev_priv));
|
||||
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_id = id;
|
||||
dev_priv->pch_type = PCH_SPT;
|
||||
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
|
||||
WARN_ON(!IS_SKYLAKE(dev_priv) &&
|
||||
!IS_KABYLAKE(dev_priv));
|
||||
} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
|
||||
} else if (id_ext == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_id = id_ext;
|
||||
dev_priv->pch_type = PCH_SPT;
|
||||
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
|
||||
WARN_ON(!IS_SKYLAKE(dev_priv) &&
|
||||
!IS_KABYLAKE(dev_priv));
|
||||
} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_id = id;
|
||||
dev_priv->pch_type = PCH_KBP;
|
||||
DRM_DEBUG_KMS("Found KabyPoint PCH\n");
|
||||
WARN_ON(!IS_SKYLAKE(dev_priv) &&
|
||||
!IS_KABYLAKE(dev_priv));
|
||||
} else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_id = id;
|
||||
dev_priv->pch_type = PCH_CNP;
|
||||
DRM_DEBUG_KMS("Found CannonPoint PCH\n");
|
||||
WARN_ON(!IS_CANNONLAKE(dev_priv) &&
|
||||
!IS_COFFEELAKE(dev_priv));
|
||||
} else if (id_ext == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_id = id_ext;
|
||||
dev_priv->pch_type = PCH_CNP;
|
||||
DRM_DEBUG_KMS("Found CannonPoint LP PCH\n");
|
||||
WARN_ON(!IS_CANNONLAKE(dev_priv) &&
|
||||
!IS_COFFEELAKE(dev_priv));
|
||||
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
|
||||
(id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
|
||||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
|
||||
@ -223,6 +247,7 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
|
||||
PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
|
||||
pch->subsystem_device ==
|
||||
PCI_SUBDEVICE_ID_QEMU)) {
|
||||
dev_priv->pch_id = id;
|
||||
dev_priv->pch_type =
|
||||
intel_virt_detect_pch(dev_priv);
|
||||
} else
|
||||
@ -351,6 +376,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
|
||||
case I915_PARAM_HAS_EXEC_ASYNC:
|
||||
case I915_PARAM_HAS_EXEC_FENCE:
|
||||
case I915_PARAM_HAS_EXEC_CAPTURE:
|
||||
case I915_PARAM_HAS_EXEC_BATCH_FIRST:
|
||||
/* For the time being all of these are always true;
|
||||
* if some supported hardware does not have one of these
|
||||
* features this value needs to be provided from
|
||||
@ -358,6 +384,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
|
||||
*/
|
||||
value = 1;
|
||||
break;
|
||||
case I915_PARAM_SLICE_MASK:
|
||||
value = INTEL_INFO(dev_priv)->sseu.slice_mask;
|
||||
if (!value)
|
||||
return -ENODEV;
|
||||
break;
|
||||
case I915_PARAM_SUBSLICE_MASK:
|
||||
value = INTEL_INFO(dev_priv)->sseu.subslice_mask;
|
||||
if (!value)
|
||||
return -ENODEV;
|
||||
break;
|
||||
default:
|
||||
DRM_DEBUG("Unknown parameter %d\n", param->param);
|
||||
return -EINVAL;
|
||||
@ -553,6 +589,7 @@ static void i915_gem_fini(struct drm_i915_private *dev_priv)
|
||||
intel_uc_fini_hw(dev_priv);
|
||||
i915_gem_cleanup_engines(dev_priv);
|
||||
i915_gem_context_fini(dev_priv);
|
||||
i915_gem_cleanup_userptr(dev_priv);
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
|
||||
i915_gem_drain_freed_objects(dev_priv);
|
||||
@ -997,6 +1034,8 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
|
||||
DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
|
||||
|
||||
intel_uc_sanitize_options(dev_priv);
|
||||
|
||||
intel_gvt_sanitize_options(dev_priv);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -2459,9 +2498,6 @@ static int intel_runtime_resume(struct device *kdev)
|
||||
|
||||
intel_guc_resume(dev_priv);
|
||||
|
||||
if (IS_GEN6(dev_priv))
|
||||
intel_init_pch_refclk(dev_priv);
|
||||
|
||||
if (IS_GEN9_LP(dev_priv)) {
|
||||
bxt_disable_dc9(dev_priv);
|
||||
bxt_display_core_init(dev_priv, true);
|
||||
|
@ -37,7 +37,7 @@
|
||||
#include <linux/i2c.h>
|
||||
#include <linux/i2c-algo-bit.h>
|
||||
#include <linux/backlight.h>
|
||||
#include <linux/hashtable.h>
|
||||
#include <linux/hash.h>
|
||||
#include <linux/intel-iommu.h>
|
||||
#include <linux/kref.h>
|
||||
#include <linux/pm_qos.h>
|
||||
@ -80,8 +80,8 @@
|
||||
|
||||
#define DRIVER_NAME "i915"
|
||||
#define DRIVER_DESC "Intel Graphics"
|
||||
#define DRIVER_DATE "20170529"
|
||||
#define DRIVER_TIMESTAMP 1496041258
|
||||
#define DRIVER_DATE "20170619"
|
||||
#define DRIVER_TIMESTAMP 1497857498
|
||||
|
||||
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
|
||||
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
|
||||
@ -752,7 +752,6 @@ struct intel_csr {
|
||||
func(has_aliasing_ppgtt); \
|
||||
func(has_csr); \
|
||||
func(has_ddi); \
|
||||
func(has_decoupled_mmio); \
|
||||
func(has_dp_mst); \
|
||||
func(has_fbc); \
|
||||
func(has_fpga_dbg); \
|
||||
@ -827,6 +826,8 @@ enum intel_platform {
|
||||
INTEL_BROXTON,
|
||||
INTEL_KABYLAKE,
|
||||
INTEL_GEMINILAKE,
|
||||
INTEL_COFFEELAKE,
|
||||
INTEL_CANNONLAKE,
|
||||
INTEL_MAX_PLATFORMS
|
||||
};
|
||||
|
||||
@ -1152,6 +1153,7 @@ enum intel_pch {
|
||||
PCH_LPT, /* Lynxpoint PCH */
|
||||
PCH_SPT, /* Sunrisepoint PCH */
|
||||
PCH_KBP, /* Kabypoint PCH */
|
||||
PCH_CNP, /* Cannonpoint PCH */
|
||||
PCH_NOP,
|
||||
};
|
||||
|
||||
@ -1160,11 +1162,9 @@ enum intel_sbi_destination {
|
||||
SBI_MPHY,
|
||||
};
|
||||
|
||||
#define QUIRK_PIPEA_FORCE (1<<0)
|
||||
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
|
||||
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
|
||||
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
|
||||
#define QUIRK_PIPEB_FORCE (1<<4)
|
||||
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
|
||||
|
||||
struct intel_fbdev;
|
||||
@ -1454,6 +1454,13 @@ struct i915_gem_mm {
|
||||
/** LRU list of objects with fence regs on them. */
|
||||
struct list_head fence_list;
|
||||
|
||||
/**
|
||||
* Workqueue to fault in userptr pages, flushed by the execbuf
|
||||
* when required but otherwise left to userspace to try again
|
||||
* on EAGAIN.
|
||||
*/
|
||||
struct workqueue_struct *userptr_wq;
|
||||
|
||||
u64 unordered_timeline;
|
||||
|
||||
/* the indicator for dispatch video commands on two BSD rings */
|
||||
@ -2017,9 +2024,17 @@ struct i915_oa_ops {
|
||||
void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
|
||||
|
||||
/**
|
||||
* @enable_metric_set: Applies any MUX configuration to set up the
|
||||
* Boolean and Custom (B/C) counters that are part of the counter
|
||||
* reports being sampled. May apply system constraints such as
|
||||
* @select_metric_set: The auto generated code that checks whether a
|
||||
* requested OA config is applicable to the system and if so sets up
|
||||
* the mux, oa and flex eu register config pointers according to the
|
||||
* current dev_priv->perf.oa.metrics_set.
|
||||
*/
|
||||
int (*select_metric_set)(struct drm_i915_private *dev_priv);
|
||||
|
||||
/**
|
||||
* @enable_metric_set: Selects and applies any MUX configuration to set
|
||||
* up the Boolean and Custom (B/C) counters that are part of the
|
||||
* counter reports being sampled. May apply system constraints such as
|
||||
* disabling EU clock gating as required.
|
||||
*/
|
||||
int (*enable_metric_set)(struct drm_i915_private *dev_priv);
|
||||
@ -2050,20 +2065,13 @@ struct i915_oa_ops {
|
||||
size_t *offset);
|
||||
|
||||
/**
|
||||
* @oa_buffer_check: Check for OA buffer data + update tail
|
||||
* @oa_hw_tail_read: read the OA tail pointer register
|
||||
*
|
||||
* This is either called via fops or the poll check hrtimer (atomic
|
||||
* ctx) without any locks taken.
|
||||
*
|
||||
* It's safe to read OA config state here unlocked, assuming that this
|
||||
* is only called while the stream is enabled, while the global OA
|
||||
* configuration can't be modified.
|
||||
*
|
||||
* Efficiency is more important than avoiding some false positives
|
||||
* here, which will be handled gracefully - likely resulting in an
|
||||
* %EAGAIN error for userspace.
|
||||
* In particular this enables us to share all the fiddly code for
|
||||
* handling the OA unit tail pointer race that affects multiple
|
||||
* generations.
|
||||
*/
|
||||
bool (*oa_buffer_check)(struct drm_i915_private *dev_priv);
|
||||
u32 (*oa_hw_tail_read)(struct drm_i915_private *dev_priv);
|
||||
};
|
||||
|
||||
struct intel_cdclk_state {
|
||||
@ -2394,8 +2402,6 @@ struct drm_i915_private {
|
||||
struct mutex lock;
|
||||
struct list_head streams;
|
||||
|
||||
spinlock_t hook_lock;
|
||||
|
||||
struct {
|
||||
struct i915_perf_stream *exclusive_stream;
|
||||
|
||||
@ -2413,17 +2419,23 @@ struct drm_i915_private {
|
||||
|
||||
bool periodic;
|
||||
int period_exponent;
|
||||
int timestamp_frequency;
|
||||
|
||||
int metrics_set;
|
||||
|
||||
const struct i915_oa_reg *mux_regs;
|
||||
int mux_regs_len;
|
||||
const struct i915_oa_reg *mux_regs[6];
|
||||
int mux_regs_lens[6];
|
||||
int n_mux_configs;
|
||||
|
||||
const struct i915_oa_reg *b_counter_regs;
|
||||
int b_counter_regs_len;
|
||||
const struct i915_oa_reg *flex_regs;
|
||||
int flex_regs_len;
|
||||
|
||||
struct {
|
||||
struct i915_vma *vma;
|
||||
u8 *vaddr;
|
||||
u32 last_ctx_id;
|
||||
int format;
|
||||
int format_size;
|
||||
|
||||
@ -2493,6 +2505,15 @@ struct drm_i915_private {
|
||||
} oa_buffer;
|
||||
|
||||
u32 gen7_latched_oastatus1;
|
||||
u32 ctx_oactxctrl_offset;
|
||||
u32 ctx_flexeu0_offset;
|
||||
|
||||
/**
|
||||
* The RPT_ID/reason field for Gen8+ includes a bit
|
||||
* to determine if the CTX ID in the report is valid
|
||||
* but the specific bit differs between Gen 8 and 9
|
||||
*/
|
||||
u32 gen8_valid_ctx_bit;
|
||||
|
||||
struct i915_oa_ops ops;
|
||||
const struct i915_oa_format *oa_formats;
|
||||
@ -2768,6 +2789,8 @@ intel_info(const struct drm_i915_private *dev_priv)
|
||||
#define IS_BROXTON(dev_priv) ((dev_priv)->info.platform == INTEL_BROXTON)
|
||||
#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_KABYLAKE)
|
||||
#define IS_GEMINILAKE(dev_priv) ((dev_priv)->info.platform == INTEL_GEMINILAKE)
|
||||
#define IS_COFFEELAKE(dev_priv) ((dev_priv)->info.platform == INTEL_COFFEELAKE)
|
||||
#define IS_CANNONLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_CANNONLAKE)
|
||||
#define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile)
|
||||
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
|
||||
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
|
||||
@ -2803,10 +2826,18 @@ intel_info(const struct drm_i915_private *dev_priv)
|
||||
#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
|
||||
INTEL_DEVID(dev_priv) == 0x5915 || \
|
||||
INTEL_DEVID(dev_priv) == 0x591E)
|
||||
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
|
||||
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0010)
|
||||
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
|
||||
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
|
||||
#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
|
||||
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030)
|
||||
#define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \
|
||||
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0010)
|
||||
#define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \
|
||||
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
|
||||
#define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \
|
||||
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
|
||||
|
||||
#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
|
||||
|
||||
@ -2845,6 +2876,12 @@ intel_info(const struct drm_i915_private *dev_priv)
|
||||
#define IS_GLK_REVID(dev_priv, since, until) \
|
||||
(IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
|
||||
|
||||
#define CNL_REVID_A0 0x0
|
||||
#define CNL_REVID_B0 0x1
|
||||
|
||||
#define IS_CNL_REVID(p, since, until) \
|
||||
(IS_CANNONLAKE(p) && IS_REVID(p, since, until))
|
||||
|
||||
/*
|
||||
* The genX designation typically refers to the render engine, so render
|
||||
* capability related checks should use IS_GEN, while display and other checks
|
||||
@ -2859,6 +2896,7 @@ intel_info(const struct drm_i915_private *dev_priv)
|
||||
#define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6)))
|
||||
#define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7)))
|
||||
#define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8)))
|
||||
#define IS_GEN10(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(9)))
|
||||
|
||||
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
|
||||
#define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && IS_LP(dev_priv))
|
||||
@ -2959,6 +2997,7 @@ intel_info(const struct drm_i915_private *dev_priv)
|
||||
#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu)
|
||||
|
||||
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
|
||||
#define INTEL_PCH_DEVICE_ID_MASK_EXT 0xff80
|
||||
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
|
||||
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
|
||||
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
|
||||
@ -2967,11 +3006,16 @@ intel_info(const struct drm_i915_private *dev_priv)
|
||||
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
|
||||
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
|
||||
#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200
|
||||
#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
|
||||
#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
|
||||
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
|
||||
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
|
||||
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
|
||||
|
||||
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
|
||||
#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
|
||||
#define HAS_PCH_CNP_LP(dev_priv) \
|
||||
((dev_priv)->pch_id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE)
|
||||
#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
|
||||
#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
|
||||
#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
|
||||
@ -2986,7 +3030,7 @@ intel_info(const struct drm_i915_private *dev_priv)
|
||||
|
||||
#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
|
||||
|
||||
#define HAS_LSPCON(dev_priv) (IS_GEN9(dev_priv))
|
||||
#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
|
||||
|
||||
/* DPF == dynamic parity feature */
|
||||
#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
|
||||
@ -2996,8 +3040,6 @@ intel_info(const struct drm_i915_private *dev_priv)
|
||||
#define GT_FREQUENCY_MULTIPLIER 50
|
||||
#define GEN9_FREQ_SCALER 3
|
||||
|
||||
#define HAS_DECOUPLED_MMIO(dev_priv) (INTEL_INFO(dev_priv)->has_decoupled_mmio)
|
||||
|
||||
#include "i915_trace.h"
|
||||
|
||||
static inline bool intel_vtd_active(void)
|
||||
@ -3194,7 +3236,8 @@ int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
|
||||
int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
|
||||
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
|
||||
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file);
|
||||
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
|
||||
@ -3534,6 +3577,9 @@ i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
|
||||
|
||||
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file);
|
||||
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
|
||||
struct i915_gem_context *ctx,
|
||||
uint32_t *reg_state);
|
||||
|
||||
/* i915_gem_evict.c */
|
||||
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
|
||||
@ -3544,7 +3590,7 @@ int __must_check i915_gem_evict_something(struct i915_address_space *vm,
|
||||
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
|
||||
struct drm_mm_node *node,
|
||||
unsigned int flags);
|
||||
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
|
||||
int i915_gem_evict_vm(struct i915_address_space *vm);
|
||||
|
||||
/* belongs in i915_gem_gtt.h */
|
||||
static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
|
||||
|
@ -49,10 +49,10 @@ static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
|
||||
|
||||
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
|
||||
if (obj->cache_dirty)
|
||||
return false;
|
||||
|
||||
if (!i915_gem_object_is_coherent(obj))
|
||||
if (!obj->cache_coherent)
|
||||
return true;
|
||||
|
||||
return obj->pin_display;
|
||||
@ -143,9 +143,9 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
|
||||
struct i915_ggtt *ggtt = &dev_priv->ggtt;
|
||||
struct drm_i915_gem_get_aperture *args = data;
|
||||
struct i915_vma *vma;
|
||||
size_t pinned;
|
||||
u64 pinned;
|
||||
|
||||
pinned = 0;
|
||||
pinned = ggtt->base.reserved;
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
|
||||
if (i915_vma_is_pinned(vma))
|
||||
@ -233,6 +233,14 @@ err_phys:
|
||||
return st;
|
||||
}
|
||||
|
||||
static void __start_cpu_write(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
|
||||
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
|
||||
if (cpu_write_needs_clflush(obj))
|
||||
obj->cache_dirty = true;
|
||||
}
|
||||
|
||||
static void
|
||||
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
|
||||
struct sg_table *pages,
|
||||
@ -245,11 +253,10 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
|
||||
|
||||
if (needs_clflush &&
|
||||
(obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
|
||||
!i915_gem_object_is_coherent(obj))
|
||||
!obj->cache_coherent)
|
||||
drm_clflush_sg(pages);
|
||||
|
||||
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
|
||||
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
|
||||
__start_cpu_write(obj);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -684,6 +691,12 @@ i915_gem_dumb_create(struct drm_file *file,
|
||||
args->size, &args->handle);
|
||||
}
|
||||
|
||||
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
return !(obj->cache_level == I915_CACHE_NONE ||
|
||||
obj->cache_level == I915_CACHE_WT);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new mm object and returns a handle to it.
|
||||
* @dev: drm device pointer
|
||||
@ -753,6 +766,11 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
|
||||
case I915_GEM_DOMAIN_CPU:
|
||||
i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
|
||||
break;
|
||||
|
||||
case I915_GEM_DOMAIN_RENDER:
|
||||
if (gpu_write_needs_clflush(obj))
|
||||
obj->cache_dirty = true;
|
||||
break;
|
||||
}
|
||||
|
||||
obj->base.write_domain = 0;
|
||||
@ -838,8 +856,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (i915_gem_object_is_coherent(obj) ||
|
||||
!static_cpu_has(X86_FEATURE_CLFLUSH)) {
|
||||
if (obj->cache_coherent || !static_cpu_has(X86_FEATURE_CLFLUSH)) {
|
||||
ret = i915_gem_object_set_to_cpu_domain(obj, false);
|
||||
if (ret)
|
||||
goto err_unpin;
|
||||
@ -854,7 +871,8 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
|
||||
* optimizes for the case when the gpu will dirty the data
|
||||
* anyway again before the next pread happens.
|
||||
*/
|
||||
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
|
||||
if (!obj->cache_dirty &&
|
||||
!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
|
||||
*needs_clflush = CLFLUSH_BEFORE;
|
||||
|
||||
out:
|
||||
@ -890,8 +908,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (i915_gem_object_is_coherent(obj) ||
|
||||
!static_cpu_has(X86_FEATURE_CLFLUSH)) {
|
||||
if (obj->cache_coherent || !static_cpu_has(X86_FEATURE_CLFLUSH)) {
|
||||
ret = i915_gem_object_set_to_cpu_domain(obj, true);
|
||||
if (ret)
|
||||
goto err_unpin;
|
||||
@ -906,14 +923,16 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
|
||||
* This optimizes for the case when the gpu will use the data
|
||||
* right away and we therefore have to clflush anyway.
|
||||
*/
|
||||
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
|
||||
if (!obj->cache_dirty) {
|
||||
*needs_clflush |= CLFLUSH_AFTER;
|
||||
|
||||
/* Same trick applies to invalidate partially written cachelines read
|
||||
* before writing.
|
||||
*/
|
||||
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
|
||||
*needs_clflush |= CLFLUSH_BEFORE;
|
||||
/*
|
||||
* Same trick applies to invalidate partially written
|
||||
* cachelines read before writing.
|
||||
*/
|
||||
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
|
||||
*needs_clflush |= CLFLUSH_BEFORE;
|
||||
}
|
||||
|
||||
out:
|
||||
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
|
||||
@ -2337,8 +2356,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
|
||||
struct page *page;
|
||||
unsigned long last_pfn = 0; /* suppress gcc warning */
|
||||
unsigned int max_segment;
|
||||
gfp_t noreclaim;
|
||||
int ret;
|
||||
gfp_t gfp;
|
||||
|
||||
/* Assert that the object is not currently in any GPU domain. As it
|
||||
* wasn't in the GTT, there shouldn't be any way it could have been in
|
||||
@ -2367,22 +2386,30 @@ rebuild_st:
|
||||
* Fail silently without starting the shrinker
|
||||
*/
|
||||
mapping = obj->base.filp->f_mapping;
|
||||
gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
|
||||
gfp |= __GFP_NORETRY | __GFP_NOWARN;
|
||||
noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
|
||||
noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
|
||||
|
||||
sg = st->sgl;
|
||||
st->nents = 0;
|
||||
for (i = 0; i < page_count; i++) {
|
||||
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
|
||||
if (unlikely(IS_ERR(page))) {
|
||||
i915_gem_shrink(dev_priv,
|
||||
page_count,
|
||||
I915_SHRINK_BOUND |
|
||||
I915_SHRINK_UNBOUND |
|
||||
I915_SHRINK_PURGEABLE);
|
||||
const unsigned int shrink[] = {
|
||||
I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
|
||||
0,
|
||||
}, *s = shrink;
|
||||
gfp_t gfp = noreclaim;
|
||||
|
||||
do {
|
||||
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
|
||||
}
|
||||
if (unlikely(IS_ERR(page))) {
|
||||
gfp_t reclaim;
|
||||
if (likely(!IS_ERR(page)))
|
||||
break;
|
||||
|
||||
if (!*s) {
|
||||
ret = PTR_ERR(page);
|
||||
goto err_sg;
|
||||
}
|
||||
|
||||
i915_gem_shrink(dev_priv, 2 * page_count, *s++);
|
||||
cond_resched();
|
||||
|
||||
/* We've tried hard to allocate the memory by reaping
|
||||
* our own buffer, now let the real VM do its job and
|
||||
@ -2392,15 +2419,26 @@ rebuild_st:
|
||||
* defer the oom here by reporting the ENOMEM back
|
||||
* to userspace.
|
||||
*/
|
||||
reclaim = mapping_gfp_mask(mapping);
|
||||
reclaim |= __GFP_NORETRY; /* reclaim, but no oom */
|
||||
if (!*s) {
|
||||
/* reclaim and warn, but no oom */
|
||||
gfp = mapping_gfp_mask(mapping);
|
||||
|
||||
page = shmem_read_mapping_page_gfp(mapping, i, reclaim);
|
||||
if (IS_ERR(page)) {
|
||||
ret = PTR_ERR(page);
|
||||
goto err_sg;
|
||||
/* Our bo are always dirty and so we require
|
||||
* kswapd to reclaim our pages (direct reclaim
|
||||
* does not effectively begin pageout of our
|
||||
* buffers on its own). However, direct reclaim
|
||||
* only waits for kswapd when under allocation
|
||||
* congestion. So as a result __GFP_RECLAIM is
|
||||
* unreliable and fails to actually reclaim our
|
||||
* dirty pages -- unless you try over and over
|
||||
* again with !__GFP_NORETRY. However, we still
|
||||
* want to fail this allocation rather than
|
||||
* trigger the out-of-memory killer and for
|
||||
* this we want the future __GFP_MAYFAIL.
|
||||
*/
|
||||
}
|
||||
}
|
||||
} while (1);
|
||||
|
||||
if (!i ||
|
||||
sg->length >= max_segment ||
|
||||
page_to_pfn(page) != last_pfn + 1) {
|
||||
@ -3223,6 +3261,10 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
|
||||
if (vma->vm->file == fpriv)
|
||||
i915_vma_close(vma);
|
||||
|
||||
vma = obj->vma_hashed;
|
||||
if (vma && vma->ctx->file_priv == fpriv)
|
||||
i915_vma_unlink_ctx(vma);
|
||||
|
||||
if (i915_gem_object_is_active(obj) &&
|
||||
!i915_gem_object_has_active_reference(obj)) {
|
||||
i915_gem_object_set_active_reference(obj);
|
||||
@ -3376,10 +3418,13 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
|
||||
|
||||
static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU && !obj->cache_dirty)
|
||||
return;
|
||||
|
||||
i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
|
||||
/*
|
||||
* We manually flush the CPU domain so that we can override and
|
||||
* force the flush for the display, and perform it asynchronously.
|
||||
*/
|
||||
flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
|
||||
if (obj->cache_dirty)
|
||||
i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
|
||||
obj->base.write_domain = 0;
|
||||
}
|
||||
|
||||
@ -3638,13 +3683,11 @@ restart:
|
||||
}
|
||||
}
|
||||
|
||||
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU &&
|
||||
i915_gem_object_is_coherent(obj))
|
||||
obj->cache_dirty = true;
|
||||
|
||||
list_for_each_entry(vma, &obj->vma_list, obj_link)
|
||||
vma->node.color = cache_level;
|
||||
obj->cache_level = cache_level;
|
||||
obj->cache_coherent = i915_gem_object_is_coherent(obj);
|
||||
obj->cache_dirty = true; /* Always invalidate stale cachelines */
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -3866,9 +3909,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
|
||||
return 0;
|
||||
|
||||
flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
|
||||
|
||||
/* Flush the CPU cache if it's still invalid. */
|
||||
@ -3880,15 +3920,13 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
|
||||
/* It should now be out of any other write domains, and we can update
|
||||
* the domain values for our changes.
|
||||
*/
|
||||
GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
|
||||
GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
|
||||
|
||||
/* If we're writing through the CPU, then the GPU read domains will
|
||||
* need to be invalidated at next use.
|
||||
*/
|
||||
if (write) {
|
||||
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
|
||||
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
|
||||
}
|
||||
if (write)
|
||||
__start_cpu_write(obj);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -4220,7 +4258,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
|
||||
|
||||
INIT_LIST_HEAD(&obj->global_link);
|
||||
INIT_LIST_HEAD(&obj->userfault_link);
|
||||
INIT_LIST_HEAD(&obj->obj_exec_link);
|
||||
INIT_LIST_HEAD(&obj->vma_list);
|
||||
INIT_LIST_HEAD(&obj->batch_pool_link);
|
||||
|
||||
@ -4285,6 +4322,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
|
||||
|
||||
mapping = obj->base.filp->f_mapping;
|
||||
mapping_set_gfp_mask(mapping, mask);
|
||||
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
|
||||
|
||||
i915_gem_object_init(obj, &i915_gem_object_ops);
|
||||
|
||||
@ -4308,6 +4346,9 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
|
||||
} else
|
||||
obj->cache_level = I915_CACHE_NONE;
|
||||
|
||||
obj->cache_coherent = i915_gem_object_is_coherent(obj);
|
||||
obj->cache_dirty = !obj->cache_coherent;
|
||||
|
||||
trace_i915_gem_object_create(obj);
|
||||
|
||||
return obj;
|
||||
@ -4356,7 +4397,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
|
||||
GEM_BUG_ON(i915_gem_object_is_active(obj));
|
||||
list_for_each_entry_safe(vma, vn,
|
||||
&obj->vma_list, obj_link) {
|
||||
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
|
||||
GEM_BUG_ON(i915_vma_is_active(vma));
|
||||
vma->flags &= ~I915_VMA_PIN_MASK;
|
||||
i915_vma_close(vma);
|
||||
@ -4763,7 +4803,9 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
|
||||
|
||||
i915_gem_init_userptr(dev_priv);
|
||||
ret = i915_gem_init_userptr(dev_priv);
|
||||
if (ret)
|
||||
goto out_unlock;
|
||||
|
||||
ret = i915_gem_init_ggtt(dev_priv);
|
||||
if (ret)
|
||||
@ -4974,10 +5016,8 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
|
||||
|
||||
mutex_lock(&dev_priv->drm.struct_mutex);
|
||||
for (p = phases; *p; p++) {
|
||||
list_for_each_entry(obj, *p, global_link) {
|
||||
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
|
||||
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
|
||||
}
|
||||
list_for_each_entry(obj, *p, global_link)
|
||||
__start_cpu_write(obj);
|
||||
}
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
|
||||
|
@ -114,12 +114,27 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
|
||||
list_for_each_entry(obj, list, batch_pool_link) {
|
||||
/* The batches are strictly LRU ordered */
|
||||
if (i915_gem_object_is_active(obj)) {
|
||||
if (!reservation_object_test_signaled_rcu(obj->resv,
|
||||
true))
|
||||
struct reservation_object *resv = obj->resv;
|
||||
|
||||
if (!reservation_object_test_signaled_rcu(resv, true))
|
||||
break;
|
||||
|
||||
i915_gem_retire_requests(pool->engine->i915);
|
||||
GEM_BUG_ON(i915_gem_object_is_active(obj));
|
||||
|
||||
/*
|
||||
* The object is now idle, clear the array of shared
|
||||
* fences before we add a new request. Although, we
|
||||
* remain on the same engine, we may be on a different
|
||||
* timeline and so may continually grow the array,
|
||||
* trapping a reference to all the old fences, rather
|
||||
* than replace the existing fence.
|
||||
*/
|
||||
if (rcu_access_pointer(resv->fence)) {
|
||||
reservation_object_lock(resv, NULL);
|
||||
reservation_object_add_excl_fence(resv, NULL);
|
||||
reservation_object_unlock(resv);
|
||||
}
|
||||
}
|
||||
|
||||
GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv,
|
||||
|
@ -71,8 +71,6 @@ static const struct dma_fence_ops i915_clflush_ops = {
|
||||
static void __i915_do_clflush(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
drm_clflush_sg(obj->mm.pages);
|
||||
obj->cache_dirty = false;
|
||||
|
||||
intel_fb_obj_flush(obj, ORIGIN_CPU);
|
||||
}
|
||||
|
||||
@ -81,9 +79,6 @@ static void i915_clflush_work(struct work_struct *work)
|
||||
struct clflush *clflush = container_of(work, typeof(*clflush), work);
|
||||
struct drm_i915_gem_object *obj = clflush->obj;
|
||||
|
||||
if (!obj->cache_dirty)
|
||||
goto out;
|
||||
|
||||
if (i915_gem_object_pin_pages(obj)) {
|
||||
DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
|
||||
goto out;
|
||||
@ -131,10 +126,10 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
|
||||
* anything not backed by physical memory we consider to be always
|
||||
* coherent and not need clflushing.
|
||||
*/
|
||||
if (!i915_gem_object_has_struct_page(obj))
|
||||
if (!i915_gem_object_has_struct_page(obj)) {
|
||||
obj->cache_dirty = false;
|
||||
return;
|
||||
|
||||
obj->cache_dirty = true;
|
||||
}
|
||||
|
||||
/* If the GPU is snooping the contents of the CPU cache,
|
||||
* we do not need to manually clear the CPU cache lines. However,
|
||||
@ -144,7 +139,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
|
||||
* snooping behaviour occurs naturally as the result of our domain
|
||||
* tracking.
|
||||
*/
|
||||
if (!(flags & I915_CLFLUSH_FORCE) && i915_gem_object_is_coherent(obj))
|
||||
if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent)
|
||||
return;
|
||||
|
||||
trace_i915_gem_object_clflush(obj);
|
||||
@ -153,6 +148,8 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
|
||||
if (!(flags & I915_CLFLUSH_SYNC))
|
||||
clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
|
||||
if (clflush) {
|
||||
GEM_BUG_ON(!obj->cache_dirty);
|
||||
|
||||
dma_fence_init(&clflush->dma,
|
||||
&i915_clflush_ops,
|
||||
&clflush_lock,
|
||||
@ -180,4 +177,6 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
|
||||
} else {
|
||||
GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
|
||||
}
|
||||
|
||||
obj->cache_dirty = false;
|
||||
}
|
||||
|
@ -85,6 +85,7 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/log2.h>
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/i915_drm.h>
|
||||
#include "i915_drv.h"
|
||||
@ -92,6 +93,71 @@
|
||||
|
||||
#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
|
||||
|
||||
/* Initial size (as log2) to preallocate the handle->object hashtable */
|
||||
#define VMA_HT_BITS 2u /* 4 x 2 pointers, 64 bytes minimum */
|
||||
|
||||
static void resize_vma_ht(struct work_struct *work)
|
||||
{
|
||||
struct i915_gem_context_vma_lut *lut =
|
||||
container_of(work, typeof(*lut), resize);
|
||||
unsigned int bits, new_bits, size, i;
|
||||
struct hlist_head *new_ht;
|
||||
|
||||
GEM_BUG_ON(!(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS));
|
||||
|
||||
bits = 1 + ilog2(4*lut->ht_count/3 + 1);
|
||||
new_bits = min_t(unsigned int,
|
||||
max(bits, VMA_HT_BITS),
|
||||
sizeof(unsigned int) * BITS_PER_BYTE - 1);
|
||||
if (new_bits == lut->ht_bits)
|
||||
goto out;
|
||||
|
||||
new_ht = kzalloc(sizeof(*new_ht)<<new_bits, GFP_KERNEL | __GFP_NOWARN);
|
||||
if (!new_ht)
|
||||
new_ht = vzalloc(sizeof(*new_ht)<<new_bits);
|
||||
if (!new_ht)
|
||||
/* Pretend resize succeeded and stop calling us for a bit! */
|
||||
goto out;
|
||||
|
||||
size = BIT(lut->ht_bits);
|
||||
for (i = 0; i < size; i++) {
|
||||
struct i915_vma *vma;
|
||||
struct hlist_node *tmp;
|
||||
|
||||
hlist_for_each_entry_safe(vma, tmp, &lut->ht[i], ctx_node)
|
||||
hlist_add_head(&vma->ctx_node,
|
||||
&new_ht[hash_32(vma->ctx_handle,
|
||||
new_bits)]);
|
||||
}
|
||||
kvfree(lut->ht);
|
||||
lut->ht = new_ht;
|
||||
lut->ht_bits = new_bits;
|
||||
out:
|
||||
smp_store_release(&lut->ht_size, BIT(bits));
|
||||
GEM_BUG_ON(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS);
|
||||
}
|
||||
|
||||
static void vma_lut_free(struct i915_gem_context *ctx)
|
||||
{
|
||||
struct i915_gem_context_vma_lut *lut = &ctx->vma_lut;
|
||||
unsigned int i, size;
|
||||
|
||||
if (lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS)
|
||||
cancel_work_sync(&lut->resize);
|
||||
|
||||
size = BIT(lut->ht_bits);
|
||||
for (i = 0; i < size; i++) {
|
||||
struct i915_vma *vma;
|
||||
|
||||
hlist_for_each_entry(vma, &lut->ht[i], ctx_node) {
|
||||
vma->obj->vma_hashed = NULL;
|
||||
vma->ctx = NULL;
|
||||
i915_vma_put(vma);
|
||||
}
|
||||
}
|
||||
kvfree(lut->ht);
|
||||
}
|
||||
|
||||
void i915_gem_context_free(struct kref *ctx_ref)
|
||||
{
|
||||
struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
|
||||
@ -101,6 +167,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
|
||||
trace_i915_context_free(ctx);
|
||||
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
|
||||
|
||||
vma_lut_free(ctx);
|
||||
i915_ppgtt_put(ctx->ppgtt);
|
||||
|
||||
for (i = 0; i < I915_NUM_ENGINES; i++) {
|
||||
@ -118,6 +185,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
|
||||
|
||||
kfree(ctx->name);
|
||||
put_pid(ctx->pid);
|
||||
|
||||
list_del(&ctx->link);
|
||||
|
||||
ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
|
||||
@ -201,13 +269,24 @@ __create_hw_context(struct drm_i915_private *dev_priv,
|
||||
ctx->i915 = dev_priv;
|
||||
ctx->priority = I915_PRIORITY_NORMAL;
|
||||
|
||||
ctx->vma_lut.ht_bits = VMA_HT_BITS;
|
||||
ctx->vma_lut.ht_size = BIT(VMA_HT_BITS);
|
||||
BUILD_BUG_ON(BIT(VMA_HT_BITS) == I915_CTX_RESIZE_IN_PROGRESS);
|
||||
ctx->vma_lut.ht = kcalloc(ctx->vma_lut.ht_size,
|
||||
sizeof(*ctx->vma_lut.ht),
|
||||
GFP_KERNEL);
|
||||
if (!ctx->vma_lut.ht)
|
||||
goto err_out;
|
||||
|
||||
INIT_WORK(&ctx->vma_lut.resize, resize_vma_ht);
|
||||
|
||||
/* Default context will never have a file_priv */
|
||||
ret = DEFAULT_CONTEXT_HANDLE;
|
||||
if (file_priv) {
|
||||
ret = idr_alloc(&file_priv->context_idr, ctx,
|
||||
DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
|
||||
if (ret < 0)
|
||||
goto err_out;
|
||||
goto err_lut;
|
||||
}
|
||||
ctx->user_handle = ret;
|
||||
|
||||
@ -248,6 +327,8 @@ __create_hw_context(struct drm_i915_private *dev_priv,
|
||||
err_pid:
|
||||
put_pid(ctx->pid);
|
||||
idr_remove(&file_priv->context_idr, ctx->user_handle);
|
||||
err_lut:
|
||||
kvfree(ctx->vma_lut.ht);
|
||||
err_out:
|
||||
context_close(ctx);
|
||||
return ERR_PTR(ret);
|
||||
@ -1034,9 +1115,6 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
|
||||
if (args->flags || args->pad)
|
||||
return -EINVAL;
|
||||
|
||||
if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
|
||||
return -EPERM;
|
||||
|
||||
ret = i915_mutex_lock_interruptible(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -143,6 +143,32 @@ struct i915_gem_context {
|
||||
/** ggtt_offset_bias: placement restriction for context objects */
|
||||
u32 ggtt_offset_bias;
|
||||
|
||||
struct i915_gem_context_vma_lut {
|
||||
/** ht_size: last request size to allocate the hashtable for. */
|
||||
unsigned int ht_size;
|
||||
#define I915_CTX_RESIZE_IN_PROGRESS BIT(0)
|
||||
/** ht_bits: real log2(size) of hashtable. */
|
||||
unsigned int ht_bits;
|
||||
/** ht_count: current number of entries inside the hashtable */
|
||||
unsigned int ht_count;
|
||||
|
||||
/** ht: the array of buckets comprising the simple hashtable */
|
||||
struct hlist_head *ht;
|
||||
|
||||
/**
|
||||
* resize: After an execbuf completes, we check the load factor
|
||||
* of the hashtable. If the hashtable is too full, or too empty,
|
||||
* we schedule a task to resize the hashtable. During the
|
||||
* resize, the entries are moved between different buckets and
|
||||
* so we cannot simultaneously read the hashtable as it is
|
||||
* being resized (unlike rhashtable). Therefore we treat the
|
||||
* active work as a strong barrier, pausing a subsequent
|
||||
* execbuf to wait for the resize worker to complete, if
|
||||
* required.
|
||||
*/
|
||||
struct work_struct resize;
|
||||
} vma_lut;
|
||||
|
||||
/** engine: per-engine logical HW state */
|
||||
struct intel_context {
|
||||
struct i915_vma *state;
|
||||
|
@ -50,6 +50,29 @@ static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
return true;
}

static int ggtt_flush(struct drm_i915_private *i915)
{
int err;

/* Not everything in the GGTT is tracked via vma (otherwise we
* could evict as required with minimal stalling) so we are forced
* to idle the GPU and explicitly retire outstanding requests in
* the hopes that we can then remove contexts and the like only
* bound by their active reference.
*/
err = i915_gem_switch_to_kernel_context(i915);
if (err)
return err;

err = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
if (err)
return err;

return 0;
}

static bool
mark_free(struct drm_mm_scan *scan,
struct i915_vma *vma,
@ -59,13 +82,10 @@ mark_free(struct drm_mm_scan *scan,
if (i915_vma_is_pinned(vma))
return false;

if (WARN_ON(!list_empty(&vma->exec_list)))
return false;

if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
return false;

list_add(&vma->exec_list, unwind);
list_add(&vma->evict_link, unwind);
return drm_mm_scan_add_block(scan, &vma->node);
}
@ -157,11 +177,9 @@ search_again:
} while (*++phase);

/* Nothing found, clean up and bail out! */
list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
ret = drm_mm_scan_remove_block(&scan, &vma->node);
BUG_ON(ret);

INIT_LIST_HEAD(&vma->exec_list);
}

/* Can we unpin some objects such as idle hw contents,
@ -180,19 +198,7 @@ search_again:
return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
}

/* Not everything in the GGTT is tracked via vma (otherwise we
* could evict as required with minimal stalling) so we are forced
* to idle the GPU and explicitly retire outstanding requests in
* the hopes that we can then remove contexts and the like only
* bound by their active reference.
*/
ret = i915_gem_switch_to_kernel_context(dev_priv);
if (ret)
return ret;

ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
ret = ggtt_flush(dev_priv);
if (ret)
return ret;
@ -205,21 +211,16 @@ found:
* calling unbind (which may remove the active reference
* of any of our objects, thus corrupting the list).
*/
list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
if (drm_mm_scan_remove_block(&scan, &vma->node))
__i915_vma_pin(vma);
else
list_del_init(&vma->exec_list);
list_del(&vma->evict_link);
}

/* Unbinding will emit any required flushes */
ret = 0;
while (!list_empty(&eviction_list)) {
vma = list_first_entry(&eviction_list,
struct i915_vma,
exec_list);

list_del_init(&vma->exec_list);
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
if (ret == 0)
ret = i915_vma_unbind(vma);
@ -315,7 +316,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
}

/* Overlap of objects in the same batch? */
if (i915_vma_is_pinned(vma) || !list_empty(&vma->exec_list)) {
if (i915_vma_is_pinned(vma)) {
ret = -ENOSPC;
if (vma->exec_entry &&
vma->exec_entry->flags & EXEC_OBJECT_PINNED)
@ -332,11 +333,10 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* reference) another in our eviction list.
*/
__i915_vma_pin(vma);
list_add(&vma->exec_list, &eviction_list);
list_add(&vma->evict_link, &eviction_list);
}

list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
list_del_init(&vma->exec_list);
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
if (ret == 0)
ret = i915_vma_unbind(vma);
@ -348,10 +348,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
/**
* i915_gem_evict_vm - Evict all idle vmas from a vm
* @vm: Address space to cleanse
* @do_idle: Boolean directing whether to idle first.
*
* This function evicts all idles vmas from a vm. If all unpinned vmas should be
* evicted the @do_idle needs to be set to true.
* This function evicts all vmas from a vm.
*
* This is used by the execbuf code as a last-ditch effort to defragment the
* address space.
@ -359,37 +357,50 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* To clarify: This is for freeing up virtual address space, not for freeing
* memory in e.g. the shrinker.
*/
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
int i915_gem_evict_vm(struct i915_address_space *vm)
{
struct list_head *phases[] = {
&vm->inactive_list,
&vm->active_list,
NULL
}, **phase;
struct list_head eviction_list;
struct i915_vma *vma, *next;
int ret;

lockdep_assert_held(&vm->i915->drm.struct_mutex);
trace_i915_gem_evict_vm(vm);

if (do_idle) {
struct drm_i915_private *dev_priv = vm->i915;

if (i915_is_ggtt(vm)) {
ret = i915_gem_switch_to_kernel_context(dev_priv);
if (ret)
return ret;
}

ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
/* Switch back to the default context in order to unpin
* the existing context objects. However, such objects only
* pin themselves inside the global GTT and performing the
* switch otherwise is ineffective.
*/
if (i915_is_ggtt(vm)) {
ret = ggtt_flush(vm->i915);
if (ret)
return ret;

WARN_ON(!list_empty(&vm->active_list));
}

list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
if (!i915_vma_is_pinned(vma))
WARN_ON(i915_vma_unbind(vma));
INIT_LIST_HEAD(&eviction_list);
phase = phases;
do {
list_for_each_entry(vma, *phase, vm_link) {
if (i915_vma_is_pinned(vma))
continue;

return 0;
__i915_vma_pin(vma);
list_add(&vma->evict_link, &eviction_list);
}
} while (*++phase);

ret = 0;
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
if (ret == 0)
ret = i915_vma_unbind(vma);
}
return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
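The eviction hunks above converge on one idiom: collect candidates while holding a pin on each vma, so that unbinding one cannot drop the active reference of (and free) another still on the local list, then unpin and unbind in a second pass. The condensed sketch below restates that pattern from the new i915_gem_evict_vm() body; it is a paraphrase for clarity, not additional driver code.

/* Condensed restatement of the collect-then-unbind idiom shown above. */
static int evict_all_vmas(struct i915_address_space *vm)
{
	struct list_head *phases[] = { &vm->inactive_list, &vm->active_list, NULL };
	struct list_head **phase = phases;
	struct i915_vma *vma, *next;
	LIST_HEAD(eviction_list);
	int ret = 0;

	do {	/* pass 1: pin candidates so the lists stay stable */
		list_for_each_entry(vma, *phase, vm_link) {
			if (i915_vma_is_pinned(vma))
				continue;
			__i915_vma_pin(vma);
			list_add(&vma->evict_link, &eviction_list);
		}
	} while (*++phase);

	/* pass 2: drop our pin, then unbind (which may free the vma) */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}
	return ret;
}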
File diff suppressed because it is too large
@ -1884,7 +1884,7 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
* called on driver load and after a GPU reset, so you can place
* workarounds here even if they get overwritten by GPU reset.
*/
/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk */
/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl */
if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
else if (IS_CHERRYVIEW(dev_priv))
@ -3095,13 +3095,17 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)

void i915_ggtt_enable_guc(struct drm_i915_private *i915)
{
GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);

i915->ggtt.invalidate = guc_ggtt_invalidate;
}

void i915_ggtt_disable_guc(struct drm_i915_private *i915)
{
if (i915->ggtt.invalidate == guc_ggtt_invalidate)
i915->ggtt.invalidate = gen6_ggtt_invalidate;
/* We should only be called after i915_ggtt_enable_guc() */
GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);

i915->ggtt.invalidate = gen6_ggtt_invalidate;
}

void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
@ -3398,6 +3402,9 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm,
if (err != -ENOSPC)
return err;

if (flags & PIN_NOEVICT)
return -ENOSPC;

err = i915_gem_evict_for_node(vm, node, flags);
if (err == 0)
err = drm_mm_reserve_node(&vm->mm, node);
@ -3512,6 +3519,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
if (err != -ENOSPC)
return err;

if (flags & PIN_NOEVICT)
return -ENOSPC;

/* No free space, pick a slot at random.
*
* There is a pathological case here using a GTT shared between
@ -255,6 +255,7 @@ struct i915_address_space {
struct drm_i915_file_private *file;
struct list_head global_link;
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
u64 reserved; /* size addr space reserved */

bool closed;
@ -588,6 +589,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
#define PIN_MAPPABLE BIT(1)
#define PIN_ZONE_4G BIT(2)
#define PIN_NONFAULT BIT(3)
#define PIN_NOEVICT BIT(4)

#define PIN_MBZ BIT(5) /* I915_VMA_PIN_OVERFLOW */
#define PIN_GLOBAL BIT(6) /* I915_VMA_GLOBAL_BIND */
@ -188,9 +188,11 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_object_internal_ops);

obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
obj->cache_coherent = i915_gem_object_is_coherent(obj);
obj->cache_dirty = !obj->cache_coherent;

return obj;
}
@ -68,9 +68,25 @@ struct drm_i915_gem_object {

const struct drm_i915_gem_object_ops *ops;

/** List of VMAs backed by this object */
/**
* @vma_list: List of VMAs backed by this object
*
* The VMA on this list are ordered by type, all GGTT vma are placed
* at the head and all ppGTT vma are placed at the tail. The different
* types of GGTT vma are unordered between themselves, use the
* @vma_tree (which has a defined order between all VMA) to find an
* exact match.
*/
struct list_head vma_list;
/**
* @vma_tree: Ordered tree of VMAs backed by this object
*
* All VMA created for this object are placed in the @vma_tree for
* fast retrieval via a binary search in i915_vma_instance().
* They are also added to @vma_list for easy iteration.
*/
struct rb_root vma_tree;
struct i915_vma *vma_hashed;

/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
@ -85,9 +101,6 @@ struct drm_i915_gem_object {
*/
struct list_head userfault_link;

/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;

struct list_head batch_pool_link;
I915_SELFTEST_DECLARE(struct list_head st_link);
@ -106,6 +119,7 @@ struct drm_i915_gem_object {
unsigned long gt_ro:1;
unsigned int cache_level:3;
unsigned int cache_dirty:1;
unsigned int cache_coherent:1;

atomic_t frontbuffer_bits;
unsigned int frontbuffer_ggtt_origin; /* write once */
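The new kerneldoc above explains that @vma_tree keeps every VMA of an object in a defined order so i915_vma_instance() can find an exact match by binary search, while @vma_list is only ordered by type. As a reminder of what such a lookup looks like, here is a generic rbtree walk; the example_vma structure and its view_key comparator are invented for illustration and are not the driver's actual types.

/* Generic rbtree lookup sketch; the key and types are illustrative only. */
#include <linux/rbtree.h>

struct example_vma {
	struct rb_node obj_node;	/* linked into the object's vma_tree */
	unsigned long view_key;		/* stands in for the real (vm, view) compare */
};

static struct example_vma *vma_tree_lookup(struct rb_root *root, unsigned long key)
{
	struct rb_node *rb = root->rb_node;

	while (rb) {
		struct example_vma *vma = rb_entry(rb, struct example_vma, obj_node);

		if (key < vma->view_key)
			rb = rb->rb_left;
		else if (key > vma->view_key)
			rb = rb->rb_right;
		else
			return vma;	/* exact match */
	}

	return NULL;
}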
@ -62,7 +62,7 @@ static bool i915_fence_enable_signaling(struct dma_fence *fence)
return false;

intel_engine_enable_signaling(to_request(fence), true);
return true;
return !i915_fence_signaled(fence);
}

static signed long i915_fence_wait(struct dma_fence *fence,
@ -683,7 +683,6 @@ static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
struct drm_i915_gem_request *from)
{
u32 seqno;
int ret;

GEM_BUG_ON(to == from);
@ -707,19 +706,15 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
return ret < 0 ? ret : 0;
}

seqno = i915_gem_request_global_seqno(from);
if (!seqno)
goto await_dma_fence;
if (to->engine->semaphore.sync_to) {
u32 seqno;

if (!to->engine->semaphore.sync_to) {
if (!__i915_gem_request_started(from, seqno))
goto await_dma_fence;

if (!__i915_spin_request(from, seqno, TASK_INTERRUPTIBLE, 2))
goto await_dma_fence;
} else {
GEM_BUG_ON(!from->engine->semaphore.signal);

seqno = i915_gem_request_global_seqno(from);
if (!seqno)
goto await_dma_fence;

if (seqno <= to->timeline->global_sync[from->engine->id])
return 0;

@ -729,10 +724,9 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
return ret;

to->timeline->global_sync[from->engine->id] = seqno;
return 0;
}

return 0;

await_dma_fence:
ret = i915_sw_fence_await_dma_fence(&to->submit,
&from->fence, 0,
@ -38,16 +38,21 @@
static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
{
switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
case MUTEX_TRYLOCK_FAILED:
return false;

case MUTEX_TRYLOCK_SUCCESS:
*unlock = true;
return true;

case MUTEX_TRYLOCK_RECURSIVE:
*unlock = false;
return true;

case MUTEX_TRYLOCK_FAILED:
do {
cpu_relax();
if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
case MUTEX_TRYLOCK_SUCCESS:
*unlock = true;
return true;
}
} while (!need_resched());

return false;
}

BUG();
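The reworked shrinker_lock() above no longer gives up as soon as struct_mutex is contended: on MUTEX_TRYLOCK_FAILED it spins on mutex_trylock(), relaxing the CPU between attempts, until either the lock is taken or the task ought to reschedule. Stripped of the driver types, the bounded trylock-spin looks like the sketch below (a standalone illustration, not the driver function).

/* Sketch of a bounded trylock spin; standalone illustration only. */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <asm/processor.h>

static bool try_lock_with_spin(struct mutex *lock)
{
	do {
		if (mutex_trylock(lock))
			return true;	/* caller must mutex_unlock() later */
		cpu_relax();		/* ease pressure on the lock holder */
	} while (!need_resched());	/* stop once we ought to yield */

	return false;
}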
@ -332,6 +337,15 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
sc->nr_to_scan - freed,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
if (freed < sc->nr_to_scan && current_is_kswapd()) {
intel_runtime_pm_get(dev_priv);
freed += i915_gem_shrink(dev_priv,
sc->nr_to_scan - freed,
I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
intel_runtime_pm_put(dev_priv);
}

shrinker_unlock(dev_priv, unlock);
@ -590,6 +590,7 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
obj->stolen = stolen;
obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
obj->cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
obj->cache_coherent = true; /* assumptions! more like cache_oblivious */

if (i915_gem_object_pin_pages(obj))
goto cleanup;
@ -378,7 +378,7 @@ __i915_mm_struct_free(struct kref *kref)
mutex_unlock(&mm->i915->mm_lock);

INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
schedule_work(&mm->work);
queue_work(mm->i915->mm.userptr_wq, &mm->work);
}

static void
@ -598,7 +598,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
get_task_struct(work->task);

INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
schedule_work(&work->work);
queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);

return ERR_PTR(-EAGAIN);
}
@ -802,9 +802,11 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file

drm_gem_private_object_init(dev, &obj->base, args->user_size);
i915_gem_object_init(obj, &i915_gem_userptr_ops);
obj->cache_level = I915_CACHE_LLC;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->cache_level = I915_CACHE_LLC;
obj->cache_coherent = i915_gem_object_is_coherent(obj);
obj->cache_dirty = !obj->cache_coherent;

obj->userptr.ptr = args->user_ptr;
obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
@ -828,8 +830,20 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
return 0;
}

void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
mutex_init(&dev_priv->mm_lock);
hash_init(dev_priv->mm_structs);

dev_priv->mm.userptr_wq =
alloc_workqueue("i915-userptr-acquire", WQ_HIGHPRI, 0);
if (!dev_priv->mm.userptr_wq)
return -ENOMEM;

return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
destroy_workqueue(dev_priv->mm.userptr_wq);
}
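These userptr hunks replace schedule_work() on the system workqueue with a dedicated WQ_HIGHPRI queue that i915_gem_init_userptr() now allocates and i915_gem_cleanup_userptr() destroys. The general lifecycle of such a private workqueue is sketched below with placeholder names (acquire_wq, acquire_worker); only the workqueue API calls themselves are taken as given.

/* Dedicated-workqueue lifecycle sketch; all names here are placeholders. */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *acquire_wq;

struct acquire_work {
	struct work_struct work;
	/* ... request payload ... */
};

static void acquire_worker(struct work_struct *work)
{
	struct acquire_work *aw = container_of(work, struct acquire_work, work);

	/* perform the slow acquisition here, then release the request */
	kfree(aw);
}

static int acquire_init(void)
{
	/* WQ_HIGHPRI: service these items ahead of normal work */
	acquire_wq = alloc_workqueue("example-acquire", WQ_HIGHPRI, 0);
	return acquire_wq ? 0 : -ENOMEM;
}

static void acquire_submit(struct acquire_work *aw)
{
	INIT_WORK(&aw->work, acquire_worker);
	queue_work(acquire_wq, &aw->work);	/* instead of schedule_work() */
}

static void acquire_fini(void)
{
	destroy_workqueue(acquire_wq);		/* drains pending work first */
}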
@ -105,7 +105,7 @@ static int __reserve_doorbell(struct i915_guc_client *client)
end += offset;
}

id = find_next_zero_bit(client->guc->doorbell_bitmap, offset, end);
id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset);
if (id == end)
return -ENOSPC;
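The doorbell fix above corrects swapped arguments: find_next_zero_bit() takes the bitmap, then the size that bounds the search, then the offset to start from, so searching from offset up to end must be written find_next_zero_bit(bitmap, end, offset). A small, self-contained usage sketch follows; the bitmap and bounds are invented for illustration.

/* Usage sketch of find_next_zero_bit(addr, size, offset); example data only. */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define NR_SLOTS 256

static DECLARE_BITMAP(slot_bitmap, NR_SLOTS);

/* Reserve a free slot in [offset, end), or return -ENOSPC. */
static int reserve_slot(unsigned int offset, unsigned int end)
{
	unsigned long id;

	/* second argument bounds the search, third is where it starts */
	id = find_next_zero_bit(slot_bitmap, end, offset);
	if (id >= end)
		return -ENOSPC;

	__set_bit(id, slot_bitmap);
	return id;
}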
@ -2548,7 +2548,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;

if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
HAS_PCH_CNP(dev_priv))
spt_irq_handler(dev_priv, iir);
else
cpt_irq_handler(dev_priv, iir);
@ -4289,7 +4290,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->disable_vblank = gen8_disable_vblank;
if (IS_GEN9_LP(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
HAS_PCH_CNP(dev_priv))
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
5376
drivers/gpu/drm/i915/i915_oa_bdw.c
Normal file
File diff suppressed because it is too large
40
drivers/gpu/drm/i915/i915_oa_bdw.h
Normal file
@ -0,0 +1,40 @@
/*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/

#ifndef __I915_OA_BDW_H__
#define __I915_OA_BDW_H__

extern int i915_oa_n_builtin_metric_sets_bdw;

extern int i915_oa_select_metric_set_bdw(struct drm_i915_private *dev_priv);

extern int i915_perf_register_sysfs_bdw(struct drm_i915_private *dev_priv);

extern void i915_perf_unregister_sysfs_bdw(struct drm_i915_private *dev_priv);

#endif
2690
drivers/gpu/drm/i915/i915_oa_bxt.c
Normal file
File diff suppressed because it is too large
40
drivers/gpu/drm/i915/i915_oa_bxt.h
Normal file
@ -0,0 +1,40 @@
/*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/

#ifndef __I915_OA_BXT_H__
#define __I915_OA_BXT_H__

extern int i915_oa_n_builtin_metric_sets_bxt;

extern int i915_oa_select_metric_set_bxt(struct drm_i915_private *dev_priv);

extern int i915_perf_register_sysfs_bxt(struct drm_i915_private *dev_priv);

extern void i915_perf_unregister_sysfs_bxt(struct drm_i915_private *dev_priv);

#endif
2873
drivers/gpu/drm/i915/i915_oa_chv.c
Normal file
File diff suppressed because it is too large
40
drivers/gpu/drm/i915/i915_oa_chv.h
Normal file
@ -0,0 +1,40 @@
/*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/

#ifndef __I915_OA_CHV_H__
#define __I915_OA_CHV_H__

extern int i915_oa_n_builtin_metric_sets_chv;

extern int i915_oa_select_metric_set_chv(struct drm_i915_private *dev_priv);

extern int i915_perf_register_sysfs_chv(struct drm_i915_private *dev_priv);

extern void i915_perf_unregister_sysfs_chv(struct drm_i915_private *dev_priv);

#endif
2602
drivers/gpu/drm/i915/i915_oa_glk.c
Normal file
File diff suppressed because it is too large
40
drivers/gpu/drm/i915/i915_oa_glk.h
Normal file
@ -0,0 +1,40 @@
/*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/

#ifndef __I915_OA_GLK_H__
#define __I915_OA_GLK_H__

extern int i915_oa_n_builtin_metric_sets_glk;

extern int i915_oa_select_metric_set_glk(struct drm_i915_private *dev_priv);

extern int i915_perf_register_sysfs_glk(struct drm_i915_private *dev_priv);

extern void i915_perf_unregister_sysfs_glk(struct drm_i915_private *dev_priv);

#endif
Some files were not shown because too many files have changed in this diff