Mirror of https://github.com/torvalds/linux.git (synced 2024-11-22 20:22:09 +00:00)
Merge branch 'for-linus' into for-next

Merging the HD-audio fixes back to the base devel branch for further work on it.

Commit: 4aa01c408b
27  Documentation/CodeOfConflict  (new file)
@@ -0,0 +1,27 @@
Code of Conflict
----------------

The Linux kernel development effort is a very personal process compared
to "traditional" ways of developing software. Your code and ideas
behind it will be carefully reviewed, often resulting in critique and
criticism. The review will almost always require improvements to the
code before it can be included in the kernel. Know that this happens
because everyone involved wants to see the best possible solution for
the overall success of Linux. This development process has been proven
to create the most robust operating system kernel ever, and we do not
want to do anything to cause the quality of submission and eventual
result to ever decrease.

If however, anyone feels personally abused, threatened, or otherwise
uncomfortable due to this process, that is not acceptable. If so,
please contact the Linux Foundation's Technical Advisory Board at
<tab@lists.linux-foundation.org>, or the individual members, and they
will work to resolve the issue to the best of their ability. For more
information on who is on the Technical Advisory Board and what their
role is, please see:
	http://www.linuxfoundation.org/programs/advisory-councils/tab

As a reviewer of code, please strive to keep things civil and focused on
the technical issues involved. We are all humans, and frustrations can
be high on both sides of the process. Try to keep in mind the immortal
words of Bill and Ted, "Be excellent to each other."
@@ -7,6 +7,7 @@ Required properties:
- "fsl,vf610-i2c" for I2C compatible with the one integrated on Vybrid vf610 SoC
- reg : Should contain I2C/HS-I2C registers location and length
- interrupts : Should contain I2C/HS-I2C interrupt
- clocks : Should contain the I2C/HS-I2C clock specifier

Optional properties:
- clock-frequency : Contains desired I2C/HS-I2C bus clock frequency in Hz.
@@ -27,6 +27,8 @@ property is used.
- amd,serdes-cdr-rate: CDR rate speed selection
- amd,serdes-pq-skew: PQ (data sampling) skew
- amd,serdes-tx-amp: TX amplitude boost
- amd,serdes-dfe-tap-config: DFE taps available to run
- amd,serdes-dfe-tap-enable: DFE taps to enable

Example:
	xgbe_phy@e1240800 {
@@ -41,4 +43,6 @@ Example:
		amd,serdes-cdr-rate = <2>, <2>, <7>;
		amd,serdes-pq-skew = <10>, <10>, <30>;
		amd,serdes-tx-amp = <15>, <15>, <10>;
		amd,serdes-dfe-tap-config = <3>, <3>, <1>;
		amd,serdes-dfe-tap-enable = <0>, <0>, <127>;
	};
@@ -21,6 +21,18 @@ Optional properties:
- reg-io-width : the size (in bytes) of the IO accesses that should be
  performed on the device. If this property is not present then single byte
  accesses are used.
- dcd-override : Override the DCD modem status signal. This signal will always
  be reported as active instead of being obtained from the modem status
  register. Define this if your serial port does not use this pin.
- dsr-override : Override the DSR modem status signal. This signal will always
  be reported as active instead of being obtained from the modem status
  register. Define this if your serial port does not use this pin.
- cts-override : Override the CTS modem status signal. This signal will always
  be reported as active instead of being obtained from the modem status
  register. Define this if your serial port does not use this pin.
- ri-override : Override the RI modem status signal. This signal will always be
  reported as inactive instead of being obtained from the modem status register.
  Define this if your serial port does not use this pin.

Example:

@@ -31,6 +43,10 @@ Example:
		interrupts = <10>;
		reg-shift = <2>;
		reg-io-width = <4>;
		dcd-override;
		dsr-override;
		cts-override;
		ri-override;
	};

Example with one clock:
@@ -40,8 +40,10 @@ but also to IPIs and to some other special-purpose interrupts.

The IRQF_NO_SUSPEND flag is used to indicate that to the IRQ subsystem when
requesting a special-purpose interrupt. It causes suspend_device_irqs() to
leave the corresponding IRQ enabled so as to allow the interrupt to work all
the time as expected.
leave the corresponding IRQ enabled so as to allow the interrupt to work as
expected during the suspend-resume cycle, but does not guarantee that the
interrupt will wake the system from a suspended state -- for such cases it is
necessary to use enable_irq_wake().

Note that the IRQF_NO_SUSPEND flag affects the entire IRQ and not just one
user of it. Thus, if the IRQ is shared, all of the interrupt handlers installed
@@ -110,8 +112,9 @@ any special interrupt handling logic for it to work.
IRQF_NO_SUSPEND and enable_irq_wake()
-------------------------------------

There are no valid reasons to use both enable_irq_wake() and the IRQF_NO_SUSPEND
flag on the same IRQ.
There are very few valid reasons to use both enable_irq_wake() and the
IRQF_NO_SUSPEND flag on the same IRQ, and it is never valid to use both for the
same device.

First of all, if the IRQ is not shared, the rules for handling IRQF_NO_SUSPEND
interrupts (interrupt handlers are invoked after suspend_device_irqs()) are
@@ -120,4 +123,13 @@ handlers are not invoked after suspend_device_irqs()).

Second, both enable_irq_wake() and IRQF_NO_SUSPEND apply to entire IRQs and not
to individual interrupt handlers, so sharing an IRQ between a system wakeup
interrupt source and an IRQF_NO_SUSPEND interrupt source does not make sense.
interrupt source and an IRQF_NO_SUSPEND interrupt source does not generally
make sense.

In rare cases an IRQ can be shared between a wakeup device driver and an
IRQF_NO_SUSPEND user. In order for this to be safe, the wakeup device driver
must be able to discern spurious IRQs from genuine wakeup events (signalling
the latter to the core with pm_system_wakeup()), must use enable_irq_wake() to
ensure that the IRQ will function as a wakeup source, and must request the IRQ
with IRQF_COND_SUSPEND to tell the core that it meets these requirements. If
these requirements are not met, it is not valid to use IRQF_COND_SUSPEND.
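To make the shared-IRQ case above concrete, here is a rough sketch only (it is not part of this merge; the device, register offsets and names are invented for illustration) of a wakeup driver that follows those rules: it discerns spurious interrupts from genuine wakeup events, reports the latter with pm_system_wakeup(), arms the line with enable_irq_wake(), and requests the IRQ with IRQF_SHARED | IRQF_COND_SUSPEND.

/* Hypothetical wakeup driver sharing its IRQ line with an IRQF_NO_SUSPEND user. */
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/suspend.h>
#include <linux/bitops.h>

#define FOO_IRQ_STATUS	0x04	/* made-up status register offset */
#define FOO_IRQ_WAKE	BIT(0)	/* made-up "wakeup event pending" bit */

struct foo_dev {
	void __iomem *base;
	int irq;
};

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct foo_dev *foo = data;
	u32 status = readl(foo->base + FOO_IRQ_STATUS);

	/* The line is shared: only claim interrupts that are really ours. */
	if (!(status & FOO_IRQ_WAKE))
		return IRQ_NONE;

	/* Genuine wakeup event: report it to the PM core, then acknowledge it. */
	pm_system_wakeup();
	writel(FOO_IRQ_WAKE, foo->base + FOO_IRQ_STATUS);

	return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_dev *foo)
{
	int ret;

	/*
	 * IRQF_COND_SUSPEND tells the core this handler copes with being
	 * called while an IRQF_NO_SUSPEND co-user keeps the line enabled.
	 */
	ret = request_irq(foo->irq, foo_irq_handler,
			  IRQF_SHARED | IRQF_COND_SUSPEND, "foo-wake", foo);
	if (ret)
		return ret;

	/* And arm the line as a system wakeup source. */
	return enable_irq_wake(foo->irq);
}

On teardown such a driver would call disable_irq_wake() and free_irq() in the usual way.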
10  MAINTAINERS

@@ -2065,7 +2065,7 @@ F:	include/net/bluetooth/
BONDING DRIVER
M:	Jay Vosburgh <j.vosburgh@gmail.com>
M:	Veaceslav Falico <vfalico@gmail.com>
M:	Andy Gospodarek <andy@greyhouse.net>
M:	Andy Gospodarek <gospo@cumulusnetworks.com>
L:	netdev@vger.kernel.org
W:	http://sourceforge.net/projects/bonding/
S:	Supported
@@ -8480,6 +8480,14 @@ S:	Supported
L:	netdev@vger.kernel.org
F:	drivers/net/ethernet/samsung/sxgbe/

SAMSUNG THERMAL DRIVER
M:	Lukasz Majewski <l.majewski@samsung.com>
L:	linux-pm@vger.kernel.org
L:	linux-samsung-soc@vger.kernel.org
S:	Supported
T:	https://github.com/lmajewski/linux-samsung-thermal.git
F:	drivers/thermal/samsung/

SAMSUNG USB2 PHY DRIVER
M:	Kamil Debski <k.debski@samsung.com>
L:	linux-kernel@vger.kernel.org
2  Makefile

@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 0
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Hurr durr I'ma sheep

# *DOCUMENTATION*
@@ -47,9 +47,6 @@ struct thread_struct {
/* Forward declaration, a strange C thing */
struct task_struct;

/* Return saved PC of a blocked thread */
unsigned long thread_saved_pc(struct task_struct *t);

#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1)

@@ -72,18 +69,21 @@ unsigned long thread_saved_pc(struct task_struct *t);
#define release_segments(mm)		do { } while (0)

#define KSTK_EIP(tsk)	(task_pt_regs(tsk)->ret)
#define KSTK_ESP(tsk)	(task_pt_regs(tsk)->sp)

/*
 * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
 * Look in process.c for details of kernel stack layout
 */
#define KSTK_ESP(tsk)	(tsk->thread.ksp)
#define TSK_K_ESP(tsk)		(tsk->thread.ksp)

#define KSTK_REG(tsk, off)	(*((unsigned int *)(KSTK_ESP(tsk) + \
#define TSK_K_REG(tsk, off)	(*((unsigned int *)(TSK_K_ESP(tsk) + \
					sizeof(struct callee_regs) + off)))

#define KSTK_BLINK(tsk)		KSTK_REG(tsk, 4)
#define KSTK_FP(tsk)		KSTK_REG(tsk, 0)
#define TSK_K_BLINK(tsk)	TSK_K_REG(tsk, 4)
#define TSK_K_FP(tsk)		TSK_K_REG(tsk, 0)

#define thread_saved_pc(tsk)	TSK_K_BLINK(tsk)

extern void start_thread(struct pt_regs * regs, unsigned long pc,
			 unsigned long usp);
37  arch/arc/include/asm/stacktrace.h  (new file)

@@ -0,0 +1,37 @@
/*
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H

#include <linux/sched.h>

/**
 * arc_unwind_core - Unwind the kernel mode stack for an execution context
 * @tsk:		NULL for current task, specific task otherwise
 * @regs:		pt_regs used to seed the unwinder {SP, FP, BLINK, PC}
 *			If NULL, use pt_regs of @tsk (if !NULL) otherwise
 *			use the current values of {SP, FP, BLINK, PC}
 * @consumer_fn:	Callback invoked for each frame unwound
 *			Returns 0 to continue unwinding, -1 to stop
 * @arg:		Arg to callback
 *
 * Returns the address of first function in stack
 *
 * Semantics:
 *  - synchronous unwinding (e.g. dump_stack): @tsk  NULL, @regs NULL
 *  - Asynchronous unwinding of sleeping task: @tsk !NULL, @regs NULL
 *  - Asynchronous unwinding of intr/excp etc: @tsk !NULL, @regs !NULL
 */
notrace noinline unsigned int arc_unwind_core(
	struct task_struct *tsk, struct pt_regs *regs,
	int (*consumer_fn) (unsigned int, void *),
	void *arg);

#endif /* __ASM_STACKTRACE_H */
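The kerneldoc above defines the callback contract for arc_unwind_core(). As an illustration only (the collector structure and helpers below are invented, not taken from this merge), a caller that records the return addresses of a sleeping task could be sketched like this:

/* Illustrative consumer that records up to max_entries unwound PCs. */
#include <asm/stacktrace.h>

struct pc_collector {
	unsigned int *entries;		/* buffer supplied by the caller */
	unsigned int nr, max_entries;
};

static int collect_pc(unsigned int addr, void *arg)
{
	struct pc_collector *c = arg;

	if (c->nr >= c->max_entries)
		return -1;		/* -1 tells the unwinder to stop */

	c->entries[c->nr++] = addr;
	return 0;			/* 0 continues unwinding */
}

static void dump_task_pcs(struct task_struct *tsk, unsigned int *buf,
			  unsigned int max_entries)
{
	struct pc_collector c = {
		.entries = buf,
		.max_entries = max_entries,
	};

	/* @regs == NULL: unwind a sleeping task from its saved kernel context */
	arc_unwind_core(tsk, NULL, collect_pc, &c);
}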
@@ -192,29 +192,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
	return 0;
}

/*
 * API: expected by schedular Code: If thread is sleeping where is that.
 * What is this good for? it will be always the scheduler or ret_from_fork.
 * So we hard code that anyways.
 */
unsigned long thread_saved_pc(struct task_struct *t)
{
	struct pt_regs *regs = task_pt_regs(t);
	unsigned long blink = 0;

	/*
	 * If the thread being queried for in not itself calling this, then it
	 * implies it is not executing, which in turn implies it is sleeping,
	 * which in turn implies it got switched OUT by the schedular.
	 * In that case, it's kernel mode blink can reliably retrieved as per
	 * the picture above (right above pt_regs).
	 */
	if (t != current && t->state != TASK_RUNNING)
		blink = *((unsigned int *)regs - 1);

	return blink;
}

int elf_check_arch(const struct elf32_hdr *x)
{
	unsigned int eflags;
@@ -43,6 +43,10 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
				   struct pt_regs *regs,
				   struct unwind_frame_info *frame_info)
{
	/*
	 * synchronous unwinding (e.g. dump_stack)
	 * - uses current values of SP and friends
	 */
	if (tsk == NULL && regs == NULL) {
		unsigned long fp, sp, blink, ret;
		frame_info->task = current;
@@ -61,12 +65,17 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
		frame_info->regs.r63 = ret;
		frame_info->call_frame = 0;
	} else if (regs == NULL) {
		/*
		 * Asynchronous unwinding of sleeping task
		 * - Gets SP etc from task's pt_regs (saved bottom of kernel
		 *   mode stack of task)
		 */

		frame_info->task = tsk;

		frame_info->regs.r27 = KSTK_FP(tsk);
		frame_info->regs.r28 = KSTK_ESP(tsk);
		frame_info->regs.r31 = KSTK_BLINK(tsk);
		frame_info->regs.r27 = TSK_K_FP(tsk);
		frame_info->regs.r28 = TSK_K_ESP(tsk);
		frame_info->regs.r31 = TSK_K_BLINK(tsk);
		frame_info->regs.r63 = (unsigned int)__switch_to;

		/* In the prologue of __switch_to, first FP is saved on stack
@@ -83,6 +92,10 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
		frame_info->call_frame = 0;

	} else {
		/*
		 * Asynchronous unwinding of intr/exception
		 * - Just uses the pt_regs passed
		 */
		frame_info->task = tsk;

		frame_info->regs.r27 = regs->fp;
@@ -95,7 +108,7 @@ static void seed_unwind_frame_info(struct task_struct *tsk,

#endif

static noinline unsigned int
notrace noinline unsigned int
arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
		int (*consumer_fn) (unsigned int, void *), void *arg)
{
@ -12,6 +12,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/perf_event.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/disasm.h>
|
||||
@ -253,6 +254,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
|
||||
}
|
||||
}
|
||||
|
||||
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
|
||||
return 0;
|
||||
|
||||
fault:
|
||||
|
@ -14,6 +14,7 @@
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/kdebug.h>
|
||||
#include <linux/perf_event.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/mmu.h>
|
||||
|
||||
@ -139,13 +140,20 @@ good_area:
|
||||
return;
|
||||
}
|
||||
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
|
||||
|
||||
if (likely(!(fault & VM_FAULT_ERROR))) {
|
||||
if (flags & FAULT_FLAG_ALLOW_RETRY) {
|
||||
/* To avoid updating stats twice for retry case */
|
||||
if (fault & VM_FAULT_MAJOR)
|
||||
if (fault & VM_FAULT_MAJOR) {
|
||||
tsk->maj_flt++;
|
||||
else
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
|
||||
regs, address);
|
||||
} else {
|
||||
tsk->min_flt++;
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
|
||||
regs, address);
|
||||
}
|
||||
|
||||
if (fault & VM_FAULT_RETRY) {
|
||||
flags &= ~FAULT_FLAG_ALLOW_RETRY;
|
||||
|
@ -207,7 +207,7 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
|
||||
|
||||
bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
|
||||
|
||||
VM_BUG_ON(size & PAGE_MASK);
|
||||
VM_BUG_ON(size & ~PAGE_MASK);
|
||||
|
||||
if (!need_flush && !icache_is_pipt())
|
||||
goto vipt_cache;
|
||||
|
@ -540,7 +540,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
||||
|
||||
vcpu->mode = OUTSIDE_GUEST_MODE;
|
||||
kvm_guest_exit();
|
||||
trace_kvm_exit(*vcpu_pc(vcpu));
|
||||
trace_kvm_exit(kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
|
||||
/*
|
||||
* We may have taken a host interrupt in HYP mode (ie
|
||||
* while executing the guest). This interrupt is still
|
||||
|
@ -25,18 +25,22 @@ TRACE_EVENT(kvm_entry,
|
||||
);
|
||||
|
||||
TRACE_EVENT(kvm_exit,
|
||||
TP_PROTO(unsigned long vcpu_pc),
|
||||
TP_ARGS(vcpu_pc),
|
||||
TP_PROTO(unsigned int exit_reason, unsigned long vcpu_pc),
|
||||
TP_ARGS(exit_reason, vcpu_pc),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field( unsigned int, exit_reason )
|
||||
__field( unsigned long, vcpu_pc )
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->exit_reason = exit_reason;
|
||||
__entry->vcpu_pc = vcpu_pc;
|
||||
),
|
||||
|
||||
TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
|
||||
TP_printk("HSR_EC: 0x%04x, PC: 0x%08lx",
|
||||
__entry->exit_reason,
|
||||
__entry->vcpu_pc)
|
||||
);
|
||||
|
||||
TRACE_EVENT(kvm_guest_fault,
|
||||
|
@ -20,6 +20,7 @@
|
||||
#include <linux/input.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/smc91x.h>
|
||||
|
||||
#include <mach/hardware.h>
|
||||
#include <asm/mach-types.h>
|
||||
@ -46,15 +47,20 @@ static struct resource smc91x_resources[] = {
|
||||
[1] = {
|
||||
.start = MSM_GPIO_TO_INT(49),
|
||||
.end = MSM_GPIO_TO_INT(49),
|
||||
.flags = IORESOURCE_IRQ,
|
||||
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
|
||||
},
|
||||
};
|
||||
|
||||
static struct smc91x_platdata smc91x_platdata = {
|
||||
.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
|
||||
};
|
||||
|
||||
static struct platform_device smc91x_device = {
|
||||
.name = "smc91x",
|
||||
.id = 0,
|
||||
.num_resources = ARRAY_SIZE(smc91x_resources),
|
||||
.resource = smc91x_resources,
|
||||
.dev.platform_data = &smc91x_platdata,
|
||||
};
|
||||
|
||||
static struct platform_device *devices[] __initdata = {
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include <linux/usb/msm_hsusb.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/clkdev.h>
|
||||
#include <linux/smc91x.h>
|
||||
|
||||
#include <asm/mach-types.h>
|
||||
#include <asm/mach/arch.h>
|
||||
@ -49,15 +50,20 @@ static struct resource smc91x_resources[] = {
|
||||
.flags = IORESOURCE_MEM,
|
||||
},
|
||||
[1] = {
|
||||
.flags = IORESOURCE_IRQ,
|
||||
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
|
||||
},
|
||||
};
|
||||
|
||||
static struct smc91x_platdata smc91x_platdata = {
|
||||
.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
|
||||
};
|
||||
|
||||
static struct platform_device smc91x_device = {
|
||||
.name = "smc91x",
|
||||
.id = 0,
|
||||
.num_resources = ARRAY_SIZE(smc91x_resources),
|
||||
.resource = smc91x_resources,
|
||||
.dev.platform_data = &smc91x_platdata,
|
||||
};
|
||||
|
||||
static int __init msm_init_smc91x(void)
|
||||
|
@ -81,11 +81,16 @@ static struct resource smc91x_resources[] = {
|
||||
}
|
||||
};
|
||||
|
||||
static struct smc91x_platdata smc91x_platdata = {
|
||||
.flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT,
|
||||
};
|
||||
|
||||
static struct platform_device smc91x_device = {
|
||||
.name = "smc91x",
|
||||
.id = 0,
|
||||
.num_resources = ARRAY_SIZE(smc91x_resources),
|
||||
.resource = smc91x_resources,
|
||||
.dev.platform_data = &smc91x_platdata,
|
||||
};
|
||||
|
||||
static void idp_backlight_power(int on)
|
||||
|
@ -24,6 +24,7 @@
|
||||
#include <linux/mtd/mtd.h>
|
||||
#include <linux/mtd/partitions.h>
|
||||
#include <linux/pwm_backlight.h>
|
||||
#include <linux/smc91x.h>
|
||||
|
||||
#include <asm/types.h>
|
||||
#include <asm/setup.h>
|
||||
@ -189,15 +190,20 @@ static struct resource smc91x_resources[] = {
|
||||
[1] = {
|
||||
.start = LPD270_ETHERNET_IRQ,
|
||||
.end = LPD270_ETHERNET_IRQ,
|
||||
.flags = IORESOURCE_IRQ,
|
||||
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
|
||||
},
|
||||
};
|
||||
|
||||
struct smc91x_platdata smc91x_platdata = {
|
||||
.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT;
|
||||
};
|
||||
|
||||
static struct platform_device smc91x_device = {
|
||||
.name = "smc91x",
|
||||
.id = 0,
|
||||
.num_resources = ARRAY_SIZE(smc91x_resources),
|
||||
.resource = smc91x_resources,
|
||||
.dev.platform_data = &smc91x_platdata,
|
||||
};
|
||||
|
||||
static struct resource lpd270_flash_resources[] = {
|
||||
|
@ -28,6 +28,7 @@
|
||||
#include <linux/platform_data/video-clcd-versatile.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/smsc911x.h>
|
||||
#include <linux/smc91x.h>
|
||||
#include <linux/ata_platform.h>
|
||||
#include <linux/amba/mmci.h>
|
||||
#include <linux/gfp.h>
|
||||
@ -94,6 +95,10 @@ static struct smsc911x_platform_config smsc911x_config = {
|
||||
.phy_interface = PHY_INTERFACE_MODE_MII,
|
||||
};
|
||||
|
||||
static struct smc91x_platdata smc91x_platdata = {
|
||||
.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
|
||||
};
|
||||
|
||||
static struct platform_device realview_eth_device = {
|
||||
.name = "smsc911x",
|
||||
.id = 0,
|
||||
@ -107,6 +112,8 @@ int realview_eth_register(const char *name, struct resource *res)
|
||||
realview_eth_device.resource = res;
|
||||
if (strcmp(realview_eth_device.name, "smsc911x") == 0)
|
||||
realview_eth_device.dev.platform_data = &smsc911x_config;
|
||||
else
|
||||
realview_eth_device.dev.platform_data = &smc91x_platdata;
|
||||
|
||||
return platform_device_register(&realview_eth_device);
|
||||
}
|
||||
|
@ -234,7 +234,7 @@ static struct resource realview_eb_eth_resources[] = {
|
||||
[1] = {
|
||||
.start = IRQ_EB_ETH,
|
||||
.end = IRQ_EB_ETH,
|
||||
.flags = IORESOURCE_IRQ,
|
||||
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -12,6 +12,7 @@
|
||||
#include <linux/pm.h>
|
||||
#include <linux/serial_core.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/smc91x.h>
|
||||
|
||||
#include <asm/mach-types.h>
|
||||
#include <asm/mach/map.h>
|
||||
@ -258,12 +259,17 @@ static int neponset_probe(struct platform_device *dev)
|
||||
0x02000000, "smc91x-attrib"),
|
||||
{ .flags = IORESOURCE_IRQ },
|
||||
};
|
||||
struct smc91x_platdata smc91x_platdata = {
|
||||
.flags = SMC91X_USE_8BIT | SMC91X_IO_SHIFT_2 | SMC91X_NOWAIT,
|
||||
};
|
||||
struct platform_device_info smc91x_devinfo = {
|
||||
.parent = &dev->dev,
|
||||
.name = "smc91x",
|
||||
.id = 0,
|
||||
.res = smc91x_resources,
|
||||
.num_res = ARRAY_SIZE(smc91x_resources),
|
||||
.data = &smc91c_platdata,
|
||||
.size_data = sizeof(smc91c_platdata),
|
||||
};
|
||||
int ret, irq;
|
||||
|
||||
|
@ -11,6 +11,7 @@
|
||||
#include <linux/irq.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/mtd/partitions.h>
|
||||
#include <linux/smc91x.h>
|
||||
|
||||
#include <mach/hardware.h>
|
||||
#include <asm/setup.h>
|
||||
@ -43,12 +44,18 @@ static struct resource smc91x_resources[] = {
|
||||
#endif
|
||||
};
|
||||
|
||||
static struct smc91x_platdata smc91x_platdata = {
|
||||
.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
|
||||
};
|
||||
|
||||
static struct platform_device smc91x_device = {
|
||||
.name = "smc91x",
|
||||
.id = 0,
|
||||
.num_resources = ARRAY_SIZE(smc91x_resources),
|
||||
.resource = smc91x_resources,
|
||||
.dev = {
|
||||
.platform_data = &smc91c_platdata,
|
||||
},
|
||||
};
|
||||
|
||||
static struct platform_device *devices[] __initdata = {
|
||||
|
@ -51,7 +51,10 @@ static int change_memory_common(unsigned long addr, int numpages,
|
||||
WARN_ON_ONCE(1);
|
||||
}
|
||||
|
||||
if (!is_module_address(start) || !is_module_address(end - 1))
|
||||
if (start < MODULES_VADDR || start >= MODULES_END)
|
||||
return -EINVAL;
|
||||
|
||||
if (end < MODULES_VADDR || end >= MODULES_END)
|
||||
return -EINVAL;
|
||||
|
||||
data.set_mask = set_mask;
|
||||
|
@ -216,6 +216,7 @@ int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
|
||||
if (idx > current_cpu_data.tlbsize) {
|
||||
kvm_err("%s: Invalid Index: %d\n", __func__, idx);
|
||||
kvm_mips_dump_host_tlbs();
|
||||
local_irq_restore(flags);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -24,18 +24,18 @@ TRACE_EVENT(kvm_exit,
|
||||
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
|
||||
TP_ARGS(vcpu, reason),
|
||||
TP_STRUCT__entry(
|
||||
__field(struct kvm_vcpu *, vcpu)
|
||||
__field(unsigned long, pc)
|
||||
__field(unsigned int, reason)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->vcpu = vcpu;
|
||||
__entry->pc = vcpu->arch.pc;
|
||||
__entry->reason = reason;
|
||||
),
|
||||
|
||||
TP_printk("[%s]PC: 0x%08lx",
|
||||
kvm_mips_exit_types_str[__entry->reason],
|
||||
__entry->vcpu->arch.pc)
|
||||
__entry->pc)
|
||||
);
|
||||
|
||||
#endif /* _TRACE_KVM_H */
|
||||
|
@ -113,6 +113,7 @@ extern void iommu_register_group(struct iommu_table *tbl,
|
||||
int pci_domain_number, unsigned long pe_num);
|
||||
extern int iommu_add_device(struct device *dev);
|
||||
extern void iommu_del_device(struct device *dev);
|
||||
extern int __init tce_iommu_bus_notifier_init(void);
|
||||
#else
|
||||
static inline void iommu_register_group(struct iommu_table *tbl,
|
||||
int pci_domain_number,
|
||||
@ -128,6 +129,11 @@ static inline int iommu_add_device(struct device *dev)
|
||||
static inline void iommu_del_device(struct device *dev)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int __init tce_iommu_bus_notifier_init(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* !CONFIG_IOMMU_API */
|
||||
|
||||
static inline void set_iommu_table_base_and_group(struct device *dev,
|
||||
|
9  arch/powerpc/include/asm/irq_work.h  (new file)
@ -0,0 +1,9 @@
|
||||
#ifndef _ASM_POWERPC_IRQ_WORK_H
|
||||
#define _ASM_POWERPC_IRQ_WORK_H
|
||||
|
||||
static inline bool arch_irq_work_has_interrupt(void)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif /* _ASM_POWERPC_IRQ_WORK_H */
|
@ -1175,4 +1175,30 @@ void iommu_del_device(struct device *dev)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iommu_del_device);
|
||||
|
||||
static int tce_iommu_bus_notifier(struct notifier_block *nb,
|
||||
unsigned long action, void *data)
|
||||
{
|
||||
struct device *dev = data;
|
||||
|
||||
switch (action) {
|
||||
case BUS_NOTIFY_ADD_DEVICE:
|
||||
return iommu_add_device(dev);
|
||||
case BUS_NOTIFY_DEL_DEVICE:
|
||||
if (dev->iommu_group)
|
||||
iommu_del_device(dev);
|
||||
return 0;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static struct notifier_block tce_iommu_bus_nb = {
|
||||
.notifier_call = tce_iommu_bus_notifier,
|
||||
};
|
||||
|
||||
int __init tce_iommu_bus_notifier_init(void)
|
||||
{
|
||||
bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_IOMMU_API */
|
||||
|
@ -541,8 +541,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
|
||||
if (smp_ops->give_timebase)
|
||||
smp_ops->give_timebase();
|
||||
|
||||
/* Wait until cpu puts itself in the online map */
|
||||
while (!cpu_online(cpu))
|
||||
/* Wait until cpu puts itself in the online & active maps */
|
||||
while (!cpu_online(cpu) || !cpu_active(cpu))
|
||||
cpu_relax();
|
||||
|
||||
return 0;
|
||||
|
@ -836,30 +836,4 @@ void __init pnv_pci_init(void)
|
||||
#endif
|
||||
}
|
||||
|
||||
static int tce_iommu_bus_notifier(struct notifier_block *nb,
|
||||
unsigned long action, void *data)
|
||||
{
|
||||
struct device *dev = data;
|
||||
|
||||
switch (action) {
|
||||
case BUS_NOTIFY_ADD_DEVICE:
|
||||
return iommu_add_device(dev);
|
||||
case BUS_NOTIFY_DEL_DEVICE:
|
||||
if (dev->iommu_group)
|
||||
iommu_del_device(dev);
|
||||
return 0;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static struct notifier_block tce_iommu_bus_nb = {
|
||||
.notifier_call = tce_iommu_bus_notifier,
|
||||
};
|
||||
|
||||
static int __init tce_iommu_bus_notifier_init(void)
|
||||
{
|
||||
bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
|
||||
return 0;
|
||||
}
|
||||
machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);
|
||||
|
@ -1340,3 +1340,5 @@ static int __init disable_multitce(char *str)
|
||||
}
|
||||
|
||||
__setup("multitce=", disable_multitce);
|
||||
|
||||
machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init);
|
||||
|
@ -499,6 +499,7 @@ config X86_INTEL_QUARK
|
||||
depends on X86_IO_APIC
|
||||
select IOSF_MBI
|
||||
select INTEL_IMR
|
||||
select COMMON_CLK
|
||||
---help---
|
||||
Select to include support for Quark X1000 SoC.
|
||||
Say Y here if you have a Quark based system such as the Arduino
|
||||
|
@ -82,18 +82,15 @@ static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask)
|
||||
if (boot_cpu_has(X86_FEATURE_XSAVES))
|
||||
asm volatile("1:"XSAVES"\n\t"
|
||||
"2:\n\t"
|
||||
: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
|
||||
xstate_fault
|
||||
: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
|
||||
: "memory");
|
||||
else
|
||||
asm volatile("1:"XSAVE"\n\t"
|
||||
"2:\n\t"
|
||||
: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
|
||||
xstate_fault
|
||||
: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
|
||||
: "memory");
|
||||
|
||||
asm volatile(xstate_fault
|
||||
: "0" (0)
|
||||
: "memory");
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -112,18 +109,15 @@ static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
|
||||
if (boot_cpu_has(X86_FEATURE_XSAVES))
|
||||
asm volatile("1:"XRSTORS"\n\t"
|
||||
"2:\n\t"
|
||||
: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
|
||||
xstate_fault
|
||||
: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
|
||||
: "memory");
|
||||
else
|
||||
asm volatile("1:"XRSTOR"\n\t"
|
||||
"2:\n\t"
|
||||
: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
|
||||
xstate_fault
|
||||
: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
|
||||
: "memory");
|
||||
|
||||
asm volatile(xstate_fault
|
||||
: "0" (0)
|
||||
: "memory");
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -149,9 +143,9 @@ static inline int xsave_state(struct xsave_struct *fx, u64 mask)
|
||||
*/
|
||||
alternative_input_2(
|
||||
"1:"XSAVE,
|
||||
"1:"XSAVEOPT,
|
||||
XSAVEOPT,
|
||||
X86_FEATURE_XSAVEOPT,
|
||||
"1:"XSAVES,
|
||||
XSAVES,
|
||||
X86_FEATURE_XSAVES,
|
||||
[fx] "D" (fx), "a" (lmask), "d" (hmask) :
|
||||
"memory");
|
||||
@ -178,7 +172,7 @@ static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
|
||||
*/
|
||||
alternative_input(
|
||||
"1: " XRSTOR,
|
||||
"1: " XRSTORS,
|
||||
XRSTORS,
|
||||
X86_FEATURE_XSAVES,
|
||||
"D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
|
||||
: "memory");
|
||||
|
@ -269,11 +269,14 @@ ENTRY(ret_from_fork)
|
||||
testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
|
||||
jz 1f
|
||||
|
||||
testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
|
||||
jnz int_ret_from_sys_call
|
||||
|
||||
RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
|
||||
jmp ret_from_sys_call # go to the SYSRET fastpath
|
||||
/*
|
||||
* By the time we get here, we have no idea whether our pt_regs,
|
||||
* ti flags, and ti status came from the 64-bit SYSCALL fast path,
|
||||
* the slow path, or one of the ia32entry paths.
|
||||
* Use int_ret_from_sys_call to return, since it can safely handle
|
||||
* all of the above.
|
||||
*/
|
||||
jmp int_ret_from_sys_call
|
||||
|
||||
1:
|
||||
subq $REST_SKIP, %rsp # leave space for volatiles
|
||||
|
@ -4950,7 +4950,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
ctxt->dst.orig_val = ctxt->dst.val;
|
||||
/* Copy full 64-bit value for CMPXCHG8B. */
|
||||
ctxt->dst.orig_val64 = ctxt->dst.val64;
|
||||
|
||||
special_insn:
|
||||
|
||||
|
@ -1572,7 +1572,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
|
||||
apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
|
||||
}
|
||||
apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm);
|
||||
apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm);
|
||||
apic->isr_count = kvm_x86_ops->hwapic_isr_update ? 1 : 0;
|
||||
apic->highest_isr_cache = -1;
|
||||
update_divide_count(apic);
|
||||
atomic_set(&apic->lapic_timer.pending, 0);
|
||||
@ -1782,7 +1782,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
|
||||
update_divide_count(apic);
|
||||
start_apic_timer(apic);
|
||||
apic->irr_pending = true;
|
||||
apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ?
|
||||
apic->isr_count = kvm_x86_ops->hwapic_isr_update ?
|
||||
1 : count_vectors(apic->regs + APIC_ISR);
|
||||
apic->highest_isr_cache = -1;
|
||||
if (kvm_x86_ops->hwapic_irr_update)
|
||||
|
@ -3649,11 +3649,6 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
|
||||
return;
|
||||
}
|
||||
|
||||
static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return;
|
||||
@ -4403,7 +4398,6 @@ static struct kvm_x86_ops svm_x86_ops = {
|
||||
.set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
|
||||
.vm_has_apicv = svm_vm_has_apicv,
|
||||
.load_eoi_exitmap = svm_load_eoi_exitmap,
|
||||
.hwapic_isr_update = svm_hwapic_isr_update,
|
||||
.sync_pir_to_irr = svm_sync_pir_to_irr,
|
||||
|
||||
.set_tss_addr = svm_set_tss_addr,
|
||||
|
@ -4367,6 +4367,18 @@ static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
if (vcpu->mode == IN_GUEST_MODE) {
|
||||
apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
|
||||
POSTED_INTR_VECTOR);
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
|
||||
int vector)
|
||||
{
|
||||
@ -4375,9 +4387,7 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
|
||||
if (is_guest_mode(vcpu) &&
|
||||
vector == vmx->nested.posted_intr_nv) {
|
||||
/* the PIR and ON have been set by L1. */
|
||||
if (vcpu->mode == IN_GUEST_MODE)
|
||||
apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
|
||||
POSTED_INTR_VECTOR);
|
||||
kvm_vcpu_trigger_posted_interrupt(vcpu);
|
||||
/*
|
||||
* If a posted intr is not recognized by hardware,
|
||||
* we will accomplish it in the next vmentry.
|
||||
@ -4409,12 +4419,7 @@ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
|
||||
|
||||
r = pi_test_and_set_on(&vmx->pi_desc);
|
||||
kvm_make_request(KVM_REQ_EVENT, vcpu);
|
||||
#ifdef CONFIG_SMP
|
||||
if (!r && (vcpu->mode == IN_GUEST_MODE))
|
||||
apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
|
||||
POSTED_INTR_VECTOR);
|
||||
else
|
||||
#endif
|
||||
if (r || !kvm_vcpu_trigger_posted_interrupt(vcpu))
|
||||
kvm_vcpu_kick(vcpu);
|
||||
}
|
||||
|
||||
|
@ -331,7 +331,7 @@ static void probe_pci_root_info(struct pci_root_info *info,
|
||||
struct list_head *list)
|
||||
{
|
||||
int ret;
|
||||
struct resource_entry *entry;
|
||||
struct resource_entry *entry, *tmp;
|
||||
|
||||
sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum);
|
||||
info->bridge = device;
|
||||
@ -345,8 +345,13 @@ static void probe_pci_root_info(struct pci_root_info *info,
|
||||
dev_dbg(&device->dev,
|
||||
"no IO and memory resources present in _CRS\n");
|
||||
else
|
||||
resource_list_for_each_entry(entry, list)
|
||||
entry->res->name = info->name;
|
||||
resource_list_for_each_entry_safe(entry, tmp, list) {
|
||||
if ((entry->res->flags & IORESOURCE_WINDOW) == 0 ||
|
||||
(entry->res->flags & IORESOURCE_DISABLED))
|
||||
resource_list_destroy_entry(entry);
|
||||
else
|
||||
entry->res->name = info->name;
|
||||
}
|
||||
}
|
||||
|
||||
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
|
||||
|
@ -42,8 +42,10 @@ static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
|
||||
* CHECKME: len might be required to check versus a minimum
|
||||
* length as well. 1 for io is fine, but for memory it does
|
||||
* not make any sense at all.
|
||||
* Note: some BIOSes report incorrect length for ACPI address space
|
||||
* descriptor, so remove check of 'reslen == len' to avoid regression.
|
||||
*/
|
||||
if (len && reslen && reslen == len && start <= end)
|
||||
if (len && reslen && start <= end)
|
||||
return true;
|
||||
|
||||
pr_debug("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
|
||||
|
@ -2110,7 +2110,8 @@ static int __init intel_opregion_present(void)
|
||||
|
||||
int acpi_video_register(void)
|
||||
{
|
||||
int result = 0;
|
||||
int ret;
|
||||
|
||||
if (register_count) {
|
||||
/*
|
||||
* if the function of acpi_video_register is already called,
|
||||
@ -2122,9 +2123,9 @@ int acpi_video_register(void)
|
||||
mutex_init(&video_list_lock);
|
||||
INIT_LIST_HEAD(&video_bus_head);
|
||||
|
||||
result = acpi_bus_register_driver(&acpi_video_bus);
|
||||
if (result < 0)
|
||||
return -ENODEV;
|
||||
ret = acpi_bus_register_driver(&acpi_video_bus);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* When the acpi_video_bus is loaded successfully, increase
|
||||
@ -2176,6 +2177,17 @@ EXPORT_SYMBOL(acpi_video_unregister_backlight);
|
||||
|
||||
static int __init acpi_video_init(void)
|
||||
{
|
||||
/*
|
||||
* Let the module load even if ACPI is disabled (e.g. due to
|
||||
* a broken BIOS) so that i915.ko can still be loaded on such
|
||||
* old systems without an AcpiOpRegion.
|
||||
*
|
||||
* acpi_video_register() will report -ENODEV later as well due
|
||||
* to acpi_disabled when i915.ko tries to register itself afterwards.
|
||||
*/
|
||||
if (acpi_disabled)
|
||||
return 0;
|
||||
|
||||
dmi_check_system(video_dmi_table);
|
||||
|
||||
if (intel_opregion_present())
|
||||
|
@ -551,7 +551,6 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
|
||||
{
|
||||
void *page_addr;
|
||||
unsigned long user_page_addr;
|
||||
struct vm_struct tmp_area;
|
||||
struct page **page;
|
||||
struct mm_struct *mm;
|
||||
|
||||
@ -600,10 +599,11 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
|
||||
proc->pid, page_addr);
|
||||
goto err_alloc_page_failed;
|
||||
}
|
||||
tmp_area.addr = page_addr;
|
||||
tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
|
||||
ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
|
||||
if (ret) {
|
||||
ret = map_kernel_range_noflush((unsigned long)page_addr,
|
||||
PAGE_SIZE, PAGE_KERNEL, page);
|
||||
flush_cache_vmap((unsigned long)page_addr,
|
||||
(unsigned long)page_addr + PAGE_SIZE);
|
||||
if (ret != 1) {
|
||||
pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
|
||||
proc->pid, page_addr);
|
||||
goto err_map_kernel_failed;
|
||||
|
@ -2242,7 +2242,7 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev)
|
||||
}
|
||||
|
||||
static int pm_genpd_summary_one(struct seq_file *s,
|
||||
struct generic_pm_domain *gpd)
|
||||
struct generic_pm_domain *genpd)
|
||||
{
|
||||
static const char * const status_lookup[] = {
|
||||
[GPD_STATE_ACTIVE] = "on",
|
||||
@ -2256,26 +2256,26 @@ static int pm_genpd_summary_one(struct seq_file *s,
|
||||
struct gpd_link *link;
|
||||
int ret;
|
||||
|
||||
ret = mutex_lock_interruptible(&gpd->lock);
|
||||
ret = mutex_lock_interruptible(&genpd->lock);
|
||||
if (ret)
|
||||
return -ERESTARTSYS;
|
||||
|
||||
if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup)))
|
||||
if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
|
||||
goto exit;
|
||||
seq_printf(s, "%-30s %-15s ", gpd->name, status_lookup[gpd->status]);
|
||||
seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]);
|
||||
|
||||
/*
|
||||
* Modifications on the list require holding locks on both
|
||||
* master and slave, so we are safe.
|
||||
* Also gpd->name is immutable.
|
||||
* Also genpd->name is immutable.
|
||||
*/
|
||||
list_for_each_entry(link, &gpd->master_links, master_node) {
|
||||
list_for_each_entry(link, &genpd->master_links, master_node) {
|
||||
seq_printf(s, "%s", link->slave->name);
|
||||
if (!list_is_last(&link->master_node, &gpd->master_links))
|
||||
if (!list_is_last(&link->master_node, &genpd->master_links))
|
||||
seq_puts(s, ", ");
|
||||
}
|
||||
|
||||
list_for_each_entry(pm_data, &gpd->dev_list, list_node) {
|
||||
list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
|
||||
kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
|
||||
if (kobj_path == NULL)
|
||||
continue;
|
||||
@ -2287,14 +2287,14 @@ static int pm_genpd_summary_one(struct seq_file *s,
|
||||
|
||||
seq_puts(s, "\n");
|
||||
exit:
|
||||
mutex_unlock(&gpd->lock);
|
||||
mutex_unlock(&genpd->lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pm_genpd_summary_show(struct seq_file *s, void *data)
|
||||
{
|
||||
struct generic_pm_domain *gpd;
|
||||
struct generic_pm_domain *genpd;
|
||||
int ret = 0;
|
||||
|
||||
seq_puts(s, " domain status slaves\n");
|
||||
@ -2305,8 +2305,8 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data)
|
||||
if (ret)
|
||||
return -ERESTARTSYS;
|
||||
|
||||
list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
|
||||
ret = pm_genpd_summary_one(s, gpd);
|
||||
list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
|
||||
ret = pm_genpd_summary_one(s, genpd);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
|
@ -730,6 +730,7 @@ void pm_system_wakeup(void)
|
||||
pm_abort_suspend = true;
|
||||
freeze_wake();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pm_system_wakeup);
|
||||
|
||||
void pm_wakeup_clear(void)
|
||||
{
|
||||
|
@ -272,6 +272,7 @@ static const struct usb_device_id blacklist_table[] = {
|
||||
{ USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
|
||||
|
||||
/* Intel Bluetooth devices */
|
||||
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
|
||||
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
|
||||
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
|
||||
{ USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
|
||||
|
@ -89,12 +89,29 @@ static int pmc_irq_set_type(struct irq_data *d, unsigned type)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void pmc_irq_suspend(struct irq_data *d)
|
||||
{
|
||||
struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
|
||||
|
||||
pmc->imr = pmc_read(pmc, AT91_PMC_IMR);
|
||||
pmc_write(pmc, AT91_PMC_IDR, pmc->imr);
|
||||
}
|
||||
|
||||
static void pmc_irq_resume(struct irq_data *d)
|
||||
{
|
||||
struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
|
||||
|
||||
pmc_write(pmc, AT91_PMC_IER, pmc->imr);
|
||||
}
|
||||
|
||||
static struct irq_chip pmc_irq = {
|
||||
.name = "PMC",
|
||||
.irq_disable = pmc_irq_mask,
|
||||
.irq_mask = pmc_irq_mask,
|
||||
.irq_unmask = pmc_irq_unmask,
|
||||
.irq_set_type = pmc_irq_set_type,
|
||||
.irq_suspend = pmc_irq_suspend,
|
||||
.irq_resume = pmc_irq_resume,
|
||||
};
|
||||
|
||||
static struct lock_class_key pmc_lock_class;
|
||||
@ -224,7 +241,8 @@ static struct at91_pmc *__init at91_pmc_init(struct device_node *np,
|
||||
goto out_free_pmc;
|
||||
|
||||
pmc_write(pmc, AT91_PMC_IDR, 0xffffffff);
|
||||
if (request_irq(pmc->virq, pmc_irq_handler, IRQF_SHARED, "pmc", pmc))
|
||||
if (request_irq(pmc->virq, pmc_irq_handler,
|
||||
IRQF_SHARED | IRQF_COND_SUSPEND, "pmc", pmc))
|
||||
goto out_remove_irqdomain;
|
||||
|
||||
return pmc;
|
||||
|
@ -33,6 +33,7 @@ struct at91_pmc {
|
||||
spinlock_t lock;
|
||||
const struct at91_pmc_caps *caps;
|
||||
struct irq_domain *irqdomain;
|
||||
u32 imr;
|
||||
};
|
||||
|
||||
static inline void pmc_lock(struct at91_pmc *pmc)
|
||||
|
@ -159,7 +159,7 @@ static struct cpufreq_driver exynos_driver = {
|
||||
|
||||
static int exynos_cpufreq_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device_node *cpus, *np;
|
||||
struct device_node *cpu0;
|
||||
int ret = -EINVAL;
|
||||
|
||||
exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
|
||||
@ -206,28 +206,19 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
|
||||
if (ret)
|
||||
goto err_cpufreq_reg;
|
||||
|
||||
cpus = of_find_node_by_path("/cpus");
|
||||
if (!cpus) {
|
||||
pr_err("failed to find cpus node\n");
|
||||
cpu0 = of_get_cpu_node(0, NULL);
|
||||
if (!cpu0) {
|
||||
pr_err("failed to find cpu0 node\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
np = of_get_next_child(cpus, NULL);
|
||||
if (!np) {
|
||||
pr_err("failed to find cpus child node\n");
|
||||
of_node_put(cpus);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (of_find_property(np, "#cooling-cells", NULL)) {
|
||||
cdev = of_cpufreq_cooling_register(np,
|
||||
if (of_find_property(cpu0, "#cooling-cells", NULL)) {
|
||||
cdev = of_cpufreq_cooling_register(cpu0,
|
||||
cpu_present_mask);
|
||||
if (IS_ERR(cdev))
|
||||
pr_err("running cpufreq without cooling device: %ld\n",
|
||||
PTR_ERR(cdev));
|
||||
}
|
||||
of_node_put(np);
|
||||
of_node_put(cpus);
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -22,6 +22,8 @@
|
||||
#include <linux/smp.h>
|
||||
#include <sysdev/fsl_soc.h>
|
||||
|
||||
#include <asm/smp.h> /* for get_hard_smp_processor_id() in UP configs */
|
||||
|
||||
/**
|
||||
* struct cpu_data - per CPU data struct
|
||||
* @parent: the parent node of cpu clock
|
||||
|
@ -44,6 +44,12 @@ void disable_cpuidle(void)
|
||||
off = 1;
|
||||
}
|
||||
|
||||
bool cpuidle_not_available(struct cpuidle_driver *drv,
|
||||
struct cpuidle_device *dev)
|
||||
{
|
||||
return off || !initialized || !drv || !dev || !dev->enabled;
|
||||
}
|
||||
|
||||
/**
|
||||
* cpuidle_play_dead - cpu off-lining
|
||||
*
|
||||
@ -66,14 +72,8 @@ int cpuidle_play_dead(void)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/**
|
||||
* cpuidle_find_deepest_state - Find deepest state meeting specific conditions.
|
||||
* @drv: cpuidle driver for the given CPU.
|
||||
* @dev: cpuidle device for the given CPU.
|
||||
* @freeze: Whether or not the state should be suitable for suspend-to-idle.
|
||||
*/
|
||||
static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
|
||||
struct cpuidle_device *dev, bool freeze)
|
||||
static int find_deepest_state(struct cpuidle_driver *drv,
|
||||
struct cpuidle_device *dev, bool freeze)
|
||||
{
|
||||
unsigned int latency_req = 0;
|
||||
int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1;
|
||||
@ -92,6 +92,17 @@ static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* cpuidle_find_deepest_state - Find the deepest available idle state.
|
||||
* @drv: cpuidle driver for the given CPU.
|
||||
* @dev: cpuidle device for the given CPU.
|
||||
*/
|
||||
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
|
||||
struct cpuidle_device *dev)
|
||||
{
|
||||
return find_deepest_state(drv, dev, false);
|
||||
}
|
||||
|
||||
static void enter_freeze_proper(struct cpuidle_driver *drv,
|
||||
struct cpuidle_device *dev, int index)
|
||||
{
|
||||
@ -113,15 +124,14 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
|
||||
|
||||
/**
|
||||
* cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
|
||||
* @drv: cpuidle driver for the given CPU.
|
||||
* @dev: cpuidle device for the given CPU.
|
||||
*
|
||||
* If there are states with the ->enter_freeze callback, find the deepest of
|
||||
* them and enter it with frozen tick. Otherwise, find the deepest state
|
||||
* available and enter it normally.
|
||||
* them and enter it with frozen tick.
|
||||
*/
|
||||
void cpuidle_enter_freeze(void)
|
||||
int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
|
||||
{
|
||||
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
|
||||
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
|
||||
int index;
|
||||
|
||||
/*
|
||||
@ -129,24 +139,11 @@ void cpuidle_enter_freeze(void)
|
||||
* that interrupts won't be enabled when it exits and allows the tick to
|
||||
* be frozen safely.
|
||||
*/
|
||||
index = cpuidle_find_deepest_state(drv, dev, true);
|
||||
if (index >= 0) {
|
||||
enter_freeze_proper(drv, dev, index);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* It is not safe to freeze the tick, find the deepest state available
|
||||
* at all and try to enter it normally.
|
||||
*/
|
||||
index = cpuidle_find_deepest_state(drv, dev, false);
|
||||
index = find_deepest_state(drv, dev, true);
|
||||
if (index >= 0)
|
||||
cpuidle_enter(drv, dev, index);
|
||||
else
|
||||
arch_cpu_idle();
|
||||
enter_freeze_proper(drv, dev, index);
|
||||
|
||||
/* Interrupts are enabled again here. */
|
||||
local_irq_disable();
|
||||
return index;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -205,12 +202,6 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
|
||||
*/
|
||||
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
|
||||
{
|
||||
if (off || !initialized)
|
||||
return -ENODEV;
|
||||
|
||||
if (!drv || !dev || !dev->enabled)
|
||||
return -EBUSY;
|
||||
|
||||
return cpuidle_curr_governor->select(drv, dev);
|
||||
}
|
||||
|
||||
|
@ -159,6 +159,9 @@ fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
|
||||
if (WARN_ON(timeout < 0))
|
||||
return -EINVAL;
|
||||
|
||||
if (timeout == 0)
|
||||
return fence_is_signaled(fence);
|
||||
|
||||
trace_fence_wait_start(fence);
|
||||
ret = fence->ops->wait(fence, intr, timeout);
|
||||
trace_fence_wait_end(fence);
|
||||
|
@ -327,6 +327,9 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
|
||||
unsigned seq, shared_count, i = 0;
|
||||
long ret = timeout;
|
||||
|
||||
if (!timeout)
|
||||
return reservation_object_test_signaled_rcu(obj, wait_all);
|
||||
|
||||
retry:
|
||||
fence = NULL;
|
||||
shared_count = 0;
|
||||
@ -402,8 +405,6 @@ reservation_object_test_signaled_single(struct fence *passed_fence)
|
||||
int ret = 1;
|
||||
|
||||
if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
|
||||
int ret;
|
||||
|
||||
fence = fence_get_rcu(lfence);
|
||||
if (!fence)
|
||||
return -1;
|
||||
|
@ -664,7 +664,6 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
|
||||
struct at_xdmac_desc *first = NULL, *prev = NULL;
|
||||
unsigned int periods = buf_len / period_len;
|
||||
int i;
|
||||
u32 cfg;
|
||||
|
||||
dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
|
||||
__func__, &buf_addr, buf_len, period_len,
|
||||
@ -700,17 +699,17 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
|
||||
if (direction == DMA_DEV_TO_MEM) {
|
||||
desc->lld.mbr_sa = atchan->per_src_addr;
|
||||
desc->lld.mbr_da = buf_addr + i * period_len;
|
||||
cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
|
||||
desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
|
||||
} else {
|
||||
desc->lld.mbr_sa = buf_addr + i * period_len;
|
||||
desc->lld.mbr_da = atchan->per_dst_addr;
|
||||
cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
|
||||
desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
|
||||
}
|
||||
desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
|
||||
| AT_XDMAC_MBR_UBC_NDEN
|
||||
| AT_XDMAC_MBR_UBC_NSEN
|
||||
| AT_XDMAC_MBR_UBC_NDE
|
||||
| period_len >> at_xdmac_get_dwidth(cfg);
|
||||
| period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);
|
||||
|
||||
dev_dbg(chan2dev(chan),
|
||||
"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
|
||||
|
@ -626,7 +626,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
|
||||
dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
|
||||
|
||||
/* Check if we have any interrupt from the DMAC */
|
||||
if (!status)
|
||||
if (!status || !dw->in_use)
|
||||
return IRQ_NONE;
|
||||
|
||||
/*
|
||||
|
@ -230,6 +230,10 @@ static bool is_bwd_noraid(struct pci_dev *pdev)
|
||||
switch (pdev->device) {
|
||||
case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
|
||||
case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
|
||||
case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
|
||||
case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
|
||||
case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
|
||||
case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
|
@ -219,6 +219,9 @@ static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
|
||||
|
||||
while (dint) {
|
||||
i = __ffs(dint);
|
||||
/* only handle interrupts belonging to pdma driver*/
|
||||
if (i >= pdev->dma_channels)
|
||||
break;
|
||||
dint &= (dint - 1);
|
||||
phy = &pdev->phy[i];
|
||||
ret = mmp_pdma_chan_handler(irq, phy);
|
||||
@ -999,6 +1002,9 @@ static int mmp_pdma_probe(struct platform_device *op)
|
||||
struct resource *iores;
|
||||
int i, ret, irq = 0;
|
||||
int dma_channels = 0, irq_num = 0;
|
||||
const enum dma_slave_buswidth widths =
|
||||
DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
|
||||
DMA_SLAVE_BUSWIDTH_4_BYTES;
|
||||
|
||||
pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
|
||||
if (!pdev)
|
||||
@ -1066,6 +1072,10 @@ static int mmp_pdma_probe(struct platform_device *op)
|
||||
pdev->device.device_config = mmp_pdma_config;
|
||||
pdev->device.device_terminate_all = mmp_pdma_terminate_all;
|
||||
pdev->device.copy_align = PDMA_ALIGNMENT;
|
||||
pdev->device.src_addr_widths = widths;
|
||||
pdev->device.dst_addr_widths = widths;
|
||||
pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
|
||||
pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
|
||||
|
||||
if (pdev->dev->coherent_dma_mask)
|
||||
dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
|
||||
|
@ -110,7 +110,7 @@ struct mmp_tdma_chan {
|
||||
struct tasklet_struct tasklet;
|
||||
|
||||
struct mmp_tdma_desc *desc_arr;
|
||||
phys_addr_t desc_arr_phys;
|
||||
dma_addr_t desc_arr_phys;
|
||||
int desc_num;
|
||||
enum dma_transfer_direction dir;
|
||||
dma_addr_t dev_addr;
|
||||
@ -166,9 +166,12 @@ static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
|
||||
static int mmp_tdma_disable_chan(struct dma_chan *chan)
|
||||
{
|
||||
struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
|
||||
u32 tdcr;
|
||||
|
||||
writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
|
||||
tdmac->reg_base + TDCR);
|
||||
tdcr = readl(tdmac->reg_base + TDCR);
|
||||
tdcr |= TDCR_ABR;
|
||||
tdcr &= ~TDCR_CHANEN;
|
||||
writel(tdcr, tdmac->reg_base + TDCR);
|
||||
|
||||
tdmac->status = DMA_COMPLETE;
|
||||
|
||||
@ -296,12 +299,27 @@ static int mmp_tdma_clear_chan_irq(struct mmp_tdma_chan *tdmac)
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
static size_t mmp_tdma_get_pos(struct mmp_tdma_chan *tdmac)
|
||||
{
|
||||
size_t reg;
|
||||
|
||||
if (tdmac->idx == 0) {
|
||||
reg = __raw_readl(tdmac->reg_base + TDSAR);
|
||||
reg -= tdmac->desc_arr[0].src_addr;
|
||||
} else if (tdmac->idx == 1) {
|
||||
reg = __raw_readl(tdmac->reg_base + TDDAR);
|
||||
reg -= tdmac->desc_arr[0].dst_addr;
|
||||
} else
|
||||
return -EINVAL;
|
||||
|
||||
return reg;
|
||||
}
|
||||
|
||||
static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id)
|
||||
{
|
||||
struct mmp_tdma_chan *tdmac = dev_id;
|
||||
|
||||
if (mmp_tdma_clear_chan_irq(tdmac) == 0) {
|
||||
tdmac->pos = (tdmac->pos + tdmac->period_len) % tdmac->buf_len;
|
||||
tasklet_schedule(&tdmac->tasklet);
|
||||
return IRQ_HANDLED;
|
||||
} else
|
||||
@ -343,7 +361,7 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
|
||||
int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
|
||||
|
||||
gpool = tdmac->pool;
|
||||
if (tdmac->desc_arr)
|
||||
if (gpool && tdmac->desc_arr)
|
||||
gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
|
||||
size);
|
||||
tdmac->desc_arr = NULL;
|
||||
@ -499,6 +517,7 @@ static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
|
||||
{
|
||||
struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
|
||||
|
||||
tdmac->pos = mmp_tdma_get_pos(tdmac);
|
||||
dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
|
||||
tdmac->buf_len - tdmac->pos);
|
||||
|
||||
@ -610,7 +629,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
|
||||
int i, ret;
|
||||
int irq = 0, irq_num = 0;
|
||||
int chan_num = TDMA_CHANNEL_NUM;
|
||||
struct gen_pool *pool;
|
||||
struct gen_pool *pool = NULL;
|
||||
|
||||
of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
|
||||
if (of_id)
|
||||
|
@ -162,9 +162,9 @@ static const struct reg_offset_data bam_v1_4_reg_info[] = {
|
||||
[BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 },
|
||||
[BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 },
|
||||
[BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 },
|
||||
[BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x1000, 0x00 },
|
||||
[BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x1000, 0x00 },
|
||||
[BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x1000, 0x00 },
|
||||
[BAM_P_EVNT_DEST_ADDR] = { 0x182C, 0x00, 0x1000, 0x00 },
|
||||
[BAM_P_EVNT_REG] = { 0x1818, 0x00, 0x1000, 0x00 },
|
||||
[BAM_P_SW_OFSTS] = { 0x1800, 0x00, 0x1000, 0x00 },
|
||||
[BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 },
|
||||
[BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 },
|
||||
[BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 },
|
||||
@ -1143,6 +1143,10 @@ static int bam_dma_probe(struct platform_device *pdev)
|
||||
dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);
|
||||
|
||||
/* initialize dmaengine apis */
|
||||
bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
|
||||
bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
|
||||
bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
||||
bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
||||
bdev->common.device_alloc_chan_resources = bam_alloc_chan;
|
||||
bdev->common.device_free_chan_resources = bam_free_chan;
|
||||
bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
|
||||
|
@ -582,15 +582,12 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
|
||||
}
|
||||
}
|
||||
|
||||
static void sh_dmae_shutdown(struct platform_device *pdev)
|
||||
{
|
||||
struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
|
||||
sh_dmae_ctl_stop(shdev);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
static int sh_dmae_runtime_suspend(struct device *dev)
|
||||
{
|
||||
struct sh_dmae_device *shdev = dev_get_drvdata(dev);
|
||||
|
||||
sh_dmae_ctl_stop(shdev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -605,6 +602,9 @@ static int sh_dmae_runtime_resume(struct device *dev)
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
static int sh_dmae_suspend(struct device *dev)
|
||||
{
|
||||
struct sh_dmae_device *shdev = dev_get_drvdata(dev);
|
||||
|
||||
sh_dmae_ctl_stop(shdev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -929,13 +929,12 @@ static int sh_dmae_remove(struct platform_device *pdev)
|
||||
}
|
||||
|
||||
static struct platform_driver sh_dmae_driver = {
|
||||
.driver = {
|
||||
.driver = {
|
||||
.pm = &sh_dmae_pm,
|
||||
.name = SH_DMAE_DRV_NAME,
|
||||
.of_match_table = sh_dmae_of_match,
|
||||
},
|
||||
.remove = sh_dmae_remove,
|
||||
.shutdown = sh_dmae_shutdown,
|
||||
};
|
||||
|
||||
static int __init sh_dmae_init(void)
|
||||
|
@ -78,7 +78,7 @@ static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
|
||||
* We have to be cautious here. We have seen BIOSes with DMI pointers
|
||||
* pointing to completely the wrong place for example
|
||||
*/
|
||||
static void dmi_table(u8 *buf, int len, int num,
|
||||
static void dmi_table(u8 *buf, u32 len, int num,
|
||||
void (*decode)(const struct dmi_header *, void *),
|
||||
void *private_data)
|
||||
{
|
||||
@ -92,12 +92,6 @@ static void dmi_table(u8 *buf, int len, int num,
|
||||
while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) {
|
||||
const struct dmi_header *dm = (const struct dmi_header *)data;
|
||||
|
||||
/*
|
||||
* 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
|
||||
*/
|
||||
if (dm->type == DMI_ENTRY_END_OF_TABLE)
|
||||
break;
|
||||
|
||||
/*
|
||||
* We want to know the total length (formatted area and
|
||||
* strings) before decoding to make sure we won't run off the
|
||||
@ -108,13 +102,20 @@ static void dmi_table(u8 *buf, int len, int num,
|
||||
data++;
|
||||
if (data - buf < len - 1)
|
||||
decode(dm, private_data);
|
||||
|
||||
/*
|
||||
* 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
|
||||
*/
|
||||
if (dm->type == DMI_ENTRY_END_OF_TABLE)
|
||||
break;
|
||||
|
||||
data += 2;
|
||||
i++;
|
||||
}
|
||||
}
|
||||
|
||||
static phys_addr_t dmi_base;
|
||||
static u16 dmi_len;
|
||||
static u32 dmi_len;
|
||||
static u16 dmi_num;
|
||||
|
||||
static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
|
||||
|
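For background on the dmi_table() change above: an SMBIOS structure is a small fixed header (type, length, handle) followed by a formatted area of `length` bytes and a string set terminated by a double NUL, and the patch moves the Type 127 End-of-Table test so the final entry is still decoded before the walk stops. A hedged, userspace-only sketch of that walk over a fake in-memory table (the helper names are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

struct dmi_hdr { uint8_t type; uint8_t length; uint16_t handle; };

/* Skip one structure: formatted area, then strings up to the double NUL. */
static const uint8_t *dmi_next(const uint8_t *p, const uint8_t *end)
{
    const struct dmi_hdr *h = (const struct dmi_hdr *)p;
    p += h->length;
    while (p + 1 < end && (p[0] || p[1]))
        p++;
    return p + 2;
}

int main(void)
{
    /* Fake table: one Type 1 entry with a single string, then End-of-Table. */
    const uint8_t table[] = {
        1, 4, 0, 0, 'X', 0, 0,   /* type 1, length 4, string "X" */
        127, 4, 0, 0, 0, 0       /* type 127 = End-of-Table */
    };
    const uint8_t *p = table, *end = table + sizeof(table);

    while (p + sizeof(struct dmi_hdr) <= end) {
        const struct dmi_hdr *h = (const struct dmi_hdr *)p;
        printf("decoded entry type %d, length %d\n", h->type, h->length);
        if (h->type == 127)      /* checked after decoding, as in the patch */
            break;
        p = dmi_next(p, end);
    }
    return 0;
}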
@ -179,12 +179,12 @@ again:
|
||||
start = desc->phys_addr;
|
||||
end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
|
||||
|
||||
if ((start + size) > end || (start + size) > max)
|
||||
continue;
|
||||
|
||||
if (end - size > max)
|
||||
if (end > max)
|
||||
end = max;
|
||||
|
||||
if ((start + size) > end)
|
||||
continue;
|
||||
|
||||
if (round_down(end - size, align) < start)
|
||||
continue;
|
||||
|
||||
|
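The efi hunk above reorders the bounds checks: the candidate region is clamped to max first, then rejected if it can no longer hold size bytes, and finally rejected if rounding end - size down to the alignment falls below start. A small standalone sketch of that selection logic, with illustrative numbers rather than real memory-map entries:

#include <stdint.h>
#include <stdio.h>

static uint64_t round_down_u64(uint64_t x, uint64_t align)
{
    return x & ~(align - 1);          /* align is assumed to be a power of two */
}

int main(void)
{
    uint64_t start = 0x00100000, end = 0x7fe00000;   /* one memory-map region */
    uint64_t max   = 0x40000000;                     /* caller's upper limit */
    uint64_t size  = 0x01000000, align = 0x00200000;

    if (end > max)                    /* clamp first, as in the new code */
        end = max;

    if (start + size > end) {         /* region too small once clamped */
        puts("region skipped");
        return 0;
    }

    uint64_t base = round_down_u64(end - size, align);
    if (base < start) {               /* alignment pushed us below the region */
        puts("region skipped");
        return 0;
    }

    printf("allocate %#llx bytes at %#llx\n",
           (unsigned long long)size, (unsigned long long)base);
    return 0;
}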
@ -91,29 +91,29 @@
|
||||
*/
|
||||
|
||||
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
|
||||
unsigned long size,
|
||||
u64 size,
|
||||
unsigned alignment,
|
||||
unsigned long color,
|
||||
enum drm_mm_search_flags flags);
|
||||
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
|
||||
unsigned long size,
|
||||
u64 size,
|
||||
unsigned alignment,
|
||||
unsigned long color,
|
||||
unsigned long start,
|
||||
unsigned long end,
|
||||
u64 start,
|
||||
u64 end,
|
||||
enum drm_mm_search_flags flags);
|
||||
|
||||
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
|
||||
struct drm_mm_node *node,
|
||||
unsigned long size, unsigned alignment,
|
||||
u64 size, unsigned alignment,
|
||||
unsigned long color,
|
||||
enum drm_mm_allocator_flags flags)
|
||||
{
|
||||
struct drm_mm *mm = hole_node->mm;
|
||||
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
|
||||
unsigned long hole_end = drm_mm_hole_node_end(hole_node);
|
||||
unsigned long adj_start = hole_start;
|
||||
unsigned long adj_end = hole_end;
|
||||
u64 hole_start = drm_mm_hole_node_start(hole_node);
|
||||
u64 hole_end = drm_mm_hole_node_end(hole_node);
|
||||
u64 adj_start = hole_start;
|
||||
u64 adj_end = hole_end;
|
||||
|
||||
BUG_ON(node->allocated);
|
||||
|
||||
@ -124,12 +124,15 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
|
||||
adj_start = adj_end - size;
|
||||
|
||||
if (alignment) {
|
||||
unsigned tmp = adj_start % alignment;
|
||||
if (tmp) {
|
||||
u64 tmp = adj_start;
|
||||
unsigned rem;
|
||||
|
||||
rem = do_div(tmp, alignment);
|
||||
if (rem) {
|
||||
if (flags & DRM_MM_CREATE_TOP)
|
||||
adj_start -= tmp;
|
||||
adj_start -= rem;
|
||||
else
|
||||
adj_start += alignment - tmp;
|
||||
adj_start += alignment - rem;
|
||||
}
|
||||
}
|
||||
|
||||
@ -176,9 +179,9 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
|
||||
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
|
||||
{
|
||||
struct drm_mm_node *hole;
|
||||
unsigned long end = node->start + node->size;
|
||||
unsigned long hole_start;
|
||||
unsigned long hole_end;
|
||||
u64 end = node->start + node->size;
|
||||
u64 hole_start;
|
||||
u64 hole_end;
|
||||
|
||||
BUG_ON(node == NULL);
|
||||
|
||||
@ -227,7 +230,7 @@ EXPORT_SYMBOL(drm_mm_reserve_node);
|
||||
* 0 on success, -ENOSPC if there's no suitable hole.
|
||||
*/
|
||||
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
|
||||
unsigned long size, unsigned alignment,
|
||||
u64 size, unsigned alignment,
|
||||
unsigned long color,
|
||||
enum drm_mm_search_flags sflags,
|
||||
enum drm_mm_allocator_flags aflags)
|
||||
@ -246,16 +249,16 @@ EXPORT_SYMBOL(drm_mm_insert_node_generic);
|
||||
|
||||
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
|
||||
struct drm_mm_node *node,
|
||||
unsigned long size, unsigned alignment,
|
||||
u64 size, unsigned alignment,
|
||||
unsigned long color,
|
||||
unsigned long start, unsigned long end,
|
||||
u64 start, u64 end,
|
||||
enum drm_mm_allocator_flags flags)
|
||||
{
|
||||
struct drm_mm *mm = hole_node->mm;
|
||||
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
|
||||
unsigned long hole_end = drm_mm_hole_node_end(hole_node);
|
||||
unsigned long adj_start = hole_start;
|
||||
unsigned long adj_end = hole_end;
|
||||
u64 hole_start = drm_mm_hole_node_start(hole_node);
|
||||
u64 hole_end = drm_mm_hole_node_end(hole_node);
|
||||
u64 adj_start = hole_start;
|
||||
u64 adj_end = hole_end;
|
||||
|
||||
BUG_ON(!hole_node->hole_follows || node->allocated);
|
||||
|
||||
@ -271,12 +274,15 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
|
||||
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
|
||||
|
||||
if (alignment) {
|
||||
unsigned tmp = adj_start % alignment;
|
||||
if (tmp) {
|
||||
u64 tmp = adj_start;
|
||||
unsigned rem;
|
||||
|
||||
rem = do_div(tmp, alignment);
|
||||
if (rem) {
|
||||
if (flags & DRM_MM_CREATE_TOP)
|
||||
adj_start -= tmp;
|
||||
adj_start -= rem;
|
||||
else
|
||||
adj_start += alignment - tmp;
|
||||
adj_start += alignment - rem;
|
||||
}
|
||||
}
|
||||
|
||||
@ -324,9 +330,9 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
|
||||
* 0 on success, -ENOSPC if there's no suitable hole.
|
||||
*/
|
||||
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
|
||||
unsigned long size, unsigned alignment,
|
||||
u64 size, unsigned alignment,
|
||||
unsigned long color,
|
||||
unsigned long start, unsigned long end,
|
||||
u64 start, u64 end,
|
||||
enum drm_mm_search_flags sflags,
|
||||
enum drm_mm_allocator_flags aflags)
|
||||
{
|
||||
@ -387,32 +393,34 @@ void drm_mm_remove_node(struct drm_mm_node *node)
|
||||
}
|
||||
EXPORT_SYMBOL(drm_mm_remove_node);
|
||||
|
||||
static int check_free_hole(unsigned long start, unsigned long end,
|
||||
unsigned long size, unsigned alignment)
|
||||
static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
|
||||
{
|
||||
if (end - start < size)
|
||||
return 0;
|
||||
|
||||
if (alignment) {
|
||||
unsigned tmp = start % alignment;
|
||||
u64 tmp = start;
|
||||
unsigned rem;
|
||||
|
||||
rem = do_div(tmp, alignment);
|
||||
if (tmp)
|
||||
start += alignment - tmp;
|
||||
start += alignment - rem;
|
||||
}
|
||||
|
||||
return end >= start + size;
|
||||
}
|
||||
|
||||
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
|
||||
unsigned long size,
|
||||
u64 size,
|
||||
unsigned alignment,
|
||||
unsigned long color,
|
||||
enum drm_mm_search_flags flags)
|
||||
{
|
||||
struct drm_mm_node *entry;
|
||||
struct drm_mm_node *best;
|
||||
unsigned long adj_start;
|
||||
unsigned long adj_end;
|
||||
unsigned long best_size;
|
||||
u64 adj_start;
|
||||
u64 adj_end;
|
||||
u64 best_size;
|
||||
|
||||
BUG_ON(mm->scanned_blocks);
|
||||
|
||||
@ -421,7 +429,7 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
|
||||
|
||||
__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
|
||||
flags & DRM_MM_SEARCH_BELOW) {
|
||||
unsigned long hole_size = adj_end - adj_start;
|
||||
u64 hole_size = adj_end - adj_start;
|
||||
|
||||
if (mm->color_adjust) {
|
||||
mm->color_adjust(entry, color, &adj_start, &adj_end);
|
||||
@ -445,18 +453,18 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
|
||||
}
|
||||
|
||||
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
|
||||
unsigned long size,
|
||||
u64 size,
|
||||
unsigned alignment,
|
||||
unsigned long color,
|
||||
unsigned long start,
|
||||
unsigned long end,
|
||||
u64 start,
|
||||
u64 end,
|
||||
enum drm_mm_search_flags flags)
|
||||
{
|
||||
struct drm_mm_node *entry;
|
||||
struct drm_mm_node *best;
|
||||
unsigned long adj_start;
|
||||
unsigned long adj_end;
|
||||
unsigned long best_size;
|
||||
u64 adj_start;
|
||||
u64 adj_end;
|
||||
u64 best_size;
|
||||
|
||||
BUG_ON(mm->scanned_blocks);
|
||||
|
||||
@ -465,7 +473,7 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
|
||||
|
||||
__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
|
||||
flags & DRM_MM_SEARCH_BELOW) {
|
||||
unsigned long hole_size = adj_end - adj_start;
|
||||
u64 hole_size = adj_end - adj_start;
|
||||
|
||||
if (adj_start < start)
|
||||
adj_start = start;
|
||||
@ -561,7 +569,7 @@ EXPORT_SYMBOL(drm_mm_replace_node);
|
||||
* adding/removing nodes to/from the scan list are allowed.
|
||||
*/
|
||||
void drm_mm_init_scan(struct drm_mm *mm,
|
||||
unsigned long size,
|
||||
u64 size,
|
||||
unsigned alignment,
|
||||
unsigned long color)
|
||||
{
|
||||
@ -594,11 +602,11 @@ EXPORT_SYMBOL(drm_mm_init_scan);
|
||||
* adding/removing nodes to/from the scan list are allowed.
|
||||
*/
|
||||
void drm_mm_init_scan_with_range(struct drm_mm *mm,
|
||||
unsigned long size,
|
||||
u64 size,
|
||||
unsigned alignment,
|
||||
unsigned long color,
|
||||
unsigned long start,
|
||||
unsigned long end)
|
||||
u64 start,
|
||||
u64 end)
|
||||
{
|
||||
mm->scan_color = color;
|
||||
mm->scan_alignment = alignment;
|
||||
@ -627,8 +635,8 @@ bool drm_mm_scan_add_block(struct drm_mm_node *node)
|
||||
{
|
||||
struct drm_mm *mm = node->mm;
|
||||
struct drm_mm_node *prev_node;
|
||||
unsigned long hole_start, hole_end;
|
||||
unsigned long adj_start, adj_end;
|
||||
u64 hole_start, hole_end;
|
||||
u64 adj_start, adj_end;
|
||||
|
||||
mm->scanned_blocks++;
|
||||
|
||||
@ -731,7 +739,7 @@ EXPORT_SYMBOL(drm_mm_clean);
|
||||
*
|
||||
* Note that @mm must be cleared to 0 before calling this function.
|
||||
*/
|
||||
void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
|
||||
void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
|
||||
{
|
||||
INIT_LIST_HEAD(&mm->hole_stack);
|
||||
mm->scanned_blocks = 0;
|
||||
@ -766,18 +774,17 @@ void drm_mm_takedown(struct drm_mm * mm)
|
||||
}
|
||||
EXPORT_SYMBOL(drm_mm_takedown);
|
||||
|
||||
static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
|
||||
const char *prefix)
|
||||
static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
|
||||
const char *prefix)
|
||||
{
|
||||
unsigned long hole_start, hole_end, hole_size;
|
||||
u64 hole_start, hole_end, hole_size;
|
||||
|
||||
if (entry->hole_follows) {
|
||||
hole_start = drm_mm_hole_node_start(entry);
|
||||
hole_end = drm_mm_hole_node_end(entry);
|
||||
hole_size = hole_end - hole_start;
|
||||
printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
|
||||
prefix, hole_start, hole_end,
|
||||
hole_size);
|
||||
pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
|
||||
hole_end, hole_size);
|
||||
return hole_size;
|
||||
}
|
||||
|
||||
@ -792,35 +799,34 @@ static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
|
||||
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
|
||||
{
|
||||
struct drm_mm_node *entry;
|
||||
unsigned long total_used = 0, total_free = 0, total = 0;
|
||||
u64 total_used = 0, total_free = 0, total = 0;
|
||||
|
||||
total_free += drm_mm_debug_hole(&mm->head_node, prefix);
|
||||
|
||||
drm_mm_for_each_node(entry, mm) {
|
||||
printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
|
||||
prefix, entry->start, entry->start + entry->size,
|
||||
entry->size);
|
||||
pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
|
||||
entry->start + entry->size, entry->size);
|
||||
total_used += entry->size;
|
||||
total_free += drm_mm_debug_hole(entry, prefix);
|
||||
}
|
||||
total = total_free + total_used;
|
||||
|
||||
printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
|
||||
total_used, total_free);
|
||||
pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
|
||||
total_used, total_free);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_mm_debug_table);
|
||||
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
|
||||
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
|
||||
{
|
||||
unsigned long hole_start, hole_end, hole_size;
|
||||
u64 hole_start, hole_end, hole_size;
|
||||
|
||||
if (entry->hole_follows) {
|
||||
hole_start = drm_mm_hole_node_start(entry);
|
||||
hole_end = drm_mm_hole_node_end(entry);
|
||||
hole_size = hole_end - hole_start;
|
||||
seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
|
||||
hole_start, hole_end, hole_size);
|
||||
seq_printf(m, "%#llx-%#llx: %llu: free\n", hole_start,
|
||||
hole_end, hole_size);
|
||||
return hole_size;
|
||||
}
|
||||
|
||||
@ -835,20 +841,20 @@ static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *en
|
||||
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
|
||||
{
|
||||
struct drm_mm_node *entry;
|
||||
unsigned long total_used = 0, total_free = 0, total = 0;
|
||||
u64 total_used = 0, total_free = 0, total = 0;
|
||||
|
||||
total_free += drm_mm_dump_hole(m, &mm->head_node);
|
||||
|
||||
drm_mm_for_each_node(entry, mm) {
|
||||
seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
|
||||
entry->start, entry->start + entry->size,
|
||||
entry->size);
|
||||
seq_printf(m, "%#016llx-%#016llx: %llu: used\n", entry->start,
|
||||
entry->start + entry->size, entry->size);
|
||||
total_used += entry->size;
|
||||
total_free += drm_mm_dump_hole(m, entry);
|
||||
}
|
||||
total = total_free + total_used;
|
||||
|
||||
seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
|
||||
seq_printf(m, "total: %llu, used %llu free %llu\n", total,
|
||||
total_used, total_free);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_mm_dump_table);
|
||||
|
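The drm_mm changes replace `adj_start % alignment` with do_div() because node sizes and offsets are now u64, and a 64-by-32 modulo is not directly available on 32-bit builds; do_div() divides the value in place and returns the remainder. A hedged userspace equivalent of the alignment fix-up (do_div_sketch() is a plain helper standing in for the kernel macro):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's do_div(): divides *n in place
 * by base and returns the remainder. */
static uint32_t do_div_sketch(uint64_t *n, uint32_t base)
{
    uint32_t rem = (uint32_t)(*n % base);
    *n /= base;
    return rem;
}

/* Round start up, or down for top-down allocation, to the alignment. */
static uint64_t align_start(uint64_t adj_start, unsigned alignment, int create_top)
{
    uint64_t tmp = adj_start;
    uint32_t rem = do_div_sketch(&tmp, alignment);

    if (rem) {
        if (create_top)
            adj_start -= rem;                  /* round down */
        else
            adj_start += alignment - rem;      /* round up */
    }
    return adj_start;
}

int main(void)
{
    printf("%#llx\n", (unsigned long long)align_start(0x100000123ULL, 0x1000, 0));
    printf("%#llx\n", (unsigned long long)align_start(0x100000123ULL, 0x1000, 1));
    return 0;
}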
@ -152,12 +152,12 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
|
||||
seq_puts(m, " (pp");
|
||||
else
|
||||
seq_puts(m, " (g");
|
||||
seq_printf(m, "gtt offset: %08lx, size: %08lx, type: %u)",
|
||||
seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)",
|
||||
vma->node.start, vma->node.size,
|
||||
vma->ggtt_view.type);
|
||||
}
|
||||
if (obj->stolen)
|
||||
seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
|
||||
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
|
||||
if (obj->pin_mappable || obj->fault_mappable) {
|
||||
char s[3], *t = s;
|
||||
if (obj->pin_mappable)
|
||||
|
@ -622,7 +622,7 @@ static int i915_drm_suspend(struct drm_device *dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int i915_drm_suspend_late(struct drm_device *drm_dev)
|
||||
static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = drm_dev->dev_private;
|
||||
int ret;
|
||||
@ -636,7 +636,17 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev)
|
||||
}
|
||||
|
||||
pci_disable_device(drm_dev->pdev);
|
||||
pci_set_power_state(drm_dev->pdev, PCI_D3hot);
|
||||
/*
|
||||
* During hibernation on some GEN4 platforms the BIOS may try to access
|
||||
* the device even though it's already in D3 and hang the machine. So
|
||||
* leave the device in D0 on those platforms and hope the BIOS will
|
||||
* power down the device properly. Platforms where this was seen:
|
||||
* Lenovo Thinkpad X301, X61s
|
||||
*/
|
||||
if (!(hibernation &&
|
||||
drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
|
||||
INTEL_INFO(dev_priv)->gen == 4))
|
||||
pci_set_power_state(drm_dev->pdev, PCI_D3hot);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -662,7 +672,7 @@ int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
return i915_drm_suspend_late(dev);
|
||||
return i915_drm_suspend_late(dev, false);
|
||||
}
|
||||
|
||||
static int i915_drm_resume(struct drm_device *dev)
|
||||
@ -950,7 +960,17 @@ static int i915_pm_suspend_late(struct device *dev)
|
||||
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
|
||||
return 0;
|
||||
|
||||
return i915_drm_suspend_late(drm_dev);
|
||||
return i915_drm_suspend_late(drm_dev, false);
|
||||
}
|
||||
|
||||
static int i915_pm_poweroff_late(struct device *dev)
|
||||
{
|
||||
struct drm_device *drm_dev = dev_to_i915(dev)->dev;
|
||||
|
||||
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
|
||||
return 0;
|
||||
|
||||
return i915_drm_suspend_late(drm_dev, true);
|
||||
}
|
||||
|
||||
static int i915_pm_resume_early(struct device *dev)
|
||||
@ -1520,7 +1540,7 @@ static const struct dev_pm_ops i915_pm_ops = {
|
||||
.thaw_early = i915_pm_resume_early,
|
||||
.thaw = i915_pm_resume,
|
||||
.poweroff = i915_pm_suspend,
|
||||
.poweroff_late = i915_pm_suspend_late,
|
||||
.poweroff_late = i915_pm_poweroff_late,
|
||||
.restore_early = i915_pm_resume_early,
|
||||
.restore = i915_pm_resume,
|
||||
|
||||
|
@ -1145,7 +1145,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
|
||||
|
||||
ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
|
||||
|
||||
DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
|
||||
DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
|
||||
ppgtt->node.size >> 20,
|
||||
ppgtt->node.start / PAGE_SIZE);
|
||||
|
||||
@ -1713,8 +1713,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
|
||||
|
||||
static void i915_gtt_color_adjust(struct drm_mm_node *node,
|
||||
unsigned long color,
|
||||
unsigned long *start,
|
||||
unsigned long *end)
|
||||
u64 *start,
|
||||
u64 *end)
|
||||
{
|
||||
if (node->color != color)
|
||||
*start += 4096;
|
||||
|
@ -282,16 +282,6 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool
|
||||
__cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe)
|
||||
{
|
||||
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
|
||||
return !intel_crtc->cpu_fifo_underrun_disabled;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
|
||||
* @dev_priv: i915 device instance
|
||||
@ -352,9 +342,15 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
|
||||
void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe)
|
||||
{
|
||||
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
|
||||
|
||||
/* We may be called too early in init, thanks BIOS! */
|
||||
if (crtc == NULL)
|
||||
return;
|
||||
|
||||
/* GMCH can't disable fifo underruns, filter them. */
|
||||
if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
|
||||
!__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe))
|
||||
to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
|
||||
return;
|
||||
|
||||
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
|
||||
|
@ -70,7 +70,9 @@ static const struct dw_hdmi_curr_ctrl imx_cur_ctr[] = {
|
||||
118800000, { 0x091c, 0x091c, 0x06dc },
|
||||
}, {
|
||||
216000000, { 0x06dc, 0x0b5c, 0x091c },
|
||||
}
|
||||
}, {
|
||||
~0UL, { 0x0000, 0x0000, 0x0000 },
|
||||
},
|
||||
};
|
||||
|
||||
static const struct dw_hdmi_sym_term imx_sym_term[] = {
|
||||
@ -136,11 +138,34 @@ static struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
|
||||
.destroy = drm_encoder_cleanup,
|
||||
};
|
||||
|
||||
static enum drm_mode_status imx6q_hdmi_mode_valid(struct drm_connector *con,
|
||||
struct drm_display_mode *mode)
|
||||
{
|
||||
if (mode->clock < 13500)
|
||||
return MODE_CLOCK_LOW;
|
||||
if (mode->clock > 266000)
|
||||
return MODE_CLOCK_HIGH;
|
||||
|
||||
return MODE_OK;
|
||||
}
|
||||
|
||||
static enum drm_mode_status imx6dl_hdmi_mode_valid(struct drm_connector *con,
|
||||
struct drm_display_mode *mode)
|
||||
{
|
||||
if (mode->clock < 13500)
|
||||
return MODE_CLOCK_LOW;
|
||||
if (mode->clock > 270000)
|
||||
return MODE_CLOCK_HIGH;
|
||||
|
||||
return MODE_OK;
|
||||
}
|
||||
|
||||
static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = {
|
||||
.mpll_cfg = imx_mpll_cfg,
|
||||
.cur_ctr = imx_cur_ctr,
|
||||
.sym_term = imx_sym_term,
|
||||
.dev_type = IMX6Q_HDMI,
|
||||
.mpll_cfg = imx_mpll_cfg,
|
||||
.cur_ctr = imx_cur_ctr,
|
||||
.sym_term = imx_sym_term,
|
||||
.dev_type = IMX6Q_HDMI,
|
||||
.mode_valid = imx6q_hdmi_mode_valid,
|
||||
};
|
||||
|
||||
static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = {
|
||||
@ -148,6 +173,7 @@ static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = {
|
||||
.cur_ctr = imx_cur_ctr,
|
||||
.sym_term = imx_sym_term,
|
||||
.dev_type = IMX6DL_HDMI,
|
||||
.mode_valid = imx6dl_hdmi_mode_valid,
|
||||
};
|
||||
|
||||
static const struct of_device_id dw_hdmi_imx_dt_ids[] = {
|
||||
|
@ -163,22 +163,7 @@ static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
|
||||
{
|
||||
struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
|
||||
struct imx_ldb *ldb = imx_ldb_ch->ldb;
|
||||
struct drm_display_mode *mode = &encoder->crtc->hwmode;
|
||||
u32 pixel_fmt;
|
||||
unsigned long serial_clk;
|
||||
unsigned long di_clk = mode->clock * 1000;
|
||||
int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
|
||||
|
||||
if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
|
||||
/* dual channel LVDS mode */
|
||||
serial_clk = 3500UL * mode->clock;
|
||||
imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk);
|
||||
imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk);
|
||||
} else {
|
||||
serial_clk = 7000UL * mode->clock;
|
||||
imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk,
|
||||
di_clk);
|
||||
}
|
||||
|
||||
switch (imx_ldb_ch->chno) {
|
||||
case 0:
|
||||
@ -247,6 +232,9 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
|
||||
struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
|
||||
struct imx_ldb *ldb = imx_ldb_ch->ldb;
|
||||
int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
|
||||
unsigned long serial_clk;
|
||||
unsigned long di_clk = mode->clock * 1000;
|
||||
int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
|
||||
|
||||
if (mode->clock > 170000) {
|
||||
dev_warn(ldb->dev,
|
||||
@ -257,6 +245,16 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
|
||||
"%s: mode exceeds 85 MHz pixel clock\n", __func__);
|
||||
}
|
||||
|
||||
if (dual) {
|
||||
serial_clk = 3500UL * mode->clock;
|
||||
imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk);
|
||||
imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk);
|
||||
} else {
|
||||
serial_clk = 7000UL * mode->clock;
|
||||
imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk,
|
||||
di_clk);
|
||||
}
|
||||
|
||||
/* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */
|
||||
if (imx_ldb_ch == &ldb->channel[0]) {
|
||||
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
|
||||
|
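In the imx_ldb hunks the serial (LVDS bit) clock is derived from the pixel clock: 7x in single-channel mode and 3.5x per channel in dual-channel mode, with mode->clock expressed in kHz, hence the 7000UL and 3500UL factors. A small sketch of that calculation, using an assumed 1080p pixel clock:

#include <stdio.h>

int main(void)
{
    unsigned long mode_clock_khz = 148500;          /* assumed 1080p60 pixel clock */
    unsigned long di_clk = mode_clock_khz * 1000;   /* display interface clock, Hz */
    int dual = 1;                                   /* dual-channel (split) LVDS mode */

    /* 7 LVDS bit times per pixel clock; dual channel halves the per-channel rate. */
    unsigned long serial_clk = dual ? 3500UL * mode_clock_khz
                                    : 7000UL * mode_clock_khz;

    printf("di_clk = %lu Hz, serial_clk per channel = %lu Hz\n",
           di_clk, serial_clk);
    return 0;
}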
@ -236,8 +236,11 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
|
||||
}
|
||||
|
||||
panel_node = of_parse_phandle(np, "fsl,panel", 0);
|
||||
if (panel_node)
|
||||
if (panel_node) {
|
||||
imxpd->panel = of_drm_find_panel(panel_node);
|
||||
if (!imxpd->panel)
|
||||
return -EPROBE_DEFER;
|
||||
}
|
||||
|
||||
imxpd->dev = dev;
|
||||
|
||||
|
@ -32,7 +32,10 @@ static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
|
||||
void mdp4_irq_preinstall(struct msm_kms *kms)
|
||||
{
|
||||
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
|
||||
mdp4_enable(mdp4_kms);
|
||||
mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
|
||||
mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
|
||||
mdp4_disable(mdp4_kms);
|
||||
}
|
||||
|
||||
int mdp4_irq_postinstall(struct msm_kms *kms)
|
||||
@ -53,7 +56,9 @@ int mdp4_irq_postinstall(struct msm_kms *kms)
|
||||
void mdp4_irq_uninstall(struct msm_kms *kms)
|
||||
{
|
||||
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
|
||||
mdp4_enable(mdp4_kms);
|
||||
mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
|
||||
mdp4_disable(mdp4_kms);
|
||||
}
|
||||
|
||||
irqreturn_t mdp4_irq(struct msm_kms *kms)
|
||||
|
@ -8,17 +8,9 @@ http://github.com/freedreno/envytools/
|
||||
git clone https://github.com/freedreno/envytools.git
|
||||
|
||||
The rules-ng-ng source files this header was generated from are:
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
|
||||
- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 27229 bytes, from 2015-02-10 17:00:41)
|
||||
- /local/mnt2/workspace2/sviau/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2014-06-02 18:31:15)
|
||||
- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-01-23 16:20:19)
|
||||
|
||||
Copyright (C) 2013-2015 by the following authors:
|
||||
- Rob Clark <robdclark@gmail.com> (robclark)
|
||||
@ -910,6 +902,7 @@ static inline uint32_t __offset_LM(uint32_t idx)
|
||||
case 2: return (mdp5_cfg->lm.base[2]);
|
||||
case 3: return (mdp5_cfg->lm.base[3]);
|
||||
case 4: return (mdp5_cfg->lm.base[4]);
|
||||
case 5: return (mdp5_cfg->lm.base[5]);
|
||||
default: return INVALID_IDX(idx);
|
||||
}
|
||||
}
|
||||
|
@ -62,8 +62,8 @@ struct mdp5_crtc {
|
||||
|
||||
/* current cursor being scanned out: */
|
||||
struct drm_gem_object *scanout_bo;
|
||||
uint32_t width;
|
||||
uint32_t height;
|
||||
uint32_t width, height;
|
||||
uint32_t x, y;
|
||||
} cursor;
|
||||
};
|
||||
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
|
||||
@ -103,8 +103,8 @@ static void crtc_flush_all(struct drm_crtc *crtc)
|
||||
struct drm_plane *plane;
|
||||
uint32_t flush_mask = 0;
|
||||
|
||||
/* we could have already released CTL in the disable path: */
|
||||
if (!mdp5_crtc->ctl)
|
||||
/* this should not happen: */
|
||||
if (WARN_ON(!mdp5_crtc->ctl))
|
||||
return;
|
||||
|
||||
drm_atomic_crtc_for_each_plane(plane, crtc) {
|
||||
@ -143,6 +143,11 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
|
||||
drm_atomic_crtc_for_each_plane(plane, crtc) {
|
||||
mdp5_plane_complete_flip(plane);
|
||||
}
|
||||
|
||||
if (mdp5_crtc->ctl && !crtc->state->enable) {
|
||||
mdp5_ctl_release(mdp5_crtc->ctl);
|
||||
mdp5_crtc->ctl = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
|
||||
@ -386,14 +391,17 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
|
||||
mdp5_crtc->event = crtc->state->event;
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
|
||||
/*
|
||||
* If no CTL has been allocated in mdp5_crtc_atomic_check(),
|
||||
* it means we are trying to flush a CRTC whose state is disabled:
|
||||
* nothing else needs to be done.
|
||||
*/
|
||||
if (unlikely(!mdp5_crtc->ctl))
|
||||
return;
|
||||
|
||||
blend_setup(crtc);
|
||||
crtc_flush_all(crtc);
|
||||
request_pending(crtc, PENDING_FLIP);
|
||||
|
||||
if (mdp5_crtc->ctl && !crtc->state->enable) {
|
||||
mdp5_ctl_release(mdp5_crtc->ctl);
|
||||
mdp5_crtc->ctl = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static int mdp5_crtc_set_property(struct drm_crtc *crtc,
|
||||
@ -403,6 +411,32 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
|
||||
{
|
||||
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
|
||||
uint32_t xres = crtc->mode.hdisplay;
|
||||
uint32_t yres = crtc->mode.vdisplay;
|
||||
|
||||
/*
|
||||
* Cursor Region Of Interest (ROI) is a plane read from cursor
|
||||
* buffer to render. The ROI region is determined by the visibility of
|
||||
* the cursor point. In the default Cursor image the cursor point will
|
||||
* be at the top left of the cursor image, unless it is specified
|
||||
* otherwise using hotspot feature.
|
||||
*
|
||||
* If the cursor point reaches the right (xres - x < cursor.width) or
|
||||
* bottom (yres - y < cursor.height) boundary of the screen, then ROI
|
||||
* width and ROI height need to be evaluated to crop the cursor image
|
||||
* accordingly.
|
||||
* (xres-x) will be new cursor width when x > (xres - cursor.width)
|
||||
* (yres-y) will be new cursor height when y > (yres - cursor.height)
|
||||
*/
|
||||
*roi_w = min(mdp5_crtc->cursor.width, xres -
|
||||
mdp5_crtc->cursor.x);
|
||||
*roi_h = min(mdp5_crtc->cursor.height, yres -
|
||||
mdp5_crtc->cursor.y);
|
||||
}
|
||||
|
||||
static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
|
||||
struct drm_file *file, uint32_t handle,
|
||||
uint32_t width, uint32_t height)
|
||||
@ -416,6 +450,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
|
||||
unsigned int depth;
|
||||
enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
|
||||
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
|
||||
uint32_t roi_w, roi_h;
|
||||
unsigned long flags;
|
||||
|
||||
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
|
||||
@ -446,6 +481,12 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
|
||||
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
|
||||
old_bo = mdp5_crtc->cursor.scanout_bo;
|
||||
|
||||
mdp5_crtc->cursor.scanout_bo = cursor_bo;
|
||||
mdp5_crtc->cursor.width = width;
|
||||
mdp5_crtc->cursor.height = height;
|
||||
|
||||
get_roi(crtc, &roi_w, &roi_h);
|
||||
|
||||
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
|
||||
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
|
||||
MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
|
||||
@ -453,19 +494,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
|
||||
MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
|
||||
MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
|
||||
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
|
||||
MDP5_LM_CURSOR_SIZE_ROI_H(height) |
|
||||
MDP5_LM_CURSOR_SIZE_ROI_W(width));
|
||||
MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
|
||||
MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
|
||||
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);
|
||||
|
||||
|
||||
blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
|
||||
blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN;
|
||||
blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
|
||||
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
|
||||
|
||||
mdp5_crtc->cursor.scanout_bo = cursor_bo;
|
||||
mdp5_crtc->cursor.width = width;
|
||||
mdp5_crtc->cursor.height = height;
|
||||
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
|
||||
|
||||
ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true);
|
||||
@ -489,31 +525,18 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
|
||||
struct mdp5_kms *mdp5_kms = get_kms(crtc);
|
||||
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
|
||||
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
|
||||
uint32_t xres = crtc->mode.hdisplay;
|
||||
uint32_t yres = crtc->mode.vdisplay;
|
||||
uint32_t roi_w;
|
||||
uint32_t roi_h;
|
||||
unsigned long flags;
|
||||
|
||||
x = (x > 0) ? x : 0;
|
||||
y = (y > 0) ? y : 0;
|
||||
/* In case the CRTC is disabled, just drop the cursor update */
|
||||
if (unlikely(!crtc->state->enable))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Cursor Region Of Interest (ROI) is a plane read from cursor
|
||||
* buffer to render. The ROI region is determined by the visibility of
|
||||
* the cursor point. In the default Cursor image the cursor point will
|
||||
* be at the top left of the cursor image, unless it is specified
|
||||
* otherwise using hotspot feature.
|
||||
*
|
||||
* If the cursor point reaches the right (xres - x < cursor.width) or
|
||||
* bottom (yres - y < cursor.height) boundary of the screen, then ROI
|
||||
* width and ROI height need to be evaluated to crop the cursor image
|
||||
* accordingly.
|
||||
* (xres-x) will be new cursor width when x > (xres - cursor.width)
|
||||
* (yres-y) will be new cursor height when y > (yres - cursor.height)
|
||||
*/
|
||||
roi_w = min(mdp5_crtc->cursor.width, xres - x);
|
||||
roi_h = min(mdp5_crtc->cursor.height, yres - y);
|
||||
mdp5_crtc->cursor.x = x = max(x, 0);
|
||||
mdp5_crtc->cursor.y = y = max(y, 0);
|
||||
|
||||
get_roi(crtc, &roi_w, &roi_h);
|
||||
|
||||
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
|
||||
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
|
||||
@ -544,8 +567,8 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
|
||||
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
|
||||
.mode_fixup = mdp5_crtc_mode_fixup,
|
||||
.mode_set_nofb = mdp5_crtc_mode_set_nofb,
|
||||
.prepare = mdp5_crtc_disable,
|
||||
.commit = mdp5_crtc_enable,
|
||||
.disable = mdp5_crtc_disable,
|
||||
.enable = mdp5_crtc_enable,
|
||||
.atomic_check = mdp5_crtc_atomic_check,
|
||||
.atomic_begin = mdp5_crtc_atomic_begin,
|
||||
.atomic_flush = mdp5_crtc_atomic_flush,
|
||||
|
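The mdp5 cursor hunks factor the ROI (region of interest) computation into get_roi(): when the cursor sits close to the right or bottom edge, the scanned-out region is cropped to xres - x by yres - y so the hardware never reads past the visible area. A trivial sketch of that clamp with assumed screen and cursor dimensions:

#include <stdio.h>
#include <stdint.h>

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main(void)
{
    uint32_t xres = 1920, yres = 1080;          /* assumed mode */
    uint32_t cur_w = 64, cur_h = 64;            /* cursor image size */
    uint32_t x = 1900, y = 40;                  /* cursor position near the right edge */

    uint32_t roi_w = min_u32(cur_w, xres - x);  /* 20: cropped horizontally */
    uint32_t roi_h = min_u32(cur_h, yres - y);  /* 64: fully visible vertically */

    printf("ROI = %ux%u\n", roi_w, roi_h);
    return 0;
}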
@ -267,14 +267,14 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
|
||||
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
|
||||
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
|
||||
|
||||
mdp5_encoder->enabled = false;
|
||||
mdp5_encoder->enabled = true;
|
||||
}
|
||||
|
||||
static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
|
||||
.mode_fixup = mdp5_encoder_mode_fixup,
|
||||
.mode_set = mdp5_encoder_mode_set,
|
||||
.prepare = mdp5_encoder_disable,
|
||||
.commit = mdp5_encoder_enable,
|
||||
.disable = mdp5_encoder_disable,
|
||||
.enable = mdp5_encoder_enable,
|
||||
};
|
||||
|
||||
/* initialize encoder */
|
||||
|
@ -34,7 +34,10 @@ static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
|
||||
void mdp5_irq_preinstall(struct msm_kms *kms)
|
||||
{
|
||||
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
|
||||
mdp5_enable(mdp5_kms);
|
||||
mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
|
||||
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
|
||||
mdp5_disable(mdp5_kms);
|
||||
}
|
||||
|
||||
int mdp5_irq_postinstall(struct msm_kms *kms)
|
||||
@ -57,7 +60,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
|
||||
void mdp5_irq_uninstall(struct msm_kms *kms)
|
||||
{
|
||||
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
|
||||
mdp5_enable(mdp5_kms);
|
||||
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
|
||||
mdp5_disable(mdp5_kms);
|
||||
}
|
||||
|
||||
static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
|
||||
|
@ -219,8 +219,10 @@ int msm_atomic_commit(struct drm_device *dev,
|
||||
* mark our set of crtc's as busy:
|
||||
*/
|
||||
ret = start_atomic(dev->dev_private, c->crtc_mask);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
kfree(c);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is the point of no return - everything below never fails except
|
||||
|
@ -418,7 +418,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
|
||||
nouveau_fbcon_zfill(dev, fbcon);
|
||||
|
||||
/* To allow resizing without swapping buffers */
|
||||
NV_INFO(drm, "allocated %dx%d fb: 0x%lx, bo %p\n",
|
||||
NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
|
||||
nouveau_fb->base.width, nouveau_fb->base.height,
|
||||
nvbo->bo.offset, nvbo);
|
||||
|
||||
|
@ -1405,6 +1405,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
|
||||
(x << 16) | y);
|
||||
viewport_w = crtc->mode.hdisplay;
|
||||
viewport_h = (crtc->mode.vdisplay + 1) & ~1;
|
||||
if ((rdev->family >= CHIP_BONAIRE) &&
|
||||
(crtc->mode.flags & DRM_MODE_FLAG_INTERLACE))
|
||||
viewport_h *= 2;
|
||||
WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
|
||||
(viewport_w << 16) | viewport_h);
|
||||
|
||||
|
@ -1626,7 +1626,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
|
||||
struct radeon_connector *radeon_connector = NULL;
|
||||
struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
|
||||
bool travis_quirk = false;
|
||||
int encoder_mode;
|
||||
|
||||
if (connector) {
|
||||
radeon_connector = to_radeon_connector(connector);
|
||||
@ -1722,13 +1721,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
encoder_mode = atombios_get_encoder_mode(encoder);
|
||||
if (connector && (radeon_audio != 0) &&
|
||||
((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
|
||||
(ENCODER_MODE_IS_DP(encoder_mode) &&
|
||||
drm_detect_monitor_audio(radeon_connector_edid(connector)))))
|
||||
radeon_audio_dpms(encoder, mode);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -1737,10 +1729,19 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
|
||||
int encoder_mode = atombios_get_encoder_mode(encoder);
|
||||
|
||||
DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
|
||||
radeon_encoder->encoder_id, mode, radeon_encoder->devices,
|
||||
radeon_encoder->active_device);
|
||||
|
||||
if (connector && (radeon_audio != 0) &&
|
||||
((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
|
||||
(ENCODER_MODE_IS_DP(encoder_mode) &&
|
||||
drm_detect_monitor_audio(radeon_connector_edid(connector)))))
|
||||
radeon_audio_dpms(encoder, mode);
|
||||
|
||||
switch (radeon_encoder->encoder_id) {
|
||||
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
|
||||
@ -2170,12 +2171,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
|
||||
/* handled in dpms */
|
||||
encoder_mode = atombios_get_encoder_mode(encoder);
|
||||
if (connector && (radeon_audio != 0) &&
|
||||
((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
|
||||
(ENCODER_MODE_IS_DP(encoder_mode) &&
|
||||
drm_detect_monitor_audio(radeon_connector_edid(connector)))))
|
||||
radeon_audio_mode_set(encoder, adjusted_mode);
|
||||
break;
|
||||
case ENCODER_OBJECT_ID_INTERNAL_DDI:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
|
||||
@ -2197,6 +2192,13 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
|
||||
}
|
||||
|
||||
atombios_apply_encoder_quirks(encoder, adjusted_mode);
|
||||
|
||||
encoder_mode = atombios_get_encoder_mode(encoder);
|
||||
if (connector && (radeon_audio != 0) &&
|
||||
((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
|
||||
(ENCODER_MODE_IS_DP(encoder_mode) &&
|
||||
drm_detect_monitor_audio(radeon_connector_edid(connector)))))
|
||||
radeon_audio_mode_set(encoder, adjusted_mode);
|
||||
}
|
||||
|
||||
static bool
|
||||
|
@ -7555,6 +7555,9 @@ int cik_irq_set(struct radeon_device *rdev)
|
||||
WREG32(DC_HPD5_INT_CONTROL, hpd5);
|
||||
WREG32(DC_HPD6_INT_CONTROL, hpd6);
|
||||
|
||||
/* posting read */
|
||||
RREG32(SRBM_STATUS);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -26,6 +26,9 @@
|
||||
#include "radeon_audio.h"
|
||||
#include "sid.h"
|
||||
|
||||
#define DCE8_DCCG_AUDIO_DTO1_PHASE 0x05b8
|
||||
#define DCE8_DCCG_AUDIO_DTO1_MODULE 0x05bc
|
||||
|
||||
u32 dce6_endpoint_rreg(struct radeon_device *rdev,
|
||||
u32 block_offset, u32 reg)
|
||||
{
|
||||
@ -252,72 +255,67 @@ void dce6_audio_enable(struct radeon_device *rdev,
|
||||
void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
|
||||
struct radeon_crtc *crtc, unsigned int clock)
|
||||
{
|
||||
/* Two dtos; generally use dto0 for HDMI */
|
||||
/* Two dtos; generally use dto0 for HDMI */
|
||||
u32 value = 0;
|
||||
|
||||
if (crtc)
|
||||
if (crtc)
|
||||
value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
|
||||
|
||||
WREG32(DCCG_AUDIO_DTO_SOURCE, value);
|
||||
|
||||
/* Express [24MHz / target pixel clock] as an exact rational
|
||||
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
|
||||
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
|
||||
*/
|
||||
WREG32(DCCG_AUDIO_DTO0_PHASE, 24000);
|
||||
WREG32(DCCG_AUDIO_DTO0_MODULE, clock);
|
||||
/* Express [24MHz / target pixel clock] as an exact rational
|
||||
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
|
||||
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
|
||||
*/
|
||||
WREG32(DCCG_AUDIO_DTO0_PHASE, 24000);
|
||||
WREG32(DCCG_AUDIO_DTO0_MODULE, clock);
|
||||
}
|
||||
|
||||
void dce6_dp_audio_set_dto(struct radeon_device *rdev,
|
||||
struct radeon_crtc *crtc, unsigned int clock)
|
||||
{
|
||||
/* Two dtos; generally use dto1 for DP */
|
||||
/* Two dtos; generally use dto1 for DP */
|
||||
u32 value = 0;
|
||||
value |= DCCG_AUDIO_DTO_SEL;
|
||||
|
||||
if (crtc)
|
||||
if (crtc)
|
||||
value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
|
||||
|
||||
WREG32(DCCG_AUDIO_DTO_SOURCE, value);
|
||||
|
||||
/* Express [24MHz / target pixel clock] as an exact rational
|
||||
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
|
||||
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
|
||||
*/
|
||||
WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
|
||||
WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
|
||||
/* Express [24MHz / target pixel clock] as an exact rational
|
||||
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
|
||||
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
|
||||
*/
|
||||
if (ASIC_IS_DCE8(rdev)) {
|
||||
WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
|
||||
WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
|
||||
} else {
|
||||
WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
|
||||
WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
|
||||
}
|
||||
}
|
||||
|
||||
void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable)
|
||||
void dce6_dp_enable(struct drm_encoder *encoder, bool enable)
|
||||
{
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
||||
uint32_t offset;
|
||||
|
||||
if (!dig || !dig->afmt)
|
||||
return;
|
||||
|
||||
offset = dig->afmt->offset;
|
||||
|
||||
if (enable) {
|
||||
if (dig->afmt->enabled)
|
||||
return;
|
||||
|
||||
WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
|
||||
WREG32(EVERGREEN_DP_SEC_CNTL + offset,
|
||||
EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */
|
||||
EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */
|
||||
EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
|
||||
EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
|
||||
radeon_audio_enable(rdev, dig->afmt->pin, true);
|
||||
WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
|
||||
EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
|
||||
WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
|
||||
EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */
|
||||
EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */
|
||||
EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
|
||||
EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
|
||||
} else {
|
||||
if (!dig->afmt->enabled)
|
||||
return;
|
||||
|
||||
WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0);
|
||||
radeon_audio_enable(rdev, dig->afmt->pin, false);
|
||||
WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
|
||||
}
|
||||
|
||||
dig->afmt->enabled = enable;
|
||||
|
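In the dce6 hunks the audio DTO is programmed with the rational ratio 24 MHz / pixel clock: PHASE holds the numerator (24000, i.e. 24 MHz expressed in kHz) and MODULE the denominator (the target clock in kHz). A quick sketch of what those two register values encode, with an assumed pixel clock; the interpretation that the hardware scales the selected source clock by phase/module to recover a fixed 24 MHz audio reference is an assumption here, not something stated in the patch:

#include <stdio.h>

int main(void)
{
    unsigned int clock_khz  = 148500;         /* assumed target pixel clock, kHz */
    unsigned int dto_phase  = 24000;          /* numerator: 24 MHz in kHz */
    unsigned int dto_module = clock_khz;      /* denominator: pixel clock in kHz */

    /* Presumed meaning: source clock * phase / module gives a ~24 MHz reference. */
    double ratio = (double)dto_phase / dto_module;
    printf("phase=%u module=%u ratio=%.6f (24 MHz / pixel clock)\n",
           dto_phase, dto_module, ratio);
    return 0;
}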
@ -4593,6 +4593,9 @@ int evergreen_irq_set(struct radeon_device *rdev)
|
||||
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
|
||||
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
|
||||
|
||||
/* posting read */
|
||||
RREG32(SRBM_STATUS);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -272,7 +272,7 @@ void dce4_hdmi_audio_set_dto(struct radeon_device *rdev,
|
||||
}
|
||||
|
||||
void dce4_dp_audio_set_dto(struct radeon_device *rdev,
|
||||
struct radeon_crtc *crtc, unsigned int clock)
|
||||
struct radeon_crtc *crtc, unsigned int clock)
|
||||
{
|
||||
u32 value;
|
||||
|
||||
@ -294,7 +294,7 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev,
|
||||
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
|
||||
*/
|
||||
WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
|
||||
WREG32(DCCG_AUDIO_DTO1_MODULE, rdev->clock.max_pixel_clock * 10);
|
||||
WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
|
||||
}
|
||||
|
||||
void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset)
|
||||
@ -350,20 +350,9 @@ void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset)
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
|
||||
WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
|
||||
HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
|
||||
HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
|
||||
|
||||
WREG32(AFMT_INFOFRAME_CONTROL0 + offset,
|
||||
AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
|
||||
|
||||
WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
|
||||
HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
|
||||
|
||||
WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
|
||||
HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
|
||||
HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
|
||||
|
||||
WREG32(AFMT_60958_0 + offset,
|
||||
AFMT_60958_CS_CHANNEL_NUMBER_L(1));
|
||||
|
||||
@ -408,15 +397,19 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
|
||||
if (!dig || !dig->afmt)
|
||||
return;
|
||||
|
||||
/* Silent, r600_hdmi_enable will raise WARN for us */
|
||||
if (enable && dig->afmt->enabled)
|
||||
return;
|
||||
if (!enable && !dig->afmt->enabled)
|
||||
return;
|
||||
if (enable) {
|
||||
WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset,
|
||||
HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
|
||||
|
||||
if (!enable && dig->afmt->pin) {
|
||||
radeon_audio_enable(rdev, dig->afmt->pin, 0);
|
||||
dig->afmt->pin = NULL;
|
||||
WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset,
|
||||
HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
|
||||
HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
|
||||
|
||||
WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
|
||||
HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
|
||||
HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
|
||||
} else {
|
||||
WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0);
|
||||
}
|
||||
|
||||
dig->afmt->enabled = enable;
|
||||
@ -425,33 +418,28 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
|
||||
enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
|
||||
}
|
||||
|
||||
void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable)
|
||||
void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
|
||||
{
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
||||
uint32_t offset;
|
||||
|
||||
if (!dig || !dig->afmt)
|
||||
return;
|
||||
|
||||
offset = dig->afmt->offset;
|
||||
|
||||
if (enable) {
|
||||
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
|
||||
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
|
||||
struct radeon_connector_atom_dig *dig_connector;
|
||||
uint32_t val;
|
||||
|
||||
if (dig->afmt->enabled)
|
||||
return;
|
||||
|
||||
WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
|
||||
WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
|
||||
EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
|
||||
|
||||
if (radeon_connector->con_priv) {
|
||||
dig_connector = radeon_connector->con_priv;
|
||||
val = RREG32(EVERGREEN_DP_SEC_AUD_N + offset);
|
||||
val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset);
|
||||
val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf);
|
||||
|
||||
if (dig_connector->dp_clock == 162000)
|
||||
@ -459,21 +447,16 @@ void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable)
|
||||
else
|
||||
val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(5);
|
||||
|
||||
WREG32(EVERGREEN_DP_SEC_AUD_N + offset, val);
|
||||
WREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset, val);
|
||||
}
|
||||
|
||||
WREG32(EVERGREEN_DP_SEC_CNTL + offset,
|
||||
WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
|
||||
EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */
|
||||
EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */
|
||||
EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
|
||||
EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
|
||||
radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
|
||||
} else {
|
||||
if (!dig->afmt->enabled)
|
||||
return;
|
||||
|
||||
WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0);
|
||||
radeon_audio_enable(rdev, dig->afmt->pin, 0);
|
||||
WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
|
||||
}
|
||||
|
||||
dig->afmt->enabled = enable;
|
||||
|
@ -728,6 +728,10 @@ int r100_irq_set(struct radeon_device *rdev)
|
||||
tmp |= RADEON_FP2_DETECT_MASK;
|
||||
}
|
||||
WREG32(RADEON_GEN_INT_CNTL, tmp);
|
||||
|
||||
/* read back to post the write */
|
||||
RREG32(RADEON_GEN_INT_CNTL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
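Several of the radeon hunks (r100, r600, evergreen, cik) add a posting read after writing the interrupt-control registers: reading back any register on the same bus forces earlier posted MMIO writes to reach the device before the function returns. A hedged sketch of the pattern; the register offsets and helper names below are placeholders, not real radeon definitions:

#include <stdint.h>
#include <stdio.h>

/* Placeholder register offsets for illustration only. */
#define GEN_INT_CNTL   0x0040
#define STATUS_REG     0x0e50

static inline void reg_write(volatile uint32_t *mmio, uint32_t off, uint32_t val)
{
    mmio[off / 4] = val;
}

static inline uint32_t reg_read(volatile uint32_t *mmio, uint32_t off)
{
    return mmio[off / 4];
}

static void enable_irqs(volatile uint32_t *mmio, uint32_t mask)
{
    reg_write(mmio, GEN_INT_CNTL, mask);
    /* Posting read: the value is discarded; the read only flushes the
     * posted write so the device sees it before we continue. */
    (void)reg_read(mmio, STATUS_REG);
}

int main(void)
{
    static uint32_t fake_regs[0x1000 / 4];   /* stand-in for the mapped BAR */
    enable_irqs(fake_regs, 0x1);
    printf("GEN_INT_CNTL = %#x\n", fake_regs[GEN_INT_CNTL / 4]);
    return 0;
}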
|
@ -3784,6 +3784,9 @@ int r600_irq_set(struct radeon_device *rdev)
|
||||
WREG32(RV770_CG_THERMAL_INT, thermal_int);
|
||||
}
|
||||
|
||||
/* posting read */
|
||||
RREG32(R_000E50_SRBM_STATUS);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -476,17 +476,6 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
|
||||
if (!dig || !dig->afmt)
|
||||
return;
|
||||
|
||||
/* Silent, r600_hdmi_enable will raise WARN for us */
|
||||
if (enable && dig->afmt->enabled)
|
||||
return;
|
||||
if (!enable && !dig->afmt->enabled)
|
||||
return;
|
||||
|
||||
if (!enable && dig->afmt->pin) {
|
||||
radeon_audio_enable(rdev, dig->afmt->pin, 0);
|
||||
dig->afmt->pin = NULL;
|
||||
}
|
||||
|
||||
/* Older chipsets require setting HDMI and routing manually */
|
||||
if (!ASIC_IS_DCE3(rdev)) {
|
||||
if (enable)
|
||||
|
@ -101,8 +101,8 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
|
||||
struct drm_display_mode *mode);
|
||||
void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
|
||||
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
|
||||
void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable);
|
||||
void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable);
|
||||
void evergreen_dp_enable(struct drm_encoder *encoder, bool enable);
|
||||
void dce6_dp_enable(struct drm_encoder *encoder, bool enable);
|
||||
|
||||
static const u32 pin_offsets[7] =
|
||||
{
|
||||
@ -210,7 +210,7 @@ static struct radeon_audio_funcs dce4_dp_funcs = {
|
||||
.set_avi_packet = evergreen_set_avi_packet,
|
||||
.set_audio_packet = dce4_set_audio_packet,
|
||||
.mode_set = radeon_audio_dp_mode_set,
|
||||
.dpms = evergreen_enable_dp_audio_packets,
|
||||
.dpms = evergreen_dp_enable,
|
||||
};
|
||||
|
||||
static struct radeon_audio_funcs dce6_hdmi_funcs = {
|
||||
@ -240,7 +240,7 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
|
||||
.set_avi_packet = evergreen_set_avi_packet,
|
||||
.set_audio_packet = dce4_set_audio_packet,
|
||||
.mode_set = radeon_audio_dp_mode_set,
|
||||
.dpms = dce6_enable_dp_audio_packets,
|
||||
.dpms = dce6_dp_enable,
|
||||
};
|
||||
|
||||
static void radeon_audio_interface_init(struct radeon_device *rdev)
|
||||
@ -452,7 +452,7 @@ void radeon_audio_enable(struct radeon_device *rdev,
|
||||
}
|
||||
|
||||
void radeon_audio_detect(struct drm_connector *connector,
|
||||
enum drm_connector_status status)
|
||||
enum drm_connector_status status)
|
||||
{
|
||||
struct radeon_device *rdev;
|
||||
struct radeon_encoder *radeon_encoder;
|
||||
@ -483,14 +483,11 @@ void radeon_audio_detect(struct drm_connector *connector,
else
radeon_encoder->audio = rdev->audio.hdmi_funcs;

radeon_audio_write_speaker_allocation(connector->encoder);
radeon_audio_write_sad_regs(connector->encoder);
if (connector->encoder->crtc)
radeon_audio_write_latency_fields(connector->encoder,
&connector->encoder->crtc->mode);
dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
} else {
radeon_audio_enable(rdev, dig->afmt->pin, 0);
dig->afmt->pin = NULL;
}
}

@ -694,23 +691,22 @@ static void radeon_audio_set_mute(struct drm_encoder *encoder, bool mute)
* update the info frames with the data from the current display mode
*/
static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode)
struct drm_display_mode *mode)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;

if (!dig || !dig->afmt)
return;

/* disable audio prior to setting up hw */
dig->afmt->pin = radeon_audio_get_pin(encoder);
radeon_audio_enable(rdev, dig->afmt->pin, 0);
radeon_audio_set_mute(encoder, true);

radeon_audio_write_speaker_allocation(encoder);
radeon_audio_write_sad_regs(encoder);
radeon_audio_write_latency_fields(encoder, mode);
radeon_audio_set_dto(encoder, mode->clock);
radeon_audio_set_vbi_packet(encoder);
radeon_hdmi_set_color_depth(encoder);
radeon_audio_set_mute(encoder, false);
radeon_audio_update_acr(encoder, mode->clock);
radeon_audio_set_audio_packet(encoder);
radeon_audio_select_pin(encoder);
@ -718,8 +714,7 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
if (radeon_audio_set_avi_packet(encoder, mode) < 0)
return;

/* enable audio after to setting up hw */
radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
radeon_audio_set_mute(encoder, false);
}

static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,

@ -729,23 +724,26 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;

if (!dig || !dig->afmt)
return;

/* disable audio prior to setting up hw */
dig->afmt->pin = radeon_audio_get_pin(encoder);
radeon_audio_enable(rdev, dig->afmt->pin, 0);

radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
radeon_audio_write_speaker_allocation(encoder);
radeon_audio_write_sad_regs(encoder);
radeon_audio_write_latency_fields(encoder, mode);
if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
else
radeon_audio_set_dto(encoder, dig_connector->dp_clock);
radeon_audio_set_audio_packet(encoder);
radeon_audio_select_pin(encoder);

if (radeon_audio_set_avi_packet(encoder, mode) < 0)
return;

/* enable audio after to setting up hw */
radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
}

void radeon_audio_mode_set(struct drm_encoder *encoder,

@ -256,11 +256,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
u32 ring = RADEON_CS_RING_GFX;
s32 priority = 0;

INIT_LIST_HEAD(&p->validated);

if (!cs->num_chunks) {
return 0;
}

/* get chunks */
INIT_LIST_HEAD(&p->validated);
p->idx = 0;
p->ib.sa_bo = NULL;
p->const_ib.sa_bo = NULL;

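Note: the reordering above initialises the validated list before the early return for an empty command stream, so later cleanup code that walks p->validated always sees a valid (if empty) list rather than uninitialised memory. A minimal sketch of the idea, with hypothetical names:

    #include <linux/list.h>

    struct parser {
            struct list_head validated;	/* objects queued for validation */
            unsigned int num_chunks;
    };

    /* Sketch: init the list head before any early exit, so cleanup paths
     * can always rely on list_empty()/list_for_each_entry(). */
    static int parser_init(struct parser *p)
    {
            INIT_LIST_HEAD(&p->validated);

            if (!p->num_chunks)
                    return 0;

            /* ... fill in the rest of the parser state ... */
            return 0;
    }
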
@ -694,6 +694,10 @@ int rs600_irq_set(struct radeon_device *rdev)
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
if (ASIC_IS_DCE2(rdev))
WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);

/* posting read */
RREG32(R_000040_GEN_INT_CNTL);

return 0;
}

@ -6203,6 +6203,9 @@ int si_irq_set(struct radeon_device *rdev)

WREG32(CG_THERMAL_INT, thermal_int);

/* posting read */
RREG32(SRBM_STATUS);

return 0;
}

@ -912,8 +912,8 @@

#define DCCG_AUDIO_DTO0_PHASE 0x05b0
#define DCCG_AUDIO_DTO0_MODULE 0x05b4
#define DCCG_AUDIO_DTO1_PHASE 0x05b8
#define DCCG_AUDIO_DTO1_MODULE 0x05bc
#define DCCG_AUDIO_DTO1_PHASE 0x05c0
#define DCCG_AUDIO_DTO1_MODULE 0x05c4

#define AFMT_AUDIO_SRC_CONTROL 0x713c
#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)

@ -74,7 +74,7 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
pr_err(" has_type: %d\n", man->has_type);
pr_err(" use_type: %d\n", man->use_type);
pr_err(" flags: 0x%08X\n", man->flags);
pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset);
pr_err(" gpu_offset: 0x%08llX\n", man->gpu_offset);
pr_err(" size: %llu\n", man->size);
pr_err(" available_caching: 0x%08X\n", man->available_caching);
pr_err(" default_caching: 0x%08X\n", man->default_caching);

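Note: the format change tracks a type change; once gpu_offset is a 64-bit value it needs the ll length modifier, since %lX is only correct where long happens to be 64 bits wide. A one-line illustration, assuming the field is now u64:

    #include <linux/printk.h>
    #include <linux/types.h>

    static void show_gpu_offset(u64 gpu_offset)
    {
            /* u64 is unsigned long long in the kernel, so print it with %llX */
            pr_err("  gpu_offset: 0x%08llX\n", gpu_offset);
    }
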
@ -459,6 +459,8 @@ static void ipu_di_config_clock(struct ipu_di *di,

clkrate = clk_get_rate(di->clk_ipu);
div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock);
if (div == 0)
div = 1;
rate = clkrate / div;

error = rate / (sig->mode.pixelclock / 1000);

@ -17,27 +17,31 @@
#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>

#include <asm/iosf_mbi.h>

#include "i2c-designware-core.h"

#define SEMAPHORE_TIMEOUT 100
#define PUNIT_SEMAPHORE 0x7
#define PUNIT_SEMAPHORE_BIT BIT(0)
#define PUNIT_SEMAPHORE_ACQUIRE BIT(1)

static unsigned long acquired;

static int get_sem(struct device *dev, u32 *sem)
{
u32 reg_val;
u32 data;
int ret;

ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, PUNIT_SEMAPHORE,
&reg_val);
&data);
if (ret) {
dev_err(dev, "iosf failed to read punit semaphore\n");
return ret;
}

*sem = reg_val & 0x1;
*sem = data & PUNIT_SEMAPHORE_BIT;

return 0;
}

@ -52,27 +56,29 @@ static void reset_semaphore(struct device *dev)
return;
}

data = data & 0xfffffffe;
data &= ~PUNIT_SEMAPHORE_BIT;
if (iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
PUNIT_SEMAPHORE, data))
PUNIT_SEMAPHORE, data))
dev_err(dev, "iosf failed to reset punit semaphore during write\n");
}

int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
static int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
{
u32 sem = 0;
u32 sem;
int ret;
unsigned long start, end;

might_sleep();

if (!dev || !dev->dev)
return -ENODEV;

if (!dev->acquire_lock)
if (!dev->release_lock)
return 0;

/* host driver writes 0x2 to side band semaphore register */
/* host driver writes to side band semaphore register */
ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
PUNIT_SEMAPHORE, 0x2);
PUNIT_SEMAPHORE, PUNIT_SEMAPHORE_ACQUIRE);
if (ret) {
dev_err(dev->dev, "iosf punit semaphore request failed\n");
return ret;

@ -81,7 +87,7 @@ int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
/* host driver waits for bit 0 to be set in semaphore register */
start = jiffies;
end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT);
while (!time_after(jiffies, end)) {
do {
ret = get_sem(dev->dev, &sem);
if (!ret && sem) {
acquired = jiffies;
@ -91,14 +97,14 @@ int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
}

usleep_range(1000, 2000);
}
} while (time_before(jiffies, end));

dev_err(dev->dev, "punit semaphore timed out, resetting\n");
reset_semaphore(dev->dev);

ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
PUNIT_SEMAPHORE, &sem);
if (!ret)
PUNIT_SEMAPHORE, &sem);
if (ret)
dev_err(dev->dev, "iosf failed to read punit semaphore\n");
else
dev_err(dev->dev, "PUNIT SEM: %d\n", sem);
@ -107,9 +113,8 @@ int baytrail_i2c_acquire(struct dw_i2c_dev *dev)

return -ETIMEDOUT;
}
EXPORT_SYMBOL(baytrail_i2c_acquire);

void baytrail_i2c_release(struct dw_i2c_dev *dev)
static void baytrail_i2c_release(struct dw_i2c_dev *dev)
{
if (!dev || !dev->dev)
return;
@ -121,7 +126,6 @@ void baytrail_i2c_release(struct dw_i2c_dev *dev)
dev_dbg(dev->dev, "punit semaphore held for %ums\n",
jiffies_to_msecs(jiffies - acquired));
}
EXPORT_SYMBOL(baytrail_i2c_release);

int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
{
@ -137,7 +141,6 @@ int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
return 0;

status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);

if (ACPI_FAILURE(status))
return 0;

@ -153,7 +156,6 @@ int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)

return 0;
}
EXPORT_SYMBOL(i2c_dw_eval_lock_support);

MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
MODULE_DESCRIPTION("Baytrail I2C Semaphore driver");

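Note: the acquire path above is a bounded poll: write the request bit, then re-read the semaphore at intervals until it is granted or SEMAPHORE_TIMEOUT expires, with the while loop turned into do/while so at least one check happens even if the deadline has already passed. The bare shape of that loop, with the hardware access stubbed out as a callback for illustration:

    #include <linux/jiffies.h>
    #include <linux/delay.h>
    #include <linux/errno.h>

    #define POLL_TIMEOUT_MS	100	/* illustrative; mirrors SEMAPHORE_TIMEOUT above */

    /* Sketch: poll check_ready() until it reports success or the deadline passes. */
    static int poll_until_ready(int (*check_ready)(void))
    {
            unsigned long end = jiffies + msecs_to_jiffies(POLL_TIMEOUT_MS);

            do {
                    if (check_ready())
                            return 0;
                    usleep_range(1000, 2000);
            } while (time_before(jiffies, end));

            return -ETIMEDOUT;
    }
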
@ -58,20 +58,11 @@
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
}

/* LSB is in nV to eliminate floating point */
static const u32 rates_to_lsb[] = {1000000, 250000, 62500, 15625};

/*
* scales calculated as:
* rates_to_lsb[sample_rate] / (1 << pga);
* pga is 1 for 0, 2
*/

static const int mcp3422_scales[4][4] = {
{ 1000000, 250000, 62500, 15625 },
{ 500000 , 125000, 31250, 7812 },
{ 250000 , 62500 , 15625, 3906 },
{ 125000 , 31250 , 7812 , 1953 } };
{ 1000000, 500000, 250000, 125000 },
{ 250000 , 125000, 62500 , 31250 },
{ 62500 , 31250 , 15625 , 7812 },
{ 15625 , 7812 , 3906 , 1953 } };

/* Constant msleep times for data acquisitions */
static const int mcp3422_read_times[4] = {

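Note: the removed comment states the rule the table encodes: each scale is rates_to_lsb[sample_rate] divided by 2^pga. The replacement rows are that same rule indexed as [sample_rate][pga]; a small stand-alone sketch (not driver code) reproduces them:

    #include <stdio.h>

    /* LSB size in nV per sample rate, as in the array removed above */
    static const unsigned int rates_to_lsb[] = { 1000000, 250000, 62500, 15625 };

    int main(void)
    {
            /* scale[sample_rate][pga] = rates_to_lsb[sample_rate] / (1 << pga) */
            for (int rate = 0; rate < 4; rate++) {
                    for (int pga = 0; pga < 4; pga++)
                            printf("%7u ", rates_to_lsb[rate] >> pga);
                    printf("\n");
            }
            return 0;
    }
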
@ -296,7 +296,8 @@ static int iadc_do_conversion(struct iadc_chip *iadc, int chan, u16 *data)
if (iadc->poll_eoc) {
ret = iadc_poll_wait_eoc(iadc, wait);
} else {
ret = wait_for_completion_timeout(&iadc->complete, wait);
ret = wait_for_completion_timeout(&iadc->complete,
usecs_to_jiffies(wait));
if (!ret)
ret = -ETIMEDOUT;
else

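Note: wait_for_completion_timeout() takes its timeout in jiffies, so a wait expressed in microseconds has to go through usecs_to_jiffies() first, and its zero return on timeout is then mapped to -ETIMEDOUT. A sketch of the corrected pattern (the completion and wait value are placeholders):

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    /* Sketch: wait up to wait_us microseconds for @done to be completed. */
    static int wait_done_us(struct completion *done, unsigned int wait_us)
    {
            unsigned long left;

            left = wait_for_completion_timeout(done, usecs_to_jiffies(wait_us));
            return left ? 0 : -ETIMEDOUT;
    }
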
@ -640,6 +640,7 @@ static int ssp_remove(struct spi_device *spi)
return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ssp_suspend(struct device *dev)
{
int ret;
@ -688,6 +689,7 @@ static int ssp_resume(struct device *dev)

return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops ssp_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ssp_suspend, ssp_resume)

@ -322,7 +322,7 @@ static int ad5686_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
spi_set_drvdata(spi, indio_dev);

st->reg = devm_regulator_get(&spi->dev, "vcc");
st->reg = devm_regulator_get_optional(&spi->dev, "vcc");
if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
if (ret)

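Note: with devm_regulator_get_optional() a missing "vcc" supply comes back as an error pointer rather than a dummy regulator, so the IS_ERR() test above genuinely distinguishes boards that provide the supply from those that do not. A sketch of that probe-time pattern (names are illustrative; a full driver would usually still propagate -EPROBE_DEFER separately):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/regulator/consumer.h>

    /* Sketch: enable an optional supply only when the board provides one. */
    static int enable_optional_vcc(struct device *dev, struct regulator **out)
    {
            struct regulator *reg;
            int ret;

            reg = devm_regulator_get_optional(dev, "vcc");
            if (IS_ERR(reg)) {
                    *out = NULL;	/* no supply described: carry on without it */
                    return 0;
            }

            ret = regulator_enable(reg);
            if (ret)
                    return ret;

            *out = reg;
            return 0;
    }
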
@ -29,6 +29,7 @@
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
@ -39,8 +40,12 @@

#define DHT11_DATA_VALID_TIME 2000000000 /* 2s in ns */

#define DHT11_EDGES_PREAMBLE 4
#define DHT11_EDGES_PREAMBLE 2
#define DHT11_BITS_PER_READ 40
/*
* Note that when reading the sensor actually 84 edges are detected, but
* since the last edge is not significant, we only store 83:
*/
#define DHT11_EDGES_PER_READ (2*DHT11_BITS_PER_READ + DHT11_EDGES_PREAMBLE + 1)

/* Data transmission timing (nano seconds) */
@ -57,6 +62,7 @@ struct dht11 {
int irq;

struct completion completion;
struct mutex lock;

s64 timestamp;
int temperature;
@ -88,7 +94,7 @@ static int dht11_decode(struct dht11 *dht11, int offset)
unsigned char temp_int, temp_dec, hum_int, hum_dec, checksum;

/* Calculate timestamp resolution */
for (i = 0; i < dht11->num_edges; ++i) {
for (i = 1; i < dht11->num_edges; ++i) {
t = dht11->edges[i].ts - dht11->edges[i-1].ts;
if (t > 0 && t < timeres)
timeres = t;

@ -138,6 +144,27 @@ static int dht11_decode(struct dht11 *dht11, int offset)
return 0;
}

/*
* IRQ handler called on GPIO edges
*/
static irqreturn_t dht11_handle_irq(int irq, void *data)
{
struct iio_dev *iio = data;
struct dht11 *dht11 = iio_priv(iio);

/* TODO: Consider making the handler safe for IRQ sharing */
if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
dht11->edges[dht11->num_edges].ts = iio_get_time_ns();
dht11->edges[dht11->num_edges++].value =
gpio_get_value(dht11->gpio);

if (dht11->num_edges >= DHT11_EDGES_PER_READ)
complete(&dht11->completion);
}

return IRQ_HANDLED;
}

static int dht11_read_raw(struct iio_dev *iio_dev,
const struct iio_chan_spec *chan,
int *val, int *val2, long m)
@ -145,6 +172,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
struct dht11 *dht11 = iio_priv(iio_dev);
int ret;

mutex_lock(&dht11->lock);
if (dht11->timestamp + DHT11_DATA_VALID_TIME < iio_get_time_ns()) {
reinit_completion(&dht11->completion);

@ -157,8 +185,17 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
if (ret)
goto err;

ret = request_irq(dht11->irq, dht11_handle_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
iio_dev->name, iio_dev);
if (ret)
goto err;

ret = wait_for_completion_killable_timeout(&dht11->completion,
HZ);

free_irq(dht11->irq, iio_dev);

if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) {
dev_err(&iio_dev->dev,
"Only %d signal edges detected\n",
@ -185,6 +222,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
ret = -EINVAL;
err:
dht11->num_edges = -1;
mutex_unlock(&dht11->lock);
return ret;
}

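Note: the read path above now requests the GPIO interrupt only for the duration of one measurement, waits (killably, capped at one second) for the handler to collect enough edges, and frees the interrupt again before decoding, instead of holding it from probe time onward. Reduced to its skeleton, with illustrative names:

    #include <linux/interrupt.h>
    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    /* Sketch: capture edges through a temporary IRQ handler, then release it. */
    static int capture_edges(int irq, irq_handler_t handler, void *dev_id,
                             struct completion *done)
    {
            long left;
            int ret;

            ret = request_irq(irq, handler,
                              IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
                              "edge-capture", dev_id);
            if (ret)
                    return ret;

            /* the handler completes @done once enough edges have been seen */
            left = wait_for_completion_killable_timeout(done, HZ);
            free_irq(irq, dev_id);

            if (left == 0)
                    return -ETIMEDOUT;	/* no complete() within HZ jiffies */
            if (left < 0)
                    return left;		/* interrupted by a fatal signal */
            return 0;
    }
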
@ -193,27 +231,6 @@ static const struct iio_info dht11_iio_info = {
.read_raw = dht11_read_raw,
};

/*
* IRQ handler called on GPIO edges
*/
static irqreturn_t dht11_handle_irq(int irq, void *data)
{
struct iio_dev *iio = data;
struct dht11 *dht11 = iio_priv(iio);

/* TODO: Consider making the handler safe for IRQ sharing */
if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
dht11->edges[dht11->num_edges].ts = iio_get_time_ns();
dht11->edges[dht11->num_edges++].value =
gpio_get_value(dht11->gpio);

if (dht11->num_edges >= DHT11_EDGES_PER_READ)
complete(&dht11->completion);
}

return IRQ_HANDLED;
}

static const struct iio_chan_spec dht11_chan_spec[] = {
{ .type = IIO_TEMP,
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), },
@ -256,11 +273,6 @@ static int dht11_probe(struct platform_device *pdev)
dev_err(dev, "GPIO %d has no interrupt\n", dht11->gpio);
return -EINVAL;
}
ret = devm_request_irq(dev, dht11->irq, dht11_handle_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
pdev->name, iio);
if (ret)
return ret;

dht11->timestamp = iio_get_time_ns() - DHT11_DATA_VALID_TIME - 1;
dht11->num_edges = -1;
@ -268,6 +280,7 @@ static int dht11_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, iio);

init_completion(&dht11->completion);
mutex_init(&dht11->lock);
iio->name = pdev->name;
iio->dev.parent = &pdev->dev;
iio->info = &dht11_iio_info;

Some files were not shown because too many files have changed in this diff.