Merge 3.8-rc4 into driver-core-next

This is to fix up a build problem with a wireless driver due to the
dynamic-debug patches in this branch.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2013-01-17 19:48:18 -08:00
commit ed408f7c0f
355 changed files with 10014 additions and 2275 deletions


@ -228,7 +228,7 @@ S: Maintained
F: drivers/platform/x86/acerhdf.c
ACER WMI LAPTOP EXTRAS
M: Joey Lee <jlee@novell.com>
M: "Lee, Chun-Yi" <jlee@suse.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/acer-wmi.c
@ -648,7 +648,7 @@ F: arch/arm/
ARM SUB-ARCHITECTURES
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: MAINTAINED
S: Maintained
F: arch/arm/mach-*/
F: arch/arm/plat-*/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git
@ -1351,6 +1351,14 @@ W: http://wireless.kernel.org/en/users/Drivers/ath9k
S: Supported
F: drivers/net/wireless/ath/ath9k/
WILOCITY WIL6210 WIRELESS DRIVER
M: Vladimir Kondratiev <qca_vkondrat@qca.qualcomm.com>
L: linux-wireless@vger.kernel.org
L: wil6210@qca.qualcomm.com
S: Supported
W: http://wireless.kernel.org/en/users/Drivers/wil6210
F: drivers/net/wireless/ath/wil6210/
CARL9170 LINUX COMMUNITY WIRELESS DRIVER
M: Christian Lamparter <chunkeey@googlemail.com>
L: linux-wireless@vger.kernel.org
@ -1964,9 +1972,9 @@ S: Maintained
F: drivers/usb/host/ohci-ep93xx.c
CIRRUS LOGIC CS4270 SOUND DRIVER
M: Timur Tabi <timur@freescale.com>
M: Timur Tabi <timur@tabi.org>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
S: Odd Fixes
F: sound/soc/codecs/cs4270*
CLEANCACHE API
@ -3183,9 +3191,9 @@ F: include/uapi/video/
F: include/uapi/linux/fb.h
FREESCALE DIU FRAMEBUFFER DRIVER
M: Timur Tabi <timur@freescale.com>
M: Timur Tabi <timur@tabi.org>
L: linux-fbdev@vger.kernel.org
S: Supported
S: Maintained
F: drivers/video/fsl-diu-fb.*
FREESCALE DMA DRIVER
@ -3220,9 +3228,8 @@ F: drivers/net/ethernet/freescale/fs_enet/
F: include/linux/fs_enet_pd.h
FREESCALE QUICC ENGINE LIBRARY
M: Timur Tabi <timur@freescale.com>
L: linuxppc-dev@lists.ozlabs.org
S: Supported
S: Orphan
F: arch/powerpc/sysdev/qe_lib/
F: arch/powerpc/include/asm/*qe.h
@ -3241,16 +3248,16 @@ S: Maintained
F: drivers/net/ethernet/freescale/ucc_geth*
FREESCALE QUICC ENGINE UCC UART DRIVER
M: Timur Tabi <timur@freescale.com>
M: Timur Tabi <timur@tabi.org>
L: linuxppc-dev@lists.ozlabs.org
S: Supported
S: Maintained
F: drivers/tty/serial/ucc_uart.c
FREESCALE SOC SOUND DRIVERS
M: Timur Tabi <timur@freescale.com>
M: Timur Tabi <timur@tabi.org>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
L: linuxppc-dev@lists.ozlabs.org
S: Supported
S: Maintained
F: sound/soc/fsl/fsl*
F: sound/soc/fsl/mpc8610_hpcd.c
@ -5077,7 +5084,7 @@ S: Maintained
F: drivers/media/radio/radio-mr800.c
MSI LAPTOP SUPPORT
M: "Lee, Chun-Yi" <jlee@novell.com>
M: "Lee, Chun-Yi" <jlee@suse.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/msi-laptop.c
@ -5507,8 +5514,7 @@ M: Benoît Cousson <b-cousson@ti.com>
M: Paul Walmsley <paul@pwsan.com>
L: linux-omap@vger.kernel.org
S: Maintained
F: arch/arm/mach-omap2/omap_hwmod.c
F: arch/arm/plat-omap/include/plat/omap_hwmod.h
F: arch/arm/mach-omap2/omap_hwmod.*
OMAP HWMOD DATA FOR OMAP4-BASED DEVICES
M: Benoît Cousson <b-cousson@ti.com>
@ -7334,7 +7340,7 @@ S: Odd Fixes
F: drivers/staging/speakup/
STAGING - TI DSP BRIDGE DRIVERS
M: Omar Ramirez Luna <omar.ramirez@ti.com>
M: Omar Ramirez Luna <omar.ramirez@copitl.com>
S: Odd Fixes
F: drivers/staging/tidspbridge/
@ -8526,7 +8532,7 @@ F: Documentation/x86/
F: arch/x86/
X86 PLATFORM DRIVERS
M: Matthew Garrett <mjg@redhat.com>
M: Matthew Garrett <matthew.garrett@nebula.com>
L: platform-driver-x86@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.git
S: Maintained


@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 8
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Terrified Chipmunk
# *DOCUMENTATION*


@ -155,6 +155,7 @@ dtb-$(CONFIG_ARCH_VT8500) += vt8500-bv07.dtb \
dtb-$(CONFIG_ARCH_ZYNQ) += zynq-zc702.dtb
targets += dtbs
targets += $(dtb-y)
endif
# *.dtb used to be generated in the directory above. Clean out the


@ -306,6 +306,22 @@
};
};
ssc0 {
pinctrl_ssc0_tx: ssc0_tx-0 {
atmel,pins =
<1 16 0x1 0x0 /* PB16 periph A */
1 17 0x1 0x0 /* PB17 periph A */
1 18 0x1 0x0>; /* PB18 periph A */
};
pinctrl_ssc0_rx: ssc0_rx-0 {
atmel,pins =
<1 19 0x1 0x0 /* PB19 periph A */
1 20 0x1 0x0 /* PB20 periph A */
1 21 0x1 0x0>; /* PB21 periph A */
};
};
pioA: gpio@fffff400 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff400 0x200>;
@ -450,6 +466,8 @@
compatible = "atmel,at91rm9200-ssc";
reg = <0xfffbc000 0x4000>;
interrupts = <14 4 5>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
status = "disabled";
};


@ -271,6 +271,38 @@
};
};
ssc0 {
pinctrl_ssc0_tx: ssc0_tx-0 {
atmel,pins =
<1 0 0x2 0x0 /* PB0 periph B */
1 1 0x2 0x0 /* PB1 periph B */
1 2 0x2 0x0>; /* PB2 periph B */
};
pinctrl_ssc0_rx: ssc0_rx-0 {
atmel,pins =
<1 3 0x2 0x0 /* PB3 periph B */
1 4 0x2 0x0 /* PB4 periph B */
1 5 0x2 0x0>; /* PB5 periph B */
};
};
ssc1 {
pinctrl_ssc1_tx: ssc1_tx-0 {
atmel,pins =
<1 6 0x1 0x0 /* PB6 periph A */
1 7 0x1 0x0 /* PB7 periph A */
1 8 0x1 0x0>; /* PB8 periph A */
};
pinctrl_ssc1_rx: ssc1_rx-0 {
atmel,pins =
<1 9 0x1 0x0 /* PB9 periph A */
1 10 0x1 0x0 /* PB10 periph A */
1 11 0x1 0x0>; /* PB11 periph A */
};
};
pioA: gpio@fffff200 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff200 0x200>;
@ -368,6 +400,8 @@
compatible = "atmel,at91rm9200-ssc";
reg = <0xfff98000 0x4000>;
interrupts = <16 4 5>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
status = "disabled";
};
@ -375,6 +409,8 @@
compatible = "atmel,at91rm9200-ssc";
reg = <0xfff9c000 0x4000>;
interrupts = <17 4 5>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ssc1_tx &pinctrl_ssc1_rx>;
status = "disabled";
};


@ -290,6 +290,38 @@
};
};
ssc0 {
pinctrl_ssc0_tx: ssc0_tx-0 {
atmel,pins =
<3 0 0x1 0x0 /* PD0 periph A */
3 1 0x1 0x0 /* PD1 periph A */
3 2 0x1 0x0>; /* PD2 periph A */
};
pinctrl_ssc0_rx: ssc0_rx-0 {
atmel,pins =
<3 3 0x1 0x0 /* PD3 periph A */
3 4 0x1 0x0 /* PD4 periph A */
3 5 0x1 0x0>; /* PD5 periph A */
};
};
ssc1 {
pinctrl_ssc1_tx: ssc1_tx-0 {
atmel,pins =
<3 10 0x1 0x0 /* PD10 periph A */
3 11 0x1 0x0 /* PD11 periph A */
3 12 0x1 0x0>; /* PD12 periph A */
};
pinctrl_ssc1_rx: ssc1_rx-0 {
atmel,pins =
<3 13 0x1 0x0 /* PD13 periph A */
3 14 0x1 0x0 /* PD14 periph A */
3 15 0x1 0x0>; /* PD15 periph A */
};
};
pioA: gpio@fffff200 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff200 0x200>;
@ -425,6 +457,8 @@
compatible = "atmel,at91sam9g45-ssc";
reg = <0xfff9c000 0x4000>;
interrupts = <16 4 5>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
status = "disabled";
};
@ -432,6 +466,8 @@
compatible = "atmel,at91sam9g45-ssc";
reg = <0xfffa0000 0x4000>;
interrupts = <17 4 5>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ssc1_tx &pinctrl_ssc1_rx>;
status = "disabled";
};


@ -28,6 +28,7 @@
tcb1 = &tcb1;
i2c0 = &i2c0;
i2c1 = &i2c1;
ssc0 = &ssc0;
};
cpus {
cpu@0 {
@ -244,6 +245,22 @@
};
};
ssc0 {
pinctrl_ssc0_tx: ssc0_tx-0 {
atmel,pins =
<0 24 0x2 0x0 /* PA24 periph B */
0 25 0x2 0x0 /* PA25 periph B */
0 26 0x2 0x0>; /* PA26 periph B */
};
pinctrl_ssc0_rx: ssc0_rx-0 {
atmel,pins =
<0 27 0x2 0x0 /* PA27 periph B */
0 28 0x2 0x0 /* PA28 periph B */
0 29 0x2 0x0>; /* PA29 periph B */
};
};
pioA: gpio@fffff400 {
compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffff400 0x200>;
@ -294,6 +311,15 @@
status = "disabled";
};
ssc0: ssc@f0010000 {
compatible = "atmel,at91sam9g45-ssc";
reg = <0xf0010000 0x4000>;
interrupts = <28 4 5>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
status = "disabled";
};
usart0: serial@f801c000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xf801c000 0x4000>;


@ -88,13 +88,6 @@
interrupts = <1 4 7>;
};
ssc0: ssc@f0010000 {
compatible = "atmel,at91sam9g45-ssc";
reg = <0xf0010000 0x4000>;
interrupts = <28 4 5>;
status = "disabled";
};
tcb0: timer@f8008000 {
compatible = "atmel,at91sam9x5-tcb";
reg = <0xf8008000 0x100>;
@ -290,6 +283,22 @@
};
};
ssc0 {
pinctrl_ssc0_tx: ssc0_tx-0 {
atmel,pins =
<0 24 0x2 0x0 /* PA24 periph B */
0 25 0x2 0x0 /* PA25 periph B */
0 26 0x2 0x0>; /* PA26 periph B */
};
pinctrl_ssc0_rx: ssc0_rx-0 {
atmel,pins =
<0 27 0x2 0x0 /* PA27 periph B */
0 28 0x2 0x0 /* PA28 periph B */
0 29 0x2 0x0>; /* PA29 periph B */
};
};
pioA: gpio@fffff400 {
compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffff400 0x200>;
@ -333,6 +342,15 @@
};
};
ssc0: ssc@f0010000 {
compatible = "atmel,at91sam9g45-ssc";
reg = <0xf0010000 0x4000>;
interrupts = <28 4 5>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
status = "disabled";
};
mmc0: mmc@f0008000 {
compatible = "atmel,hsmci";
reg = <0xf0008000 0x600>;


@ -463,6 +463,9 @@
GPIO76_LCD_PCLK, \
GPIO77_LCD_BIAS
/* these enable a work-around for a hw bug in pxa27x during ac97 warm reset */
#define GPIO113_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO113, AF0, DEFAULT)
#define GPIO95_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO95, AF0, DEFAULT)
extern int keypad_set_wake(unsigned int on);
#endif /* __ASM_ARCH_MFP_PXA27X_H */


@ -47,9 +47,9 @@ void pxa27x_clear_otgph(void)
EXPORT_SYMBOL(pxa27x_clear_otgph);
static unsigned long ac97_reset_config[] = {
GPIO113_GPIO,
GPIO113_AC97_nRESET_GPIO_HIGH,
GPIO113_AC97_nRESET,
GPIO95_GPIO,
GPIO95_AC97_nRESET_GPIO_HIGH,
GPIO95_AC97_nRESET,
};


@ -1,4 +1,5 @@
targets += dtbs
targets += $(dtb-y)
dtbs: $(addprefix $(obj)/, $(dtb-y))


@ -24,7 +24,8 @@
/*
* Software defined PTE bits definition.
*/
#define PTE_VALID (_AT(pteval_t, 1) << 0) /* pte_present() check */
#define PTE_VALID (_AT(pteval_t, 1) << 0)
#define PTE_PROT_NONE (_AT(pteval_t, 1) << 1) /* only when !PTE_VALID */
#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
@ -60,9 +61,12 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
extern pgprot_t pgprot_default;
#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
#define __pgprot_modify(prot,mask,bits) \
__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define _MOD_PROT(p, b) __pgprot_modify(p, 0, b)
#define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE)
#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
@ -72,7 +76,7 @@ extern pgprot_t pgprot_default;
#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE)
#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
@ -125,16 +129,15 @@ extern struct page *empty_zero_page;
/*
* The following only work if pte_present(). Undefined behaviour otherwise.
*/
#define pte_present(pte) (pte_val(pte) & PTE_VALID)
#define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY)
#define pte_young(pte) (pte_val(pte) & PTE_AF)
#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
#define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY))
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_present_exec_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == \
(PTE_VALID | PTE_USER))
#define pte_valid_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
@ -157,10 +160,13 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
if (pte_present_exec_user(pte))
__sync_icache_dcache(pte, addr);
if (!pte_dirty(pte))
pte = pte_wrprotect(pte);
if (pte_valid_user(pte)) {
if (pte_exec(pte))
__sync_icache_dcache(pte, addr);
if (!pte_dirty(pte))
pte = pte_wrprotect(pte);
}
set_pte(ptep, pte);
}
@ -170,9 +176,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
#define pte_huge(pte) ((pte_val(pte) & PTE_TYPE_MASK) == PTE_TYPE_HUGEPAGE)
#define pte_mkhuge(pte) (__pte((pte_val(pte) & ~PTE_TYPE_MASK) | PTE_TYPE_HUGEPAGE))
#define __pgprot_modify(prot,mask,bits) \
__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
#define __HAVE_ARCH_PTE_SPECIAL
/*
@ -264,7 +267,8 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY;
const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
PTE_PROT_NONE | PTE_VALID;
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}


@ -395,8 +395,13 @@ __SYSCALL(370, sys_name_to_handle_at)
__SYSCALL(371, compat_sys_open_by_handle_at)
__SYSCALL(372, compat_sys_clock_adjtime)
__SYSCALL(373, sys_syncfs)
__SYSCALL(374, compat_sys_sendmmsg)
__SYSCALL(375, sys_setns)
__SYSCALL(376, compat_sys_process_vm_readv)
__SYSCALL(377, compat_sys_process_vm_writev)
__SYSCALL(378, sys_ni_syscall) /* 378 for kcmp */
#define __NR_compat_syscalls 374
#define __NR_compat_syscalls 379
/*
* Compat syscall numbers used by the AArch64 kernel.


@ -252,10 +252,6 @@ void update_vsyscall(struct timekeeper *tk)
void update_vsyscall_tz(void)
{
++vdso_data->tb_seq_count;
smp_wmb();
vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
vdso_data->tz_dsttime = sys_tz.tz_dsttime;
smp_wmb();
++vdso_data->tb_seq_count;
}


@ -73,8 +73,6 @@ ENTRY(__kernel_gettimeofday)
/* If tz is NULL, return 0. */
cbz x1, 3f
ldp w4, w5, [vdso_data, #VDSO_TZ_MINWEST]
seqcnt_read w9
seqcnt_check w9, 1b
stp w4, w5, [x1, #TZ_MINWEST]
3:
mov x0, xzr


@ -6,6 +6,7 @@ config MN10300
select ARCH_WANT_IPC_PARSE_VERSION
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_KGDB
select GENERIC_ATOMIC64
select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER
select GENERIC_CLOCKEVENTS
select MODULES_USE_ELF_RELA


@ -78,7 +78,7 @@ struct kvm_vcpu_arch_shared {
#define KVM_HCALL_TOKEN(num) _EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, num)
#include <uapi/asm/epapr_hcalls.h>
#include <asm/epapr_hcalls.h>
#define KVM_FEATURE_MAGIC_PAGE 1


@ -79,7 +79,9 @@ static void flush_tlb_power7(struct kvm_vcpu *vcpu)
static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
{
unsigned long srr1 = vcpu->arch.shregs.msr;
#ifdef CONFIG_PPC_POWERNV
struct opal_machine_check_event *opal_evt;
#endif
long handled = 1;
if (srr1 & SRR1_MC_LDSTERR) {
@ -117,6 +119,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
handled = 0;
}
#ifdef CONFIG_PPC_POWERNV
/*
* See if OPAL has already handled the condition.
* We assume that if the condition is recovered then OPAL
@ -131,6 +134,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
if (handled)
opal_evt->in_use = 0;
#endif
return handled;
}


@ -24,8 +24,8 @@ CHECKFLAGS += -D__s390__ -msize-long
else
LD_BFD := elf64-s390
LDFLAGS := -m elf64_s390
KBUILD_AFLAGS_MODULE += -fpic -D__PIC__
KBUILD_CFLAGS_MODULE += -fpic -D__PIC__
KBUILD_AFLAGS_MODULE += -fPIC
KBUILD_CFLAGS_MODULE += -fPIC
KBUILD_CFLAGS += -m64
KBUILD_AFLAGS += -m64
UTS_MACHINE := s390x


@ -10,4 +10,10 @@
*/
#define MAX_DMA_ADDRESS 0x80000000
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* _ASM_S390_DMA_H */


@ -85,6 +85,11 @@ static inline void iounmap(volatile void __iomem *addr)
#define __raw_writel zpci_write_u32
#define __raw_writeq zpci_write_u64
#define readb_relaxed readb
#define readw_relaxed readw
#define readl_relaxed readl
#define readq_relaxed readq
#endif /* CONFIG_PCI */
#include <asm-generic/io.h>


@ -2,43 +2,61 @@
#define _ASM_IRQ_H
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/cache.h>
#include <linux/types.h>
enum interruption_class {
enum interruption_main_class {
EXTERNAL_INTERRUPT,
IO_INTERRUPT,
EXTINT_CLK,
EXTINT_EXC,
EXTINT_EMS,
EXTINT_TMR,
EXTINT_TLA,
EXTINT_PFL,
EXTINT_DSD,
EXTINT_VRT,
EXTINT_SCP,
EXTINT_IUC,
EXTINT_CMS,
EXTINT_CMC,
EXTINT_CMR,
IOINT_CIO,
IOINT_QAI,
IOINT_DAS,
IOINT_C15,
IOINT_C70,
IOINT_TAP,
IOINT_VMR,
IOINT_LCS,
IOINT_CLW,
IOINT_CTC,
IOINT_APB,
IOINT_ADM,
IOINT_CSC,
IOINT_PCI,
IOINT_MSI,
NMI_NMI,
NR_IRQS,
NR_IRQS
};
enum interruption_class {
IRQEXT_CLK,
IRQEXT_EXC,
IRQEXT_EMS,
IRQEXT_TMR,
IRQEXT_TLA,
IRQEXT_PFL,
IRQEXT_DSD,
IRQEXT_VRT,
IRQEXT_SCP,
IRQEXT_IUC,
IRQEXT_CMS,
IRQEXT_CMC,
IRQEXT_CMR,
IRQIO_CIO,
IRQIO_QAI,
IRQIO_DAS,
IRQIO_C15,
IRQIO_C70,
IRQIO_TAP,
IRQIO_VMR,
IRQIO_LCS,
IRQIO_CLW,
IRQIO_CTC,
IRQIO_APB,
IRQIO_ADM,
IRQIO_CSC,
IRQIO_PCI,
IRQIO_MSI,
NMI_NMI,
CPU_RST,
NR_ARCH_IRQS
};
struct irq_stat {
unsigned int irqs[NR_ARCH_IRQS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
static __always_inline void inc_irq_stat(enum interruption_class irq)
{
__get_cpu_var(irq_stat).irqs[irq]++;
}
struct ext_code {
unsigned short subcode;
unsigned short code;


@ -1387,10 +1387,7 @@ static inline int has_transparent_hugepage(void)
static inline unsigned long pmd_pfn(pmd_t pmd)
{
if (pmd_trans_huge(pmd))
return pmd_val(pmd) >> HPAGE_SHIFT;
else
return pmd_val(pmd) >> PAGE_SHIFT;
return pmd_val(pmd) >> PAGE_SHIFT;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */


@ -128,4 +128,32 @@ static inline unsigned long long get_clock_monotonic(void)
return get_clock_xt() - sched_clock_base_cc;
}
/**
* tod_to_ns - convert a TOD format value to nanoseconds
* @todval: to be converted TOD format value
* Returns: number of nanoseconds that correspond to the TOD format value
*
* Converting a 64 Bit TOD format value to nanoseconds means that the value
* must be divided by 4.096. In order to achieve that we multiply with 125
* and divide by 512:
*
* ns = (todval * 125) >> 9;
*
* In order to avoid an overflow with the multiplication we can rewrite this.
* With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits)
* we end up with
*
* ns = ((2^32 * th + tl) * 125 ) >> 9;
* -> ns = (2^23 * th * 125) + ((tl * 125) >> 9);
*
*/
static inline unsigned long long tod_to_ns(unsigned long long todval)
{
unsigned long long ns;
ns = ((todval >> 32) << 23) * 125;
ns += ((todval & 0xffffffff) * 125) >> 9;
return ns;
}
#endif
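The tod_to_ns() helper added above relies on the identity ns = (todval * 125) >> 9 being computable without a 64-bit overflow by splitting todval into its upper and lower 32-bit halves. Below is a minimal stand-alone sketch of that check; it is not part of the patch, the helper name tod_to_ns_split is purely illustrative, and the full-precision reference uses the GCC/Clang-specific unsigned __int128 type.

#include <stdio.h>

/* Copy of the split computation from tod_to_ns() (illustrative name). */
static unsigned long long tod_to_ns_split(unsigned long long todval)
{
        unsigned long long ns;

        ns = ((todval >> 32) << 23) * 125;        /* (th * 2^23) * 125 */
        ns += ((todval & 0xffffffff) * 125) >> 9; /* (tl * 125) / 512  */
        return ns;
}

int main(void)
{
        /* A few sample TOD values, including the extremes. */
        const unsigned long long samples[] = {
                0ULL, 512ULL, 0x0123456789abcdefULL, 0xffffffffffffffffULL
        };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                unsigned long long tod = samples[i];
                /* Full-precision reference: (tod * 125) >> 9 with no overflow. */
                unsigned long long ref =
                        (unsigned long long)(((unsigned __int128)tod * 125) >> 9);

                printf("tod=%#018llx split=%llu ref=%llu %s\n",
                       tod, tod_to_ns_split(tod), ref,
                       tod_to_ns_split(tod) == ref ? "ok" : "MISMATCH");
        }
        return 0;
}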


@ -279,7 +279,8 @@
#define __NR_process_vm_writev 341
#define __NR_s390_runtime_instr 342
#define __NR_kcmp 343
#define NR_syscalls 344
#define __NR_finit_module 344
#define NR_syscalls 345
/*
* There are some system calls that are not present on 64 bit, some


@ -1659,3 +1659,9 @@ ENTRY(sys_kcmp_wrapper)
llgfr %r5,%r5 # unsigned long
llgfr %r6,%r6 # unsigned long
jg sys_kcmp
ENTRY(sys_finit_module_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char __user *
lgfr %r4,%r4 # int
jg sys_finit_module


@ -1127,13 +1127,14 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
if (i == DEBUG_MAX_VIEWS) {
pr_err("Registering view %s/%s would exceed the maximum "
"number of views %i\n", id->name, view->name, i);
debugfs_remove(pde);
rc = -1;
} else {
id->views[i] = view;
id->debugfs_entries[i] = pde;
}
spin_unlock_irqrestore(&id->lock, flags);
if (rc)
debugfs_remove(pde);
out:
return rc;
}
@ -1146,9 +1147,9 @@ EXPORT_SYMBOL(debug_register_view);
int
debug_unregister_view(debug_info_t * id, struct debug_view *view)
{
int rc = 0;
int i;
struct dentry *dentry = NULL;
unsigned long flags;
int i, rc = 0;
if (!id)
goto out;
@ -1160,10 +1161,12 @@ debug_unregister_view(debug_info_t * id, struct debug_view *view)
if (i == DEBUG_MAX_VIEWS)
rc = -1;
else {
debugfs_remove(id->debugfs_entries[i]);
dentry = id->debugfs_entries[i];
id->views[i] = NULL;
id->debugfs_entries[i] = NULL;
}
spin_unlock_irqrestore(&id->lock, flags);
debugfs_remove(dentry);
out:
return rc;
}


@ -24,43 +24,65 @@
#include <asm/irq.h>
#include "entry.h"
DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
EXPORT_PER_CPU_SYMBOL_GPL(irq_stat);
struct irq_class {
char *name;
char *desc;
};
static const struct irq_class intrclass_names[] = {
/*
* The list of "main" irq classes on s390. This is the list of interrupts
* that appear both in /proc/stat ("intr" line) and /proc/interrupts.
* Historically only external and I/O interrupts have been part of /proc/stat.
* We can't add the split external and I/O sub classes since the first field
* in the "intr" line in /proc/stat is supposed to be the sum of all other
* fields.
* Since the external and I/O interrupt fields are already sums we would end
* up with having a sum which accounts each interrupt twice.
*/
static const struct irq_class irqclass_main_desc[NR_IRQS] = {
[EXTERNAL_INTERRUPT] = {.name = "EXT"},
[IO_INTERRUPT] = {.name = "I/O"},
[EXTINT_CLK] = {.name = "CLK", .desc = "[EXT] Clock Comparator"},
[EXTINT_EXC] = {.name = "EXC", .desc = "[EXT] External Call"},
[EXTINT_EMS] = {.name = "EMS", .desc = "[EXT] Emergency Signal"},
[EXTINT_TMR] = {.name = "TMR", .desc = "[EXT] CPU Timer"},
[EXTINT_TLA] = {.name = "TAL", .desc = "[EXT] Timing Alert"},
[EXTINT_PFL] = {.name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
[EXTINT_DSD] = {.name = "DSD", .desc = "[EXT] DASD Diag"},
[EXTINT_VRT] = {.name = "VRT", .desc = "[EXT] Virtio"},
[EXTINT_SCP] = {.name = "SCP", .desc = "[EXT] Service Call"},
[EXTINT_IUC] = {.name = "IUC", .desc = "[EXT] IUCV"},
[EXTINT_CMS] = {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
[EXTINT_CMC] = {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
[EXTINT_CMR] = {.name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
[IOINT_CIO] = {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
[IOINT_QAI] = {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
[IOINT_DAS] = {.name = "DAS", .desc = "[I/O] DASD"},
[IOINT_C15] = {.name = "C15", .desc = "[I/O] 3215"},
[IOINT_C70] = {.name = "C70", .desc = "[I/O] 3270"},
[IOINT_TAP] = {.name = "TAP", .desc = "[I/O] Tape"},
[IOINT_VMR] = {.name = "VMR", .desc = "[I/O] Unit Record Devices"},
[IOINT_LCS] = {.name = "LCS", .desc = "[I/O] LCS"},
[IOINT_CLW] = {.name = "CLW", .desc = "[I/O] CLAW"},
[IOINT_CTC] = {.name = "CTC", .desc = "[I/O] CTC"},
[IOINT_APB] = {.name = "APB", .desc = "[I/O] AP Bus"},
[IOINT_ADM] = {.name = "ADM", .desc = "[I/O] EADM Subchannel"},
[IOINT_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"},
[IOINT_PCI] = {.name = "PCI", .desc = "[I/O] PCI Interrupt" },
[IOINT_MSI] = {.name = "MSI", .desc = "[I/O] MSI Interrupt" },
[IO_INTERRUPT] = {.name = "I/O"}
};
/*
* The list of split external and I/O interrupts that appear only in
* /proc/interrupts.
* In addition this list contains non external / I/O events like NMIs.
*/
static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
[IRQEXT_CLK] = {.name = "CLK", .desc = "[EXT] Clock Comparator"},
[IRQEXT_EXC] = {.name = "EXC", .desc = "[EXT] External Call"},
[IRQEXT_EMS] = {.name = "EMS", .desc = "[EXT] Emergency Signal"},
[IRQEXT_TMR] = {.name = "TMR", .desc = "[EXT] CPU Timer"},
[IRQEXT_TLA] = {.name = "TAL", .desc = "[EXT] Timing Alert"},
[IRQEXT_PFL] = {.name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
[IRQEXT_DSD] = {.name = "DSD", .desc = "[EXT] DASD Diag"},
[IRQEXT_VRT] = {.name = "VRT", .desc = "[EXT] Virtio"},
[IRQEXT_SCP] = {.name = "SCP", .desc = "[EXT] Service Call"},
[IRQEXT_IUC] = {.name = "IUC", .desc = "[EXT] IUCV"},
[IRQEXT_CMS] = {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
[IRQEXT_CMC] = {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
[IRQEXT_CMR] = {.name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
[IRQIO_CIO] = {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
[IRQIO_QAI] = {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
[IRQIO_DAS] = {.name = "DAS", .desc = "[I/O] DASD"},
[IRQIO_C15] = {.name = "C15", .desc = "[I/O] 3215"},
[IRQIO_C70] = {.name = "C70", .desc = "[I/O] 3270"},
[IRQIO_TAP] = {.name = "TAP", .desc = "[I/O] Tape"},
[IRQIO_VMR] = {.name = "VMR", .desc = "[I/O] Unit Record Devices"},
[IRQIO_LCS] = {.name = "LCS", .desc = "[I/O] LCS"},
[IRQIO_CLW] = {.name = "CLW", .desc = "[I/O] CLAW"},
[IRQIO_CTC] = {.name = "CTC", .desc = "[I/O] CTC"},
[IRQIO_APB] = {.name = "APB", .desc = "[I/O] AP Bus"},
[IRQIO_ADM] = {.name = "ADM", .desc = "[I/O] EADM Subchannel"},
[IRQIO_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"},
[IRQIO_PCI] = {.name = "PCI", .desc = "[I/O] PCI Interrupt" },
[IRQIO_MSI] = {.name = "MSI", .desc = "[I/O] MSI Interrupt" },
[NMI_NMI] = {.name = "NMI", .desc = "[NMI] Machine Check"},
[CPU_RST] = {.name = "RST", .desc = "[CPU] CPU Restart"},
};
/*
@ -68,30 +90,34 @@ static const struct irq_class intrclass_names[] = {
*/
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j;
int irq = *(loff_t *) v;
int cpu;
get_online_cpus();
if (i == 0) {
if (irq == 0) {
seq_puts(p, " ");
for_each_online_cpu(j)
seq_printf(p, "CPU%d ",j);
for_each_online_cpu(cpu)
seq_printf(p, "CPU%d ", cpu);
seq_putc(p, '\n');
}
if (i < NR_IRQS) {
seq_printf(p, "%s: ", intrclass_names[i].name);
#ifndef CONFIG_SMP
seq_printf(p, "%10u ", kstat_irqs(i));
#else
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
if (intrclass_names[i].desc)
seq_printf(p, " %s", intrclass_names[i].desc);
seq_putc(p, '\n');
}
if (irq < NR_IRQS) {
seq_printf(p, "%s: ", irqclass_main_desc[irq].name);
for_each_online_cpu(cpu)
seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[irq]);
seq_putc(p, '\n');
goto skip_arch_irqs;
}
for (irq = 0; irq < NR_ARCH_IRQS; irq++) {
seq_printf(p, "%s: ", irqclass_sub_desc[irq].name);
for_each_online_cpu(cpu)
seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).irqs[irq]);
if (irqclass_sub_desc[irq].desc)
seq_printf(p, " %s", irqclass_sub_desc[irq].desc);
seq_putc(p, '\n');
}
skip_arch_irqs:
put_online_cpus();
return 0;
return 0;
}
/*
@ -222,7 +248,7 @@ void __irq_entry do_extint(struct pt_regs *regs, struct ext_code ext_code,
/* Serve timer interrupts first. */
clock_comparator_work();
}
kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
kstat_incr_irqs_this_cpu(EXTERNAL_INTERRUPT, NULL);
if (ext_code.code != 0x1004)
__get_cpu_var(s390_idle).nohz_delay = 1;


@ -254,7 +254,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
int umode;
nmi_enter();
kstat_cpu(smp_processor_id()).irqs[NMI_NMI]++;
inc_irq_stat(NMI_NMI);
mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
mcck = &__get_cpu_var(cpu_mcck);
umode = user_mode(regs);


@ -229,7 +229,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
if (!(alert & CPU_MF_INT_CF_MASK))
return;
kstat_cpu(smp_processor_id()).irqs[EXTINT_CMC]++;
inc_irq_stat(IRQEXT_CMC);
cpuhw = &__get_cpu_var(cpu_hw_events);
/* Measurement alerts are shared and might happen when the PMU

View File

@ -71,7 +71,7 @@ static void runtime_instr_int_handler(struct ext_code ext_code,
if (!(param32 & CPU_MF_INT_RI_MASK))
return;
kstat_cpu(smp_processor_id()).irqs[EXTINT_CMR]++;
inc_irq_stat(IRQEXT_CMR);
if (!current->thread.ri_cb)
return;


@ -16,7 +16,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
@ -289,6 +289,7 @@ void machine_power_off(void)
* Dummy power off function.
*/
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);
static int __init early_parse_mem(char *p)
{


@ -433,9 +433,9 @@ static void do_ext_call_interrupt(struct ext_code ext_code,
cpu = smp_processor_id();
if (ext_code.code == 0x1202)
kstat_cpu(cpu).irqs[EXTINT_EXC]++;
inc_irq_stat(IRQEXT_EXC);
else
kstat_cpu(cpu).irqs[EXTINT_EMS]++;
inc_irq_stat(IRQEXT_EMS);
/*
* handle bit signal external calls
*/
@ -623,9 +623,10 @@ static struct sclp_cpu_info *smp_get_cpu_info(void)
return info;
}
static int smp_add_present_cpu(int cpu);
static int __cpuinit smp_add_present_cpu(int cpu);
static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
static int __cpuinit __smp_rescan_cpus(struct sclp_cpu_info *info,
int sysfs_add)
{
struct pcpu *pcpu;
cpumask_t avail;
@ -708,6 +709,7 @@ static void __cpuinit smp_start_secondary(void *cpuvoid)
pfault_init();
notify_cpu_starting(smp_processor_id());
set_cpu_online(smp_processor_id(), true);
inc_irq_stat(CPU_RST);
local_irq_enable();
/* cpu_idle will call schedule for us */
cpu_idle();
@ -985,7 +987,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
return notifier_from_errno(err);
}
static int smp_add_present_cpu(int cpu)
static int __cpuinit smp_add_present_cpu(int cpu)
{
struct cpu *c = &pcpu_devices[cpu].cpu;
struct device *s = &c->dev;


@ -352,3 +352,4 @@ SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv_wr
SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev_wrapper)
SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,sys_s390_runtime_instr_wrapper)
SYSCALL(sys_kcmp,sys_kcmp,sys_kcmp_wrapper)
SYSCALL(sys_finit_module,sys_finit_module,sys_finit_module_wrapper)


@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
*/
unsigned long long notrace __kprobes sched_clock(void)
{
return (get_clock_monotonic() * 125) >> 9;
return tod_to_ns(get_clock_monotonic());
}
/*
@ -168,7 +168,7 @@ static void clock_comparator_interrupt(struct ext_code ext_code,
unsigned int param32,
unsigned long param64)
{
kstat_cpu(smp_processor_id()).irqs[EXTINT_CLK]++;
inc_irq_stat(IRQEXT_CLK);
if (S390_lowcore.clock_comparator == -1ULL)
set_clock_comparator(S390_lowcore.clock_comparator);
}
@ -179,7 +179,7 @@ static void stp_timing_alert(struct stp_irq_parm *);
static void timing_alert_interrupt(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++;
inc_irq_stat(IRQEXT_TLA);
if (param32 & 0x00c40000)
etr_timing_alert((struct etr_irq_parm *) &param32);
if (param32 & 0x00038000)


@ -10,6 +10,7 @@
#include <linux/bootmem.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
@ -42,6 +43,7 @@ static struct mask_info socket_info;
static struct mask_info book_info;
struct cpu_topology_s390 cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{


@ -408,7 +408,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
return 0;
}
sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9;
sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);


@ -613,7 +613,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
kvm_s390_deliver_pending_interrupts(vcpu);
vcpu->arch.sie_block->icptcode = 0;
preempt_disable();
kvm_guest_enter();
preempt_enable();
VCPU_EVENT(vcpu, 6, "entering sie flags %x",
atomic_read(&vcpu->arch.sie_block->cpuflags));
trace_kvm_s390_sie_enter(vcpu,


@ -569,7 +569,7 @@ static void pfault_interrupt(struct ext_code ext_code,
subcode = ext_code.subcode;
if ((subcode & 0xff00) != __SUBCODE_MASK)
return;
kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
inc_irq_stat(IRQEXT_PFL);
/* Get the token (= pid of the affected task). */
pid = sizeof(void *) == 4 ? param32 : param64;
rcu_read_lock();


@ -233,7 +233,7 @@ static void hws_ext_handler(struct ext_code ext_code,
if (!(param32 & CPU_MF_INT_SF_MASK))
return;
kstat_cpu(smp_processor_id()).irqs[EXTINT_CMS]++;
inc_irq_stat(IRQEXT_CMS);
atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);
if (hws_wq)


@ -160,35 +160,6 @@ int pci_proc_domain(struct pci_bus *bus)
}
EXPORT_SYMBOL_GPL(pci_proc_domain);
/* Store PCI function information block */
static int zpci_store_fib(struct zpci_dev *zdev, u8 *fc)
{
struct zpci_fib *fib;
u8 status, cc;
fib = (void *) get_zeroed_page(GFP_KERNEL);
if (!fib)
return -ENOMEM;
do {
cc = __stpcifc(zdev->fh, 0, fib, &status);
if (cc == 2) {
msleep(ZPCI_INSN_BUSY_DELAY);
memset(fib, 0, PAGE_SIZE);
}
} while (cc == 2);
if (cc)
pr_err_once("%s: cc: %u status: %u\n",
__func__, cc, status);
/* Return PCI function controls */
*fc = fib->fc;
free_page((unsigned long) fib);
return (cc) ? -EIO : 0;
}
/* Modify PCI: Register adapter interruptions */
static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
u64 aibv)
@ -469,7 +440,7 @@ static void zpci_irq_handler(void *dont, void *need)
int rescan = 0, max = aisb_max;
struct zdev_irq_map *imap;
kstat_cpu(smp_processor_id()).irqs[IOINT_PCI]++;
inc_irq_stat(IRQIO_PCI);
sbit = start;
scan:
@ -481,7 +452,7 @@ scan:
/* find vector bit */
imap = bucket->imap[sbit];
for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) {
kstat_cpu(smp_processor_id()).irqs[IOINT_MSI]++;
inc_irq_stat(IRQIO_MSI);
clear_bit(63 - mbit, &imap->aibv);
spin_lock(&imap->lock);


@ -13,8 +13,6 @@
#include <linux/pci.h>
#include <asm/pci_dma.h>
static enum zpci_ioat_dtype zpci_ioat_dt = ZPCI_IOTA_RTTO;
static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;


@ -70,6 +70,16 @@
* OFF-ON : MMC
*/
/*
* FSI - DA7210
*
* it needs amixer settings for playing
*
* amixer set 'HeadPhone' 80
* amixer set 'Out Mixer Left DAC Left' on
* amixer set 'Out Mixer Right DAC Right' on
*/
/* Heartbeat */
static unsigned char led_pos[] = { 0, 1, 2, 3 };


@ -203,9 +203,9 @@ extern void __kernel_vsyscall;
if (vdso_enabled) \
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
else \
NEW_AUX_ENT(AT_IGNORE, 0);
NEW_AUX_ENT(AT_IGNORE, 0)
#else
#define VSYSCALL_AUX_ENT
#define VSYSCALL_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0)
#endif /* CONFIG_VSYSCALL */
#ifdef CONFIG_SH_FPU


@ -39,7 +39,7 @@
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
/*
* Bit of SR register


@ -47,7 +47,7 @@ pc; })
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
/*
* Bit of SR register


@ -379,7 +379,8 @@
#define __NR_process_vm_readv 365
#define __NR_process_vm_writev 366
#define __NR_kcmp 367
#define __NR_finit_module 368
#define NR_syscalls 368
#define NR_syscalls 369
#endif /* __ASM_SH_UNISTD_32_H */


@ -399,7 +399,8 @@
#define __NR_process_vm_readv 376
#define __NR_process_vm_writev 377
#define __NR_kcmp 378
#define __NR_finit_module 379
#define NR_syscalls 379
#define NR_syscalls 380
#endif /* __ASM_SH_UNISTD_64_H */


@ -385,3 +385,4 @@ ENTRY(sys_call_table)
.long sys_process_vm_readv /* 365 */
.long sys_process_vm_writev
.long sys_kcmp
.long sys_finit_module


@ -405,3 +405,4 @@ sys_call_table:
.long sys_process_vm_readv
.long sys_process_vm_writev
.long sys_kcmp
.long sys_finit_module


@ -294,6 +294,8 @@ stack_panic:
.align 2
.L_init_thread_union:
.long init_thread_union
.L_ebss:
.long __bss_stop
.Lpanic:
.long panic
.Lpanic_s:


@ -407,8 +407,9 @@
#define __NR_process_vm_writev 339
#define __NR_kern_features 340
#define __NR_kcmp 341
#define __NR_finit_module 342
#define NR_syscalls 342
#define NR_syscalls 343
/* Bitmask values returned from kern_features system call. */
#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001


@ -378,7 +378,8 @@ static void apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p)
/* Cook up fake bus resources for SUNW,simba PCI bridges which lack
* a proper 'ranges' property.
*/
static void apb_fake_ranges(struct pci_dev *dev, struct pci_bus *bus,
static void apb_fake_ranges(struct pci_dev *dev,
struct pci_bus *bus,
struct pci_pbm_info *pbm)
{
struct pci_bus_region region;
@ -403,13 +404,15 @@ static void apb_fake_ranges(struct pci_dev *dev, struct pci_bus *bus,
pcibios_bus_to_resource(dev, res, &region);
}
static void pci_of_scan_bus(struct pci_pbm_info *pbm, struct device_node *node,
static void pci_of_scan_bus(struct pci_pbm_info *pbm,
struct device_node *node,
struct pci_bus *bus);
#define GET_64BIT(prop, i) ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
struct device_node *node, struct pci_dev *dev)
struct device_node *node,
struct pci_dev *dev)
{
struct pci_bus *bus;
const u32 *busrange, *ranges;
@ -500,7 +503,8 @@ after_ranges:
pci_of_scan_bus(pbm, node, bus);
}
static void pci_of_scan_bus(struct pci_pbm_info *pbm, struct device_node *node,
static void pci_of_scan_bus(struct pci_pbm_info *pbm,
struct device_node *node,
struct pci_bus *bus)
{
struct device_node *child;


@ -366,7 +366,8 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm)
pci_config_write8(addr, 64);
}
static void psycho_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
static void psycho_scan_bus(struct pci_pbm_info *pbm,
struct device *parent)
{
pbm_config_busmastering(pbm);
pbm->is_66mhz_capable = 0;


@ -442,7 +442,8 @@ static void sabre_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
sabre_register_error_handlers(pbm);
}
static void sabre_pbm_init(struct pci_pbm_info *pbm, struct platform_device *op)
static void sabre_pbm_init(struct pci_pbm_info *pbm,
struct platform_device *op)
{
psycho_pbm_init_common(pbm, op, "SABRE", PBM_CHIP_TYPE_SABRE);
pbm->pci_afsr = pbm->controller_regs + SABRE_PIOAFSR;


@ -1306,8 +1306,9 @@ static void schizo_pbm_hw_init(struct pci_pbm_info *pbm)
}
}
static int schizo_pbm_init(struct pci_pbm_info *pbm, struct platform_device *op,
u32 portid, int chip_type)
static int schizo_pbm_init(struct pci_pbm_info *pbm,
struct platform_device *op, u32 portid,
int chip_type)
{
const struct linux_prom64_registers *regs;
struct device_node *dp = op->dev.of_node;


@ -85,4 +85,4 @@ sys_call_table:
/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .long sys_ni_syscall, sys_kcmp
/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module


@ -86,7 +86,7 @@ sys_call_table32:
.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
/*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module
#endif /* CONFIG_COMPAT */
@ -164,4 +164,4 @@ sys_call_table:
.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module


@ -302,7 +302,7 @@ static efi_status_t setup_efi_pci(struct boot_params *params)
if (status != EFI_SUCCESS)
continue;
if (!attributes & EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM)
if (!(attributes & EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM))
continue;
if (!pci->romimage || !pci->romsize)


@ -43,6 +43,7 @@
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>
#include <asm/context_tracking.h>
static int kvmapf = 1;
@ -121,6 +122,8 @@ void kvm_async_pf_task_wait(u32 token)
struct kvm_task_sleep_node n, *e;
DEFINE_WAIT(wait);
rcu_irq_enter();
spin_lock(&b->lock);
e = _find_apf_task(b, token);
if (e) {
@ -128,6 +131,8 @@ void kvm_async_pf_task_wait(u32 token)
hlist_del(&e->link);
kfree(e);
spin_unlock(&b->lock);
rcu_irq_exit();
return;
}
@ -152,13 +157,16 @@ void kvm_async_pf_task_wait(u32 token)
/*
* We cannot reschedule. So halt.
*/
rcu_irq_exit();
native_safe_halt();
rcu_irq_enter();
local_irq_disable();
}
}
if (!n.halted)
finish_wait(&n.wq, &wait);
rcu_irq_exit();
return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
@ -252,10 +260,10 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
break;
case KVM_PV_REASON_PAGE_NOT_PRESENT:
/* page is swapped out by the host. */
rcu_irq_enter();
exception_enter(regs);
exit_idle();
kvm_async_pf_task_wait((u32)read_cr2());
rcu_irq_exit();
exception_exit(regs);
break;
case KVM_PV_REASON_PAGE_READY:
rcu_irq_enter();


@ -610,6 +610,83 @@ static __init void reserve_ibft_region(void)
static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
static bool __init snb_gfx_workaround_needed(void)
{
#ifdef CONFIG_PCI
int i;
u16 vendor, devid;
static const __initconst u16 snb_ids[] = {
0x0102,
0x0112,
0x0122,
0x0106,
0x0116,
0x0126,
0x010a,
};
/* Assume no if something weird is going on with PCI */
if (!early_pci_allowed())
return false;
vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
if (vendor != 0x8086)
return false;
devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
if (devid == snb_ids[i])
return true;
#endif
return false;
}
/*
* Sandy Bridge graphics has trouble with certain ranges, exclude
* them from allocation.
*/
static void __init trim_snb_memory(void)
{
static const __initconst unsigned long bad_pages[] = {
0x20050000,
0x20110000,
0x20130000,
0x20138000,
0x40004000,
};
int i;
if (!snb_gfx_workaround_needed())
return;
printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");
/*
* Reserve all memory below the 1 MB mark that has not
* already been reserved.
*/
memblock_reserve(0, 1<<20);
for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
if (memblock_reserve(bad_pages[i], PAGE_SIZE))
printk(KERN_WARNING "failed to reserve 0x%08lx\n",
bad_pages[i]);
}
}
/*
* Here we put platform-specific memory range workarounds, i.e.
* memory known to be corrupt or otherwise in need to be reserved on
* specific platforms.
*
* If this gets used more widely it could use a real dispatch mechanism.
*/
static void __init trim_platform_memory_ranges(void)
{
trim_snb_memory();
}
static void __init trim_bios_range(void)
{
/*
@ -630,6 +707,7 @@ static void __init trim_bios_range(void)
* take them out.
*/
e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}
@ -908,6 +986,8 @@ void __init setup_arch(char **cmdline_p)
setup_real_mode();
trim_platform_memory_ranges();
init_gbpages();
/* max_pfn_mapped is updated here */


@ -120,7 +120,7 @@ struct kvm_shared_msrs {
};
static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
static struct kvm_shared_msrs __percpu *shared_msrs;
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "pf_fixed", VCPU_STAT(pf_fixed) },
@ -191,10 +191,10 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
static void shared_msr_update(unsigned slot, u32 msr)
{
struct kvm_shared_msrs *smsr;
u64 value;
unsigned int cpu = smp_processor_id();
struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
smsr = &__get_cpu_var(shared_msrs);
/* only read, and nobody should modify it at this time,
* so don't need lock */
if (slot >= shared_msrs_global.nr) {
@ -226,7 +226,8 @@ static void kvm_shared_msr_cpu_online(void)
void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
unsigned int cpu = smp_processor_id();
struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
if (((value ^ smsr->values[slot].curr) & mask) == 0)
return;
@ -242,7 +243,8 @@ EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
static void drop_user_return_notifiers(void *ignore)
{
struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
unsigned int cpu = smp_processor_id();
struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
if (smsr->registered)
kvm_on_user_return(&smsr->urn);
@ -5233,9 +5235,16 @@ int kvm_arch_init(void *opaque)
goto out;
}
r = -ENOMEM;
shared_msrs = alloc_percpu(struct kvm_shared_msrs);
if (!shared_msrs) {
printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
goto out;
}
r = kvm_mmu_module_init();
if (r)
goto out;
goto out_free_percpu;
kvm_set_mmio_spte_mask();
kvm_init_msr_list();
@ -5258,6 +5267,8 @@ int kvm_arch_init(void *opaque)
return 0;
out_free_percpu:
free_percpu(shared_msrs);
out:
return r;
}
@ -5275,6 +5286,7 @@ void kvm_arch_exit(void)
#endif
kvm_x86_ops = NULL;
kvm_mmu_module_exit();
free_percpu(shared_msrs);
}
int kvm_emulate_halt(struct kvm_vcpu *vcpu)


@ -297,7 +297,7 @@ static int acpi_platform_notify(struct device *dev)
if (!ret) {
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_get_name(dev->acpi_handle, ACPI_FULL_PATHNAME, &buffer);
acpi_get_name(ACPI_HANDLE(dev), ACPI_FULL_PATHNAME, &buffer);
DBG("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer);
kfree(buffer.pointer);
} else


@ -224,7 +224,7 @@ static void cpu_device_release(struct device *dev)
* by the cpu device.
*
* Never copy this way of doing things, or you too will be made fun of
* on the linux-kerenl list, you have been warned.
* on the linux-kernel list, you have been warned.
*/
}


@ -305,7 +305,7 @@ static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf
char *buf;
size = fw_file_size(file);
if (size < 0)
if (size <= 0)
return false;
buf = vmalloc(size);
if (!buf)


@ -56,6 +56,19 @@ static const struct file_operations regmap_name_fops = {
.llseek = default_llseek,
};
static void regmap_debugfs_free_dump_cache(struct regmap *map)
{
struct regmap_debugfs_off_cache *c;
while (!list_empty(&map->debugfs_off_cache)) {
c = list_first_entry(&map->debugfs_off_cache,
struct regmap_debugfs_off_cache,
list);
list_del(&c->list);
kfree(c);
}
}
/*
* Work out where the start offset maps into register numbers, bearing
* in mind that we suppress hidden registers.
@ -91,8 +104,10 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
/* No cache entry? Start a new one */
if (!c) {
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
break;
if (!c) {
regmap_debugfs_free_dump_cache(map);
return base;
}
c->min = p;
c->base_reg = i;
}
@ -101,14 +116,34 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
}
}
/* Close the last entry off if we didn't scan beyond it */
if (c) {
c->max = p - 1;
list_add_tail(&c->list,
&map->debugfs_off_cache);
} else {
return base;
}
/*
* This should never happen; we return above if we fail to
* allocate and we should never be in this code if there are
* no registers at all.
*/
if (list_empty(&map->debugfs_off_cache)) {
WARN_ON(list_empty(&map->debugfs_off_cache));
return base;
}
/* Find the relevant block */
list_for_each_entry(c, &map->debugfs_off_cache, list) {
if (*pos >= c->min && *pos <= c->max) {
if (from >= c->min && from <= c->max) {
*pos = c->min;
return c->base_reg;
}
ret = c->max;
*pos = c->min;
ret = c->base_reg;
}
return ret;
@ -387,16 +422,8 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
void regmap_debugfs_exit(struct regmap *map)
{
struct regmap_debugfs_off_cache *c;
debugfs_remove_recursive(map->debugfs);
while (!list_empty(&map->debugfs_off_cache)) {
c = list_first_entry(&map->debugfs_off_cache,
struct regmap_debugfs_off_cache,
list);
list_del(&c->list);
kfree(c);
}
regmap_debugfs_free_dump_cache(map);
kfree(map->debugfs_name);
}


@ -69,24 +69,15 @@ int cpuidle_play_dead(void)
{
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
int i, dead_state = -1;
int power_usage = INT_MAX;
int i;
if (!drv)
return -ENODEV;
/* Find lowest-power state that supports long-term idle */
for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
if (s->power_usage < power_usage && s->enter_dead) {
power_usage = s->power_usage;
dead_state = i;
}
}
if (dead_state != -1)
return drv->states[dead_state].enter_dead(dev, dead_state);
for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
if (drv->states[i].enter_dead)
return drv->states[i].enter_dead(dev, i);
return -ENODEV;
}


@ -19,34 +19,9 @@ DEFINE_SPINLOCK(cpuidle_driver_lock);
static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu);
static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu);
static void set_power_states(struct cpuidle_driver *drv)
{
int i;
/*
* cpuidle driver should set the drv->power_specified bit
* before registering if the driver provides
* power_usage numbers.
*
* If power_specified is not set,
* we fill in power_usage with decreasing values as the
* cpuidle code has an implicit assumption that state Cn
* uses less power than C(n-1).
*
* With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
* an power value of -1. So we use -2, -3, etc, for other
* c-states.
*/
for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
drv->states[i].power_usage = -1 - i;
}
static void __cpuidle_driver_init(struct cpuidle_driver *drv)
{
drv->refcnt = 0;
if (!drv->power_specified)
set_power_states(drv);
}
static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu)


@ -312,7 +312,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
int power_usage = INT_MAX;
int i;
int multiplier;
struct timespec t;
@ -383,11 +382,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
if (s->exit_latency * multiplier > data->predicted_us)
continue;
if (s->power_usage < power_usage) {
power_usage = s->power_usage;
data->last_state_idx = i;
data->exit_us = s->exit_latency;
}
data->last_state_idx = i;
data->exit_us = s->exit_latency;
}
/* not deepest C-state chosen for low predicted residency */


@ -374,7 +374,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
/* state statistics */
for (i = 0; i < drv->state_count; i++) {
for (i = 0; i < device->state_count; i++) {
kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
if (!kobj)
goto error_state;


@ -221,11 +221,13 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
BUG_ON(!hole_node->hole_follows || node->allocated);
if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
if (adj_start < start)
adj_start = start;
if (adj_end > end)
adj_end = end;
if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
if (alignment) {
unsigned tmp = adj_start % alignment;
@ -506,7 +508,7 @@ void drm_mm_init_scan(struct drm_mm *mm,
mm->scan_size = size;
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
mm->scan_hit_size = 0;
mm->scan_hit_end = 0;
mm->scan_check_range = 0;
mm->prev_scanned_node = NULL;
}
@ -533,7 +535,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm,
mm->scan_size = size;
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
mm->scan_hit_size = 0;
mm->scan_hit_end = 0;
mm->scan_start = start;
mm->scan_end = end;
mm->scan_check_range = 1;
@ -552,8 +554,7 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
unsigned long hole_start, hole_end;
unsigned long adj_start;
unsigned long adj_end;
unsigned long adj_start, adj_end;
mm->scanned_blocks++;
@ -570,14 +571,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
node->node_list.next = &mm->prev_scanned_node->node_list;
mm->prev_scanned_node = node;
hole_start = drm_mm_hole_node_start(prev_node);
hole_end = drm_mm_hole_node_end(prev_node);
adj_start = hole_start;
adj_end = hole_end;
if (mm->color_adjust)
mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);
adj_start = hole_start = drm_mm_hole_node_start(prev_node);
adj_end = hole_end = drm_mm_hole_node_end(prev_node);
if (mm->scan_check_range) {
if (adj_start < mm->scan_start)
@ -586,11 +581,14 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
adj_end = mm->scan_end;
}
if (mm->color_adjust)
mm->color_adjust(prev_node, mm->scan_color,
&adj_start, &adj_end);
if (check_free_hole(adj_start, adj_end,
mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = hole_start;
mm->scan_hit_size = hole_end;
mm->scan_hit_end = hole_end;
return 1;
}
@ -626,19 +624,10 @@ int drm_mm_scan_remove_block(struct drm_mm_node *node)
node_list);
prev_node->hole_follows = node->scanned_preceeds_hole;
INIT_LIST_HEAD(&node->node_list);
list_add(&node->node_list, &prev_node->node_list);
/* Only need to check for containement because start&size for the
* complete resulting free block (not just the desired part) is
* stored. */
if (node->start >= mm->scan_hit_start &&
node->start + node->size
<= mm->scan_hit_start + mm->scan_hit_size) {
return 1;
}
return 0;
return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);


@ -1717,7 +1717,8 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
}
static long
i915_gem_purge(struct drm_i915_private *dev_priv, long target)
__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
bool purgeable_only)
{
struct drm_i915_gem_object *obj, *next;
long count = 0;
@ -1725,7 +1726,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
list_for_each_entry_safe(obj, next,
&dev_priv->mm.unbound_list,
gtt_list) {
if (i915_gem_object_is_purgeable(obj) &&
if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
i915_gem_object_put_pages(obj) == 0) {
count += obj->base.size >> PAGE_SHIFT;
if (count >= target)
@ -1736,7 +1737,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
list_for_each_entry_safe(obj, next,
&dev_priv->mm.inactive_list,
mm_list) {
if (i915_gem_object_is_purgeable(obj) &&
if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
i915_gem_object_unbind(obj) == 0 &&
i915_gem_object_put_pages(obj) == 0) {
count += obj->base.size >> PAGE_SHIFT;
@ -1748,6 +1749,12 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
return count;
}
static long
i915_gem_purge(struct drm_i915_private *dev_priv, long target)
{
return __i915_gem_shrink(dev_priv, target, true);
}
static void
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
@ -3522,14 +3529,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
goto out;
}
obj->user_pin_count++;
obj->pin_filp = file;
if (obj->user_pin_count == 1) {
if (obj->user_pin_count == 0) {
ret = i915_gem_object_pin(obj, args->alignment, true, false);
if (ret)
goto out;
}
obj->user_pin_count++;
obj->pin_filp = file;
/* XXX - flush the CPU caches for pinned objects
* as the X server doesn't manage domains yet
*/
@ -4394,6 +4402,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
if (nr_to_scan) {
nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
if (nr_to_scan > 0)
nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
false);
if (nr_to_scan > 0)
i915_gem_shrink_all(dev_priv);
}
@ -4402,7 +4413,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
if (obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;

View File

@ -8598,19 +8598,30 @@ int intel_framebuffer_init(struct drm_device *dev,
{
int ret;
if (obj->tiling_mode == I915_TILING_Y)
if (obj->tiling_mode == I915_TILING_Y) {
DRM_DEBUG("hardware does not support tiling Y\n");
return -EINVAL;
}
if (mode_cmd->pitches[0] & 63)
if (mode_cmd->pitches[0] & 63) {
DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
mode_cmd->pitches[0]);
return -EINVAL;
}
/* FIXME <= Gen4 stride limits are a bit unclear */
if (mode_cmd->pitches[0] > 32768)
if (mode_cmd->pitches[0] > 32768) {
DRM_DEBUG("pitch (%d) must be at less than 32768\n",
mode_cmd->pitches[0]);
return -EINVAL;
}
if (obj->tiling_mode != I915_TILING_NONE &&
mode_cmd->pitches[0] != obj->stride)
mode_cmd->pitches[0] != obj->stride) {
DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
mode_cmd->pitches[0], obj->stride);
return -EINVAL;
}
/* Reject formats not supported by any plane early. */
switch (mode_cmd->pixel_format) {
@ -8621,8 +8632,10 @@ int intel_framebuffer_init(struct drm_device *dev,
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
if (INTEL_INFO(dev)->gen > 3)
if (INTEL_INFO(dev)->gen > 3) {
DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
return -EINVAL;
}
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
@ -8630,18 +8643,22 @@ int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
if (INTEL_INFO(dev)->gen < 4)
if (INTEL_INFO(dev)->gen < 4) {
DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
return -EINVAL;
}
break;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
if (INTEL_INFO(dev)->gen < 6)
if (INTEL_INFO(dev)->gen < 5) {
DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
return -EINVAL;
}
break;
default:
DRM_DEBUG_KMS("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
return -EINVAL;
}
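
Editor's sketch (not part of this patch): the intel_framebuffer_init() hunks turn silent -EINVAL returns into rejections that also log why the framebuffer was refused. A tiny standalone C sketch of the same "validate and say why" pattern; the DBG macro and the limits are stand-ins, not the driver's:

#include <errno.h>
#include <stdio.h>

#define DBG(...) fprintf(stderr, __VA_ARGS__)

/* reject bad pitches with a reason instead of a bare -EINVAL */
static int check_pitch(unsigned int pitch)
{
	if (pitch & 63) {
		DBG("pitch (%u) must be at least 64 byte aligned\n", pitch);
		return -EINVAL;
	}
	if (pitch > 32768) {
		DBG("pitch (%u) must be less than 32768\n", pitch);
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", check_pitch(100), check_pitch(65536), check_pitch(4096));
	return 0;
}
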

View File

@ -774,14 +774,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "ZOTAC ZBOXSD-ID12/ID13",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Gigabyte GA-D525TUD",

View File

@ -44,6 +44,14 @@
* i915.i915_enable_fbc parameter
*/
static bool intel_crtc_active(struct drm_crtc *crtc)
{
/* Be paranoid as we can arrive here with only partial
* state retrieved from the hardware during setup.
*/
return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
}
static void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -405,9 +413,8 @@ void intel_update_fbc(struct drm_device *dev)
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
if (to_intel_crtc(tmp_crtc)->active &&
!to_intel_crtc(tmp_crtc)->primary_disabled &&
tmp_crtc->fb) {
if (intel_crtc_active(tmp_crtc) &&
!to_intel_crtc(tmp_crtc)->primary_disabled) {
if (crtc) {
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
@ -992,7 +999,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
struct drm_crtc *crtc, *enabled = NULL;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (to_intel_crtc(crtc)->active && crtc->fb) {
if (intel_crtc_active(crtc)) {
if (enabled)
return NULL;
enabled = crtc;
@ -1086,7 +1093,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
int entries, tlb_miss;
crtc = intel_get_crtc_for_plane(dev, plane);
if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
if (!intel_crtc_active(crtc)) {
*cursor_wm = cursor->guard_size;
*plane_wm = display->guard_size;
return false;
@ -1215,7 +1222,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
int entries;
crtc = intel_get_crtc_for_plane(dev, plane);
if (crtc->fb == NULL || !to_intel_crtc(crtc)->active)
if (!intel_crtc_active(crtc))
return false;
clock = crtc->mode.clock; /* VESA DOT Clock */
@ -1476,7 +1483,7 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 0);
crtc = intel_get_crtc_for_plane(dev, 0);
if (to_intel_crtc(crtc)->active && crtc->fb) {
if (intel_crtc_active(crtc)) {
int cpp = crtc->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
@ -1490,7 +1497,7 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 1);
crtc = intel_get_crtc_for_plane(dev, 1);
if (to_intel_crtc(crtc)->active && crtc->fb) {
if (intel_crtc_active(crtc)) {
int cpp = crtc->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
@ -2044,7 +2051,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
int entries, tlb_miss;
crtc = intel_get_crtc_for_plane(dev, plane);
if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
if (!intel_crtc_active(crtc)) {
*sprite_wm = display->guard_size;
return false;
}

View File

@ -120,11 +120,10 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset =
intel_gen4_compute_offset_xtiled(&x, &y,
fb->bits_per_pixel / 8,
fb->pitches[0]);
pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
@ -286,11 +285,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
linear_offset = y * fb->pitches[0] + x * pixel_size;
dvssurf_offset =
intel_gen4_compute_offset_xtiled(&x, &y,
fb->bits_per_pixel / 8,
fb->pitches[0]);
pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;
if (obj->tiling_mode != I915_TILING_NONE)

View File

@ -66,10 +66,8 @@ nouveau_client_create_(const char *name, u64 devname, const char *cfg,
ret = nouveau_handle_create(nv_object(client), ~0, ~0,
nv_object(client), &client->root);
if (ret) {
nouveau_namedb_destroy(&client->base);
if (ret)
return ret;
}
/* prevent init/fini being called, os is in charge of this */
atomic_set(&nv_object(client)->usecount, 2);

View File

@ -109,7 +109,7 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
namedb = namedb->parent;
handle = *phandle = kzalloc(sizeof(*handle), GFP_KERNEL);
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
@ -146,6 +146,9 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
}
hprintk(handle, TRACE, "created\n");
*phandle = handle;
return 0;
}
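
Editor's sketch (not part of this patch): the nouveau_handle_create() hunk stops writing *phandle before the object is complete and only publishes it on the success path, so callers never see a half-built handle. A minimal standalone C version of the pattern; types and names are illustrative. The nouveau_vm_create() hunk later in this diff makes the same change for *pvm.

#include <stdlib.h>
#include <string.h>

struct handle {
	char name[16];
};

/* write *out only once construction has fully succeeded */
static int handle_create(const char *name, struct handle **out)
{
	struct handle *h = calloc(1, sizeof(*h));

	if (!h)
		return -1;
	strncpy(h->name, name, sizeof(h->name) - 1);

	*out = h;	/* publish only on success */
	return 0;
}

int main(void)
{
	struct handle *h = NULL;

	if (handle_create("demo", &h) == 0)
		free(h);
	return 0;
}
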

View File

@ -851,20 +851,23 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
if (nv_device(priv)->chipset < 0x90 ||
nv_device(priv)->chipset == 0x92 ||
nv_device(priv)->chipset == 0xa0) {
for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
i += 3;
} else {
for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
ctrl = nv_rd32(priv, 0x610798 + (i * 8));
i += 3;
if (!(ctrl & (1 << head))) {
if (nv_device(priv)->chipset < 0x90 ||
nv_device(priv)->chipset == 0x92 ||
nv_device(priv)->chipset == 0xa0) {
for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
i += 4;
} else {
for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
ctrl = nv_rd32(priv, 0x610798 + (i * 8));
i += 4;
}
}
if (!(ctrl & (1 << head)))
return false;
i--;
data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
if (data) {
@ -898,20 +901,23 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
if (nv_device(priv)->chipset < 0x90 ||
nv_device(priv)->chipset == 0x92 ||
nv_device(priv)->chipset == 0xa0) {
for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
i += 3;
} else {
for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
ctrl = nv_rd32(priv, 0x610794 + (i * 8));
i += 3;
if (!(ctrl & (1 << head))) {
if (nv_device(priv)->chipset < 0x90 ||
nv_device(priv)->chipset == 0x92 ||
nv_device(priv)->chipset == 0xa0) {
for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
i += 4;
} else {
for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
ctrl = nv_rd32(priv, 0x610794 + (i * 8));
i += 4;
}
}
if (!(ctrl & (1 << head)))
return 0x0000;
i--;
data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
if (!data)

View File

@ -36,6 +36,9 @@ nouveau_client(void *obj)
int nouveau_client_create_(const char *name, u64 device, const char *cfg,
const char *dbg, int, void **);
#define nouveau_client_destroy(p) \
nouveau_namedb_destroy(&(p)->base)
int nouveau_client_init(struct nouveau_client *);
int nouveau_client_fini(struct nouveau_client *, bool suspend);

View File

@ -38,6 +38,8 @@ enum nvbios_pll_type {
PLL_UNK42 = 0x42,
PLL_VPLL0 = 0x80,
PLL_VPLL1 = 0x81,
PLL_VPLL2 = 0x82,
PLL_VPLL3 = 0x83,
PLL_MAX = 0xff
};

View File

@ -1534,7 +1534,6 @@ init_io(struct nvbios_init *init)
mdelay(10);
init_wr32(init, 0x614100, 0x10000018);
init_wr32(init, 0x614900, 0x10000018);
return;
}
value = init_rdport(init, port) & mask;

View File

@ -52,6 +52,8 @@ nvc0_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
switch (info.type) {
case PLL_VPLL0:
case PLL_VPLL1:
case PLL_VPLL2:
case PLL_VPLL3:
nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100);
nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M);
nv_wr32(priv, info.reg + 0x10, fN << 16);

View File

@ -145,14 +145,14 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
mem->memtype = type;
mem->size = size;
mutex_lock(&mm->mutex);
mutex_lock(&pfb->base.mutex);
do {
if (back)
ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r);
else
ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r);
if (ret) {
mutex_unlock(&mm->mutex);
mutex_unlock(&pfb->base.mutex);
pfb->ram.put(pfb, &mem);
return ret;
}
@ -160,7 +160,7 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
list_add_tail(&r->rl_entry, &mem->regions);
size -= r->length;
} while (size);
mutex_unlock(&mm->mutex);
mutex_unlock(&pfb->base.mutex);
r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
mem->offset = (u64)r->offset << 12;

View File

@ -40,15 +40,21 @@ nouveau_instobj_create_(struct nouveau_object *parent,
if (ret)
return ret;
mutex_lock(&imem->base.mutex);
list_add(&iobj->head, &imem->list);
mutex_unlock(&imem->base.mutex);
return 0;
}
void
nouveau_instobj_destroy(struct nouveau_instobj *iobj)
{
if (iobj->head.prev)
list_del(&iobj->head);
struct nouveau_subdev *subdev = nv_subdev(iobj->base.engine);
mutex_lock(&subdev->mutex);
list_del(&iobj->head);
mutex_unlock(&subdev->mutex);
return nouveau_object_destroy(&iobj->base);
}
@ -88,6 +94,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
if (ret)
return ret;
mutex_lock(&imem->base.mutex);
list_for_each_entry(iobj, &imem->list, head) {
if (iobj->suspend) {
for (i = 0; i < iobj->size; i += 4)
@ -97,6 +105,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
}
}
mutex_unlock(&imem->base.mutex);
return 0;
}
@ -104,17 +114,26 @@ int
nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
{
struct nouveau_instobj *iobj;
int i;
int i, ret = 0;
if (suspend) {
mutex_lock(&imem->base.mutex);
list_for_each_entry(iobj, &imem->list, head) {
iobj->suspend = vmalloc(iobj->size);
if (iobj->suspend) {
for (i = 0; i < iobj->size; i += 4)
iobj->suspend[i / 4] = nv_ro32(iobj, i);
} else
return -ENOMEM;
if (!iobj->suspend) {
ret = -ENOMEM;
break;
}
for (i = 0; i < iobj->size; i += 4)
iobj->suspend[i / 4] = nv_ro32(iobj, i);
}
mutex_unlock(&imem->base.mutex);
if (ret)
return ret;
}
return nouveau_subdev_fini(&imem->base, suspend);
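
Editor's sketch (not part of this patch): the instmem hunks take a mutex around every walk of imem->list and, when an allocation fails during suspend, record the error and break so the lock is always dropped before returning. A self-contained pthread sketch of that shape; the names are mine:

#include <pthread.h>
#include <stdlib.h>

struct item {
	struct item *next;
	size_t size;
	void *suspend;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *items;	/* empty list in this demo */

static int suspend_all(void)
{
	struct item *it;
	int ret = 0;

	pthread_mutex_lock(&list_lock);
	for (it = items; it; it = it->next) {
		it->suspend = malloc(it->size);
		if (!it->suspend) {
			ret = -1;	/* remember the error ... */
			break;		/* ... but still fall through to the unlock */
		}
	}
	pthread_mutex_unlock(&list_lock);
	return ret;
}

int main(void)
{
	return suspend_all() ? 1 : 0;
}
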

View File

@ -352,7 +352,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
u64 mm_length = (offset + length) - mm_offset;
int ret;
vm = *pvm = kzalloc(sizeof(*vm), GFP_KERNEL);
vm = kzalloc(sizeof(*vm), GFP_KERNEL);
if (!vm)
return -ENOMEM;
@ -376,6 +376,8 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
return ret;
}
*pvm = vm;
return 0;
}

View File

@ -127,12 +127,26 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
struct nouveau_encoder **pnv_encoder)
{
struct drm_device *dev = connector->dev;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
int i;
struct nouveau_i2c_port *port = NULL;
int i, panel = -ENODEV;
/* eDP panels need powering on by us (if the VBIOS doesn't default it
* to on) before doing any AUX channel transactions. LVDS panel power
* is handled by the SOR itself, and not required for LVDS DDC.
*/
if (nv_connector->type == DCB_CONNECTOR_eDP) {
panel = gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
if (panel == 0) {
gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
msleep(300);
}
}
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
struct nouveau_i2c_port *port = NULL;
struct nouveau_encoder *nv_encoder;
struct drm_mode_object *obj;
int id;
@ -150,11 +164,19 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
port = i2c->find(i2c, nv_encoder->dcb->i2c_index);
if (port && nv_probe_i2c(port, 0x50)) {
*pnv_encoder = nv_encoder;
return port;
break;
}
port = NULL;
}
return NULL;
/* eDP panel not detected, restore panel power GPIO to previous
* state to avoid confusing the SOR for other output types.
*/
if (!port && panel == 0)
gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
return port;
}
static struct nouveau_encoder *
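
Editor's sketch (not part of this patch): the nouveau_connector_ddc_detect() hunk saves the previous panel-power GPIO state, powers the eDP panel up just for the probe, and puts the GPIO back if nothing was detected. The shape of that save/probe/restore dance in standalone C; the GPIO and the probe are faked here:

#include <stdbool.h>
#include <stdio.h>

static int panel_power;		/* stand-in for the panel-power GPIO */

static bool probe_ddc(void)
{
	return false;		/* pretend no panel answered */
}

static bool detect_edp(void)
{
	int prev = panel_power;
	bool found;

	if (prev == 0)
		panel_power = 1;	/* power up just for the AUX/DDC probe */

	found = probe_ddc();

	if (!found && prev == 0)
		panel_power = prev;	/* restore the old state on failure */

	return found;
}

int main(void)
{
	bool found = detect_edp();

	printf("found=%d power=%d\n", found, panel_power);
	return 0;
}
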

View File

@ -225,15 +225,6 @@ nouveau_display_init(struct drm_device *dev)
if (ret)
return ret;
/* power on internal panel if it's not already. the init tables of
* some vbios default this to off for some reason, causing the
* panel to not work after resume
*/
if (gpio && gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff) == 0) {
gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
msleep(300);
}
/* enable polling for external displays */
drm_kms_helper_poll_enable(dev);

View File

@ -84,11 +84,16 @@ nouveau_cli_create(struct pci_dev *pdev, const char *name,
struct nouveau_cli *cli;
int ret;
*pcli = NULL;
ret = nouveau_client_create_(name, nouveau_name(pdev), nouveau_config,
nouveau_debug, size, pcli);
cli = *pcli;
if (ret)
if (ret) {
if (cli)
nouveau_client_destroy(&cli->base);
*pcli = NULL;
return ret;
}
mutex_init(&cli->mutex);
return 0;

View File

@ -60,6 +60,7 @@ u32 nv10_fence_read(struct nouveau_channel *);
void nv10_fence_context_del(struct nouveau_channel *);
void nv10_fence_destroy(struct nouveau_drm *);
int nv10_fence_create(struct nouveau_drm *);
void nv17_fence_resume(struct nouveau_drm *drm);
int nv50_fence_create(struct nouveau_drm *);
int nv84_fence_create(struct nouveau_drm *);

View File

@ -505,7 +505,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
static inline bool is_powersaving_dpms(int mode)
{
return (mode != DRM_MODE_DPMS_ON);
return mode != DRM_MODE_DPMS_ON && mode != NV_DPMS_CLEARED;
}
static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)

View File

@ -162,6 +162,13 @@ nv10_fence_destroy(struct nouveau_drm *drm)
kfree(priv);
}
void nv17_fence_resume(struct nouveau_drm *drm)
{
struct nv10_fence_priv *priv = drm->fence;
nouveau_bo_wr32(priv->bo, 0, priv->sequence);
}
int
nv10_fence_create(struct nouveau_drm *drm)
{
@ -197,6 +204,7 @@ nv10_fence_create(struct nouveau_drm *drm)
if (ret == 0) {
nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
priv->base.sync = nv17_fence_sync;
priv->base.resume = nv17_fence_resume;
}
}

View File

@ -122,6 +122,7 @@ nv50_fence_create(struct nouveau_drm *drm)
if (ret == 0) {
nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
priv->base.sync = nv17_fence_sync;
priv->base.resume = nv17_fence_resume;
}
if (ret)

View File

@ -2476,8 +2476,10 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
kfree(parser->relocs);
for (i = 0; i < parser->nchunks; i++) {
kfree(parser->chunks[i].kdata);
kfree(parser->chunks[i].kpage[0]);
kfree(parser->chunks[i].kpage[1]);
if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
kfree(parser->chunks[i].kpage[0]);
kfree(parser->chunks[i].kpage[1]);
}
}
kfree(parser->chunks);
kfree(parser->chunks_array);
@ -2561,16 +2563,16 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_chunk *relocs_chunk;
unsigned idx;
*cs_reloc = NULL;
if (p->chunk_relocs_idx == -1) {
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
*cs_reloc = NULL;
relocs_chunk = &p->chunks[p->chunk_relocs_idx];
idx = p->dma_reloc_idx;
if (idx >= relocs_chunk->length_dw) {
if (idx >= p->nrelocs) {
DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
idx, relocs_chunk->length_dw);
idx, p->nrelocs);
return -EINVAL;
}
*cs_reloc = p->relocs_ptr[idx];

View File

@ -279,13 +279,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[p->chunk_ib_idx].length_dw);
return -EINVAL;
}
if ((p->rdev->flags & RADEON_IS_AGP)) {
if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
kfree(p->chunks[i].kpage[0]);
kfree(p->chunks[i].kpage[1]);
kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
return -ENOMEM;
}
}
@ -583,7 +583,8 @@ static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
int i;
int size = PAGE_SIZE;
bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true;
bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
false : true;
for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
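
Editor's sketch (not part of this patch): the radeon_cs_parser_init() hunk fixes the error path to free the kpage pointers of the chunk it was actually filling (p->chunk_ib_idx) rather than chunks[i]; this hunk and the r600_cs.c one above also add a NULL check on p->rdev before looking at its flags. A small standalone C sketch of the "free exactly what this path allocated" idea; the structure and names are invented for the demo:

#include <stdlib.h>

struct chunk {
	void *kpage[2];
};

/* on failure, release the pages of the chunk being filled, not some other index */
static int alloc_ib_pages(struct chunk *chunks, int ib_idx)
{
	chunks[ib_idx].kpage[0] = malloc(4096);
	chunks[ib_idx].kpage[1] = malloc(4096);
	if (!chunks[ib_idx].kpage[0] || !chunks[ib_idx].kpage[1]) {
		free(chunks[ib_idx].kpage[0]);
		free(chunks[ib_idx].kpage[1]);
		chunks[ib_idx].kpage[0] = NULL;
		chunks[ib_idx].kpage[1] = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	struct chunk chunks[4] = { { { NULL, NULL } } };

	if (alloc_ib_pages(chunks, 2) == 0) {
		free(chunks[2].kpage[0]);
		free(chunks[2].kpage[1]);
	}
	return 0;
}
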

View File

@ -640,6 +640,14 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
enum drm_connector_status found = connector_status_disconnected;
bool color = true;
/* just don't bother on RN50; those chips are often connected to remoting
* console hw and we often fail to load-detect them. So to make
* everyone happy, report the encoder as always connected.
*/
if (ASIC_IS_RN50(rdev)) {
return connector_status_connected;
}
/* save the regs we need */
vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);

View File

@ -22,13 +22,17 @@
static u8 *udl_get_edid(struct udl_device *udl)
{
u8 *block;
char rbuf[3];
char *rbuf;
int ret, i;
block = kmalloc(EDID_LENGTH, GFP_KERNEL);
if (block == NULL)
return NULL;
rbuf = kmalloc(2, GFP_KERNEL);
if (rbuf == NULL)
goto error;
for (i = 0; i < EDID_LENGTH; i++) {
ret = usb_control_msg(udl->ddev->usbdev,
usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02),
@ -36,16 +40,17 @@ static u8 *udl_get_edid(struct udl_device *udl)
HZ);
if (ret < 1) {
DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
i--;
goto error;
}
block[i] = rbuf[1];
}
kfree(rbuf);
return block;
error:
kfree(block);
kfree(rbuf);
return NULL;
}
@ -57,6 +62,14 @@ static int udl_get_modes(struct drm_connector *connector)
edid = (struct edid *)udl_get_edid(udl);
/*
* We only read the main block, but if the monitor reports extension
* blocks then the drm edid code expects them to be present, so patch
* the extension count to 0.
*/
edid->checksum += edid->extensions;
edid->extensions = 0;
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
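
Editor's note (not part of this patch): an EDID block is 128 bytes whose byte sum must be 0 mod 256, so when the hunk zeroes edid->extensions it adds the old extension count to edid->checksum to keep the block valid (byte 126 is the extension count, byte 127 the checksum). The rbuf change in the same file moves the transfer buffer to kmalloc() since USB transfer buffers must be DMA-able and cannot live on the stack. A standalone check of the checksum arithmetic:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t edid_sum(const uint8_t *blk)
{
	uint8_t sum = 0;
	int i;

	for (i = 0; i < 128; i++)
		sum += blk[i];
	return sum;	/* must be 0 for a valid block */
}

int main(void)
{
	uint8_t edid[128];

	memset(edid, 0, sizeof(edid));
	edid[126] = 2;					/* pretend 2 extension blocks */
	edid[127] = (uint8_t)(0 - edid_sum(edid));	/* make the block valid */

	/* same fix-up as the hunk: drop the extensions, keep the sum at 0 */
	edid[127] += edid[126];
	edid[126] = 0;

	printf("sum after patch: %u\n", edid_sum(edid));	/* prints 0 */
	return 0;
}
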

View File

@ -19,6 +19,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/vexpress.h>

View File

@ -8,6 +8,7 @@ config HID_SENSOR_ACCEL_3D
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
tristate "HID Accelerometers 3D"
help
Say yes here to build support for the HID SENSOR

View File

@ -411,7 +411,11 @@ static int ad7266_probe(struct spi_device *spi)
if (ret)
goto error_put_reg;
st->vref_uv = regulator_get_voltage(st->reg);
ret = regulator_get_voltage(st->reg);
if (ret < 0)
goto error_disable_reg;
st->vref_uv = ret;
} else {
/* Use internal reference */
st->vref_uv = 2500000;
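
Editor's sketch (not part of this patch): regulator_get_voltage() returns either a voltage in microvolts or a negative errno, so the hunk checks the return value before storing it in vref_uv. The value-or-errno convention in a standalone C sketch; the getter is faked:

#include <stdio.h>

/* returns microvolts on success or a negative errno-style value on failure */
static int get_voltage_uv(void)
{
	return -22;	/* pretend -EINVAL */
}

int main(void)
{
	unsigned int vref_uv;
	int ret = get_voltage_uv();

	if (ret < 0) {
		fprintf(stderr, "reading reference voltage failed: %d\n", ret);
		return 1;
	}
	vref_uv = ret;
	printf("vref = %u uV\n", vref_uv);
	return 0;
}
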

Some files were not shown because too many files have changed in this diff.