Merge branch 'core/urgent' into sched/core

Merge in asm goto fix, to be able to apply the asm/rmwcc.h fix.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit ec0ad3d01f
				| @ -296,10 +296,15 @@ archprepare: | ||||
| # Convert bzImage to zImage
 | ||||
| bzImage: zImage | ||||
| 
 | ||||
| zImage Image xipImage bootpImage uImage: vmlinux | ||||
| BOOT_TARGETS	= zImage Image xipImage bootpImage uImage | ||||
| INSTALL_TARGETS	= zinstall uinstall install | ||||
| 
 | ||||
| PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS) | ||||
| 
 | ||||
| $(BOOT_TARGETS): vmlinux | ||||
| 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ | ||||
| 
 | ||||
| zinstall uinstall install: vmlinux | ||||
| $(INSTALL_TARGETS): | ||||
| 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@ | ||||
| 
 | ||||
| %.dtb: | scripts | ||||
|  | ||||
| @ -95,24 +95,24 @@ initrd: | ||||
| 	@test "$(INITRD)" != "" || \
 | ||||
| 	(echo You must specify INITRD; exit -1) | ||||
| 
 | ||||
| install: $(obj)/Image | ||||
| 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 | ||||
| install: | ||||
| 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 | ||||
| 	$(obj)/Image System.map "$(INSTALL_PATH)" | ||||
| 
 | ||||
| zinstall: $(obj)/zImage | ||||
| 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 | ||||
| zinstall: | ||||
| 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 | ||||
| 	$(obj)/zImage System.map "$(INSTALL_PATH)" | ||||
| 
 | ||||
| uinstall: $(obj)/uImage | ||||
| 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 | ||||
| uinstall: | ||||
| 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 | ||||
| 	$(obj)/uImage System.map "$(INSTALL_PATH)" | ||||
| 
 | ||||
| zi: | ||||
| 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 | ||||
| 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 | ||||
| 	$(obj)/zImage System.map "$(INSTALL_PATH)" | ||||
| 
 | ||||
| i: | ||||
| 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 | ||||
| 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 | ||||
| 	$(obj)/Image System.map "$(INSTALL_PATH)" | ||||
| 
 | ||||
| subdir-	    := bootp compressed dts | ||||
|  | ||||
| @ -20,6 +20,20 @@ | ||||
| #   $4 - default install path (blank if root directory) | ||||
| # | ||||
| 
 | ||||
| verify () { | ||||
| 	if [ ! -f "$1" ]; then | ||||
| 		echo ""                                                   1>&2 | ||||
| 		echo " *** Missing file: $1"                              1>&2 | ||||
| 		echo ' *** You need to run "make" before "make install".' 1>&2 | ||||
| 		echo ""                                                   1>&2 | ||||
| 		exit 1 | ||||
| 	fi | ||||
| } | ||||
| 
 | ||||
| # Make sure the files actually exist | ||||
| verify "$2" | ||||
| verify "$3" | ||||
| 
 | ||||
| # User may have a custom install script | ||||
| if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi | ||||
| if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi | ||||
|  | ||||
| @ -16,7 +16,7 @@ | ||||
| 
 | ||||
| static __always_inline bool arch_static_branch(struct static_key *key) | ||||
| { | ||||
| 	asm goto("1:\n\t" | ||||
| 	asm_volatile_goto("1:\n\t" | ||||
| 		 JUMP_LABEL_NOP "\n\t" | ||||
| 		 ".pushsection __jump_table,  \"aw\"\n\t" | ||||
| 		 ".word 1b, %l[l_yes], %c0\n\t" | ||||
|  | ||||
| @ -22,7 +22,7 @@ | ||||
| 
 | ||||
| static __always_inline bool arch_static_branch(struct static_key *key) | ||||
| { | ||||
| 	asm goto("1:\tnop\n\t" | ||||
| 	asm_volatile_goto("1:\tnop\n\t" | ||||
| 		"nop\n\t" | ||||
| 		".pushsection __jump_table,  \"aw\"\n\t" | ||||
| 		WORD_INSN " 1b, %l[l_yes], %0\n\t" | ||||
|  | ||||
| @ -19,7 +19,7 @@ | ||||
| 
 | ||||
| static __always_inline bool arch_static_branch(struct static_key *key) | ||||
| { | ||||
| 	asm goto("1:\n\t" | ||||
| 	asm_volatile_goto("1:\n\t" | ||||
| 		 "nop\n\t" | ||||
| 		 ".pushsection __jump_table,  \"aw\"\n\t" | ||||
| 		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" | ||||
|  | ||||
| @ -495,14 +495,15 @@ void __do_irq(struct pt_regs *regs) | ||||
| void do_IRQ(struct pt_regs *regs) | ||||
| { | ||||
| 	struct pt_regs *old_regs = set_irq_regs(regs); | ||||
| 	struct thread_info *curtp, *irqtp; | ||||
| 	struct thread_info *curtp, *irqtp, *sirqtp; | ||||
| 
 | ||||
| 	/* Switch to the irq stack to handle this */ | ||||
| 	curtp = current_thread_info(); | ||||
| 	irqtp = hardirq_ctx[raw_smp_processor_id()]; | ||||
| 	sirqtp = softirq_ctx[raw_smp_processor_id()]; | ||||
| 
 | ||||
| 	/* Already there ? */ | ||||
| 	if (unlikely(curtp == irqtp)) { | ||||
| 	if (unlikely(curtp == irqtp || curtp == sirqtp)) { | ||||
| 		__do_irq(regs); | ||||
| 		set_irq_regs(old_regs); | ||||
| 		return; | ||||
|  | ||||
| @ -1066,7 +1066,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | ||||
| BEGIN_FTR_SECTION | ||||
| 	mfspr	r8, SPRN_DSCR | ||||
| 	ld	r7, HSTATE_DSCR(r13) | ||||
| 	std	r8, VCPU_DSCR(r7) | ||||
| 	std	r8, VCPU_DSCR(r9) | ||||
| 	mtspr	SPRN_DSCR, r7 | ||||
| END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | ||||
| 
 | ||||
|  | ||||
| @ -332,6 +332,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, | ||||
| 	unsigned long hva; | ||||
| 	int pfnmap = 0; | ||||
| 	int tsize = BOOK3E_PAGESZ_4K; | ||||
| 	int ret = 0; | ||||
| 	unsigned long mmu_seq; | ||||
| 	struct kvm *kvm = vcpu_e500->vcpu.kvm; | ||||
| 
 | ||||
| 	/* used to check for invalidations in progress */ | ||||
| 	mmu_seq = kvm->mmu_notifier_seq; | ||||
| 	smp_rmb(); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Translate guest physical to true physical, acquiring | ||||
| @ -449,6 +456,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, | ||||
| 		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); | ||||
| 	} | ||||
| 
 | ||||
| 	spin_lock(&kvm->mmu_lock); | ||||
| 	if (mmu_notifier_retry(kvm, mmu_seq)) { | ||||
| 		ret = -EAGAIN; | ||||
| 		goto out; | ||||
| 	} | ||||
| 
 | ||||
| 	kvmppc_e500_ref_setup(ref, gtlbe, pfn); | ||||
| 
 | ||||
| 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, | ||||
| @ -457,10 +470,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, | ||||
| 	/* Clear i-cache for new pages */ | ||||
| 	kvmppc_mmu_flush_icache(pfn); | ||||
| 
 | ||||
| out: | ||||
| 	spin_unlock(&kvm->mmu_lock); | ||||
| 
 | ||||
| 	/* Drop refcount on page, so that mmu notifiers can clear it */ | ||||
| 	kvm_release_pfn_clean(pfn); | ||||
| 
 | ||||
| 	return 0; | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| /* XXX only map the one-one case, for now use TLB0 */ | ||||
|  | ||||
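The two hunks above are the standard KVM MMU-notifier race check: snapshot mmu_notifier_seq before the (possibly sleeping) guest-to-host translation, issue a read barrier, then re-check under mmu_lock and bail out with -EAGAIN if an invalidation ran in between. A condensed sketch of the pattern, simplified from the code above (not a drop-in implementation):

	static int shadow_map_sketch(struct kvm *kvm, gfn_t gfn)
	{
		unsigned long mmu_seq;
		int ret = 0;

		mmu_seq = kvm->mmu_notifier_seq;	/* snapshot before the lookup */
		smp_rmb();				/* order against notifier updates */

		/* ... resolve gfn to a host pfn; may sleep, runs without mmu_lock ... */

		spin_lock(&kvm->mmu_lock);
		if (mmu_notifier_retry(kvm, mmu_seq)) {
			/* an invalidation raced with the lookup; caller retries */
			ret = -EAGAIN;
			goto out;
		}
		/* ... install the shadow TLB entry while holding mmu_lock ... */
	out:
		spin_unlock(&kvm->mmu_lock);
		return ret;
	}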
| @ -15,7 +15,7 @@ | ||||
| 
 | ||||
| static __always_inline bool arch_static_branch(struct static_key *key) | ||||
| { | ||||
| 	asm goto("0:	brcl 0,0\n" | ||||
| 	asm_volatile_goto("0:	brcl 0,0\n" | ||||
| 		".pushsection __jump_table, \"aw\"\n" | ||||
| 		ASM_ALIGN "\n" | ||||
| 		ASM_PTR " 0b, %l[label], %0\n" | ||||
|  | ||||
| @ -40,28 +40,26 @@ static inline void *load_real_addr(void *addr) | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Copy up to one page to vmalloc or real memory | ||||
|  * Copy real to virtual or real memory | ||||
|  */ | ||||
| static ssize_t copy_page_real(void *buf, void *src, size_t csize) | ||||
| static int copy_from_realmem(void *dest, void *src, size_t count) | ||||
| { | ||||
| 	size_t size; | ||||
| 	unsigned long size; | ||||
| 	int rc; | ||||
| 
 | ||||
| 	if (is_vmalloc_addr(buf)) { | ||||
| 		BUG_ON(csize >= PAGE_SIZE); | ||||
| 		/* If buf is not page aligned, copy first part */ | ||||
| 		size = min(roundup(__pa(buf), PAGE_SIZE) - __pa(buf), csize); | ||||
| 		if (size) { | ||||
| 			if (memcpy_real(load_real_addr(buf), src, size)) | ||||
| 				return -EFAULT; | ||||
| 			buf += size; | ||||
| 			src += size; | ||||
| 		} | ||||
| 		/* Copy second part */ | ||||
| 		size = csize - size; | ||||
| 		return (size) ? memcpy_real(load_real_addr(buf), src, size) : 0; | ||||
| 	} else { | ||||
| 		return memcpy_real(buf, src, csize); | ||||
| 	} | ||||
| 	if (!count) | ||||
| 		return 0; | ||||
| 	if (!is_vmalloc_or_module_addr(dest)) | ||||
| 		return memcpy_real(dest, src, count); | ||||
| 	do { | ||||
| 		size = min(count, PAGE_SIZE - (__pa(dest) & ~PAGE_MASK)); | ||||
| 		if (memcpy_real(load_real_addr(dest), src, size)) | ||||
| 			return -EFAULT; | ||||
| 		count -= size; | ||||
| 		dest += size; | ||||
| 		src += size; | ||||
| 	} while (count); | ||||
| 	return 0; | ||||
| } | ||||
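The rewritten copy_from_realmem() must split the copy at page boundaries because a vmalloc destination is contiguous only virtually; each page has to be translated (load_real_addr) on its own. The chunking arithmetic in isolation, as plain user-space C with the page size hard-coded (memcpy stands in for memcpy_real; illustrative only):

	#include <stddef.h>
	#include <string.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	/* copy 'count' bytes without any single memcpy crossing a page boundary */
	static void copy_in_page_chunks(void *dest, const void *src, size_t count)
	{
		while (count) {
			/* bytes remaining in the destination's current page */
			size_t size = PAGE_SIZE - ((unsigned long)dest & ~PAGE_MASK);

			if (size > count)
				size = count;
			memcpy(dest, src, size);
			count -= size;
			dest = (char *)dest + size;
			src = (const char *)src + size;
		}
	}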
| 
 | ||||
| /*
 | ||||
| @ -114,7 +112,7 @@ static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize, | ||||
| 		rc = copy_to_user_real((void __force __user *) buf, | ||||
| 				       (void *) src, csize); | ||||
| 	else | ||||
| 		rc = copy_page_real(buf, (void *) src, csize); | ||||
| 		rc = copy_from_realmem(buf, (void *) src, csize); | ||||
| 	return (rc == 0) ? rc : csize; | ||||
| } | ||||
| 
 | ||||
| @ -210,7 +208,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count) | ||||
| 	if (OLDMEM_BASE) { | ||||
| 		if ((unsigned long) src < OLDMEM_SIZE) { | ||||
| 			copied = min(count, OLDMEM_SIZE - (unsigned long) src); | ||||
| 			rc = memcpy_real(dest, src + OLDMEM_BASE, copied); | ||||
| 			rc = copy_from_realmem(dest, src + OLDMEM_BASE, copied); | ||||
| 			if (rc) | ||||
| 				return rc; | ||||
| 		} | ||||
| @ -223,7 +221,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count) | ||||
| 				return rc; | ||||
| 		} | ||||
| 	} | ||||
| 	return memcpy_real(dest + copied, src + copied, count - copied); | ||||
| 	return copy_from_realmem(dest + copied, src + copied, count - copied); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  | ||||
| @ -266,6 +266,7 @@ sysc_sigpending: | ||||
| 	tm	__TI_flags+3(%r12),_TIF_SYSCALL | ||||
| 	jno	sysc_return | ||||
| 	lm	%r2,%r7,__PT_R2(%r11)	# load svc arguments | ||||
| 	l	%r10,__TI_sysc_table(%r12)	# 31 bit system call table | ||||
| 	xr	%r8,%r8			# svc 0 returns -ENOSYS | ||||
| 	clc	__PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2) | ||||
| 	jnl	sysc_nr_ok		# invalid svc number -> do svc 0 | ||||
|  | ||||
| @ -297,6 +297,7 @@ sysc_sigpending: | ||||
| 	tm	__TI_flags+7(%r12),_TIF_SYSCALL | ||||
| 	jno	sysc_return | ||||
| 	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments | ||||
| 	lg	%r10,__TI_sysc_table(%r12)	# address of system call table | ||||
| 	lghi	%r8,0			# svc 0 returns -ENOSYS | ||||
| 	llgh	%r1,__PT_INT_CODE+2(%r11)	# load new svc number | ||||
| 	cghi	%r1,NR_syscalls | ||||
|  | ||||
| @ -67,6 +67,11 @@ static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn) | ||||
| 	case 0xac:	/* stnsm */ | ||||
| 	case 0xad:	/* stosm */ | ||||
| 		return -EINVAL; | ||||
| 	case 0xc6: | ||||
| 		switch (insn[0] & 0x0f) { | ||||
| 		case 0x00: /* exrl   */ | ||||
| 			return -EINVAL; | ||||
| 		} | ||||
| 	} | ||||
| 	switch (insn[0]) { | ||||
| 	case 0x0101:	/* pr	 */ | ||||
| @ -180,7 +185,6 @@ static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn) | ||||
| 		break; | ||||
| 	case 0xc6: | ||||
| 		switch (insn[0] & 0x0f) { | ||||
| 		case 0x00: /* exrl   */ | ||||
| 		case 0x02: /* pfdrl  */ | ||||
| 		case 0x04: /* cghrl  */ | ||||
| 		case 0x05: /* chrl   */ | ||||
|  | ||||
| @ -9,7 +9,7 @@ | ||||
| 
 | ||||
| static __always_inline bool arch_static_branch(struct static_key *key) | ||||
| { | ||||
| 		asm goto("1:\n\t" | ||||
| 		asm_volatile_goto("1:\n\t" | ||||
| 			 "nop\n\t" | ||||
| 			 "nop\n\t" | ||||
| 			 ".pushsection __jump_table,  \"aw\"\n\t" | ||||
|  | ||||
| @ -166,7 +166,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n) | ||||
|  * | ||||
|  * Atomically sets @v to @i and returns old @v | ||||
|  */ | ||||
| static inline u64 atomic64_xchg(atomic64_t *v, u64 n) | ||||
| static inline long long atomic64_xchg(atomic64_t *v, long long n) | ||||
| { | ||||
| 	return xchg64(&v->counter, n); | ||||
| } | ||||
| @ -180,7 +180,8 @@ static inline u64 atomic64_xchg(atomic64_t *v, u64 n) | ||||
|  * Atomically checks if @v holds @o and replaces it with @n if so. | ||||
|  * Returns the old value at @v. | ||||
|  */ | ||||
| static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n) | ||||
| static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, | ||||
| 					long long n) | ||||
| { | ||||
| 	return cmpxchg64(&v->counter, o, n); | ||||
| } | ||||
|  | ||||
| @ -80,7 +80,7 @@ static inline void atomic_set(atomic_t *v, int n) | ||||
| /* A 64bit atomic type */ | ||||
| 
 | ||||
| typedef struct { | ||||
| 	u64 __aligned(8) counter; | ||||
| 	long long counter; | ||||
| } atomic64_t; | ||||
| 
 | ||||
| #define ATOMIC64_INIT(val) { (val) } | ||||
| @ -91,14 +91,14 @@ typedef struct { | ||||
|  * | ||||
|  * Atomically reads the value of @v. | ||||
|  */ | ||||
| static inline u64 atomic64_read(const atomic64_t *v) | ||||
| static inline long long atomic64_read(const atomic64_t *v) | ||||
| { | ||||
| 	/*
 | ||||
| 	 * Requires an atomic op to read both 32-bit parts consistently. | ||||
| 	 * Casting away const is safe since the atomic support routines | ||||
| 	 * do not write to memory if the value has not been modified. | ||||
| 	 */ | ||||
| 	return _atomic64_xchg_add((u64 *)&v->counter, 0); | ||||
| 	return _atomic64_xchg_add((long long *)&v->counter, 0); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
| @ -108,7 +108,7 @@ static inline u64 atomic64_read(const atomic64_t *v) | ||||
|  * | ||||
|  * Atomically adds @i to @v. | ||||
|  */ | ||||
| static inline void atomic64_add(u64 i, atomic64_t *v) | ||||
| static inline void atomic64_add(long long i, atomic64_t *v) | ||||
| { | ||||
| 	_atomic64_xchg_add(&v->counter, i); | ||||
| } | ||||
| @ -120,7 +120,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v) | ||||
|  * | ||||
|  * Atomically adds @i to @v and returns @i + @v | ||||
|  */ | ||||
| static inline u64 atomic64_add_return(u64 i, atomic64_t *v) | ||||
| static inline long long atomic64_add_return(long long i, atomic64_t *v) | ||||
| { | ||||
| 	smp_mb();  /* barrier for proper semantics */ | ||||
| 	return _atomic64_xchg_add(&v->counter, i) + i; | ||||
| @ -135,7 +135,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v) | ||||
|  * Atomically adds @a to @v, so long as @v was not already @u. | ||||
|  * Returns non-zero if @v was not @u, and zero otherwise. | ||||
|  */ | ||||
| static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u) | ||||
| static inline long long atomic64_add_unless(atomic64_t *v, long long a, | ||||
| 					long long u) | ||||
| { | ||||
| 	smp_mb();  /* barrier for proper semantics */ | ||||
| 	return _atomic64_xchg_add_unless(&v->counter, a, u) != u; | ||||
| @ -151,7 +152,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u) | ||||
|  * atomic64_set() can't be just a raw store, since it would be lost if it | ||||
|  * fell between the load and store of one of the other atomic ops. | ||||
|  */ | ||||
| static inline void atomic64_set(atomic64_t *v, u64 n) | ||||
| static inline void atomic64_set(atomic64_t *v, long long n) | ||||
| { | ||||
| 	_atomic64_xchg(&v->counter, n); | ||||
| } | ||||
| @ -236,11 +237,13 @@ extern struct __get_user __atomic_xchg_add_unless(volatile int *p, | ||||
| extern struct __get_user __atomic_or(volatile int *p, int *lock, int n); | ||||
| extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n); | ||||
| extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n); | ||||
| extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n); | ||||
| extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n); | ||||
| extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n); | ||||
| extern u64 __atomic64_xchg_add_unless(volatile u64 *p, | ||||
| 				      int *lock, u64 o, u64 n); | ||||
| extern long long __atomic64_cmpxchg(volatile long long *p, int *lock, | ||||
| 					long long o, long long n); | ||||
| extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n); | ||||
| extern long long __atomic64_xchg_add(volatile long long *p, int *lock, | ||||
| 					long long n); | ||||
| extern long long __atomic64_xchg_add_unless(volatile long long *p, | ||||
| 					int *lock, long long o, long long n); | ||||
| 
 | ||||
| /* Return failure from the atomic wrappers. */ | ||||
| struct __get_user __atomic_bad_address(int __user *addr); | ||||
|  | ||||
| @ -35,10 +35,10 @@ int _atomic_xchg(int *ptr, int n); | ||||
| int _atomic_xchg_add(int *v, int i); | ||||
| int _atomic_xchg_add_unless(int *v, int a, int u); | ||||
| int _atomic_cmpxchg(int *ptr, int o, int n); | ||||
| u64 _atomic64_xchg(u64 *v, u64 n); | ||||
| u64 _atomic64_xchg_add(u64 *v, u64 i); | ||||
| u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u); | ||||
| u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n); | ||||
| long long _atomic64_xchg(long long *v, long long n); | ||||
| long long _atomic64_xchg_add(long long *v, long long i); | ||||
| long long _atomic64_xchg_add_unless(long long *v, long long a, long long u); | ||||
| long long _atomic64_cmpxchg(long long *v, long long o, long long n); | ||||
| 
 | ||||
| #define xchg(ptr, n)							\ | ||||
| 	({								\ | ||||
| @ -53,7 +53,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n); | ||||
| 		if (sizeof(*(ptr)) != 4)				\ | ||||
| 			__cmpxchg_called_with_bad_pointer();		\ | ||||
| 		smp_mb();						\ | ||||
| 		(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \ | ||||
| 		(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o,	\ | ||||
| 						(int)n);		\ | ||||
| 	}) | ||||
| 
 | ||||
| #define xchg64(ptr, n)							\ | ||||
| @ -61,7 +62,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n); | ||||
| 		if (sizeof(*(ptr)) != 8)				\ | ||||
| 			__xchg_called_with_bad_pointer();		\ | ||||
| 		smp_mb();						\ | ||||
| 		(typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n));	\ | ||||
| 		(typeof(*(ptr)))_atomic64_xchg((long long *)(ptr),	\ | ||||
| 						(long long)(n));	\ | ||||
| 	}) | ||||
| 
 | ||||
| #define cmpxchg64(ptr, o, n)						\ | ||||
| @ -69,7 +71,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n); | ||||
| 		if (sizeof(*(ptr)) != 8)				\ | ||||
| 			__cmpxchg_called_with_bad_pointer();		\ | ||||
| 		smp_mb();						\ | ||||
| 		(typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \ | ||||
| 		(typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr,	\ | ||||
| 					(long long)o, (long long)n);	\ | ||||
| 	}) | ||||
| 
 | ||||
| #else | ||||
| @ -81,10 +84,11 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n); | ||||
| 		switch (sizeof(*(ptr))) {				\ | ||||
| 		case 4:							\ | ||||
| 			__x = (typeof(__x))(unsigned long)		\ | ||||
| 				__insn_exch4((ptr), (u32)(unsigned long)(n)); \ | ||||
| 				__insn_exch4((ptr),			\ | ||||
| 					(u32)(unsigned long)(n));	\ | ||||
| 			break;						\ | ||||
| 		case 8:							\ | ||||
| 			__x = (typeof(__x))			\ | ||||
| 			__x = (typeof(__x))				\ | ||||
| 				__insn_exch((ptr), (unsigned long)(n));	\ | ||||
| 			break;						\ | ||||
| 		default:						\ | ||||
| @ -103,10 +107,12 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n); | ||||
| 		switch (sizeof(*(ptr))) {				\ | ||||
| 		case 4:							\ | ||||
| 			__x = (typeof(__x))(unsigned long)		\ | ||||
| 				__insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \ | ||||
| 				__insn_cmpexch4((ptr),			\ | ||||
| 					(u32)(unsigned long)(n));	\ | ||||
| 			break;						\ | ||||
| 		case 8:							\ | ||||
| 			__x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \ | ||||
| 			__x = (typeof(__x))__insn_cmpexch((ptr),	\ | ||||
| 						(long long)(n));	\ | ||||
| 			break;						\ | ||||
| 		default:						\ | ||||
| 			__cmpxchg_called_with_bad_pointer();		\ | ||||
|  | ||||
| @ -15,9 +15,37 @@ | ||||
| #ifndef _ASM_TILE_PERCPU_H | ||||
| #define _ASM_TILE_PERCPU_H | ||||
| 
 | ||||
| register unsigned long __my_cpu_offset __asm__("tp"); | ||||
| #define __my_cpu_offset __my_cpu_offset | ||||
| #define set_my_cpu_offset(tp) (__my_cpu_offset = (tp)) | ||||
| register unsigned long my_cpu_offset_reg asm("tp"); | ||||
| 
 | ||||
| #ifdef CONFIG_PREEMPT | ||||
| /*
 | ||||
|  * For full preemption, we can't just use the register variable | ||||
|  * directly, since we need barrier() to hazard against it, causing the | ||||
|  * compiler to reload anything computed from a previous "tp" value. | ||||
|  * But we also don't want to use volatile asm, since we'd like the | ||||
|  * compiler to be able to cache the value across multiple percpu reads. | ||||
|  * So we use a fake stack read as a hazard against barrier(). | ||||
|  * The 'U' constraint is like 'm' but disallows postincrement. | ||||
|  */ | ||||
| static inline unsigned long __my_cpu_offset(void) | ||||
| { | ||||
| 	unsigned long tp; | ||||
| 	register unsigned long *sp asm("sp"); | ||||
| 	asm("move %0, tp" : "=r" (tp) : "U" (*sp)); | ||||
| 	return tp; | ||||
| } | ||||
| #define __my_cpu_offset __my_cpu_offset() | ||||
| #else | ||||
| /*
 | ||||
|  * We don't need to hazard against barrier() since "tp" doesn't ever | ||||
|  * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only | ||||
|  * changes at function call points, at which we are already re-reading | ||||
|  * the value of "tp" due to "my_cpu_offset_reg" being a global variable. | ||||
|  */ | ||||
| #define __my_cpu_offset my_cpu_offset_reg | ||||
| #endif | ||||
| 
 | ||||
| #define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp)) | ||||
| 
 | ||||
| #include <asm-generic/percpu.h> | ||||
| 
 | ||||
|  | ||||
| @ -66,7 +66,7 @@ static struct hardwall_type hardwall_types[] = { | ||||
| 		0, | ||||
| 		"udn", | ||||
| 		LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list), | ||||
| 		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock), | ||||
| 		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_UDN].lock), | ||||
| 		NULL | ||||
| 	}, | ||||
| #ifndef __tilepro__ | ||||
| @ -77,7 +77,7 @@ static struct hardwall_type hardwall_types[] = { | ||||
| 		1,  /* disabled pending hypervisor support */ | ||||
| 		"idn", | ||||
| 		LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list), | ||||
| 		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock), | ||||
| 		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IDN].lock), | ||||
| 		NULL | ||||
| 	}, | ||||
| 	{  /* access to user-space IPI */ | ||||
| @ -87,7 +87,7 @@ static struct hardwall_type hardwall_types[] = { | ||||
| 		0, | ||||
| 		"ipi", | ||||
| 		LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list), | ||||
| 		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock), | ||||
| 		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IPI].lock), | ||||
| 		NULL | ||||
| 	}, | ||||
| #endif | ||||
|  | ||||
| @ -815,6 +815,9 @@ STD_ENTRY(interrupt_return) | ||||
| 	} | ||||
| 	bzt     r28, 1f | ||||
| 	bnz     r29, 1f | ||||
| 	/* Disable interrupts explicitly for preemption. */ | ||||
| 	IRQ_DISABLE(r20,r21) | ||||
| 	TRACE_IRQS_OFF | ||||
| 	jal     preempt_schedule_irq | ||||
| 	FEEDBACK_REENTER(interrupt_return) | ||||
| 1: | ||||
|  | ||||
| @ -841,6 +841,9 @@ STD_ENTRY(interrupt_return) | ||||
| 	} | ||||
| 	beqzt   r28, 1f | ||||
| 	bnez    r29, 1f | ||||
| 	/* Disable interrupts explicitly for preemption. */ | ||||
| 	IRQ_DISABLE(r20,r21) | ||||
| 	TRACE_IRQS_OFF | ||||
| 	jal     preempt_schedule_irq | ||||
| 	FEEDBACK_REENTER(interrupt_return) | ||||
| 1: | ||||
|  | ||||
| @ -23,6 +23,7 @@ | ||||
| #include <linux/mmzone.h> | ||||
| #include <linux/dcache.h> | ||||
| #include <linux/fs.h> | ||||
| #include <linux/string.h> | ||||
| #include <asm/backtrace.h> | ||||
| #include <asm/page.h> | ||||
| #include <asm/ucontext.h> | ||||
| @ -332,21 +333,18 @@ static void describe_addr(struct KBacktraceIterator *kbt, | ||||
| 	} | ||||
| 
 | ||||
| 	if (vma->vm_file) { | ||||
| 		char *s; | ||||
| 		p = d_path(&vma->vm_file->f_path, buf, bufsize); | ||||
| 		if (IS_ERR(p)) | ||||
| 			p = "?"; | ||||
| 		s = strrchr(p, '/'); | ||||
| 		if (s) | ||||
| 			p = s+1; | ||||
| 		name = kbasename(p); | ||||
| 	} else { | ||||
| 		p = "anon"; | ||||
| 		name = "anon"; | ||||
| 	} | ||||
| 
 | ||||
| 	/* Generate a string description of the vma info. */ | ||||
| 	namelen = strlen(p); | ||||
| 	namelen = strlen(name); | ||||
| 	remaining = (bufsize - 1) - namelen; | ||||
| 	memmove(buf, p, namelen); | ||||
| 	memmove(buf, name, namelen); | ||||
| 	snprintf(buf + namelen, remaining, "[%lx+%lx] ", | ||||
| 		 vma->vm_start, vma->vm_end - vma->vm_start); | ||||
| } | ||||
|  | ||||
| @ -107,19 +107,19 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask) | ||||
| EXPORT_SYMBOL(_atomic_xor); | ||||
| 
 | ||||
| 
 | ||||
| u64 _atomic64_xchg(u64 *v, u64 n) | ||||
| long long _atomic64_xchg(long long *v, long long n) | ||||
| { | ||||
| 	return __atomic64_xchg(v, __atomic_setup(v), n); | ||||
| } | ||||
| EXPORT_SYMBOL(_atomic64_xchg); | ||||
| 
 | ||||
| u64 _atomic64_xchg_add(u64 *v, u64 i) | ||||
| long long _atomic64_xchg_add(long long *v, long long i) | ||||
| { | ||||
| 	return __atomic64_xchg_add(v, __atomic_setup(v), i); | ||||
| } | ||||
| EXPORT_SYMBOL(_atomic64_xchg_add); | ||||
| 
 | ||||
| u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u) | ||||
| long long _atomic64_xchg_add_unless(long long *v, long long a, long long u) | ||||
| { | ||||
| 	/*
 | ||||
| 	 * Note: argument order is switched here since it is easier | ||||
| @ -130,7 +130,7 @@ u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u) | ||||
| } | ||||
| EXPORT_SYMBOL(_atomic64_xchg_add_unless); | ||||
| 
 | ||||
| u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n) | ||||
| long long _atomic64_cmpxchg(long long *v, long long o, long long n) | ||||
| { | ||||
| 	return __atomic64_cmpxchg(v, __atomic_setup(v), o, n); | ||||
| } | ||||
|  | ||||
| @ -374,7 +374,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) | ||||
| 		 * Catch too early usage of this before alternatives | ||||
| 		 * have run. | ||||
| 		 */ | ||||
| 		asm goto("1: jmp %l[t_warn]\n" | ||||
| 		asm_volatile_goto("1: jmp %l[t_warn]\n" | ||||
| 			 "2:\n" | ||||
| 			 ".section .altinstructions,\"a\"\n" | ||||
| 			 " .long 1b - .\n" | ||||
| @ -388,7 +388,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) | ||||
| 
 | ||||
| #endif | ||||
| 
 | ||||
| 		asm goto("1: jmp %l[t_no]\n" | ||||
| 		asm_volatile_goto("1: jmp %l[t_no]\n" | ||||
| 			 "2:\n" | ||||
| 			 ".section .altinstructions,\"a\"\n" | ||||
| 			 " .long 1b - .\n" | ||||
| @ -453,7 +453,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit) | ||||
|  * have. Thus, we force the jump to the widest, 4-byte, signed relative | ||||
|  * offset even though the last would often fit in less bytes. | ||||
|  */ | ||||
| 		asm goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n" | ||||
| 		asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n" | ||||
| 			 "2:\n" | ||||
| 			 ".section .altinstructions,\"a\"\n" | ||||
| 			 " .long 1b - .\n"		/* src offset */ | ||||
|  | ||||
| @ -18,7 +18,7 @@ | ||||
| 
 | ||||
| static __always_inline bool arch_static_branch(struct static_key *key) | ||||
| { | ||||
| 	asm goto("1:" | ||||
| 	asm_volatile_goto("1:" | ||||
| 		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t" | ||||
| 		".pushsection __jump_table,  \"aw\" \n\t" | ||||
| 		_ASM_ALIGN "\n\t" | ||||
|  | ||||
| @ -20,7 +20,7 @@ | ||||
| static inline void __mutex_fastpath_lock(atomic_t *v, | ||||
| 					 void (*fail_fn)(atomic_t *)) | ||||
| { | ||||
| 	asm volatile goto(LOCK_PREFIX "   decl %0\n" | ||||
| 	asm_volatile_goto(LOCK_PREFIX "   decl %0\n" | ||||
| 			  "   jns %l[exit]\n" | ||||
| 			  : : "m" (v->counter) | ||||
| 			  : "memory", "cc" | ||||
| @ -75,7 +75,7 @@ static inline int __mutex_fastpath_lock_retval(atomic_t *count) | ||||
| static inline void __mutex_fastpath_unlock(atomic_t *v, | ||||
| 					   void (*fail_fn)(atomic_t *)) | ||||
| { | ||||
| 	asm volatile goto(LOCK_PREFIX "   incl %0\n" | ||||
| 	asm_volatile_goto(LOCK_PREFIX "   incl %0\n" | ||||
| 			  "   jg %l[exit]\n" | ||||
| 			  : : "m" (v->counter) | ||||
| 			  : "memory", "cc" | ||||
|  | ||||
| @ -1888,10 +1888,7 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) | ||||
| 	userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc; | ||||
| 	userpg->pmc_width = x86_pmu.cntval_bits; | ||||
| 
 | ||||
| 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) | ||||
| 		return; | ||||
| 
 | ||||
| 	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) | ||||
| 	if (!sched_clock_stable) | ||||
| 		return; | ||||
| 
 | ||||
| 	userpg->cap_user_time = 1; | ||||
| @ -1899,10 +1896,8 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) | ||||
| 	userpg->time_shift = CYC2NS_SCALE_FACTOR; | ||||
| 	userpg->time_offset = this_cpu_read(cyc2ns_offset) - now; | ||||
| 
 | ||||
| 	if (sched_clock_stable && !check_tsc_disabled()) { | ||||
| 		userpg->cap_user_time_zero = 1; | ||||
| 		userpg->time_zero = this_cpu_read(cyc2ns_offset); | ||||
| 	} | ||||
| 	userpg->cap_user_time_zero = 1; | ||||
| 	userpg->time_zero = this_cpu_read(cyc2ns_offset); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  | ||||
| @ -3255,25 +3255,29 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) | ||||
| 
 | ||||
| static void ept_load_pdptrs(struct kvm_vcpu *vcpu) | ||||
| { | ||||
| 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu; | ||||
| 
 | ||||
| 	if (!test_bit(VCPU_EXREG_PDPTR, | ||||
| 		      (unsigned long *)&vcpu->arch.regs_dirty)) | ||||
| 		return; | ||||
| 
 | ||||
| 	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { | ||||
| 		vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]); | ||||
| 		vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]); | ||||
| 		vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]); | ||||
| 		vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]); | ||||
| 		vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]); | ||||
| 		vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]); | ||||
| 		vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]); | ||||
| 		vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static void ept_save_pdptrs(struct kvm_vcpu *vcpu) | ||||
| { | ||||
| 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu; | ||||
| 
 | ||||
| 	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { | ||||
| 		vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0); | ||||
| 		vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1); | ||||
| 		vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2); | ||||
| 		vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3); | ||||
| 		mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0); | ||||
| 		mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1); | ||||
| 		mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2); | ||||
| 		mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3); | ||||
| 	} | ||||
| 
 | ||||
| 	__set_bit(VCPU_EXREG_PDPTR, | ||||
| @ -7777,10 +7781,6 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) | ||||
| 		vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); | ||||
| 		vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); | ||||
| 		vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); | ||||
| 		__clear_bit(VCPU_EXREG_PDPTR, | ||||
| 				(unsigned long *)&vcpu->arch.regs_avail); | ||||
| 		__clear_bit(VCPU_EXREG_PDPTR, | ||||
| 				(unsigned long *)&vcpu->arch.regs_dirty); | ||||
| 	} | ||||
| 
 | ||||
| 	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp); | ||||
|  | ||||
| @ -640,7 +640,7 @@ struct timer_rand_state { | ||||
|  */ | ||||
| void add_device_randomness(const void *buf, unsigned int size) | ||||
| { | ||||
| 	unsigned long time = get_cycles() ^ jiffies; | ||||
| 	unsigned long time = random_get_entropy() ^ jiffies; | ||||
| 
 | ||||
| 	mix_pool_bytes(&input_pool, buf, size, NULL); | ||||
| 	mix_pool_bytes(&input_pool, &time, sizeof(time), NULL); | ||||
| @ -677,7 +677,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) | ||||
| 		goto out; | ||||
| 
 | ||||
| 	sample.jiffies = jiffies; | ||||
| 	sample.cycles = get_cycles(); | ||||
| 	sample.cycles = random_get_entropy(); | ||||
| 	sample.num = num; | ||||
| 	mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL); | ||||
| 
 | ||||
| @ -744,7 +744,7 @@ void add_interrupt_randomness(int irq, int irq_flags) | ||||
| 	struct fast_pool	*fast_pool = &__get_cpu_var(irq_randomness); | ||||
| 	struct pt_regs		*regs = get_irq_regs(); | ||||
| 	unsigned long		now = jiffies; | ||||
| 	__u32			input[4], cycles = get_cycles(); | ||||
| 	__u32			input[4], cycles = random_get_entropy(); | ||||
| 
 | ||||
| 	input[0] = cycles ^ jiffies; | ||||
| 	input[1] = irq; | ||||
| @ -1459,12 +1459,11 @@ struct ctl_table random_table[] = { | ||||
| 
 | ||||
| static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; | ||||
| 
 | ||||
| static int __init random_int_secret_init(void) | ||||
| int random_int_secret_init(void) | ||||
| { | ||||
| 	get_random_bytes(random_int_secret, sizeof(random_int_secret)); | ||||
| 	return 0; | ||||
| } | ||||
| late_initcall(random_int_secret_init); | ||||
| 
 | ||||
| /*
 | ||||
|  * Get a random word for internal kernel use only. Similar to urandom but | ||||
| @ -1483,7 +1482,7 @@ unsigned int get_random_int(void) | ||||
| 
 | ||||
| 	hash = get_cpu_var(get_random_int_hash); | ||||
| 
 | ||||
| 	hash[0] += current->pid + jiffies + get_cycles(); | ||||
| 	hash[0] += current->pid + jiffies + random_get_entropy(); | ||||
| 	md5_transform(hash, random_int_secret); | ||||
| 	ret = hash[0]; | ||||
| 	put_cpu_var(get_random_int_hash); | ||||
|  | ||||
| @ -241,6 +241,7 @@ config HID_HOLTEK | ||||
| 	  - Sharkoon Drakonia / Perixx MX-2000 gaming mice | ||||
| 	  - Tracer Sniper TRM-503 / NOVA Gaming Slider X200 / | ||||
| 	    Zalman ZM-GM1 | ||||
| 	  - SHARKOON DarkGlider Gaming mouse | ||||
| 
 | ||||
| config HOLTEK_FF | ||||
| 	bool "Holtek On Line Grip force feedback support" | ||||
|  | ||||
| @ -1715,6 +1715,7 @@ static const struct hid_device_id hid_have_special_driver[] = { | ||||
| 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) }, | ||||
| 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, | ||||
| 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, | ||||
| 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, | ||||
| 	{ HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) }, | ||||
| 	{ HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, | ||||
| 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, | ||||
|  | ||||
| @ -27,6 +27,7 @@ | ||||
|  * - USB ID 04d9:a067, sold as Sharkoon Drakonia and Perixx MX-2000 | ||||
|  * - USB ID 04d9:a04a, sold as Tracer Sniper TRM-503, NOVA Gaming Slider X200 | ||||
|  *   and Zalman ZM-GM1 | ||||
|  * - USB ID 04d9:a081, sold as SHARKOON DarkGlider Gaming mouse | ||||
|  */ | ||||
| 
 | ||||
| static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, | ||||
| @ -46,6 +47,7 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, | ||||
| 			} | ||||
| 			break; | ||||
| 		case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A: | ||||
| 		case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081: | ||||
| 			if (*rsize >= 113 && rdesc[106] == 0xff && rdesc[107] == 0x7f | ||||
| 					&& rdesc[111] == 0xff && rdesc[112] == 0x7f) { | ||||
| 				hid_info(hdev, "Fixing up report descriptor\n"); | ||||
| @ -63,6 +65,8 @@ static const struct hid_device_id holtek_mouse_devices[] = { | ||||
| 			USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, | ||||
| 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, | ||||
| 			USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, | ||||
| 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, | ||||
| 			USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, | ||||
| 	{ } | ||||
| }; | ||||
| MODULE_DEVICE_TABLE(hid, holtek_mouse_devices); | ||||
|  | ||||
| @ -450,6 +450,7 @@ | ||||
| #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD	0xa055 | ||||
| #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067	0xa067 | ||||
| #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A	0xa04a | ||||
| #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081	0xa081 | ||||
| 
 | ||||
| #define USB_VENDOR_ID_IMATION		0x0718 | ||||
| #define USB_DEVICE_ID_DISC_STAKKA	0xd000 | ||||
|  | ||||
| @ -382,7 +382,7 @@ static ssize_t kone_sysfs_write_profilex(struct file *fp, | ||||
| } | ||||
| #define PROFILE_ATTR(number)					\ | ||||
| static struct bin_attribute bin_attr_profile##number = {	\ | ||||
| 	.attr = { .name = "profile##number", .mode = 0660 },	\ | ||||
| 	.attr = { .name = "profile" #number, .mode = 0660 },	\ | ||||
| 	.size = sizeof(struct kone_profile),			\ | ||||
| 	.read = kone_sysfs_read_profilex,			\ | ||||
| 	.write = kone_sysfs_write_profilex,			\ | ||||
|  | ||||
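The bug fixed here, and in the identical Roccat hunks below, is a preprocessor subtlety: "##" pastes tokens and is inert inside a string literal, so .name = "profile##number" produced a sysfs file literally called "profile##number". The working form stringizes the argument with "#" and relies on adjacent string literals concatenating. A minimal stand-alone illustration (not from the driver):

	#include <stdio.h>

	#define NAME_WRONG(n)	"profile##n"	/* "##" inside a literal is never expanded */
	#define NAME_RIGHT(n)	"profile" #n	/* #n stringizes; literals concatenate */

	int main(void)
	{
		printf("%s\n", NAME_WRONG(3));	/* prints: profile##n */
		printf("%s\n", NAME_RIGHT(3));	/* prints: profile3 */
		return 0;
	}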
| @ -229,13 +229,13 @@ static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp, | ||||
| 
 | ||||
| #define PROFILE_ATTR(number)						\ | ||||
| static struct bin_attribute bin_attr_profile##number##_settings = {	\ | ||||
| 	.attr = { .name = "profile##number##_settings", .mode = 0440 },	\ | ||||
| 	.attr = { .name = "profile" #number "_settings", .mode = 0440 },	\ | ||||
| 	.size = KONEPLUS_SIZE_PROFILE_SETTINGS,				\ | ||||
| 	.read = koneplus_sysfs_read_profilex_settings,			\ | ||||
| 	.private = &profile_numbers[number-1],				\ | ||||
| };									\ | ||||
| static struct bin_attribute bin_attr_profile##number##_buttons = {	\ | ||||
| 	.attr = { .name = "profile##number##_buttons", .mode = 0440 },	\ | ||||
| 	.attr = { .name = "profile" #number "_buttons", .mode = 0440 },	\ | ||||
| 	.size = KONEPLUS_SIZE_PROFILE_BUTTONS,				\ | ||||
| 	.read = koneplus_sysfs_read_profilex_buttons,			\ | ||||
| 	.private = &profile_numbers[number-1],				\ | ||||
|  | ||||
| @ -257,13 +257,13 @@ static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp, | ||||
| 
 | ||||
| #define PROFILE_ATTR(number)						\ | ||||
| static struct bin_attribute bin_attr_profile##number##_settings = {	\ | ||||
| 	.attr = { .name = "profile##number##_settings", .mode = 0440 },	\ | ||||
| 	.attr = { .name = "profile" #number "_settings", .mode = 0440 },	\ | ||||
| 	.size = KOVAPLUS_SIZE_PROFILE_SETTINGS,				\ | ||||
| 	.read = kovaplus_sysfs_read_profilex_settings,			\ | ||||
| 	.private = &profile_numbers[number-1],				\ | ||||
| };									\ | ||||
| static struct bin_attribute bin_attr_profile##number##_buttons = {	\ | ||||
| 	.attr = { .name = "profile##number##_buttons", .mode = 0440 },	\ | ||||
| 	.attr = { .name = "profile" #number "_buttons", .mode = 0440 },	\ | ||||
| 	.size = KOVAPLUS_SIZE_PROFILE_BUTTONS,				\ | ||||
| 	.read = kovaplus_sysfs_read_profilex_buttons,			\ | ||||
| 	.private = &profile_numbers[number-1],				\ | ||||
|  | ||||
| @ -225,13 +225,13 @@ static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp, | ||||
| 
 | ||||
| #define PROFILE_ATTR(number)						\ | ||||
| static struct bin_attribute bin_attr_profile##number##_settings = {	\ | ||||
| 	.attr = { .name = "profile##number##_settings", .mode = 0440 },	\ | ||||
| 	.attr = { .name = "profile" #number "_settings", .mode = 0440 },	\ | ||||
| 	.size = PYRA_SIZE_PROFILE_SETTINGS,				\ | ||||
| 	.read = pyra_sysfs_read_profilex_settings,			\ | ||||
| 	.private = &profile_numbers[number-1],				\ | ||||
| };									\ | ||||
| static struct bin_attribute bin_attr_profile##number##_buttons = {	\ | ||||
| 	.attr = { .name = "profile##number##_buttons", .mode = 0440 },	\ | ||||
| 	.attr = { .name = "profile" #number "_buttons", .mode = 0440 },	\ | ||||
| 	.size = PYRA_SIZE_PROFILE_BUTTONS,				\ | ||||
| 	.read = pyra_sysfs_read_profilex_buttons,			\ | ||||
| 	.private = &profile_numbers[number-1],				\ | ||||
|  | ||||
| @ -119,12 +119,22 @@ static const struct wiimod_ops wiimod_keys = { | ||||
|  * the rumble motor, this flag shouldn't be set. | ||||
|  */ | ||||
| 
 | ||||
| /* used by wiimod_rumble and wiipro_rumble */ | ||||
| static void wiimod_rumble_worker(struct work_struct *work) | ||||
| { | ||||
| 	struct wiimote_data *wdata = container_of(work, struct wiimote_data, | ||||
| 						  rumble_worker); | ||||
| 
 | ||||
| 	spin_lock_irq(&wdata->state.lock); | ||||
| 	wiiproto_req_rumble(wdata, wdata->state.cache_rumble); | ||||
| 	spin_unlock_irq(&wdata->state.lock); | ||||
| } | ||||
| 
 | ||||
| static int wiimod_rumble_play(struct input_dev *dev, void *data, | ||||
| 			      struct ff_effect *eff) | ||||
| { | ||||
| 	struct wiimote_data *wdata = input_get_drvdata(dev); | ||||
| 	__u8 value; | ||||
| 	unsigned long flags; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * The wiimote supports only a single rumble motor so if any magnitude | ||||
| @ -137,9 +147,10 @@ static int wiimod_rumble_play(struct input_dev *dev, void *data, | ||||
| 	else | ||||
| 		value = 0; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&wdata->state.lock, flags); | ||||
| 	wiiproto_req_rumble(wdata, value); | ||||
| 	spin_unlock_irqrestore(&wdata->state.lock, flags); | ||||
| 	/* Locking state.lock here might deadlock with input_event() calls.
 | ||||
| 	 * schedule_work acts as barrier. Merging multiple changes is fine. */ | ||||
| 	wdata->state.cache_rumble = value; | ||||
| 	schedule_work(&wdata->rumble_worker); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| @ -147,6 +158,8 @@ static int wiimod_rumble_play(struct input_dev *dev, void *data, | ||||
| static int wiimod_rumble_probe(const struct wiimod_ops *ops, | ||||
| 			       struct wiimote_data *wdata) | ||||
| { | ||||
| 	INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker); | ||||
| 
 | ||||
| 	set_bit(FF_RUMBLE, wdata->input->ffbit); | ||||
| 	if (input_ff_create_memless(wdata->input, NULL, wiimod_rumble_play)) | ||||
| 		return -ENOMEM; | ||||
| @ -159,6 +172,8 @@ static void wiimod_rumble_remove(const struct wiimod_ops *ops, | ||||
| { | ||||
| 	unsigned long flags; | ||||
| 
 | ||||
| 	cancel_work_sync(&wdata->rumble_worker); | ||||
| 
 | ||||
| 	spin_lock_irqsave(&wdata->state.lock, flags); | ||||
| 	wiiproto_req_rumble(wdata, 0); | ||||
| 	spin_unlock_irqrestore(&wdata->state.lock, flags); | ||||
| @ -1731,7 +1746,6 @@ static int wiimod_pro_play(struct input_dev *dev, void *data, | ||||
| { | ||||
| 	struct wiimote_data *wdata = input_get_drvdata(dev); | ||||
| 	__u8 value; | ||||
| 	unsigned long flags; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * The wiimote supports only a single rumble motor so if any magnitude | ||||
| @ -1744,9 +1758,10 @@ static int wiimod_pro_play(struct input_dev *dev, void *data, | ||||
| 	else | ||||
| 		value = 0; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&wdata->state.lock, flags); | ||||
| 	wiiproto_req_rumble(wdata, value); | ||||
| 	spin_unlock_irqrestore(&wdata->state.lock, flags); | ||||
| 	/* Locking state.lock here might deadlock with input_event() calls.
 | ||||
| 	 * schedule_work acts as barrier. Merging multiple changes is fine. */ | ||||
| 	wdata->state.cache_rumble = value; | ||||
| 	schedule_work(&wdata->rumble_worker); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| @ -1756,6 +1771,8 @@ static int wiimod_pro_probe(const struct wiimod_ops *ops, | ||||
| { | ||||
| 	int ret, i; | ||||
| 
 | ||||
| 	INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker); | ||||
| 
 | ||||
| 	wdata->extension.input = input_allocate_device(); | ||||
| 	if (!wdata->extension.input) | ||||
| 		return -ENOMEM; | ||||
| @ -1817,12 +1834,13 @@ static void wiimod_pro_remove(const struct wiimod_ops *ops, | ||||
| 	if (!wdata->extension.input) | ||||
| 		return; | ||||
| 
 | ||||
| 	input_unregister_device(wdata->extension.input); | ||||
| 	wdata->extension.input = NULL; | ||||
| 	cancel_work_sync(&wdata->rumble_worker); | ||||
| 
 | ||||
| 	spin_lock_irqsave(&wdata->state.lock, flags); | ||||
| 	wiiproto_req_rumble(wdata, 0); | ||||
| 	spin_unlock_irqrestore(&wdata->state.lock, flags); | ||||
| 
 | ||||
| 	input_unregister_device(wdata->extension.input); | ||||
| 	wdata->extension.input = NULL; | ||||
| } | ||||
| 
 | ||||
| static const struct wiimod_ops wiimod_pro = { | ||||
|  | ||||
| @ -133,13 +133,15 @@ struct wiimote_state { | ||||
| 	__u8 *cmd_read_buf; | ||||
| 	__u8 cmd_read_size; | ||||
| 
 | ||||
| 	/* calibration data */ | ||||
| 	/* calibration/cache data */ | ||||
| 	__u16 calib_bboard[4][3]; | ||||
| 	__u8 cache_rumble; | ||||
| }; | ||||
| 
 | ||||
| struct wiimote_data { | ||||
| 	struct hid_device *hdev; | ||||
| 	struct input_dev *input; | ||||
| 	struct work_struct rumble_worker; | ||||
| 	struct led_classdev *leds[4]; | ||||
| 	struct input_dev *accel; | ||||
| 	struct input_dev *ir; | ||||
|  | ||||
| @ -308,18 +308,25 @@ static int hidraw_fasync(int fd, struct file *file, int on) | ||||
| static void drop_ref(struct hidraw *hidraw, int exists_bit) | ||||
| { | ||||
| 	if (exists_bit) { | ||||
| 		hid_hw_close(hidraw->hid); | ||||
| 		hidraw->exist = 0; | ||||
| 		if (hidraw->open) | ||||
| 		if (hidraw->open) { | ||||
| 			hid_hw_close(hidraw->hid); | ||||
| 			wake_up_interruptible(&hidraw->wait); | ||||
| 		} | ||||
| 	} else { | ||||
| 		--hidraw->open; | ||||
| 	} | ||||
| 
 | ||||
| 	if (!hidraw->open && !hidraw->exist) { | ||||
| 		device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor)); | ||||
| 		hidraw_table[hidraw->minor] = NULL; | ||||
| 		kfree(hidraw); | ||||
| 	if (!hidraw->open) { | ||||
| 		if (!hidraw->exist) { | ||||
| 			device_destroy(hidraw_class, | ||||
| 					MKDEV(hidraw_major, hidraw->minor)); | ||||
| 			hidraw_table[hidraw->minor] = NULL; | ||||
| 			kfree(hidraw); | ||||
| 		} else { | ||||
| 			/* close device for last reader */ | ||||
| 			hid_hw_power(hidraw->hid, PM_HINT_NORMAL); | ||||
| 			hid_hw_close(hidraw->hid); | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -615,7 +615,7 @@ static const struct file_operations uhid_fops = { | ||||
| 
 | ||||
| static struct miscdevice uhid_misc = { | ||||
| 	.fops		= &uhid_fops, | ||||
| 	.minor		= MISC_DYNAMIC_MINOR, | ||||
| 	.minor		= UHID_MINOR, | ||||
| 	.name		= UHID_NAME, | ||||
| }; | ||||
| 
 | ||||
| @ -634,4 +634,5 @@ module_exit(uhid_exit); | ||||
| MODULE_LICENSE("GPL"); | ||||
| MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>"); | ||||
| MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem"); | ||||
| MODULE_ALIAS_MISCDEV(UHID_MINOR); | ||||
| MODULE_ALIAS("devname:" UHID_NAME); | ||||
|  | ||||
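Switching from MISC_DYNAMIC_MINOR to the fixed UHID_MINOR (reserved in miscdevice.h further below) is what makes on-demand module loading possible: MODULE_ALIAS_MISCDEV() expands to a "char-major-10-<minor>" alias, so opening the device node can trigger modprobe. A generic sketch of the pattern with a hypothetical EXAMPLE_MINOR (illustrative only, not the uhid code):

	#include <linux/module.h>
	#include <linux/fs.h>
	#include <linux/miscdevice.h>

	#define EXAMPLE_MINOR	240	/* hypothetical reserved minor */

	static const struct file_operations example_fops = {
		.owner	= THIS_MODULE,
	};

	static struct miscdevice example_misc = {
		.minor	= EXAMPLE_MINOR,	/* fixed, not MISC_DYNAMIC_MINOR */
		.name	= "example",
		.fops	= &example_fops,
	};

	static int __init example_init(void)
	{
		return misc_register(&example_misc);
	}
	module_init(example_init);

	static void __exit example_exit(void)
	{
		misc_deregister(&example_misc);
	}
	module_exit(example_exit);

	MODULE_ALIAS_MISCDEV(EXAMPLE_MINOR);	/* enables autoload by minor */
	MODULE_ALIAS("devname:example");	/* lets devtmpfs/udev create the node */
	MODULE_LICENSE("GPL");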
| @ -230,6 +230,7 @@ static int send_argument(const char *key) | ||||
| 
 | ||||
| static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) | ||||
| { | ||||
| 	u8 status, data = 0; | ||||
| 	int i; | ||||
| 
 | ||||
| 	if (send_command(cmd) || send_argument(key)) { | ||||
| @ -237,6 +238,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) | ||||
| 		return -EIO; | ||||
| 	} | ||||
| 
 | ||||
| 	/* This has no effect on newer (2012) SMCs */ | ||||
| 	if (send_byte(len, APPLESMC_DATA_PORT)) { | ||||
| 		pr_warn("%.4s: read len fail\n", key); | ||||
| 		return -EIO; | ||||
| @ -250,6 +252,17 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) | ||||
| 		buffer[i] = inb(APPLESMC_DATA_PORT); | ||||
| 	} | ||||
| 
 | ||||
| 	/* Read the data port until bit0 is cleared */ | ||||
| 	for (i = 0; i < 16; i++) { | ||||
| 		udelay(APPLESMC_MIN_WAIT); | ||||
| 		status = inb(APPLESMC_CMD_PORT); | ||||
| 		if (!(status & 0x01)) | ||||
| 			break; | ||||
| 		data = inb(APPLESMC_DATA_PORT); | ||||
| 	} | ||||
| 	if (i) | ||||
| 		pr_warn("flushed %d bytes, last value is: %d\n", i, data); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -996,6 +996,7 @@ static void request_write(struct cached_dev *dc, struct search *s) | ||||
| 		closure_bio_submit(bio, cl, s->d); | ||||
| 	} else { | ||||
| 		bch_writeback_add(dc); | ||||
| 		s->op.cache_bio = bio; | ||||
| 
 | ||||
| 		if (bio->bi_rw & REQ_FLUSH) { | ||||
| 			/* Also need to send a flush to the backing device */ | ||||
| @ -1008,8 +1009,6 @@ static void request_write(struct cached_dev *dc, struct search *s) | ||||
| 			flush->bi_private = cl; | ||||
| 
 | ||||
| 			closure_bio_submit(flush, cl, s->d); | ||||
| 		} else { | ||||
| 			s->op.cache_bio = bio; | ||||
| 		} | ||||
| 	} | ||||
| out: | ||||
|  | ||||
| @ -168,12 +168,25 @@ static inline int write_disable(struct m25p *flash) | ||||
|  */ | ||||
| static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable) | ||||
| { | ||||
| 	int status; | ||||
| 	bool need_wren = false; | ||||
| 
 | ||||
| 	switch (JEDEC_MFR(jedec_id)) { | ||||
| 	case CFI_MFR_MACRONIX: | ||||
| 	case CFI_MFR_ST: /* Micron, actually */ | ||||
| 		/* Some Micron need WREN command; all will accept it */ | ||||
| 		need_wren = true; | ||||
| 	case CFI_MFR_MACRONIX: | ||||
| 	case 0xEF /* winbond */: | ||||
| 		if (need_wren) | ||||
| 			write_enable(flash); | ||||
| 
 | ||||
| 		flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B; | ||||
| 		return spi_write(flash->spi, flash->command, 1); | ||||
| 		status = spi_write(flash->spi, flash->command, 1); | ||||
| 
 | ||||
| 		if (need_wren) | ||||
| 			write_disable(flash); | ||||
| 
 | ||||
| 		return status; | ||||
| 	default: | ||||
| 		/* Spansion style */ | ||||
| 		flash->command[0] = OPCODE_BRWR; | ||||
|  | ||||
| @ -2869,10 +2869,8 @@ static int nand_flash_detect_ext_param_page(struct mtd_info *mtd, | ||||
| 
 | ||||
| 	len = le16_to_cpu(p->ext_param_page_length) * 16; | ||||
| 	ep = kmalloc(len, GFP_KERNEL); | ||||
| 	if (!ep) { | ||||
| 		ret = -ENOMEM; | ||||
| 		goto ext_out; | ||||
| 	} | ||||
| 	if (!ep) | ||||
| 		return -ENOMEM; | ||||
| 
 | ||||
| 	/* Send our own NAND_CMD_PARAM. */ | ||||
| 	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1); | ||||
| @ -2920,7 +2918,7 @@ static int nand_flash_detect_ext_param_page(struct mtd_info *mtd, | ||||
| 	} | ||||
| 
 | ||||
| 	pr_info("ONFI extended param page detected.\n"); | ||||
| 	return 0; | ||||
| 	ret = 0; | ||||
| 
 | ||||
| ext_out: | ||||
| 	kfree(ep); | ||||
|  | ||||
| @ -145,9 +145,11 @@ bool __init sclp_has_linemode(void) | ||||
| 
 | ||||
| 	if (sccb->header.response_code != 0x20) | ||||
| 		return 0; | ||||
| 	if (sccb->sclp_send_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)) | ||||
| 		return 1; | ||||
| 	return 0; | ||||
| 	if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK))) | ||||
| 		return 0; | ||||
| 	if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))) | ||||
| 		return 0; | ||||
| 	return 1; | ||||
| } | ||||
| 
 | ||||
| bool __init sclp_has_vt220(void) | ||||
|  | ||||
| @ -810,7 +810,7 @@ static void tty3270_resize_work(struct work_struct *work) | ||||
| 	struct winsize ws; | ||||
| 
 | ||||
| 	screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols); | ||||
| 	if (!screen) | ||||
| 	if (IS_ERR(screen)) | ||||
| 		return; | ||||
| 	/* Switch to new output size */ | ||||
| 	spin_lock_bh(&tp->view.lock); | ||||
|  | ||||
| @ -1583,7 +1583,7 @@ static int atmel_spi_probe(struct platform_device *pdev) | ||||
| 	/* Initialize the hardware */ | ||||
| 	ret = clk_prepare_enable(clk); | ||||
| 	if (ret) | ||||
| 		goto out_unmap_regs; | ||||
| 		goto out_free_irq; | ||||
| 	spi_writel(as, CR, SPI_BIT(SWRST)); | ||||
| 	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ | ||||
| 	if (as->caps.has_wdrbt) { | ||||
| @ -1614,6 +1614,7 @@ out_free_dma: | ||||
| 	spi_writel(as, CR, SPI_BIT(SWRST)); | ||||
| 	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ | ||||
| 	clk_disable_unprepare(clk); | ||||
| out_free_irq: | ||||
| 	free_irq(irq, master); | ||||
| out_unmap_regs: | ||||
| 	iounmap(as->regs); | ||||
|  | ||||
| @ -226,7 +226,6 @@ static int spi_clps711x_probe(struct platform_device *pdev) | ||||
| 			       dev_name(&pdev->dev), hw); | ||||
| 	if (ret) { | ||||
| 		dev_err(&pdev->dev, "Can't request IRQ\n"); | ||||
| 		clk_put(hw->spi_clk); | ||||
| 		goto clk_out; | ||||
| 	} | ||||
| 
 | ||||
| @ -247,7 +246,6 @@ err_out: | ||||
| 			gpio_free(hw->chipselect[i]); | ||||
| 
 | ||||
| 	spi_master_put(master); | ||||
| 	kfree(master); | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| @ -263,7 +261,6 @@ static int spi_clps711x_remove(struct platform_device *pdev) | ||||
| 			gpio_free(hw->chipselect[i]); | ||||
| 
 | ||||
| 	spi_unregister_master(master); | ||||
| 	kfree(master); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
|  | ||||
| @ -476,15 +476,9 @@ static int dspi_probe(struct platform_device *pdev) | ||||
| 	master->bus_num = bus_num; | ||||
| 
 | ||||
| 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||||
| 	if (!res) { | ||||
| 		dev_err(&pdev->dev, "can't get platform resource\n"); | ||||
| 		ret = -EINVAL; | ||||
| 		goto out_master_put; | ||||
| 	} | ||||
| 
 | ||||
| 	dspi->base = devm_ioremap_resource(&pdev->dev, res); | ||||
| 	if (!dspi->base) { | ||||
| 		ret = -EINVAL; | ||||
| 	if (IS_ERR(dspi->base)) { | ||||
| 		ret = PTR_ERR(dspi->base); | ||||
| 		goto out_master_put; | ||||
| 	} | ||||
| 
 | ||||
|  | ||||
| @ -522,8 +522,10 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr, | ||||
| 	psc_num = master->bus_num; | ||||
| 	snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num); | ||||
| 	clk = devm_clk_get(dev, clk_name); | ||||
| 	if (IS_ERR(clk)) | ||||
| 	if (IS_ERR(clk)) { | ||||
| 		ret = PTR_ERR(clk); | ||||
| 		goto free_irq; | ||||
| 	} | ||||
| 	ret = clk_prepare_enable(clk); | ||||
| 	if (ret) | ||||
| 		goto free_irq; | ||||
|  | ||||
| @ -546,8 +546,17 @@ static irqreturn_t ssp_int(int irq, void *dev_id) | ||||
| 	if (pm_runtime_suspended(&drv_data->pdev->dev)) | ||||
| 		return IRQ_NONE; | ||||
| 
 | ||||
| 	sccr1_reg = read_SSCR1(reg); | ||||
| 	/*
 | ||||
| 	 * If the device is not yet in RPM suspended state and we get an | ||||
| 	 * interrupt that is meant for another device, check if status bits | ||||
| 	 * are all set to one. That means that the device is already | ||||
| 	 * powered off. | ||||
| 	 */ | ||||
| 	status = read_SSSR(reg); | ||||
| 	if (status == ~0) | ||||
| 		return IRQ_NONE; | ||||
| 
 | ||||
| 	sccr1_reg = read_SSCR1(reg); | ||||
| 
 | ||||
| 	/* Ignore possible writes if we don't need to write */ | ||||
| 	if (!(sccr1_reg & SSCR1_TIE)) | ||||
|  | ||||
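The status == ~0 test relies on a common MMIO convention: reads from a powered-down (or, on PCI, hot-removed) device return all ones, so an all-ones status word cannot be a genuine interrupt condition and the handler hands the interrupt back with IRQ_NONE. A generic sketch with hypothetical register and type names (illustrative only):

	static irqreturn_t example_isr(int irq, void *dev_id)
	{
		struct example_dev *dev = dev_id;		/* hypothetical driver state */
		u32 status = readl(dev->regs + EXAMPLE_STATUS);	/* hypothetical register */

		if (status == ~0)
			return IRQ_NONE;	/* device is off: the bus reads float high */

		/* ... acknowledge and handle the interrupt ... */
		return IRQ_HANDLED;
	}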
| @ -1428,6 +1428,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) | ||||
| 	       S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN, | ||||
| 	       sdd->regs + S3C64XX_SPI_INT_EN); | ||||
| 
 | ||||
| 	pm_runtime_enable(&pdev->dev); | ||||
| 
 | ||||
| 	if (spi_register_master(master)) { | ||||
| 		dev_err(&pdev->dev, "cannot register SPI master\n"); | ||||
| 		ret = -EBUSY; | ||||
| @ -1440,8 +1442,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) | ||||
| 					mem_res, | ||||
| 					sdd->rx_dma.dmach, sdd->tx_dma.dmach); | ||||
| 
 | ||||
| 	pm_runtime_enable(&pdev->dev); | ||||
| 
 | ||||
| 	return 0; | ||||
| 
 | ||||
| err3: | ||||
|  | ||||
| @ -296,6 +296,8 @@ static int hspi_probe(struct platform_device *pdev) | ||||
| 		goto error1; | ||||
| 	} | ||||
| 
 | ||||
| 	pm_runtime_enable(&pdev->dev); | ||||
| 
 | ||||
| 	master->num_chipselect	= 1; | ||||
| 	master->bus_num		= pdev->id; | ||||
| 	master->setup		= hspi_setup; | ||||
| @ -309,8 +311,6 @@ static int hspi_probe(struct platform_device *pdev) | ||||
| 		goto error1; | ||||
| 	} | ||||
| 
 | ||||
| 	pm_runtime_enable(&pdev->dev); | ||||
| 
 | ||||
| 	return 0; | ||||
| 
 | ||||
|  error1: | ||||
|  | ||||
| @ -802,6 +802,12 @@ static int hpwdt_init_one(struct pci_dev *dev, | ||||
| 		return -ENODEV; | ||||
| 	} | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Ignore all auxiliary iLO devices with the following PCI ID | ||||
| 	 */ | ||||
| 	if (dev->subsystem_device == 0x1979) | ||||
| 		return -ENODEV; | ||||
| 
 | ||||
| 	if (pci_enable_device(dev)) { | ||||
| 		dev_warn(&dev->dev, | ||||
| 			"Not possible to enable PCI Device: 0x%x:0x%x.\n", | ||||
|  | ||||
| @ -65,6 +65,21 @@ | ||||
| #define __visible __attribute__((externally_visible)) | ||||
| #endif | ||||
| 
 | ||||
| /*
 | ||||
|  * GCC 'asm goto' miscompiles certain code sequences: | ||||
|  * | ||||
|  *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
 | ||||
|  * | ||||
|  * Work it around via a compiler barrier quirk suggested by Jakub Jelinek. | ||||
|  * Fixed in GCC 4.8.2 and later versions. | ||||
|  * | ||||
|  * (asm goto is automatically volatile - the naming reflects this.) | ||||
|  */ | ||||
| #if GCC_VERSION <= 40801 | ||||
| # define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0) | ||||
| #else | ||||
| # define asm_volatile_goto(x...)	do { asm goto(x); } while (0) | ||||
| #endif | ||||
| 
 | ||||
| #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP | ||||
| #if GCC_VERSION >= 40400 | ||||
|  | ||||
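All of the arch jump-label, static_cpu_has() and mutex-fastpath hunks in this merge are the mechanical consumers of this macro: each former "asm goto(...)" or "asm volatile goto(...)" site becomes asm_volatile_goto(...), picking up the barrier on affected compilers. A rough stand-alone illustration of the construct (x86, GCC-only; not from the patch):

	#include <stdbool.h>
	#include <stdio.h>

	/* workaround form: the trailing empty asm("") is the barrier quirk */
	#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)

	static inline bool always_true(void)
	{
		asm_volatile_goto("jmp %l[l_yes]" : : : : l_yes);
		return false;		/* never reached; kept for the compiler */
	l_yes:
		return true;
	}

	int main(void)
	{
		printf("%d\n", always_true());	/* prints: 1 */
		return 0;
	}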
| @ -45,6 +45,7 @@ | ||||
| #define MAPPER_CTRL_MINOR	236 | ||||
| #define LOOP_CTRL_MINOR		237 | ||||
| #define VHOST_NET_MINOR		238 | ||||
| #define UHID_MINOR		239 | ||||
| #define MISC_DYNAMIC_MINOR	255 | ||||
| 
 | ||||
| struct device; | ||||
|  | ||||
| @ -294,9 +294,31 @@ struct ring_buffer; | ||||
|  */ | ||||
| struct perf_event { | ||||
| #ifdef CONFIG_PERF_EVENTS | ||||
| 	struct list_head		group_entry; | ||||
| 	/*
 | ||||
| 	 * entry onto perf_event_context::event_list; | ||||
| 	 *   modifications require ctx->lock | ||||
| 	 *   RCU safe iterations. | ||||
| 	 */ | ||||
| 	struct list_head		event_entry; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * XXX: group_entry and sibling_list should be mutually exclusive; | ||||
| 	 * either you're a sibling in a group, or you're the group leader. | ||||
| 	 * Rework the code to always use the same list element. | ||||
| 	 * | ||||
| 	 * Locked for modification by both ctx->mutex and ctx->lock; holding | ||||
| 	 * either suffices for read. | ||||
| 	 */ | ||||
| 	struct list_head		group_entry; | ||||
| 	struct list_head		sibling_list; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * We need storage to track the entries in perf_pmu_migrate_context; we | ||||
| 	 * cannot use the event_entry because of RCU and we want to keep the | ||||
| 	 * group intact, which rules out using the other two entries. | ||||
| 	 */ | ||||
| 	struct list_head		migrate_entry; | ||||
| 
 | ||||
| 	struct hlist_node		hlist_entry; | ||||
| 	int				nr_siblings; | ||||
| 	int				group_flags; | ||||
|  | ||||
| @ -17,6 +17,7 @@ extern void add_interrupt_randomness(int irq, int irq_flags); | ||||
| extern void get_random_bytes(void *buf, int nbytes); | ||||
| extern void get_random_bytes_arch(void *buf, int nbytes); | ||||
| void generate_random_uuid(unsigned char uuid_out[16]); | ||||
| extern int random_int_secret_init(void); | ||||
| 
 | ||||
| #ifndef MODULE | ||||
| extern const struct file_operations random_fops, urandom_fops; | ||||
|  | ||||
| @ -64,6 +64,20 @@ | ||||
| 
 | ||||
| #include <asm/timex.h> | ||||
| 
 | ||||
| #ifndef random_get_entropy | ||||
| /*
 | ||||
|  * The random_get_entropy() function is used by the /dev/random driver | ||||
|  * in order to extract entropy via the relative unpredictability of | ||||
|  * when an interrupt takes place versus a high-speed, fine-grained | ||||
|  * timing source or cycle counter.  Since it is called on every | ||||
|  * single interrupt, it must have a very low cost/overhead. | ||||
|  * | ||||
|  * By default we use get_cycles() for this purpose, but individual | ||||
|  * architectures may override this in their asm/timex.h header file. | ||||
|  */ | ||||
| #define random_get_entropy()	get_cycles() | ||||
| #endif | ||||
| 
 | ||||
| /*
 | ||||
|  * SHIFT_PLL is used as a dampening factor to define how much we | ||||
|  * adjust the frequency correction for a given offset in PLL mode. | ||||
|  | ||||
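An architecture with a cheaper or finer-grained source than get_cycles() can supply its own definition in its asm/timex.h, which the #ifndef above then respects. A hypothetical override sketch (read_raw_ccnt() is invented and the asm is a placeholder; the only contract is that the value is very cheap to read and varies between interrupts):

/* In a hypothetical arch/<arch>/include/asm/timex.h: */
static inline unsigned long read_raw_ccnt(void)
{
	unsigned long ccnt;

	/* Read a raw CPU cycle counter register (placeholder asm). */
	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (ccnt));
	return ccnt;
}

#define random_get_entropy()	read_raw_ccnt()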
| @ -76,6 +76,7 @@ | ||||
| #include <linux/elevator.h> | ||||
| #include <linux/sched_clock.h> | ||||
| #include <linux/context_tracking.h> | ||||
| #include <linux/random.h> | ||||
| 
 | ||||
| #include <asm/io.h> | ||||
| #include <asm/bugs.h> | ||||
| @ -780,6 +781,7 @@ static void __init do_basic_setup(void) | ||||
| 	do_ctors(); | ||||
| 	usermodehelper_enable(); | ||||
| 	do_initcalls(); | ||||
| 	random_int_secret_init(); | ||||
| } | ||||
| 
 | ||||
| static void __init do_pre_smp_initcalls(void) | ||||
|  | ||||
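The hook runs exactly once, right after do_initcalls(), so every initcall has had the chance to feed the entropy pools first. A hedged sketch of what such a late init plausibly does (the real body lives in drivers/char/random.c; the names and array size here are illustrative):

#include <linux/init.h>
#include <linux/random.h>

/* Illustrative per-boot secret; the real one backs get_random_int(). */
static u32 example_int_secret[4];

int __init example_int_secret_init(void)
{
	/* Seed once, after all initcalls have credited entropy. */
	get_random_bytes(example_int_secret, sizeof(example_int_secret));
	return 0;
}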
| @ -7234,15 +7234,15 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) | ||||
| 		perf_remove_from_context(event); | ||||
| 		unaccount_event_cpu(event, src_cpu); | ||||
| 		put_ctx(src_ctx); | ||||
| 		list_add(&event->event_entry, &events); | ||||
| 		list_add(&event->migrate_entry, &events); | ||||
| 	} | ||||
| 	mutex_unlock(&src_ctx->mutex); | ||||
| 
 | ||||
| 	synchronize_rcu(); | ||||
| 
 | ||||
| 	mutex_lock(&dst_ctx->mutex); | ||||
| 	list_for_each_entry_safe(event, tmp, &events, event_entry) { | ||||
| 		list_del(&event->event_entry); | ||||
| 	list_for_each_entry_safe(event, tmp, &events, migrate_entry) { | ||||
| 		list_del(&event->migrate_entry); | ||||
| 		if (event->state >= PERF_EVENT_STATE_OFF) | ||||
| 			event->state = PERF_EVENT_STATE_INACTIVE; | ||||
| 		account_event_cpu(event, dst_cpu); | ||||
|  | ||||
| @ -592,7 +592,7 @@ static void kobject_release(struct kref *kref) | ||||
| { | ||||
| 	struct kobject *kobj = container_of(kref, struct kobject, kref); | ||||
| #ifdef CONFIG_DEBUG_KOBJECT_RELEASE | ||||
| 	pr_debug("kobject: '%s' (%p): %s, parent %p (delayed)\n", | ||||
| 	pr_info("kobject: '%s' (%p): %s, parent %p (delayed)\n", | ||||
| 		 kobject_name(kobj), kobj, __func__, kobj->parent); | ||||
| 	INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup); | ||||
| 	schedule_delayed_work(&kobj->release, HZ); | ||||
|  | ||||
| @ -770,6 +770,7 @@ check: $(OUTPUT)common-cmds.h | ||||
| install-bin: all | ||||
| 	$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' | ||||
| 	$(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)' | ||||
| 	$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' | ||||
| 	$(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' | ||||
| ifndef NO_LIBPERL | ||||
| 	$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' | ||||
|  | ||||
| @ -457,6 +457,7 @@ static int __run_perf_stat(int argc, const char **argv) | ||||
| 			perror("failed to prepare workload"); | ||||
| 			return -1; | ||||
| 		} | ||||
| 		child_pid = evsel_list->workload.pid; | ||||
| 	} | ||||
| 
 | ||||
| 	if (group) | ||||
|  | ||||
| @ -219,7 +219,7 @@ define SOURCE_LIBAUDIT | ||||
| 
 | ||||
| int main(void) | ||||
| { | ||||
| 	printf(\"error message: %s\n\", audit_errno_to_name(0)); | ||||
| 	printf(\"error message: %s\", audit_errno_to_name(0)); | ||||
| 	return audit_open(); | ||||
| } | ||||
| endef | ||||
|  | ||||
| @ -426,7 +426,7 @@ static int __die_search_func_cb(Dwarf_Die *fn_die, void *data) | ||||
|  * @die_mem: a buffer for result DIE | ||||
|  * | ||||
|  * Search a non-inlined function DIE which includes @addr. Stores the | ||||
|  * DIE to @die_mem and returns it if found. Returns NULl if failed. | ||||
|  * DIE to @die_mem and returns it if found. Returns NULL if failed. | ||||
|  */ | ||||
| Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr, | ||||
| 				    Dwarf_Die *die_mem) | ||||
| @ -454,15 +454,32 @@ static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data) | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * die_find_inlinefunc - Search an inlined function at given address | ||||
|  * @cu_die: a CU DIE which including @addr | ||||
|  * die_find_top_inlinefunc - Search the top inlined function at given address | ||||
|  * @sp_die: a subprogram DIE which includes @addr | ||||
|  * @addr: target address | ||||
|  * @die_mem: a buffer for result DIE | ||||
|  * | ||||
|  * Search an inlined function DIE which includes @addr. Stores the | ||||
|  * DIE to @die_mem and returns it if found. Returns NULl if failed. | ||||
|  * DIE to @die_mem and returns it if found. Returns NULL if failed. | ||||
|  * Even if several inlined functions are expanded recursively, this | ||||
|  * doesn't trace them down, and returns the topmost one. | ||||
|  */ | ||||
| Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, | ||||
| 				   Dwarf_Die *die_mem) | ||||
| { | ||||
| 	return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * die_find_inlinefunc - Search an inlined function at given address | ||||
|  * @sp_die: a subprogram DIE which includes @addr | ||||
|  * @addr: target address | ||||
|  * @die_mem: a buffer for result DIE | ||||
|  * | ||||
|  * Search an inlined function DIE which includes @addr. Stores the | ||||
|  * DIE to @die_mem and returns it if found. Returns NULL if failed. | ||||
|  * If several inlined functions are expanded recursively, this traces | ||||
|  * it and returns deepest one. | ||||
|  * it down and returns the deepest one. | ||||
|  */ | ||||
| Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, | ||||
| 			       Dwarf_Die *die_mem) | ||||
|  | ||||
| @ -79,7 +79,11 @@ extern Dwarf_Die *die_find_child(Dwarf_Die *rt_die, | ||||
| extern Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr, | ||||
| 				    Dwarf_Die *die_mem); | ||||
| 
 | ||||
| /* Search an inlined function including given address */ | ||||
| /* Search the top inlined function including given address */ | ||||
| extern Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, | ||||
| 					  Dwarf_Die *die_mem); | ||||
| 
 | ||||
| /* Search the deepest inlined function including given address */ | ||||
| extern Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, | ||||
| 				      Dwarf_Die *die_mem); | ||||
| 
 | ||||
|  | ||||
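A hedged usage sketch relating the two helpers (the wrapper name is hypothetical): repeatedly taking the topmost inline instance and descending visits every level, ending where a single die_find_inlinefunc() call would land directly.

/* Walk from @sp_die down the inline stack at @addr, one level per
 * iteration; 'cur' ends up at the deepest inline instance. */
static Dwarf_Die *example_walk_inlines(Dwarf_Die *sp_die, Dwarf_Addr addr,
				       Dwarf_Die *die_mem)
{
	Dwarf_Die cur = *sp_die;
	int depth = 0;

	while (die_find_top_inlinefunc(&cur, addr, die_mem)) {
		cur = *die_mem;	/* descend one inline level */
		depth++;
		/* ... a caller could record this level's name here ... */
	}

	/* The failed final lookup may have scribbled on die_mem. */
	*die_mem = cur;
	return depth ? die_mem : NULL;
}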
| @ -2768,6 +2768,18 @@ int perf_session__read_header(struct perf_session *session) | ||||
| 	if (perf_file_header__read(&f_header, header, fd) < 0) | ||||
| 		return -EINVAL; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Sanity check that perf.data was written cleanly; data size is | ||||
| 	 * initialized to 0 and updated only if the on_exit function is run. | ||||
| 	 * If data size is still 0 then the file contains only partial | ||||
| 	 * information.  Just warn the user and process as much of it as we can. | ||||
| 	 */ | ||||
| 	if (f_header.data.size == 0) { | ||||
| 		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n" | ||||
| 			   "Was the 'perf record' command properly terminated?\n", | ||||
| 			   session->filename); | ||||
| 	} | ||||
| 
 | ||||
| 	nr_attrs = f_header.attrs.size / f_header.attr_size; | ||||
| 	lseek(fd, f_header.attrs.offset, SEEK_SET); | ||||
| 
 | ||||
|  | ||||
| @ -1327,8 +1327,8 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr, | ||||
| 				struct perf_probe_point *ppt) | ||||
| { | ||||
| 	Dwarf_Die cudie, spdie, indie; | ||||
| 	Dwarf_Addr _addr, baseaddr; | ||||
| 	const char *fname = NULL, *func = NULL, *tmp; | ||||
| 	Dwarf_Addr _addr = 0, baseaddr = 0; | ||||
| 	const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp; | ||||
| 	int baseline = 0, lineno = 0, ret = 0; | ||||
| 
 | ||||
| 	/* Adjust address with bias */ | ||||
| @ -1349,27 +1349,36 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr, | ||||
| 	/* Find a corresponding function (name, baseline and baseaddr) */ | ||||
| 	if (die_find_realfunc(&cudie, (Dwarf_Addr)addr, &spdie)) { | ||||
| 		/* Get function entry information */ | ||||
| 		tmp = dwarf_diename(&spdie); | ||||
| 		if (!tmp || | ||||
| 		func = basefunc = dwarf_diename(&spdie); | ||||
| 		if (!func || | ||||
| 		    dwarf_entrypc(&spdie, &baseaddr) != 0 || | ||||
| 		    dwarf_decl_line(&spdie, &baseline) != 0) | ||||
| 		    dwarf_decl_line(&spdie, &baseline) != 0) { | ||||
| 			lineno = 0; | ||||
| 			goto post; | ||||
| 		func = tmp; | ||||
| 		} | ||||
| 
 | ||||
| 		if (addr == (unsigned long)baseaddr) | ||||
| 		if (addr == (unsigned long)baseaddr) { | ||||
| 			/* Function entry - Relative line number is 0 */ | ||||
| 			lineno = baseline; | ||||
| 		else if (die_find_inlinefunc(&spdie, (Dwarf_Addr)addr, | ||||
| 					     &indie)) { | ||||
| 			fname = dwarf_decl_file(&spdie); | ||||
| 			goto post; | ||||
| 		} | ||||
| 
 | ||||
| 		/* Track down the inline functions step by step */ | ||||
| 		while (die_find_top_inlinefunc(&spdie, (Dwarf_Addr)addr, | ||||
| 						&indie)) { | ||||
| 			/* There is an inline function */ | ||||
| 			if (dwarf_entrypc(&indie, &_addr) == 0 && | ||||
| 			    _addr == addr) | ||||
| 			    _addr == addr) { | ||||
| 				/*
 | ||||
| 				 * addr is at an inline function entry. | ||||
| 				 * In this case, lineno should be the call-site | ||||
| 				 * line number. | ||||
| 				 * line number. (overwrite lineinfo) | ||||
| 				 */ | ||||
| 				lineno = die_get_call_lineno(&indie); | ||||
| 			else { | ||||
| 				fname = die_get_call_file(&indie); | ||||
| 				break; | ||||
| 			} else { | ||||
| 				/*
 | ||||
| 				 * addr is in an inline function body. | ||||
| 				 * Since lineno points to one of the lines | ||||
| @ -1377,19 +1386,27 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr, | ||||
| 				 * be the entry line of the inline function. | ||||
| 				 */ | ||||
| 				tmp = dwarf_diename(&indie); | ||||
| 				if (tmp && | ||||
| 				    dwarf_decl_line(&spdie, &baseline) == 0) | ||||
| 					func = tmp; | ||||
| 				if (!tmp || | ||||
| 				    dwarf_decl_line(&indie, &baseline) != 0) | ||||
| 					break; | ||||
| 				func = tmp; | ||||
| 				spdie = indie; | ||||
| 			} | ||||
| 		} | ||||
| 		/* Verify that lineno and baseline are in the same file */ | ||||
| 		tmp = dwarf_decl_file(&spdie); | ||||
| 		if (!tmp || strcmp(tmp, fname) != 0) | ||||
| 			lineno = 0; | ||||
| 	} | ||||
| 
 | ||||
| post: | ||||
| 	/* Make a relative line number or an offset */ | ||||
| 	if (lineno) | ||||
| 		ppt->line = lineno - baseline; | ||||
| 	else if (func) | ||||
| 	else if (basefunc) { | ||||
| 		ppt->offset = addr - (unsigned long)baseaddr; | ||||
| 		func = basefunc; | ||||
| 	} | ||||
| 
 | ||||
| 	/* Duplicate strings */ | ||||
| 	if (func) { | ||||
|  | ||||
| @ -256,6 +256,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool) | ||||
| 		tool->sample = process_event_sample_stub; | ||||
| 	if (tool->mmap == NULL) | ||||
| 		tool->mmap = process_event_stub; | ||||
| 	if (tool->mmap2 == NULL) | ||||
| 		tool->mmap2 = process_event_stub; | ||||
| 	if (tool->comm == NULL) | ||||
| 		tool->comm = process_event_stub; | ||||
| 	if (tool->fork == NULL) | ||||
| @ -1310,7 +1312,7 @@ int __perf_session__process_events(struct perf_session *session, | ||||
| 	file_offset = page_offset; | ||||
| 	head = data_offset - page_offset; | ||||
| 
 | ||||
| 	if (data_offset + data_size < file_size) | ||||
| 	if (data_size && (data_offset + data_size < file_size)) | ||||
| 		file_size = data_offset + data_size; | ||||
| 
 | ||||
| 	progress_next = file_size / 16; | ||||
|  | ||||