Linux 3.6

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2.0.18 (GNU/Linux)
 
 iQEcBAABAgAGBQJQaNo+AAoJEHm+PkMAQRiGYhAH/2MsFPKG+nUaaz/fXny/HIkF
 bB3mGTe53h6QNOhDsLOt7sJfv5MmA63nR4zu1jbTTfoD+/DjYLvI1GWn5TMXGLCr
 rmenbMOULygHVct9L2YDpI3e2ubNHeDzIOERYg2amVoWr8aBd/TCVidEF8zxSs8W
 rOeiZ7/J4IBgn89CBYTkbwMdiWVWW45vcRK1c5+3iYuHgAdky89BKuthhw4Hcaxl
 yJVmBxdZAarZV9VMroHxRwdgjD2iUNXhRtHJO8X4YRi8MTlW0KGpb8RR9Dy38rQP
 R9UdFjpsuAwvmRGQDWB+W70043smZ6rPvCznCozJrbNcNFbdTlaGd70sBhxl5xo=
 =ahyz
 -----END PGP SIGNATURE-----

Merge tag 'v3.6' into fixes

Linux 3.6

* tag 'v3.6': (91 commits)
  Linux 3.6
  vfs: dcache: fix deadlock in tree traversal
  mtdchar: fix offset overflow detection
  thp: avoid VM_BUG_ON page_count(page) false positives in __collapse_huge_page_copy
  iommu/amd: Fix wrong assumption in iommu-group specific code
  netdev: octeon: fix return value check in octeon_mgmt_init_phy()
  ALSA: snd-usb: fix next_packet_size calls for pause case
  inetpeer: fix token initialization
  qlcnic: Fix scheduling while atomic bug
  bnx2: Clean up remaining iounmap
  trivial select_parent documentation fix
  net: phy: smsc: Implement PHY config_init for LAN87xx
  smsc75xx: fix resume after device reset
  um: Preinclude include/linux/kern_levels.h
  um: Fix IPC on um
  netdev: pasemi: fix return value check in pasemi_mac_phy_init()
  team: fix return value check
  l2tp: fix return value check
  USB: Fix race condition when removing host controllers
  USB: ohci-at91: fix null pointer in ohci_hcd_at91_overcurrent_irq
  ...
Author: Mauro Carvalho Chehab, 2012-10-25 07:15:34 -02:00
Commit: 285019fd95
87 changed files with 801 additions and 443 deletions

@@ -133,7 +133,7 @@ character devices for this group:
 $ lspci -n -s 0000:06:0d.0
 06:0d.0 0401: 1102:0002 (rev 08)
 # echo 0000:06:0d.0 > /sys/bus/pci/devices/0000:06:0d.0/driver/unbind
-# echo 1102 0002 > /sys/bus/pci/drivers/vfio/new_id
+# echo 1102 0002 > /sys/bus/pci/drivers/vfio-pci/new_id
 
 Now we need to look at what other devices are in the group to free
 it for use by VFIO:
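
The corrected path matters because new_id is a per-driver sysfs attribute: it has to be written under the directory of the driver that should claim the device, /sys/bus/pci/drivers/vfio-pci/, and there is no plain "vfio" entry there, so the old command would simply fail. As a rough sketch of the next step the text refers to (same example device as above; the exact listing is system-dependent), the group membership can be read back with:

   $ ls -l /sys/bus/pci/devices/0000:06:0d.0/iommu_group/devices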

@@ -3552,11 +3552,12 @@ K: \b(ABS|SYN)_MT_
 INTEL C600 SERIES SAS CONTROLLER DRIVER
 M: Intel SCU Linux support <intel-linux-scu@intel.com>
+M: Lukasz Dorau <lukasz.dorau@intel.com>
+M: Maciej Patelczyk <maciej.patelczyk@intel.com>
 M: Dave Jiang <dave.jiang@intel.com>
-M: Ed Nadolski <edmund.nadolski@intel.com>
 L: linux-scsi@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/isci.git
-S: Maintained
+T: git git://git.code.sf.net/p/intel-sas/isci
+S: Supported
 F: drivers/scsi/isci/
 F: firmware/isci/
@@ -5544,6 +5545,8 @@ F: Documentation/devicetree/bindings/pwm/
 F: include/linux/pwm.h
 F: include/linux/of_pwm.h
 F: drivers/pwm/
+F: drivers/video/backlight/pwm_bl.c
+F: include/linux/pwm_backlight.h
 
 PXA2xx/PXA3xx SUPPORT
 M: Eric Miao <eric.y.miao@gmail.com>

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Terrified Chipmunk
 
 # *DOCUMENTATION*

@@ -261,7 +261,7 @@ static void __init apx4devkit_init(void)
    enable_clk_enet_out();
 
    if (IS_BUILTIN(CONFIG_PHYLIB))
-       phy_register_fixup_for_uid(PHY_ID_KS8051, MICREL_PHY_ID_MASK,
+       phy_register_fixup_for_uid(PHY_ID_KSZ8051, MICREL_PHY_ID_MASK,
                       apx4devkit_phy_fixup);
 
    mxsfb_pdata.mode_list = apx4devkit_video_modes;

@@ -204,6 +204,13 @@ void __init orion5x_wdt_init(void)
 void __init orion5x_init_early(void)
 {
    orion_time_set_base(TIMER_VIRT_BASE);
+
+   /*
+    * Some Orion5x devices allocate their coherent buffers from atomic
+    * context. Increase size of atomic coherent pool to make sure such
+    * the allocations won't fail.
+    */
+   init_dma_coherent_pool_size(SZ_1M);
 }
 
 int orion5x_tclk;

@@ -346,6 +346,8 @@ static int __init atomic_pool_init(void)
           (unsigned)pool->size / 1024);
        return 0;
    }
+
+   kfree(pages);
 no_pages:
    kfree(bitmap);
 no_bitmap:

@@ -2,6 +2,7 @@ include include/asm-generic/Kbuild.asm
 
 generic-y += atomic.h
 generic-y += auxvec.h
+generic-y += barrier.h
 generic-y += bitsperlong.h
 generic-y += bugs.h
 generic-y += cputime.h

@@ -1,27 +0,0 @@
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef _ASM_C6X_BARRIER_H
-#define _ASM_C6X_BARRIER_H
-
-#define nop()                  asm("NOP\n");
-
-#define mb()                   barrier()
-#define rmb()                  barrier()
-#define wmb()                  barrier()
-#define set_mb(var, value)     do { var = value; mb(); } while (0)
-#define set_wmb(var, value)    do { var = value; wmb(); } while (0)
-
-#define smp_mb()               barrier()
-#define smp_rmb()              barrier()
-#define smp_wmb()              barrier()
-#define smp_read_barrier_depends() do { } while (0)
-
-#endif /* _ASM_C6X_BARRIER_H */

@@ -25,21 +25,23 @@
 #include <linux/module.h>
 #include <asm/pgtable.h>
 
-#define GXIO_TRIO_OP_ALLOC_ASIDS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1400)
-#define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1402)
-#define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140e)
-#define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140f)
-#define GXIO_TRIO_OP_INIT_MEMORY_MAP_MMU_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1417)
-#define GXIO_TRIO_OP_GET_PORT_PROPERTY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1418)
-#define GXIO_TRIO_OP_CONFIG_LEGACY_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1419)
-#define GXIO_TRIO_OP_CONFIG_MSI_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x141a)
-#define GXIO_TRIO_OP_SET_MPS_MRS IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141c)
-#define GXIO_TRIO_OP_FORCE_RC_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141d)
-#define GXIO_TRIO_OP_FORCE_EP_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141e)
+#define GXIO_TRIO_OP_DEALLOC_ASID IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1400)
+#define GXIO_TRIO_OP_ALLOC_ASIDS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1401)
+#define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1404)
+#define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1412)
+#define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1414)
+#define GXIO_TRIO_OP_INIT_MEMORY_MAP_MMU_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141e)
+#define GXIO_TRIO_OP_GET_PORT_PROPERTY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141f)
+#define GXIO_TRIO_OP_CONFIG_LEGACY_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1420)
+#define GXIO_TRIO_OP_CONFIG_MSI_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1421)
+#define GXIO_TRIO_OP_SET_MPS_MRS IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1423)
+#define GXIO_TRIO_OP_FORCE_RC_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1424)
+#define GXIO_TRIO_OP_FORCE_EP_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1425)
 
 #define GXIO_TRIO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
 #define GXIO_TRIO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)

@@ -20,14 +20,6 @@ struct mm_struct;
 struct thread_struct {
    struct task_struct *saved_task;
-   /*
-    * This flag is set to 1 before calling do_fork (and analyzed in
-    * copy_thread) to mark that we are begin called from userspace (fork /
-    * vfork / clone), and reset to 0 after. It is left to 0 when called
-    * from kernelspace (i.e. kernel_thread() or fork_idle(),
-    * as of 2.6.11).
-    */
-   int forking;
    struct pt_regs regs;
    int singlestep_syscall;
    void *fault_addr;
@@ -58,7 +50,6 @@ struct thread_struct {
 
 #define INIT_THREAD \
 { \
-   .forking        = 0, \
    .regs           = EMPTY_REGS, \
    .fault_addr     = NULL, \
    .prev_sched     = NULL, \

@@ -7,16 +7,6 @@ DEFINE(UM_KERN_PAGE_MASK, PAGE_MASK);
 DEFINE(UM_KERN_PAGE_SHIFT, PAGE_SHIFT);
 DEFINE(UM_NSEC_PER_SEC, NSEC_PER_SEC);
 
-DEFINE_STR(UM_KERN_EMERG, KERN_EMERG);
-DEFINE_STR(UM_KERN_ALERT, KERN_ALERT);
-DEFINE_STR(UM_KERN_CRIT, KERN_CRIT);
-DEFINE_STR(UM_KERN_ERR, KERN_ERR);
-DEFINE_STR(UM_KERN_WARNING, KERN_WARNING);
-DEFINE_STR(UM_KERN_NOTICE, KERN_NOTICE);
-DEFINE_STR(UM_KERN_INFO, KERN_INFO);
-DEFINE_STR(UM_KERN_DEBUG, KERN_DEBUG);
-DEFINE_STR(UM_KERN_CONT, KERN_CONT);
-
 DEFINE(UM_ELF_CLASS, ELF_CLASS);
 DEFINE(UM_ELFCLASS32, ELFCLASS32);
 DEFINE(UM_ELFCLASS64, ELFCLASS64);

@@ -26,6 +26,17 @@
 extern void panic(const char *fmt, ...)
    __attribute__ ((format (printf, 1, 2)));
 
+/* Requires preincluding include/linux/kern_levels.h */
+#define UM_KERN_EMERG   KERN_EMERG
+#define UM_KERN_ALERT   KERN_ALERT
+#define UM_KERN_CRIT    KERN_CRIT
+#define UM_KERN_ERR     KERN_ERR
+#define UM_KERN_WARNING KERN_WARNING
+#define UM_KERN_NOTICE  KERN_NOTICE
+#define UM_KERN_INFO    KERN_INFO
+#define UM_KERN_DEBUG   KERN_DEBUG
+#define UM_KERN_CONT    KERN_CONT
+
 #ifdef UML_CONFIG_PRINTK
 extern int printk(const char *fmt, ...)
    __attribute__ ((format (printf, 1, 2)));

@@ -39,34 +39,21 @@ void flush_thread(void)
 void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
 {
+   get_safe_registers(regs->regs.gp, regs->regs.fp);
    PT_REGS_IP(regs) = eip;
    PT_REGS_SP(regs) = esp;
+   current->ptrace &= ~PT_DTRACE;
+#ifdef SUBARCH_EXECVE1
+   SUBARCH_EXECVE1(regs->regs);
+#endif
 }
 EXPORT_SYMBOL(start_thread);
 
-static long execve1(const char *file,
-           const char __user *const __user *argv,
-           const char __user *const __user *env)
-{
-   long error;
-
-   error = do_execve(file, argv, env, &current->thread.regs);
-   if (error == 0) {
-       task_lock(current);
-       current->ptrace &= ~PT_DTRACE;
-#ifdef SUBARCH_EXECVE1
-       SUBARCH_EXECVE1(&current->thread.regs.regs);
-#endif
-       task_unlock(current);
-   }
-   return error;
-}
-
 long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env)
 {
    long err;
 
-   err = execve1(file, argv, env);
+   err = do_execve(file, argv, env, &current->thread.regs);
    if (!err)
        UML_LONGJMP(current->thread.exec_buf, 1);
    return err;
@@ -81,7 +68,7 @@ long sys_execve(const char __user *file, const char __user *const __user *argv,
    filename = getname(file);
    error = PTR_ERR(filename);
    if (IS_ERR(filename)) goto out;
-   error = execve1(filename, argv, env);
+   error = do_execve(filename, argv, env, &current->thread.regs);
    putname(filename);
 out:
    return error;

@@ -181,11 +181,12 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
        struct pt_regs *regs)
 {
    void (*handler)(void);
+   int kthread = current->flags & PF_KTHREAD;
    int ret = 0;
 
    p->thread = (struct thread_struct) INIT_THREAD;
 
-   if (current->thread.forking) {
+   if (!kthread) {
        memcpy(&p->thread.regs.regs, &regs->regs,
               sizeof(p->thread.regs.regs));
        PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
@@ -195,8 +196,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
        handler = fork_handler;
 
        arch_copy_thread(&current->thread.arch, &p->thread.arch);
-   }
-   else {
+   } else {
        get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
        p->thread.request.u.thread = current->thread.request.u.thread;
        handler = new_thread_handler;
@@ -204,7 +204,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
    new_thread(task_stack_page(p), &p->thread.switch_buf, handler);
 
-   if (current->thread.forking) {
+   if (!kthread) {
        clear_flushed_tls(p);
 
        /*

@@ -22,9 +22,13 @@ static void handle_signal(struct pt_regs *regs, unsigned long signr,
              struct k_sigaction *ka, siginfo_t *info)
 {
    sigset_t *oldset = sigmask_to_save();
+   int singlestep = 0;
    unsigned long sp;
    int err;
 
+   if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
+       singlestep = 1;
+
    /* Did we come from a system call? */
    if (PT_REGS_SYSCALL_NR(regs) >= 0) {
        /* If so, check system call restarting.. */
@@ -61,7 +65,7 @@ static void handle_signal(struct pt_regs *regs, unsigned long signr,
    if (err)
        force_sigsegv(signr, current);
    else
-       signal_delivered(signr, info, ka, regs, 0);
+       signal_delivered(signr, info, ka, regs, singlestep);
 }
 
 static int kern_do_signal(struct pt_regs *regs)

@@ -17,25 +17,25 @@
 long sys_fork(void)
 {
-   long ret;
-
-   current->thread.forking = 1;
-   ret = do_fork(SIGCHLD, UPT_SP(&current->thread.regs.regs),
+   return do_fork(SIGCHLD, UPT_SP(&current->thread.regs.regs),
              &current->thread.regs, 0, NULL, NULL);
-   current->thread.forking = 0;
-   return ret;
 }
 
 long sys_vfork(void)
 {
-   long ret;
-
-   current->thread.forking = 1;
-   ret = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
+   return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
              UPT_SP(&current->thread.regs.regs),
              &current->thread.regs, 0, NULL, NULL);
-   current->thread.forking = 0;
-   return ret;
+}
+
+long sys_clone(unsigned long clone_flags, unsigned long newsp,
+          void __user *parent_tid, void __user *child_tid)
+{
+   if (!newsp)
+       newsp = UPT_SP(&current->thread.regs.regs);
+
+   return do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
+          child_tid);
 }
 
 long old_mmap(unsigned long addr, unsigned long len,

@@ -8,7 +8,7 @@ USER_OBJS += $(filter %_user.o,$(obj-y) $(obj-m) $(USER_SINGLE_OBJS))
 USER_OBJS := $(foreach file,$(USER_OBJS),$(obj)/$(file))
 
 $(USER_OBJS:.o=.%): \
-	c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) -include user.h $(CFLAGS_$(basetarget).o)
+	c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) -include $(srctree)/include/linux/kern_levels.h -include user.h $(CFLAGS_$(basetarget).o)
 
 # These are like USER_OBJS but filter USER_CFLAGS through unprofile instead of
 # using it directly.

@@ -21,6 +21,7 @@ config 64BIT
 config X86_32
    def_bool !64BIT
    select HAVE_AOUT
+   select ARCH_WANT_IPC_PARSE_VERSION
 
 config X86_64
    def_bool 64BIT

@@ -7,9 +7,6 @@
 #define DEFINE(sym, val) \
    asm volatile("\n->" #sym " %0 " #val : : "i" (val))
 
-#define STR(x) #x
-#define DEFINE_STR(sym, val) asm volatile("\n->" #sym " " STR(val) " " #val: : )
-
 #define BLANK() asm volatile("\n->" : : )
 
 #define OFFSET(sym, str, mem) \

@@ -1,3 +1,5 @@
+extern long sys_clone(unsigned long clone_flags, unsigned long newsp,
+              void __user *parent_tid, void __user *child_tid);
+
 #ifdef __i386__
 #include "syscalls_32.h"
 #else

@@ -416,9 +416,6 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig,
    PT_REGS_AX(regs) = (unsigned long) sig;
    PT_REGS_DX(regs) = (unsigned long) 0;
    PT_REGS_CX(regs) = (unsigned long) 0;
-
-   if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
-       ptrace_notify(SIGTRAP);
    return 0;
 }
@@ -466,9 +463,6 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
    PT_REGS_AX(regs) = (unsigned long) sig;
    PT_REGS_DX(regs) = (unsigned long) &frame->info;
    PT_REGS_CX(regs) = (unsigned long) &frame->uc;
-
-   if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
-       ptrace_notify(SIGTRAP);
    return 0;
 }

@@ -28,7 +28,7 @@
 #define ptregs_execve sys_execve
 #define ptregs_iopl sys_iopl
 #define ptregs_vm86old sys_vm86old
-#define ptregs_clone sys_clone
+#define ptregs_clone i386_clone
 #define ptregs_vm86 sys_vm86
 #define ptregs_sigaltstack sys_sigaltstack
 #define ptregs_vfork sys_vfork

@@ -3,37 +3,24 @@
  * Licensed under the GPL
  */
 
-#include "linux/sched.h"
-#include "linux/shm.h"
-#include "linux/ipc.h"
-#include "linux/syscalls.h"
-#include "asm/mman.h"
-#include "asm/uaccess.h"
-#include "asm/unistd.h"
+#include <linux/syscalls.h>
+#include <sysdep/syscalls.h>
 
 /*
  * The prototype on i386 is:
  *
- *     int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls, int * child_tidptr)
+ *     int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls
  *
  * and the "newtls" arg. on i386 is read by copy_thread directly from the
  * register saved on the stack.
  */
-long sys_clone(unsigned long clone_flags, unsigned long newsp,
-          int __user *parent_tid, void *newtls, int __user *child_tid)
+long i386_clone(unsigned long clone_flags, unsigned long newsp,
+          int __user *parent_tid, void *newtls, int __user *child_tid)
 {
-   long ret;
-
-   if (!newsp)
-       newsp = UPT_SP(&current->thread.regs.regs);
-   current->thread.forking = 1;
-   ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
-             child_tid);
-   current->thread.forking = 0;
-   return ret;
+   return sys_clone(clone_flags, newsp, parent_tid, child_tid);
 }
 
 long sys_sigaction(int sig, const struct old_sigaction __user *act,
           struct old_sigaction __user *oact)
 {

@@ -5,12 +5,9 @@
  * Licensed under the GPL
  */
 
-#include "linux/linkage.h"
-#include "linux/personality.h"
-#include "linux/utsname.h"
-#include "asm/prctl.h" /* XXX This should get the constants from libc */
-#include "asm/uaccess.h"
-#include "os.h"
+#include <linux/sched.h>
+#include <asm/prctl.h> /* XXX This should get the constants from libc */
+#include <os.h>
 
 long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr)
 {
@@ -79,20 +76,6 @@ long sys_arch_prctl(int code, unsigned long addr)
    return arch_prctl(current, code, (unsigned long __user *) addr);
 }
 
-long sys_clone(unsigned long clone_flags, unsigned long newsp,
-          void __user *parent_tid, void __user *child_tid)
-{
-   long ret;
-
-   if (!newsp)
-       newsp = UPT_SP(&current->thread.regs.regs);
-   current->thread.forking = 1;
-   ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
-             child_tid);
-   current->thread.forking = 0;
-   return ret;
-}
-
 void arch_switch_to(struct task_struct *to)
 {
    if ((to->thread.arch.fs == 0) || (to->mm == NULL))

@@ -17,6 +17,7 @@
 #include <asm/e820.h>
 #include <asm/setup.h>
 #include <asm/acpi.h>
+#include <asm/numa.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
@@ -544,4 +545,7 @@ void __init xen_arch_setup(void)
    disable_cpufreq();
    WARN_ON(set_pm_idle_to_default());
    fiddle_vdso();
+#ifdef CONFIG_NUMA
+   numa_off = 1;
+#endif
 }

@@ -79,6 +79,7 @@ struct nvme_dev {
    char serial[20];
    char model[40];
    char firmware_rev[8];
+   u32 max_hw_sectors;
 };
 
 /*
@@ -835,15 +836,15 @@ static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
 }
 
 static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
-               unsigned dword11, dma_addr_t dma_addr)
+               unsigned nsid, dma_addr_t dma_addr)
 {
    struct nvme_command c;
 
    memset(&c, 0, sizeof(c));
    c.features.opcode = nvme_admin_get_features;
+   c.features.nsid = cpu_to_le32(nsid);
    c.features.prp1 = cpu_to_le64(dma_addr);
    c.features.fid = cpu_to_le32(fid);
-   c.features.dword11 = cpu_to_le32(dword11);
 
    return nvme_submit_admin_cmd(dev, &c, NULL);
 }
@@ -862,11 +863,51 @@ static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
    return nvme_submit_admin_cmd(dev, &c, result);
 }
 
+/**
+ * nvme_cancel_ios - Cancel outstanding I/Os
+ * @queue: The queue to cancel I/Os on
+ * @timeout: True to only cancel I/Os which have timed out
+ */
+static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
+{
+   int depth = nvmeq->q_depth - 1;
+   struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+   unsigned long now = jiffies;
+   int cmdid;
+
+   for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
+       void *ctx;
+       nvme_completion_fn fn;
+       static struct nvme_completion cqe = {
+           .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1,
+       };
+
+       if (timeout && !time_after(now, info[cmdid].timeout))
+           continue;
+       dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
+       ctx = cancel_cmdid(nvmeq, cmdid, &fn);
+       fn(nvmeq->dev, ctx, &cqe);
+   }
+}
+
+static void nvme_free_queue_mem(struct nvme_queue *nvmeq)
+{
+   dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
+               (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
+   dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+               nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+   kfree(nvmeq);
+}
+
 static void nvme_free_queue(struct nvme_dev *dev, int qid)
 {
    struct nvme_queue *nvmeq = dev->queues[qid];
    int vector = dev->entry[nvmeq->cq_vector].vector;
 
+   spin_lock_irq(&nvmeq->q_lock);
+   nvme_cancel_ios(nvmeq, false);
+   spin_unlock_irq(&nvmeq->q_lock);
+
    irq_set_affinity_hint(vector, NULL);
    free_irq(vector, nvmeq);
@@ -876,18 +917,15 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid)
        adapter_delete_cq(dev, qid);
    }
 
-   dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
-               (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
-   dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
-               nvmeq->sq_cmds, nvmeq->sq_dma_addr);
-   kfree(nvmeq);
+   nvme_free_queue_mem(nvmeq);
 }
 
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
                        int depth, int vector)
 {
    struct device *dmadev = &dev->pci_dev->dev;
-   unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
+   unsigned extra = DIV_ROUND_UP(depth, 8) + (depth *
+                       sizeof(struct nvme_cmd_info));
    struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
    if (!nvmeq)
        return NULL;
@@ -975,7 +1013,7 @@ static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
 
 static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
 {
-   int result;
+   int result = 0;
    u32 aqa;
    u64 cap;
    unsigned long timeout;
@@ -1005,17 +1043,22 @@ static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
    timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
    dev->db_stride = NVME_CAP_STRIDE(cap);
 
-   while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
+   while (!result && !(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
        msleep(100);
        if (fatal_signal_pending(current))
-           return -EINTR;
+           result = -EINTR;
        if (time_after(jiffies, timeout)) {
            dev_err(&dev->pci_dev->dev,
                "Device not ready; aborting initialisation\n");
-           return -ENODEV;
+           result = -ENODEV;
        }
    }
 
+   if (result) {
+       nvme_free_queue_mem(nvmeq);
+       return result;
+   }
+
    result = queue_request_irq(dev, nvmeq, "nvme admin");
    dev->queues[0] = nvmeq;
    return result;
@@ -1037,6 +1080,8 @@ static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
    offset = offset_in_page(addr);
    count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
    pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
+   if (!pages)
+       return ERR_PTR(-ENOMEM);
 
    err = get_user_pages_fast(addr, count, 1, pages);
    if (err < count) {
@@ -1146,14 +1191,13 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
    return status;
 }
 
-static int nvme_user_admin_cmd(struct nvme_ns *ns,
+static int nvme_user_admin_cmd(struct nvme_dev *dev,
                struct nvme_admin_cmd __user *ucmd)
 {
-   struct nvme_dev *dev = ns->dev;
    struct nvme_admin_cmd cmd;
    struct nvme_command c;
    int status, length;
-   struct nvme_iod *iod;
+   struct nvme_iod *uninitialized_var(iod);
 
    if (!capable(CAP_SYS_ADMIN))
        return -EACCES;
@@ -1204,7 +1248,7 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
    case NVME_IOCTL_ID:
        return ns->ns_id;
    case NVME_IOCTL_ADMIN_CMD:
-       return nvme_user_admin_cmd(ns, (void __user *)arg);
+       return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
    case NVME_IOCTL_SUBMIT_IO:
        return nvme_submit_io(ns, (void __user *)arg);
    default:
@@ -1218,26 +1262,6 @@ static const struct block_device_operations nvme_fops = {
    .compat_ioctl   = nvme_ioctl,
 };
 
-static void nvme_timeout_ios(struct nvme_queue *nvmeq)
-{
-   int depth = nvmeq->q_depth - 1;
-   struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
-   unsigned long now = jiffies;
-   int cmdid;
-
-   for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
-       void *ctx;
-       nvme_completion_fn fn;
-       static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
-
-       if (!time_after(now, info[cmdid].timeout))
-           continue;
-       dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
-       ctx = cancel_cmdid(nvmeq, cmdid, &fn);
-       fn(nvmeq->dev, ctx, &cqe);
-   }
-}
-
 static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
 {
    while (bio_list_peek(&nvmeq->sq_cong)) {
@@ -1269,7 +1293,7 @@ static int nvme_kthread(void *data)
            spin_lock_irq(&nvmeq->q_lock);
            if (nvme_process_cq(nvmeq))
                printk("process_cq did something\n");
-           nvme_timeout_ios(nvmeq);
+           nvme_cancel_ios(nvmeq, true);
            nvme_resubmit_bios(nvmeq);
            spin_unlock_irq(&nvmeq->q_lock);
        }
@@ -1339,6 +1363,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
    ns->disk = disk;
    lbaf = id->flbas & 0xf;
    ns->lba_shift = id->lbaf[lbaf].ds;
+   blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
+   if (dev->max_hw_sectors)
+       blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
 
    disk->major = nvme_major;
    disk->minors = NVME_MINORS;
@@ -1383,7 +1410,7 @@ static int set_queue_count(struct nvme_dev *dev, int count)
 
 static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
 {
-   int result, cpu, i, nr_io_queues, db_bar_size;
+   int result, cpu, i, nr_io_queues, db_bar_size, q_depth;
 
    nr_io_queues = num_online_cpus();
    result = set_queue_count(dev, nr_io_queues);
@@ -1429,9 +1456,10 @@ static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
        cpu = cpumask_next(cpu, cpu_online_mask);
    }
 
+   q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
+                       NVME_Q_DEPTH);
    for (i = 0; i < nr_io_queues; i++) {
-       dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
-                       NVME_Q_DEPTH, i);
+       dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i);
        if (IS_ERR(dev->queues[i + 1]))
            return PTR_ERR(dev->queues[i + 1]);
        dev->queue_count++;
@@ -1480,6 +1508,10 @@ static int __devinit nvme_dev_add(struct nvme_dev *dev)
    memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
    memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
    memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
+   if (ctrl->mdts) {
+       int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
+       dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
+   }
 
    id_ns = mem;
    for (i = 1; i <= nn; i++) {
@@ -1523,8 +1555,6 @@ static int nvme_dev_remove(struct nvme_dev *dev)
    list_del(&dev->node);
    spin_unlock(&dev_list_lock);
 
-   /* TODO: wait all I/O finished or cancel them */
-
    list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
        list_del(&ns->list);
        del_gendisk(ns->disk);
@@ -1560,15 +1590,33 @@ static void nvme_release_prp_pools(struct nvme_dev *dev)
    dma_pool_destroy(dev->prp_small_pool);
 }
 
-/* XXX: Use an ida or something to let remove / add work correctly */
-static void nvme_set_instance(struct nvme_dev *dev)
+static DEFINE_IDA(nvme_instance_ida);
+
+static int nvme_set_instance(struct nvme_dev *dev)
 {
-   static int instance;
-   dev->instance = instance++;
+   int instance, error;
+
+   do {
+       if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
+           return -ENODEV;
+
+       spin_lock(&dev_list_lock);
+       error = ida_get_new(&nvme_instance_ida, &instance);
+       spin_unlock(&dev_list_lock);
+   } while (error == -EAGAIN);
+
+   if (error)
+       return -ENODEV;
+
+   dev->instance = instance;
+   return 0;
 }
 
 static void nvme_release_instance(struct nvme_dev *dev)
 {
+   spin_lock(&dev_list_lock);
+   ida_remove(&nvme_instance_ida, dev->instance);
+   spin_unlock(&dev_list_lock);
 }
 
 static int __devinit nvme_probe(struct pci_dev *pdev,
@@ -1601,7 +1649,10 @@ static int __devinit nvme_probe(struct pci_dev *pdev,
    pci_set_drvdata(pdev, dev);
    dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-   nvme_set_instance(dev);
+   result = nvme_set_instance(dev);
+   if (result)
+       goto disable;
+
    dev->entry[0].vector = pdev->irq;
 
    result = nvme_setup_prp_pools(dev);
@@ -1704,15 +1755,17 @@ static struct pci_driver nvme_driver = {
 
 static int __init nvme_init(void)
 {
-   int result = -EBUSY;
+   int result;
 
    nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
    if (IS_ERR(nvme_thread))
        return PTR_ERR(nvme_thread);
 
-   nvme_major = register_blkdev(nvme_major, "nvme");
-   if (nvme_major <= 0)
+   result = register_blkdev(nvme_major, "nvme");
+   if (result < 0)
        goto kill_kthread;
+   else if (result > 0)
+       nvme_major = result;
 
    result = pci_register_driver(&nvme_driver);
    if (result)
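
A note on the MDTS arithmetic introduced above, since the units are easy to misread: MDTS is a power-of-two multiple of the controller's minimum page size (CAP.MPSMIN, itself an exponent relative to 4 KiB, hence the "+ 12"), while max_hw_sectors is counted in 512-byte sectors (hence the "- 9"). As an illustrative, hypothetical example: with MPSMIN = 0 and MDTS = 5 the maximum transfer is 4 KiB << 5 = 128 KiB, and 1 << (5 + 12 - 9) = 256 sectors of 512 bytes, which is the same 128 KiB.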

@@ -246,13 +246,12 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
 {
    struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
 
-   rbd_get_dev(rbd_dev);
-
-   set_device_ro(bdev, rbd_dev->read_only);
-
    if ((mode & FMODE_WRITE) && rbd_dev->read_only)
        return -EROFS;
 
+   rbd_get_dev(rbd_dev);
+   set_device_ro(bdev, rbd_dev->read_only);
+
    return 0;
 }
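
The point of the reordering: the read-only check can fail the open with -EROFS, and in the old ordering that early return happened after rbd_get_dev() had already taken a reference on the device, leaking it. Taking the reference only once the open is known to succeed avoids the leak.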

@@ -308,6 +308,7 @@ static int lpc32xx_gpio_dir_output_p012(struct gpio_chip *chip, unsigned pin,
 {
    struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
 
+   __set_gpio_level_p012(group, pin, value);
    __set_gpio_dir_p012(group, pin, 0);
 
    return 0;
@@ -318,6 +319,7 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin,
 {
    struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
 
+   __set_gpio_level_p3(group, pin, value);
    __set_gpio_dir_p3(group, pin, 0);
 
    return 0;
@@ -326,6 +328,9 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin,
 static int lpc32xx_gpio_dir_out_always(struct gpio_chip *chip, unsigned pin,
    int value)
 {
+   struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
+
+   __set_gpo_level_p3(group, pin, value);
    return 0;
 }
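
In each case the requested level is now written before the pin is switched to output (and dir_out_always, which previously ignored the value argument entirely, now applies it), so gpio_direction_output() actually honours its value parameter rather than leaving whatever level the output latch last held.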

@@ -179,7 +179,7 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
        return 0;
    } else
    if (init->class == 0x906e) {
-       NV_ERROR(dev, "906e not supported yet\n");
+       NV_DEBUG(dev, "906e not supported yet\n");
        return -EINVAL;
    }

@@ -124,6 +124,7 @@ nvc0_fb_init(struct drm_device *dev)
    priv = dev_priv->engine.fb.priv;
 
    nv_wr32(dev, 0x100c10, priv->r100c10 >> 8);
+   nv_mask(dev, 0x17e820, 0x00100000, 0x00000000); /* NV_PLTCG_INTR_EN */
    return 0;
 }

@@ -373,7 +373,8 @@ nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
 static void
 nvc0_fifo_isr(struct drm_device *dev)
 {
-   u32 stat = nv_rd32(dev, 0x002100);
+   u32 mask = nv_rd32(dev, 0x002140);
+   u32 stat = nv_rd32(dev, 0x002100) & mask;
 
    if (stat & 0x00000100) {
        NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");

@@ -345,7 +345,8 @@ nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
 static void
 nve0_fifo_isr(struct drm_device *dev)
 {
-   u32 stat = nv_rd32(dev, 0x002100);
+   u32 mask = nv_rd32(dev, 0x002140);
+   u32 stat = nv_rd32(dev, 0x002100) & mask;
 
    if (stat & 0x00000100) {
        NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");

@@ -69,6 +69,13 @@ static int udl_get_modes(struct drm_connector *connector)
 static int udl_mode_valid(struct drm_connector *connector,
              struct drm_display_mode *mode)
 {
+   struct udl_device *udl = connector->dev->dev_private;
+   if (!udl->sku_pixel_limit)
+       return 0;
+
+   if (mode->vdisplay * mode->hdisplay > udl->sku_pixel_limit)
+       return MODE_VIRTUAL_Y;
+
    return 0;
 }

@@ -1018,7 +1018,7 @@ int vmw_event_fence_action_create(struct drm_file *file_priv,
    }
 
-   event = kzalloc(sizeof(event->event), GFP_KERNEL);
+   event = kzalloc(sizeof(*event), GFP_KERNEL);
    if (unlikely(event == NULL)) {
        DRM_ERROR("Failed to allocate an event.\n");
        ret = -ENOMEM;
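
sizeof(event->event) measures only the embedded drm_event member, so the old call under-allocated the surrounding structure; sizeof(*event) sizes the whole object being assigned to event, which is the usual idiom for exactly this reason.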

@@ -266,7 +266,7 @@ static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
 
 static int iommu_init_device(struct device *dev)
 {
-   struct pci_dev *dma_pdev, *pdev = to_pci_dev(dev);
+   struct pci_dev *dma_pdev = NULL, *pdev = to_pci_dev(dev);
    struct iommu_dev_data *dev_data;
    struct iommu_group *group;
    u16 alias;
@@ -293,7 +293,9 @@ static int iommu_init_device(struct device *dev)
        dev_data->alias_data = alias_data;
 
        dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
-   } else
+   }
+
+   if (dma_pdev == NULL)
        dma_pdev = pci_dev_get(pdev);
 
    /* Account for quirked devices */

@@ -1555,6 +1555,7 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
               unsigned long arg)
 {
    struct multipath *m = ti->private;
+   struct pgpath *pgpath;
    struct block_device *bdev;
    fmode_t mode;
    unsigned long flags;
@@ -1570,12 +1571,14 @@ again:
    if (!m->current_pgpath)
        __choose_pgpath(m, 0);
 
-   if (m->current_pgpath) {
-       bdev = m->current_pgpath->path.dev->bdev;
-       mode = m->current_pgpath->path.dev->mode;
+   pgpath = m->current_pgpath;
+
+   if (pgpath) {
+       bdev = pgpath->path.dev->bdev;
+       mode = pgpath->path.dev->mode;
    }
 
-   if (m->queue_io)
+   if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
        r = -EAGAIN;
    else if (!bdev)
        r = -EIO;
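
Reading the new condition: with a usable path the ioctl is still deferred with -EAGAIN while queued I/O is pending, and with no path at all it is now also deferred when queue_if_no_path is set, instead of falling through to the !bdev case and failing with -EIO even though the caller is expected to retry.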

@@ -1212,6 +1212,41 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
    return &t->targets[(KEYS_PER_NODE * n) + k];
 }
 
+static int count_device(struct dm_target *ti, struct dm_dev *dev,
+           sector_t start, sector_t len, void *data)
+{
+   unsigned *num_devices = data;
+
+   (*num_devices)++;
+
+   return 0;
+}
+
+/*
+ * Check whether a table has no data devices attached using each
+ * target's iterate_devices method.
+ * Returns false if the result is unknown because a target doesn't
+ * support iterate_devices.
+ */
+bool dm_table_has_no_data_devices(struct dm_table *table)
+{
+   struct dm_target *uninitialized_var(ti);
+   unsigned i = 0, num_devices = 0;
+
+   while (i < dm_table_get_num_targets(table)) {
+       ti = dm_table_get_target(table, i++);
+
+       if (!ti->type->iterate_devices)
+           return false;
+
+       ti->type->iterate_devices(ti, count_device, &num_devices);
+       if (num_devices)
+           return false;
+   }
+
+   return true;
+}
+
 /*
  * Establish the new table's queue_limits and validate them.
  */
@@ -1354,17 +1389,25 @@ static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
    return q && blk_queue_nonrot(q);
 }
 
-static bool dm_table_is_nonrot(struct dm_table *t)
+static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
+               sector_t start, sector_t len, void *data)
+{
+   struct request_queue *q = bdev_get_queue(dev->bdev);
+
+   return q && !blk_queue_add_random(q);
+}
+
+static bool dm_table_all_devices_attribute(struct dm_table *t,
+                      iterate_devices_callout_fn func)
 {
    struct dm_target *ti;
    unsigned i = 0;
 
-   /* Ensure that all underlying device are non-rotational. */
    while (i < dm_table_get_num_targets(t)) {
        ti = dm_table_get_target(t, i++);
 
        if (!ti->type->iterate_devices ||
-           !ti->type->iterate_devices(ti, device_is_nonrot, NULL))
+           !ti->type->iterate_devices(ti, func, NULL))
            return 0;
    }
 
@@ -1396,13 +1439,23 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
    if (!dm_table_discard_zeroes_data(t))
        q->limits.discard_zeroes_data = 0;
 
-   if (dm_table_is_nonrot(t))
+   /* Ensure that all underlying devices are non-rotational. */
+   if (dm_table_all_devices_attribute(t, device_is_nonrot))
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
    else
        queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
 
    dm_table_set_integrity(t);
 
+   /*
+    * Determine whether or not this queue's I/O timings contribute
+    * to the entropy pool, Only request-based targets use this.
+    * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
+    * have it set.
+    */
+   if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
+       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+
    /*
    * QUEUE_FLAG_STACKABLE must be set after all queue settings are
    * visible to other CPUs because, once the flag is set, incoming bios

@@ -509,9 +509,9 @@ enum pool_mode {
 struct pool_features {
    enum pool_mode mode;
 
-   unsigned zero_new_blocks:1;
-   unsigned discard_enabled:1;
-   unsigned discard_passdown:1;
+   bool zero_new_blocks:1;
+   bool discard_enabled:1;
+   bool discard_passdown:1;
 };
 
 struct thin_c;
@@ -580,7 +580,8 @@ struct pool_c {
    struct dm_target_callbacks callbacks;
 
    dm_block_t low_water_blocks;
-   struct pool_features pf;
+   struct pool_features requested_pf; /* Features requested during table load */
+   struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
 };
 
 /*
@@ -1839,6 +1840,47 @@ static void __requeue_bios(struct pool *pool)
 /*----------------------------------------------------------------
  * Binding of control targets to a pool object
  *--------------------------------------------------------------*/
+static bool data_dev_supports_discard(struct pool_c *pt)
+{
+   struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
+
+   return q && blk_queue_discard(q);
+}
+
+/*
+ * If discard_passdown was enabled verify that the data device
+ * supports discards.  Disable discard_passdown if not.
+ */
+static void disable_passdown_if_not_supported(struct pool_c *pt)
+{
+   struct pool *pool = pt->pool;
+   struct block_device *data_bdev = pt->data_dev->bdev;
+   struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
+   sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
+   const char *reason = NULL;
+   char buf[BDEVNAME_SIZE];
+
+   if (!pt->adjusted_pf.discard_passdown)
+       return;
+
+   if (!data_dev_supports_discard(pt))
+       reason = "discard unsupported";
+
+   else if (data_limits->max_discard_sectors < pool->sectors_per_block)
+       reason = "max discard sectors smaller than a block";
+
+   else if (data_limits->discard_granularity > block_size)
+       reason = "discard granularity larger than a block";
+
+   else if (block_size & (data_limits->discard_granularity - 1))
+       reason = "discard granularity not a factor of block size";
+
+   if (reason) {
+       DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
+       pt->adjusted_pf.discard_passdown = false;
+   }
+}
+
 static int bind_control_target(struct pool *pool, struct dm_target *ti)
 {
    struct pool_c *pt = ti->private;
@@ -1847,31 +1889,16 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
    * We want to make sure that degraded pools are never upgraded.
    */
    enum pool_mode old_mode = pool->pf.mode;
-   enum pool_mode new_mode = pt->pf.mode;
+   enum pool_mode new_mode = pt->adjusted_pf.mode;
 
    if (old_mode > new_mode)
        new_mode = old_mode;
 
    pool->ti = ti;
    pool->low_water_blocks = pt->low_water_blocks;
-   pool->pf = pt->pf;
-   set_pool_mode(pool, new_mode);
+   pool->pf = pt->adjusted_pf;
 
-   /*
-    * If discard_passdown was enabled verify that the data device
-    * supports discards. Disable discard_passdown if not; otherwise
-    * -EOPNOTSUPP will be returned.
-    */
-   /* FIXME: pull this out into a sep fn. */
-   if (pt->pf.discard_passdown) {
-       struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
-       if (!q || !blk_queue_discard(q)) {
-           char buf[BDEVNAME_SIZE];
-           DMWARN("Discard unsupported by data device (%s): Disabling discard passdown.",
-                  bdevname(pt->data_dev->bdev, buf));
-           pool->pf.discard_passdown = 0;
-       }
-   }
+   set_pool_mode(pool, new_mode);
 
    return 0;
 }
@@ -1889,9 +1916,9 @@ static void unbind_control_target(struct pool *pool, struct dm_target *ti)
 static void pool_features_init(struct pool_features *pf)
 {
    pf->mode = PM_WRITE;
-   pf->zero_new_blocks = 1;
-   pf->discard_enabled = 1;
-   pf->discard_passdown = 1;
+   pf->zero_new_blocks = true;
+   pf->discard_enabled = true;
+   pf->discard_passdown = true;
 }
 
 static void __pool_destroy(struct pool *pool)
@@ -2119,13 +2146,13 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
        argc--;
 
        if (!strcasecmp(arg_name, "skip_block_zeroing"))
-           pf->zero_new_blocks = 0;
+           pf->zero_new_blocks = false;
 
        else if (!strcasecmp(arg_name, "ignore_discard"))
-           pf->discard_enabled = 0;
+           pf->discard_enabled = false;
 
        else if (!strcasecmp(arg_name, "no_discard_passdown"))
-           pf->discard_passdown = 0;
+           pf->discard_passdown = false;
 
        else if (!strcasecmp(arg_name, "read_only"))
            pf->mode = PM_READ_ONLY;
@@ -2259,8 +2286,9 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
    pt->metadata_dev = metadata_dev;
    pt->data_dev = data_dev;
    pt->low_water_blocks = low_water_blocks;
-   pt->pf = pf;
+   pt->adjusted_pf = pt->requested_pf = pf;
    ti->num_flush_requests = 1;
+
    /*
    * Only need to enable discards if the pool should pass
    * them down to the data device. The thin device's discard
@@ -2268,12 +2296,14 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
    */
    if (pf.discard_enabled && pf.discard_passdown) {
        ti->num_discard_requests = 1;
+
        /*
        * Setting 'discards_supported' circumvents the normal
        * stacking of discard limits (this keeps the pool and
        * thin devices' discard limits consistent).
        */
        ti->discards_supported = true;
+       ti->discard_zeroes_data_unsupported = true;
    }
    ti->private = pt;
@@ -2703,7 +2733,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
              format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
              (unsigned long)pool->sectors_per_block,
              (unsigned long long)pt->low_water_blocks);
-       emit_flags(&pt->pf, result, sz, maxlen);
+       emit_flags(&pt->requested_pf, result, sz, maxlen);
        break;
    }
@@ -2732,20 +2762,21 @@ static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
    return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
 }
 
-static void set_discard_limits(struct pool *pool, struct queue_limits *limits)
+static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
 {
-   /*
-    * FIXME: these limits may be incompatible with the pool's data device
-    */
+   struct pool *pool = pt->pool;
+   struct queue_limits *data_limits;
+
    limits->max_discard_sectors = pool->sectors_per_block;
 
    /*
-    * This is just a hint, and not enforced. We have to cope with
-    * bios that cover a block partially. A discard that spans a block
-    * boundary is not sent to this target.
+    * discard_granularity is just a hint, and not enforced.
    */
-   limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
-   limits->discard_zeroes_data = pool->pf.zero_new_blocks;
+   if (pt->adjusted_pf.discard_passdown) {
+       data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
+       limits->discard_granularity = data_limits->discard_granularity;
+   } else
+       limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
 }
 
 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
@@ -2755,15 +2786,25 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
    blk_limits_io_min(limits, 0);
    blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
-   if (pool->pf.discard_enabled)
-       set_discard_limits(pool, limits);
+
+   /*
+    * pt->adjusted_pf is a staging area for the actual features to use.
+    * They get transferred to the live pool in bind_control_target()
+    * called from pool_preresume().
+    */
+   if (!pt->adjusted_pf.discard_enabled)
+       return;
+
+   disable_passdown_if_not_supported(pt);
+
+   set_discard_limits(pt, limits);
 }
 
 static struct target_type pool_target = {
    .name = "thin-pool",
    .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
           DM_TARGET_IMMUTABLE,
-   .version = {1, 3, 0},
+   .version = {1, 4, 0},
    .module = THIS_MODULE,
    .ctr = pool_ctr,
    .dtr = pool_dtr,
@@ -3042,19 +3083,19 @@ static int thin_iterate_devices(struct dm_target *ti,
    return 0;
 }
 
+/*
+ * A thin device always inherits its queue limits from its pool.
+ */
 static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
    struct thin_c *tc = ti->private;
-   struct pool *pool = tc->pool;
 
-   blk_limits_io_min(limits, 0);
-   blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
-   set_discard_limits(pool, limits);
+   *limits = bdev_get_queue(tc->pool_dev->bdev)->limits;
 }
 
 static struct target_type thin_target = {
    .name = "thin",
-   .version = {1, 3, 0},
+   .version = {1, 4, 0},
    .module = THIS_MODULE,
    .ctr = thin_ctr,
    .dtr = thin_dtr,

@@ -718,8 +718,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
    v->hash_dev_block_bits = ffs(num) - 1;
 
    if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
-       num_ll << (v->data_dev_block_bits - SECTOR_SHIFT) !=
-       (sector_t)num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) {
+       (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
+       >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {
        ti->error = "Invalid data blocks";
        r = -EINVAL;
        goto bad;
@@ -733,8 +733,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
    }
 
    if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
-       num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT) !=
-       (sector_t)num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT)) {
+       (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))
+       >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {
        ti->error = "Invalid hash start";
        r = -EINVAL;
        goto bad;
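
Both checks now shift the user-supplied count up, cast the result to sector_t, shift it back down, and compare with the original value: if the shift overflows or the product does not fit in sector_t, the round trip loses high bits and the comparison fails. The old expression compared two values that are identical whenever sector_t is 64 bits wide, so on those configurations it could never detect the overflow.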

@ -865,10 +865,14 @@ static void dm_done(struct request *clone, int error, bool mapped)
{ {
int r = error; int r = error;
struct dm_rq_target_io *tio = clone->end_io_data; struct dm_rq_target_io *tio = clone->end_io_data;
dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io; dm_request_endio_fn rq_end_io = NULL;
if (mapped && rq_end_io) if (tio->ti) {
r = rq_end_io(tio->ti, clone, error, &tio->info); rq_end_io = tio->ti->type->rq_end_io;
if (mapped && rq_end_io)
r = rq_end_io(tio->ti, clone, error, &tio->info);
}
if (r <= 0) if (r <= 0)
/* The target wants to complete the I/O */ /* The target wants to complete the I/O */
@ -1588,15 +1592,6 @@ static int map_request(struct dm_target *ti, struct request *clone,
 	int r, requeued = 0;
 	struct dm_rq_target_io *tio = clone->end_io_data;
 
-	/*
-	 * Hold the md reference here for the in-flight I/O.
-	 * We can't rely on the reference count by device opener,
-	 * because the device may be closed during the request completion
-	 * when all bios are completed.
-	 * See the comment in rq_completed() too.
-	 */
-	dm_get(md);
-
 	tio->ti = ti;
 	r = ti->type->map_rq(ti, clone, &tio->info);
 	switch (r) {
@ -1628,6 +1623,26 @@ static int map_request(struct dm_target *ti, struct request *clone,
 	return requeued;
 }
 
+static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
+{
+	struct request *clone;
+
+	blk_start_request(orig);
+	clone = orig->special;
+	atomic_inc(&md->pending[rq_data_dir(clone)]);
+
+	/*
+	 * Hold the md reference here for the in-flight I/O.
+	 * We can't rely on the reference count by device opener,
+	 * because the device may be closed during the request completion
+	 * when all bios are completed.
+	 * See the comment in rq_completed() too.
+	 */
+	dm_get(md);
+
+	return clone;
+}
+
 /*
  * q->request_fn for request-based dm.
  * Called with the queue lock held.
@ -1657,14 +1672,21 @@ static void dm_request_fn(struct request_queue *q)
 		pos = blk_rq_pos(rq);
 		ti = dm_table_find_target(map, pos);
-		BUG_ON(!dm_target_is_valid(ti));
+		if (!dm_target_is_valid(ti)) {
+			/*
+			 * Must perform setup, that dm_done() requires,
+			 * before calling dm_kill_unmapped_request
+			 */
+			DMERR_LIMIT("request attempted access beyond the end of device");
+			clone = dm_start_request(md, rq);
+			dm_kill_unmapped_request(clone, -EIO);
+			continue;
+		}
 
 		if (ti->type->busy && ti->type->busy(ti))
 			goto delay_and_out;
 
-		blk_start_request(rq);
-		clone = rq->special;
-		atomic_inc(&md->pending[rq_data_dir(clone)]);
+		clone = dm_start_request(md, rq);
 
 		spin_unlock(q->queue_lock);
 		if (map_request(ti, clone, md))
@ -1684,8 +1706,6 @@ delay_and_out:
 	blk_delay_queue(q, HZ / 10);
 out:
 	dm_table_put(map);
-
-	return;
 }
int dm_underlying_device_busy(struct request_queue *q) int dm_underlying_device_busy(struct request_queue *q)
@ -2409,7 +2429,7 @@ static void dm_queue_flush(struct mapped_device *md)
  */
 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
 {
-	struct dm_table *map = ERR_PTR(-EINVAL);
+	struct dm_table *live_map, *map = ERR_PTR(-EINVAL);
 	struct queue_limits limits;
 	int r;
@ -2419,6 +2439,19 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
 	if (!dm_suspended_md(md))
 		goto out;
 
+	/*
+	 * If the new table has no data devices, retain the existing limits.
+	 * This helps multipath with queue_if_no_path if all paths disappear,
+	 * then new I/O is queued based on these limits, and then some paths
+	 * reappear.
+	 */
+	if (dm_table_has_no_data_devices(table)) {
+		live_map = dm_get_live_table(md);
+		if (live_map)
+			limits = md->queue->limits;
+		dm_table_put(live_map);
+	}
+
 	r = dm_calculate_queue_limits(table, &limits);
 	if (r) {
 		map = ERR_PTR(r);

View File

@ -54,6 +54,7 @@ void dm_table_event_callback(struct dm_table *t,
 			      void (*fn)(void *), void *context);
 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
+bool dm_table_has_no_data_devices(struct dm_table *table);
 int dm_calculate_queue_limits(struct dm_table *table,
 			      struct queue_limits *limits);
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,

View File

@ -1512,14 +1512,16 @@ static int _enough(struct r10conf *conf, struct geom *geo, int ignore)
 	do {
 		int n = conf->copies;
 		int cnt = 0;
+		int this = first;
 		while (n--) {
-			if (conf->mirrors[first].rdev &&
-			    first != ignore)
+			if (conf->mirrors[this].rdev &&
+			    this != ignore)
 				cnt++;
-			first = (first+1) % geo->raid_disks;
+			this = (this+1) % geo->raid_disks;
 		}
 		if (cnt == 0)
 			return 0;
+		first = (first + geo->near_copies) % geo->raid_disks;
 	} while (first != 0);
 	return 1;
} }

View File

@ -1591,6 +1591,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 #ifdef CONFIG_MULTICORE_RAID456
 		init_waitqueue_head(&nsh->ops.wait_for_ops);
 #endif
+		spin_lock_init(&nsh->stripe_lock);
 
 		list_add(&nsh->lru, &newstripes);
 	}

View File

@ -1123,6 +1123,33 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,
} }
#endif #endif
static inline unsigned long get_vm_size(struct vm_area_struct *vma)
{
return vma->vm_end - vma->vm_start;
}
static inline resource_size_t get_vm_offset(struct vm_area_struct *vma)
{
return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT;
}
/*
* Set a new vm offset.
*
* Verify that the incoming offset really works as a page offset,
* and that the offset and size fit in a resource_size_t.
*/
static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off)
{
pgoff_t pgoff = off >> PAGE_SHIFT;
if (off != (resource_size_t) pgoff << PAGE_SHIFT)
return -EINVAL;
if (off + get_vm_size(vma) - 1 < off)
return -EINVAL;
vma->vm_pgoff = pgoff;
return 0;
}
/* /*
* set up a mapping for shared memory segments * set up a mapping for shared memory segments
*/ */
@ -1132,20 +1159,29 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
struct mtd_file_info *mfi = file->private_data; struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd; struct mtd_info *mtd = mfi->mtd;
struct map_info *map = mtd->priv; struct map_info *map = mtd->priv;
unsigned long start; resource_size_t start, off;
unsigned long off; unsigned long len, vma_len;
u32 len;
if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) { if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
off = vma->vm_pgoff << PAGE_SHIFT; off = get_vm_offset(vma);
start = map->phys; start = map->phys;
len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size); len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
start &= PAGE_MASK; start &= PAGE_MASK;
if ((vma->vm_end - vma->vm_start + off) > len) vma_len = get_vm_size(vma);
/* Overflow in off+len? */
if (vma_len + off < off)
return -EINVAL;
/* Does it fit in the mapping? */
if (vma_len + off > len)
return -EINVAL; return -EINVAL;
off += start; off += start;
vma->vm_pgoff = off >> PAGE_SHIFT; /* Did that overflow? */
if (off < start)
return -EINVAL;
if (set_vm_offset(vma, off) < 0)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_RESERVED; vma->vm_flags |= VM_IO | VM_RESERVED;
#ifdef pgprot_noncached #ifdef pgprot_noncached

View File

@ -8564,7 +8564,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return 0;
 
 error:
-	iounmap(bp->regview);
+	pci_iounmap(pdev, bp->regview);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);

View File

@ -722,10 +722,8 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
 				   octeon_mgmt_adjust_link, 0,
 				   PHY_INTERFACE_MODE_MII);
 
-	if (IS_ERR(p->phydev)) {
-		p->phydev = NULL;
+	if (!p->phydev)
 		return -1;
-	}
 
 	phy_start_aneg(p->phydev);

View File

@ -1101,9 +1101,9 @@ static int pasemi_mac_phy_init(struct net_device *dev)
 	phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0,
 				PHY_INTERFACE_MODE_SGMII);
-	if (IS_ERR(phydev)) {
+	if (!phydev) {
 		printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
-		return PTR_ERR(phydev);
+		return -ENODEV;
 	}
 
 	mac->phydev = phydev;

View File

@ -15,7 +15,7 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
 	do {
 		/* give atleast 1ms for firmware to respond */
-		msleep(1);
+		mdelay(1);
 
 		if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
 			return QLCNIC_CDRP_RSP_TIMEOUT;
@ -601,7 +601,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
 		qlcnic_fw_cmd_destroy_tx_ctx(adapter);
 
 		/* Allow dma queues to drain after context reset */
-		msleep(20);
+		mdelay(20);
 	}
 }

View File

@ -229,3 +229,5 @@ static void __exit bcm87xx_exit(void)
 			      ARRAY_SIZE(bcm87xx_driver));
 }
 module_exit(bcm87xx_exit);
+
+MODULE_LICENSE("GPL");

View File

@ -21,6 +21,12 @@
#include <linux/phy.h> #include <linux/phy.h>
#include <linux/micrel_phy.h> #include <linux/micrel_phy.h>
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
#define KSZPHY_OMSO_B_CAST_OFF (1 << 9)
#define KSZPHY_OMSO_RMII_OVERRIDE (1 << 1)
#define KSZPHY_OMSO_MII_OVERRIDE (1 << 0)
/* general Interrupt control/status reg in vendor specific block. */ /* general Interrupt control/status reg in vendor specific block. */
#define MII_KSZPHY_INTCS 0x1B #define MII_KSZPHY_INTCS 0x1B
#define KSZPHY_INTCS_JABBER (1 << 15) #define KSZPHY_INTCS_JABBER (1 << 15)
@ -101,6 +107,13 @@ static int kszphy_config_init(struct phy_device *phydev)
return 0; return 0;
} }
static int ksz8021_config_init(struct phy_device *phydev)
{
const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE;
phy_write(phydev, MII_KSZPHY_OMSO, val);
return 0;
}
static int ks8051_config_init(struct phy_device *phydev) static int ks8051_config_init(struct phy_device *phydev)
{ {
int regval; int regval;
@ -128,9 +141,22 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = ks8737_config_intr, .config_intr = ks8737_config_intr,
.driver = { .owner = THIS_MODULE,}, .driver = { .owner = THIS_MODULE,},
}, { }, {
.phy_id = PHY_ID_KS8041, .phy_id = PHY_ID_KSZ8021,
.phy_id_mask = 0x00ffffff,
.name = "Micrel KSZ8021",
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = ksz8021_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8041,
.phy_id_mask = 0x00fffff0, .phy_id_mask = 0x00fffff0,
.name = "Micrel KS8041", .name = "Micrel KSZ8041",
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause), | SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@ -141,9 +167,9 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = kszphy_config_intr, .config_intr = kszphy_config_intr,
.driver = { .owner = THIS_MODULE,}, .driver = { .owner = THIS_MODULE,},
}, { }, {
.phy_id = PHY_ID_KS8051, .phy_id = PHY_ID_KSZ8051,
.phy_id_mask = 0x00fffff0, .phy_id_mask = 0x00fffff0,
.name = "Micrel KS8051", .name = "Micrel KSZ8051",
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause), | SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@ -154,8 +180,8 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = kszphy_config_intr, .config_intr = kszphy_config_intr,
.driver = { .owner = THIS_MODULE,}, .driver = { .owner = THIS_MODULE,},
}, { }, {
.phy_id = PHY_ID_KS8001, .phy_id = PHY_ID_KSZ8001,
.name = "Micrel KS8001 or KS8721", .name = "Micrel KSZ8001 or KS8721",
.phy_id_mask = 0x00ffffff, .phy_id_mask = 0x00ffffff,
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@ -201,10 +227,11 @@ MODULE_LICENSE("GPL");
 static struct mdio_device_id __maybe_unused micrel_tbl[] = {
 	{ PHY_ID_KSZ9021, 0x000ffffe },
-	{ PHY_ID_KS8001, 0x00ffffff },
+	{ PHY_ID_KSZ8001, 0x00ffffff },
 	{ PHY_ID_KS8737, 0x00fffff0 },
-	{ PHY_ID_KS8041, 0x00fffff0 },
-	{ PHY_ID_KS8051, 0x00fffff0 },
+	{ PHY_ID_KSZ8021, 0x00ffffff },
+	{ PHY_ID_KSZ8041, 0x00fffff0 },
+	{ PHY_ID_KSZ8051, 0x00fffff0 },
 	{ }
 };

View File

@ -56,6 +56,32 @@ static int smsc_phy_config_init(struct phy_device *phydev)
return smsc_phy_ack_interrupt (phydev); return smsc_phy_ack_interrupt (phydev);
} }
static int lan87xx_config_init(struct phy_device *phydev)
{
/*
* Make sure the EDPWRDOWN bit is NOT set. Setting this bit on
* LAN8710/LAN8720 PHY causes the PHY to misbehave, likely due
* to a bug on the chip.
*
* When the system is powered on with the network cable being
* disconnected all the way until after ifconfig ethX up is
* issued for the LAN port with this PHY, connecting the cable
* afterwards does not cause LINK change detection, while the
* expected behavior is the Link UP being detected.
*/
int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
if (rc < 0)
return rc;
rc &= ~MII_LAN83C185_EDPWRDOWN;
rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, rc);
if (rc < 0)
return rc;
return smsc_phy_ack_interrupt(phydev);
}
static int lan911x_config_init(struct phy_device *phydev) static int lan911x_config_init(struct phy_device *phydev)
{ {
return smsc_phy_ack_interrupt(phydev); return smsc_phy_ack_interrupt(phydev);
@ -162,7 +188,7 @@ static struct phy_driver smsc_phy_driver[] = {
/* basic functions */ /* basic functions */
.config_aneg = genphy_config_aneg, .config_aneg = genphy_config_aneg,
.read_status = genphy_read_status, .read_status = genphy_read_status,
.config_init = smsc_phy_config_init, .config_init = lan87xx_config_init,
/* IRQ related */ /* IRQ related */
.ack_interrupt = smsc_phy_ack_interrupt, .ack_interrupt = smsc_phy_ack_interrupt,

View File

@ -570,7 +570,7 @@ static int pppoe_release(struct socket *sock)
 	po = pppox_sk(sk);
 
-	if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+	if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
 		dev_put(po->pppoe_dev);
 		po->pppoe_dev = NULL;
 	}

View File

@ -848,7 +848,7 @@ static struct netpoll_info *team_netpoll_info(struct team *team)
} }
#endif #endif
static void __team_port_change_check(struct team_port *port, bool linkup); static void __team_port_change_port_added(struct team_port *port, bool linkup);
static int team_port_add(struct team *team, struct net_device *port_dev) static int team_port_add(struct team *team, struct net_device *port_dev)
{ {
@ -948,7 +948,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
team_port_enable(team, port); team_port_enable(team, port);
list_add_tail_rcu(&port->list, &team->port_list); list_add_tail_rcu(&port->list, &team->port_list);
__team_compute_features(team); __team_compute_features(team);
__team_port_change_check(port, !!netif_carrier_ok(port_dev)); __team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
__team_options_change_check(team); __team_options_change_check(team);
netdev_info(dev, "Port device %s added\n", portname); netdev_info(dev, "Port device %s added\n", portname);
@ -983,6 +983,8 @@ err_set_mtu:
return err; return err;
} }
static void __team_port_change_port_removed(struct team_port *port);
static int team_port_del(struct team *team, struct net_device *port_dev) static int team_port_del(struct team *team, struct net_device *port_dev)
{ {
struct net_device *dev = team->dev; struct net_device *dev = team->dev;
@ -999,8 +1001,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
__team_option_inst_mark_removed_port(team, port); __team_option_inst_mark_removed_port(team, port);
__team_options_change_check(team); __team_options_change_check(team);
__team_option_inst_del_port(team, port); __team_option_inst_del_port(team, port);
port->removed = true; __team_port_change_port_removed(port);
__team_port_change_check(port, false);
team_port_disable(team, port); team_port_disable(team, port);
list_del_rcu(&port->list); list_del_rcu(&port->list);
netdev_rx_handler_unregister(port_dev); netdev_rx_handler_unregister(port_dev);
@ -1652,8 +1653,8 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
&team_nl_family, 0, TEAM_CMD_NOOP); &team_nl_family, 0, TEAM_CMD_NOOP);
if (IS_ERR(hdr)) { if (!hdr) {
err = PTR_ERR(hdr); err = -EMSGSIZE;
goto err_msg_put; goto err_msg_put;
} }
@ -1847,8 +1848,8 @@ start_again:
hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI, hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI,
TEAM_CMD_OPTIONS_GET); TEAM_CMD_OPTIONS_GET);
if (IS_ERR(hdr)) if (!hdr)
return PTR_ERR(hdr); return -EMSGSIZE;
if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex)) if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
goto nla_put_failure; goto nla_put_failure;
@ -2067,8 +2068,8 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
TEAM_CMD_PORT_LIST_GET); TEAM_CMD_PORT_LIST_GET);
if (IS_ERR(hdr)) if (!hdr)
return PTR_ERR(hdr); return -EMSGSIZE;
if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex)) if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
goto nla_put_failure; goto nla_put_failure;
@ -2251,13 +2252,11 @@ static void __team_options_change_check(struct team *team)
} }
/* rtnl lock is held */ /* rtnl lock is held */
static void __team_port_change_check(struct team_port *port, bool linkup)
static void __team_port_change_send(struct team_port *port, bool linkup)
{ {
int err; int err;
if (!port->removed && port->state.linkup == linkup)
return;
port->changed = true; port->changed = true;
port->state.linkup = linkup; port->state.linkup = linkup;
team_refresh_port_linkup(port); team_refresh_port_linkup(port);
@ -2282,6 +2281,23 @@ send_event:
} }
static void __team_port_change_check(struct team_port *port, bool linkup)
{
if (port->state.linkup != linkup)
__team_port_change_send(port, linkup);
}
static void __team_port_change_port_added(struct team_port *port, bool linkup)
{
__team_port_change_send(port, linkup);
}
static void __team_port_change_port_removed(struct team_port *port)
{
port->removed = true;
__team_port_change_send(port, false);
}
static void team_port_change_check(struct team_port *port, bool linkup) static void team_port_change_check(struct team_port *port, bool linkup)
{ {
struct team *team = port->team; struct team *team = port->team;

View File

@ -1253,6 +1253,7 @@ static struct usb_driver smsc75xx_driver = {
 	.probe		= usbnet_probe,
 	.suspend	= usbnet_suspend,
 	.resume		= usbnet_resume,
+	.reset_resume	= usbnet_resume,
 	.disconnect	= usbnet_disconnect,
 	.disable_hub_initiated_lpm = 1,
 };

View File

@ -1442,6 +1442,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 	return err;
 
 err_free_irq:
+	trans_pcie->irq_requested = false;
 	free_irq(trans_pcie->irq, trans);
 error:
 	iwl_free_isr_ict(trans);

View File

@ -208,6 +208,8 @@ static int sh_pfc_gpio_request_enable(struct pinctrl_dev *pctldev,
 		break;
 	case PINMUX_TYPE_GPIO:
+	case PINMUX_TYPE_INPUT:
+	case PINMUX_TYPE_OUTPUT:
 		break;
 	default:
 		pr_err("Unsupported mux type (%d), bailing...\n", pinmux_type);

View File

@ -624,7 +624,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
 	/* print devices for all busses */
 	list_for_each_entry(bus, &usb_bus_list, bus_list) {
 		/* recurse through all children of the root hub */
-		if (!bus->root_hub)
+		if (!bus_to_hcd(bus)->rh_registered)
 			continue;
 		usb_lock_device(bus->root_hub);
 		ret = usb_device_dump(&buf, &nbytes, &skip_bytes, ppos,

View File

@ -1011,10 +1011,7 @@ static int register_root_hub(struct usb_hcd *hcd)
 	if (retval) {
 		dev_err (parent_dev, "can't register root hub for %s, %d\n",
 				dev_name(&usb_dev->dev), retval);
-	}
-	mutex_unlock(&usb_bus_list_lock);
-
-	if (retval == 0) {
+	} else {
 		spin_lock_irq (&hcd_root_hub_lock);
 		hcd->rh_registered = 1;
 		spin_unlock_irq (&hcd_root_hub_lock);
@ -1023,6 +1020,7 @@ static int register_root_hub(struct usb_hcd *hcd)
 		if (HCD_DEAD(hcd))
 			usb_hc_died (hcd);	/* This time clean up */
 	}
+	mutex_unlock(&usb_bus_list_lock);
 
 	return retval;
 }

View File

@ -467,7 +467,8 @@ static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
 	/* From the GPIO notifying the over-current situation, find
 	 * out the corresponding port */
 	at91_for_each_port(port) {
-		if (gpio_to_irq(pdata->overcurrent_pin[port]) == irq) {
+		if (gpio_is_valid(pdata->overcurrent_pin[port]) &&
+		    gpio_to_irq(pdata->overcurrent_pin[port]) == irq) {
 			gpio = pdata->overcurrent_pin[port];
 			break;
 		}

View File

@ -76,9 +76,24 @@ static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
schedule_work(&virqfd->inject); schedule_work(&virqfd->inject);
} }
if (flags & POLLHUP) if (flags & POLLHUP) {
/* The eventfd is closing, detach from VFIO */ unsigned long flags;
virqfd_deactivate(virqfd); spin_lock_irqsave(&virqfd->vdev->irqlock, flags);
/*
* The eventfd is closing, if the virqfd has not yet been
* queued for release, as determined by testing whether the
* vdev pointer to it is still valid, queue it now. As
* with kvm irqfds, we know we won't race against the virqfd
* going away because we hold wqh->lock to get here.
*/
if (*(virqfd->pvirqfd) == virqfd) {
*(virqfd->pvirqfd) = NULL;
virqfd_deactivate(virqfd);
}
spin_unlock_irqrestore(&virqfd->vdev->irqlock, flags);
}
return 0; return 0;
} }
@ -93,7 +108,6 @@ static void virqfd_ptable_queue_proc(struct file *file,
static void virqfd_shutdown(struct work_struct *work) static void virqfd_shutdown(struct work_struct *work)
{ {
struct virqfd *virqfd = container_of(work, struct virqfd, shutdown); struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
struct virqfd **pvirqfd = virqfd->pvirqfd;
u64 cnt; u64 cnt;
eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt); eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
@ -101,7 +115,6 @@ static void virqfd_shutdown(struct work_struct *work)
eventfd_ctx_put(virqfd->eventfd); eventfd_ctx_put(virqfd->eventfd);
kfree(virqfd); kfree(virqfd);
*pvirqfd = NULL;
} }
static void virqfd_inject(struct work_struct *work) static void virqfd_inject(struct work_struct *work)
@ -122,15 +135,11 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
int ret = 0; int ret = 0;
unsigned int events; unsigned int events;
if (*pvirqfd)
return -EBUSY;
virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL); virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
if (!virqfd) if (!virqfd)
return -ENOMEM; return -ENOMEM;
virqfd->pvirqfd = pvirqfd; virqfd->pvirqfd = pvirqfd;
*pvirqfd = virqfd;
virqfd->vdev = vdev; virqfd->vdev = vdev;
virqfd->handler = handler; virqfd->handler = handler;
virqfd->thread = thread; virqfd->thread = thread;
@ -153,6 +162,23 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
virqfd->eventfd = ctx; virqfd->eventfd = ctx;
/*
* virqfds can be released by closing the eventfd or directly
* through ioctl. These are both done through a workqueue, so
* we update the pointer to the virqfd under lock to avoid
* pushing multiple jobs to release the same virqfd.
*/
spin_lock_irq(&vdev->irqlock);
if (*pvirqfd) {
spin_unlock_irq(&vdev->irqlock);
ret = -EBUSY;
goto fail;
}
*pvirqfd = virqfd;
spin_unlock_irq(&vdev->irqlock);
/* /*
* Install our own custom wake-up handling so we are notified via * Install our own custom wake-up handling so we are notified via
* a callback whenever someone signals the underlying eventfd. * a callback whenever someone signals the underlying eventfd.
@ -187,19 +213,29 @@ fail:
fput(file); fput(file);
kfree(virqfd); kfree(virqfd);
*pvirqfd = NULL;
return ret; return ret;
} }
static void virqfd_disable(struct virqfd *virqfd) static void virqfd_disable(struct vfio_pci_device *vdev,
struct virqfd **pvirqfd)
{ {
if (!virqfd) unsigned long flags;
return;
virqfd_deactivate(virqfd); spin_lock_irqsave(&vdev->irqlock, flags);
/* Block until we know all outstanding shutdown jobs have completed. */ if (*pvirqfd) {
virqfd_deactivate(*pvirqfd);
*pvirqfd = NULL;
}
spin_unlock_irqrestore(&vdev->irqlock, flags);
/*
* Block until we know all outstanding shutdown jobs have completed.
* Even if we don't queue the job, flush the wq to be sure it's
* been released.
*/
flush_workqueue(vfio_irqfd_cleanup_wq); flush_workqueue(vfio_irqfd_cleanup_wq);
} }
@ -392,8 +428,8 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
static void vfio_intx_disable(struct vfio_pci_device *vdev) static void vfio_intx_disable(struct vfio_pci_device *vdev)
{ {
vfio_intx_set_signal(vdev, -1); vfio_intx_set_signal(vdev, -1);
virqfd_disable(vdev->ctx[0].unmask); virqfd_disable(vdev, &vdev->ctx[0].unmask);
virqfd_disable(vdev->ctx[0].mask); virqfd_disable(vdev, &vdev->ctx[0].mask);
vdev->irq_type = VFIO_PCI_NUM_IRQS; vdev->irq_type = VFIO_PCI_NUM_IRQS;
vdev->num_ctx = 0; vdev->num_ctx = 0;
kfree(vdev->ctx); kfree(vdev->ctx);
@ -539,8 +575,8 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
for (i = 0; i < vdev->num_ctx; i++) { for (i = 0; i < vdev->num_ctx; i++) {
virqfd_disable(vdev->ctx[i].unmask); virqfd_disable(vdev, &vdev->ctx[i].unmask);
virqfd_disable(vdev->ctx[i].mask); virqfd_disable(vdev, &vdev->ctx[i].mask);
} }
if (msix) { if (msix) {
@ -577,7 +613,7 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
vfio_send_intx_eventfd, NULL, vfio_send_intx_eventfd, NULL,
&vdev->ctx[0].unmask, fd); &vdev->ctx[0].unmask, fd);
virqfd_disable(vdev->ctx[0].unmask); virqfd_disable(vdev, &vdev->ctx[0].unmask);
} }
return 0; return 0;

View File

@ -1134,6 +1134,8 @@ positive:
 	return 1;
 
 rename_retry:
+	if (locked)
+		goto again;
 	locked = 1;
 	write_seqlock(&rename_lock);
 	goto again;
@ -1141,7 +1143,7 @@ rename_retry:
 EXPORT_SYMBOL(have_submounts);
 
 /*
- * Search the dentry child list for the specified parent,
+ * Search the dentry child list of the specified parent,
  * and move any unused dentries to the end of the unused
  * list for prune_dcache(). We descend to the next level
  * whenever the d_subdirs list is non-empty and continue
@ -1236,6 +1238,8 @@ out:
 rename_retry:
 	if (found)
 		return found;
+	if (locked)
+		goto again;
 	locked = 1;
 	write_seqlock(&rename_lock);
 	goto again;
@ -3035,6 +3039,8 @@ resume:
 	return;
 
 rename_retry:
+	if (locked)
+		goto again;
 	locked = 1;
 	write_seqlock(&rename_lock);
 	goto again;

View File

@ -289,7 +289,6 @@ static void nlmsvc_free_block(struct kref *kref)
dprintk("lockd: freeing block %p...\n", block); dprintk("lockd: freeing block %p...\n", block);
/* Remove block from file's list of blocks */ /* Remove block from file's list of blocks */
mutex_lock(&file->f_mutex);
list_del_init(&block->b_flist); list_del_init(&block->b_flist);
mutex_unlock(&file->f_mutex); mutex_unlock(&file->f_mutex);
@ -303,7 +302,7 @@ static void nlmsvc_free_block(struct kref *kref)
 static void nlmsvc_release_block(struct nlm_block *block)
 {
 	if (block != NULL)
-		kref_put(&block->b_count, nlmsvc_free_block);
+		kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex);
 }
/* /*

View File

@ -1886,8 +1886,14 @@ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
 		return err;
 
 	err = -EINVAL;
-	if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(real_mount(path->mnt)))
-		goto unlock;
+	if (unlikely(!check_mnt(real_mount(path->mnt)))) {
+		/* that's acceptable only for automounts done in private ns */
+		if (!(mnt_flags & MNT_SHRINKABLE))
+			goto unlock;
+		/* ... and for those we'd better have mountpoint still alive */
+		if (!real_mount(path->mnt)->mnt_ns)
+			goto unlock;
+	}
 
 	/* Refuse the same filesystem on the same mount point */
 	err = -EBUSY;

View File

@ -691,9 +691,11 @@ __SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \
 #define __NR_process_vm_writev 271
 __SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
           compat_sys_process_vm_writev)
+#define __NR_kcmp 272
+__SYSCALL(__NR_kcmp, sys_kcmp)
 
 #undef __NR_syscalls
-#define __NR_syscalls 272
+#define __NR_syscalls 273
/* /*
* All syscalls below here should go away really, * All syscalls below here should go away really,

View File

@ -256,72 +256,78 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
{ {
} }
int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) static inline int iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group)
{ {
return -ENODEV; return -ENODEV;
} }
void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) static inline void iommu_detach_group(struct iommu_domain *domain,
struct iommu_group *group)
{ {
} }
struct iommu_group *iommu_group_alloc(void) static inline struct iommu_group *iommu_group_alloc(void)
{ {
return ERR_PTR(-ENODEV); return ERR_PTR(-ENODEV);
} }
void *iommu_group_get_iommudata(struct iommu_group *group) static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{ {
return NULL; return NULL;
} }
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, static inline void iommu_group_set_iommudata(struct iommu_group *group,
void (*release)(void *iommu_data)) void *iommu_data,
void (*release)(void *iommu_data))
{ {
} }
int iommu_group_set_name(struct iommu_group *group, const char *name) static inline int iommu_group_set_name(struct iommu_group *group,
const char *name)
{ {
return -ENODEV; return -ENODEV;
} }
int iommu_group_add_device(struct iommu_group *group, struct device *dev) static inline int iommu_group_add_device(struct iommu_group *group,
struct device *dev)
{ {
return -ENODEV; return -ENODEV;
} }
void iommu_group_remove_device(struct device *dev) static inline void iommu_group_remove_device(struct device *dev)
{ {
} }
int iommu_group_for_each_dev(struct iommu_group *group, void *data, static inline int iommu_group_for_each_dev(struct iommu_group *group,
int (*fn)(struct device *, void *)) void *data,
int (*fn)(struct device *, void *))
{ {
return -ENODEV; return -ENODEV;
} }
struct iommu_group *iommu_group_get(struct device *dev) static inline struct iommu_group *iommu_group_get(struct device *dev)
{ {
return NULL; return NULL;
} }
void iommu_group_put(struct iommu_group *group) static inline void iommu_group_put(struct iommu_group *group)
{ {
} }
int iommu_group_register_notifier(struct iommu_group *group, static inline int iommu_group_register_notifier(struct iommu_group *group,
struct notifier_block *nb) struct notifier_block *nb)
{ {
return -ENODEV; return -ENODEV;
} }
int iommu_group_unregister_notifier(struct iommu_group *group, static inline int iommu_group_unregister_notifier(struct iommu_group *group,
struct notifier_block *nb) struct notifier_block *nb)
{ {
return 0; return 0;
} }
int iommu_group_id(struct iommu_group *group) static inline int iommu_group_id(struct iommu_group *group)
{ {
return -ENODEV; return -ENODEV;
} }

View File

@ -1,3 +1,15 @@
+/*
+ * include/linux/micrel_phy.h
+ *
+ * Micrel PHY IDs
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
 #ifndef _MICREL_PHY_H
 #define _MICREL_PHY_H
@ -5,10 +17,11 @@
 #define PHY_ID_KSZ9021		0x00221610
 #define PHY_ID_KS8737		0x00221720
-#define PHY_ID_KS8041		0x00221510
-#define PHY_ID_KS8051		0x00221550
+#define PHY_ID_KSZ8021		0x00221555
+#define PHY_ID_KSZ8041		0x00221510
+#define PHY_ID_KSZ8051		0x00221550
 /* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */
-#define PHY_ID_KS8001		0x0022161A
+#define PHY_ID_KSZ8001		0x0022161A
 
 /* struct phy_device dev_flags definitions */
 #define MICREL_PHY_50MHZ_CLK	0x00000001

View File

@ -35,8 +35,10 @@ struct nvme_bar {
 	__u64			acq;	/* Admin CQ Base Address */
 };
 
+#define NVME_CAP_MQES(cap)	((cap) & 0xffff)
 #define NVME_CAP_TIMEOUT(cap)	(((cap) >> 24) & 0xff)
 #define NVME_CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)
+#define NVME_CAP_MPSMIN(cap)	(((cap) >> 48) & 0xf)
 
 enum {
 	NVME_CC_ENABLE		= 1 << 0,

View File

@ -118,6 +118,7 @@ void reset_security_ops(void);
 extern unsigned long mmap_min_addr;
 extern unsigned long dac_mmap_min_addr;
 #else
+#define mmap_min_addr		0UL
 #define dac_mmap_min_addr	0UL
 #endif

View File

@ -62,7 +62,7 @@ void fprop_global_destroy(struct fprop_global *p)
  */
 bool fprop_new_period(struct fprop_global *p, int periods)
 {
-	u64 events;
+	s64 events;
 	unsigned long flags;
 
 	local_irq_save(flags);

View File

@ -1811,7 +1811,6 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 			src_page = pte_page(pteval);
 			copy_user_highpage(page, src_page, address, vma);
 			VM_BUG_ON(page_mapcount(src_page) != 1);
-			VM_BUG_ON(page_count(src_page) != 2);
 			release_pte_page(src_page);
 			/*
 			 * ptl mostly unnecessary, but preempt has to

View File

@ -642,7 +642,8 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
struct batadv_neigh_node *router = NULL; struct batadv_neigh_node *router = NULL;
struct batadv_orig_node *orig_node_tmp; struct batadv_orig_node *orig_node_tmp;
struct hlist_node *node; struct hlist_node *node;
uint8_t bcast_own_sum_orig, bcast_own_sum_neigh; int if_num;
uint8_t sum_orig, sum_neigh;
uint8_t *neigh_addr; uint8_t *neigh_addr;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv, batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@ -727,17 +728,17 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
if (router && (neigh_node->tq_avg == router->tq_avg)) { if (router && (neigh_node->tq_avg == router->tq_avg)) {
orig_node_tmp = router->orig_node; orig_node_tmp = router->orig_node;
spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
bcast_own_sum_orig = if_num = router->if_incoming->if_num;
orig_node_tmp->bcast_own_sum[if_incoming->if_num]; sum_orig = orig_node_tmp->bcast_own_sum[if_num];
spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock); spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
orig_node_tmp = neigh_node->orig_node; orig_node_tmp = neigh_node->orig_node;
spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
bcast_own_sum_neigh = if_num = neigh_node->if_incoming->if_num;
orig_node_tmp->bcast_own_sum[if_incoming->if_num]; sum_neigh = orig_node_tmp->bcast_own_sum[if_num];
spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock); spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
if (bcast_own_sum_orig >= bcast_own_sum_neigh) if (sum_orig >= sum_neigh)
goto update_tt; goto update_tt;
} }

View File

@ -100,18 +100,21 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
{ {
struct batadv_priv *bat_priv = netdev_priv(dev); struct batadv_priv *bat_priv = netdev_priv(dev);
struct sockaddr *addr = p; struct sockaddr *addr = p;
uint8_t old_addr[ETH_ALEN];
if (!is_valid_ether_addr(addr->sa_data)) if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL; return -EADDRNOTAVAIL;
memcpy(old_addr, dev->dev_addr, ETH_ALEN);
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
/* only modify transtable if it has been initialized before */ /* only modify transtable if it has been initialized before */
if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) { if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) {
batadv_tt_local_remove(bat_priv, dev->dev_addr, batadv_tt_local_remove(bat_priv, old_addr,
"mac address changed", false); "mac address changed", false);
batadv_tt_local_add(dev, addr->sa_data, BATADV_NULL_IFINDEX); batadv_tt_local_add(dev, addr->sa_data, BATADV_NULL_IFINDEX);
} }
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
dev->addr_assign_type &= ~NET_ADDR_RANDOM; dev->addr_assign_type &= ~NET_ADDR_RANDOM;
return 0; return 0;
} }

View File

@ -734,6 +734,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
cancel_work_sync(&hdev->le_scan); cancel_work_sync(&hdev->le_scan);
cancel_delayed_work(&hdev->power_off);
hci_req_cancel(hdev, ENODEV); hci_req_cancel(hdev, ENODEV);
hci_req_lock(hdev); hci_req_lock(hdev);

View File

@ -1008,7 +1008,7 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c
if (!conn) if (!conn)
return; return;
if (chan->mode == L2CAP_MODE_ERTM) { if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
__clear_retrans_timer(chan); __clear_retrans_timer(chan);
__clear_monitor_timer(chan); __clear_monitor_timer(chan);
__clear_ack_timer(chan); __clear_ack_timer(chan);

View File

@ -2875,6 +2875,22 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
if (scan) if (scan)
hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
u8 ssp = 1;
hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
}
if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
struct hci_cp_write_le_host_supported cp;
cp.le = 1;
cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
sizeof(cp), &cp);
}
update_class(hdev); update_class(hdev);
update_name(hdev, hdev->dev_name); update_name(hdev, hdev->dev_name);
update_eir(hdev); update_eir(hdev);

View File

@ -1073,16 +1073,13 @@ static int write_partial_msg_pages(struct ceph_connection *con)
 			BUG_ON(kaddr == NULL);
 			base = kaddr + con->out_msg_pos.page_pos + bio_offset;
 			crc = crc32c(crc, base, len);
+			kunmap(page);
 			msg->footer.data_crc = cpu_to_le32(crc);
 			con->out_msg_pos.did_page_crc = true;
 		}
 
 		ret = ceph_tcp_sendpage(con->sock, page,
 				      con->out_msg_pos.page_pos + bio_offset,
 				      len, 1);
-
-		if (do_datacrc)
-			kunmap(page);
-
 		if (ret <= 0)
 			goto out;

View File

@ -691,7 +691,8 @@ set_rcvbuf:
 	case SO_KEEPALIVE:
 #ifdef CONFIG_INET
-		if (sk->sk_protocol == IPPROTO_TCP)
+		if (sk->sk_protocol == IPPROTO_TCP &&
+		    sk->sk_type == SOCK_STREAM)
 			tcp_set_keepalive(sk, valbool);
 #endif
 		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);

View File

@ -510,7 +510,10 @@ relookup:
 				secure_ipv6_id(daddr->addr.a6));
 		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
 		p->rate_tokens = 0;
-		p->rate_last = 0;
+		/* 60*HZ is arbitrary, but chosen enough high so that the first
+		 * calculation of tokens is at its maximum.
+		 */
+		p->rate_last = jiffies - 60*HZ;
 		INIT_LIST_HEAD(&p->gc_list);
 
 		/* Link the node. */

View File

@ -131,18 +131,20 @@ found:
  *	0 - deliver
  *	1 - block
  */
-static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
+static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
 {
-	int type;
+	struct icmphdr _hdr;
+	const struct icmphdr *hdr;
 
-	if (!pskb_may_pull(skb, sizeof(struct icmphdr)))
+	hdr = skb_header_pointer(skb, skb_transport_offset(skb),
+				 sizeof(_hdr), &_hdr);
+	if (!hdr)
 		return 1;
 
-	type = icmp_hdr(skb)->type;
-	if (type < 32) {
+	if (hdr->type < 32) {
 		__u32 data = raw_sk(sk)->filter.data;
 
-		return ((1 << type) & data) != 0;
+		return ((1U << hdr->type) & data) != 0;
 	}
 
 	/* Do not block unknown ICMP types */
View File

@ -86,28 +86,30 @@ static int mip6_mh_len(int type)
static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb) static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
{ {
struct ip6_mh *mh; struct ip6_mh _hdr;
const struct ip6_mh *mh;
if (!pskb_may_pull(skb, (skb_transport_offset(skb)) + 8) || mh = skb_header_pointer(skb, skb_transport_offset(skb),
!pskb_may_pull(skb, (skb_transport_offset(skb) + sizeof(_hdr), &_hdr);
((skb_transport_header(skb)[1] + 1) << 3)))) if (!mh)
return -1; return -1;
mh = (struct ip6_mh *)skb_transport_header(skb); if (((mh->ip6mh_hdrlen + 1) << 3) > skb->len)
return -1;
if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) { if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) {
LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n", LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n",
mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type)); mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type));
mip6_param_prob(skb, 0, ((&mh->ip6mh_hdrlen) - mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_hdrlen) +
skb_network_header(skb))); skb_network_header_len(skb));
return -1; return -1;
} }
if (mh->ip6mh_proto != IPPROTO_NONE) { if (mh->ip6mh_proto != IPPROTO_NONE) {
LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n", LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n",
mh->ip6mh_proto); mh->ip6mh_proto);
mip6_param_prob(skb, 0, ((&mh->ip6mh_proto) - mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_proto) +
skb_network_header(skb))); skb_network_header_len(skb));
return -1; return -1;
} }

View File

@ -107,21 +107,20 @@ found:
  *	0 - deliver
  *	1 - block
  */
-static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
+static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
 {
-	struct icmp6hdr *icmph;
-	struct raw6_sock *rp = raw6_sk(sk);
-
-	if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) {
-		__u32 *data = &rp->filter.data[0];
-		int bit_nr;
+	struct icmp6hdr _hdr;
+	const struct icmp6hdr *hdr;
 
-		icmph = (struct icmp6hdr *) skb->data;
-		bit_nr = icmph->icmp6_type;
+	hdr = skb_header_pointer(skb, skb_transport_offset(skb),
+				 sizeof(_hdr), &_hdr);
+	if (hdr) {
+		const __u32 *data = &raw6_sk(sk)->filter.data[0];
+		unsigned int type = hdr->icmp6_type;
 
-		return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0;
+		return (data[type >> 5] & (1U << (type & 31))) != 0;
 	}
-	return 0;
+	return 1;
 }
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)

View File

@ -80,8 +80,8 @@ static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
 	hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
 			  &l2tp_nl_family, 0, L2TP_CMD_NOOP);
-	if (IS_ERR(hdr)) {
-		ret = PTR_ERR(hdr);
+	if (!hdr) {
+		ret = -EMSGSIZE;
 		goto err_out;
 	}
@ -250,8 +250,8 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
 	hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
 			  L2TP_CMD_TUNNEL_GET);
-	if (IS_ERR(hdr))
-		return PTR_ERR(hdr);
+	if (!hdr)
+		return -EMSGSIZE;
 
 	if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) ||
 	    nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
@ -617,8 +617,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags
sk = tunnel->sock; sk = tunnel->sock;
 	hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET);
-	if (IS_ERR(hdr))
-		return PTR_ERR(hdr);
+	if (!hdr)
+		return -EMSGSIZE;
 
 	if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
 	    nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) ||

View File

@ -117,11 +117,11 @@ static int limit_mt_check(const struct xt_mtchk_param *par)
 	/* For SMP, we only want to use one set of state. */
 	r->master = priv;
+	/* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
+	   128. */
+	priv->prev = jiffies;
+	priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
 	if (r->cost == 0) {
-		/* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
-		   128. */
-		priv->prev = jiffies;
-		priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
 		r->credit_cap = priv->credit; /* Credits full. */
 		r->cost = user2credits(r->avg);
 	}

View File

@ -350,6 +350,9 @@ static void reg_regdb_search(struct work_struct *work)
struct reg_regdb_search_request *request; struct reg_regdb_search_request *request;
const struct ieee80211_regdomain *curdom, *regdom; const struct ieee80211_regdomain *curdom, *regdom;
int i, r; int i, r;
bool set_reg = false;
mutex_lock(&cfg80211_mutex);
mutex_lock(&reg_regdb_search_mutex); mutex_lock(&reg_regdb_search_mutex);
while (!list_empty(&reg_regdb_search_list)) { while (!list_empty(&reg_regdb_search_list)) {
@ -365,9 +368,7 @@ static void reg_regdb_search(struct work_struct *work)
r = reg_copy_regd(&regdom, curdom); r = reg_copy_regd(&regdom, curdom);
if (r) if (r)
break; break;
mutex_lock(&cfg80211_mutex); set_reg = true;
set_regdom(regdom);
mutex_unlock(&cfg80211_mutex);
break; break;
} }
} }
@ -375,6 +376,11 @@ static void reg_regdb_search(struct work_struct *work)
kfree(request); kfree(request);
} }
mutex_unlock(&reg_regdb_search_mutex); mutex_unlock(&reg_regdb_search_mutex);
if (set_reg)
set_regdom(regdom);
mutex_unlock(&cfg80211_mutex);
} }
static DECLARE_WORK(reg_regdb_work, reg_regdb_search); static DECLARE_WORK(reg_regdb_work, reg_regdb_search);

View File

@ -200,7 +200,7 @@ EOF
 syscall_list() {
     grep '^[0-9]' "$1" | sort -n | (
 	while read nr abi name entry ; do
-	    echo <<EOF
+	    cat <<EOF
 #if !defined(__NR_${name}) && !defined(__IGNORE_${name})
 #warning syscall ${name} not implemented
 #endif

View File

@ -702,7 +702,7 @@ static bool wm2000_readable_reg(struct device *dev, unsigned int reg)
 }
 
 static const struct regmap_config wm2000_regmap = {
-	.reg_bits = 8,
+	.reg_bits = 16,
 	.val_bits = 8,
 
 	.max_register = WM2000_REG_IF_CTL,

View File

@ -197,7 +197,13 @@ static void prepare_outbound_urb(struct snd_usb_endpoint *ep,
 		/* no data provider, so send silence */
 		unsigned int offs = 0;
 		for (i = 0; i < ctx->packets; ++i) {
-			int counts = ctx->packet_size[i];
+			int counts;
+
+			if (ctx->packet_size[i])
+				counts = ctx->packet_size[i];
+			else
+				counts = snd_usb_endpoint_next_packet_size(ep);
+
 			urb->iso_frame_desc[i].offset = offs * ep->stride;
 			urb->iso_frame_desc[i].length = counts * ep->stride;
 			offs += counts;