Merge branches 'tracing/profiling', 'tracing/options' and 'tracing/urgent' into tracing/core

Ingo Molnar 2008-11-23 09:10:32 +01:00
commit a0a70c735e
132 changed files with 958 additions and 660 deletions


@ -294,7 +294,9 @@ and is between 256 and 4096 characters. It is defined in the file
Possible values are:
isolate - enable device isolation (each device, as far
as possible, will get its own protection
domain)
domain) [default]
share - put every device behind one IOMMU into the
same protection domain
fullflush - enable flushing of IO/TLB entries when
they are unmapped. Otherwise they are
flushed before they will be reused, which
@ -1201,8 +1203,8 @@ and is between 256 and 4096 characters. It is defined in the file
it is equivalent to "nosmp", which also disables
the IO APIC.
max_addr=[KMG] [KNL,BOOT,ia64] All physical memory greater than or
equal to this physical address is ignored.
max_addr=nn[KMG] [KNL,BOOT,ia64] All physical memory greater than
or equal to this physical address is ignored.
max_luns= [SCSI] Maximum number of LUNs to probe.
Should be between 1 and 2^32-1.
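			For illustration only (hypothetical values, not part of
			this patch): a boot line using these options might read
			"max_addr=4G max_luns=8", where 4G is the nn[KMG]
			suffix form described above; similarly "amd_iommu=share"
			selects the shared protection domain documented earlier.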
@ -1302,6 +1304,9 @@ and is between 256 and 4096 characters. It is defined in the file
mga= [HW,DRM]
min_addr=nn[KMG] [KNL,BOOT,ia64] All physical memory below this
physical address is ignored.
mminit_loglevel=
[KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
parameter allows control of the logging verbosity for


@ -96,7 +96,7 @@ Letting the PHY Abstraction Layer do Everything
static void adjust_link(struct net_device *dev);
Next, you need to know the device name of the PHY connected to this device.
The name will look something like, "phy0:0", where the first number is the
The name will look something like, "0:00", where the first number is the
bus id, and the second is the PHY's address on that bus. Typically,
the bus is responsible for making its ID unique.
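As a rough sketch (assuming the phy_connect() interface described in this
document; the interface mode and error handling below are illustrative and not
part of this patch), that name is then passed to phy_connect():

 struct phy_device *phydev;

 /* Attach to the PHY at address 0 on bus 0 ("0:00") and have the PAL
  * call adjust_link() whenever the link state changes. */
 phydev = phy_connect(dev, "0:00", &adjust_link, 0, PHY_INTERFACE_MODE_MII);
 if (IS_ERR(phydev))
	 return PTR_ERR(phydev);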


@ -1809,7 +1809,7 @@ S: Maintained
FTRACE
P: Steven Rostedt
M: srostedt@redhat.com
M: rostedt@goodmis.org
S: Maintained
FUJITSU FR-V (FRV) PORT


@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 28
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Killer Bat of Doom
# *DOCUMENTATION*


@ -33,6 +33,7 @@
#define LCD_CONN_TYPE(_x) ((_x) & 0x0f)
#define LCD_CONN_WIDTH(_x) (((_x) >> 4) & 0x1f)
#define LCD_TYPE_MASK 0xf
#define LCD_TYPE_UNKNOWN 0
#define LCD_TYPE_MONO_STN 1
#define LCD_TYPE_MONO_DSTN 2


@ -90,12 +90,13 @@ void arch_reset(char mode)
/* Jump into ROM at address 0 */
cpu_reset(0);
break;
case 'h':
do_hw_reset();
break;
case 'g':
do_gpio_reset();
break;
case 'h':
default:
do_hw_reset();
break;
}
}


@ -67,6 +67,7 @@
static unsigned long spitz_pin_config[] __initdata = {
/* Chip Selects */
GPIO78_nCS_2, /* SCOOP #2 */
GPIO79_nCS_3, /* NAND */
GPIO80_nCS_4, /* SCOOP #1 */
/* LCD - 16bpp Active TFT */
@ -97,10 +98,10 @@ static unsigned long spitz_pin_config[] __initdata = {
GPIO51_nPIOW,
GPIO85_nPCE_1,
GPIO54_nPCE_2,
GPIO79_PSKTSEL,
GPIO55_nPREG,
GPIO56_nPWAIT,
GPIO57_nIOIS16,
GPIO104_PSKTSEL,
/* MMC */
GPIO32_MMC_CLK,
@ -686,7 +687,6 @@ static void __init akita_init(void)
spitz_pcmcia_config.num_devs = 1;
platform_scoop_config = &spitz_pcmcia_config;
pxa_set_i2c_info(NULL);
i2c_register_board_info(0, ARRAY_AND_SIZE(akita_i2c_board_info));
common_init();


@ -226,7 +226,7 @@ extern long ia64_cmpxchg_called_with_bad_pointer (void);
/************************************************/
#define ia64_ssm IA64_INTRINSIC_MACRO(ssm)
#define ia64_rsm IA64_INTRINSIC_MACRO(rsm)
#define ia64_getreg IA64_INTRINSIC_API(getreg)
#define ia64_getreg IA64_INTRINSIC_MACRO(getreg)
#define ia64_setreg IA64_INTRINSIC_API(setreg)
#define ia64_set_rr IA64_INTRINSIC_API(set_rr)
#define ia64_get_rr IA64_INTRINSIC_API(get_rr)


@ -78,6 +78,19 @@ extern unsigned long ia64_native_getreg_func(int regnum);
ia64_native_rsm(mask); \
} while (0)
/* returned ip value should be the one in the caller,
* not in __paravirt_getreg() */
#define paravirt_getreg(reg) \
({ \
unsigned long res; \
BUILD_BUG_ON(!__builtin_constant_p(reg)); \
if ((reg) == _IA64_REG_IP) \
res = ia64_native_getreg(_IA64_REG_IP); \
else \
res = pv_cpu_ops.getreg(reg); \
res; \
})
/******************************************************************************
* replacement of hand written assembly codes.
*/


@ -499,6 +499,7 @@ GLOBAL_ENTRY(prefetch_stack)
END(prefetch_stack)
GLOBAL_ENTRY(kernel_execve)
rum psr.ac
mov r15=__NR_execve // put syscall number in place
break __BREAK_SYSCALL
br.ret.sptk.many rp


@ -260,7 +260,7 @@ start_ap:
* Switch into virtual mode:
*/
movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
|IA64_PSR_DI)
|IA64_PSR_DI|IA64_PSR_AC)
;;
mov cr.ipsr=r16
movl r17=1f


@ -1139,7 +1139,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
return previous_current;
no_mod:
printk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
smp_processor_id(), type, msg);
return previous_current;
}


@ -130,7 +130,7 @@ ia64_native_getreg_func(int regnum)
unsigned long res = -1;
switch (regnum) {
CASE_GET_REG(GP);
CASE_GET_REG(IP);
/*CASE_GET_REG(IP);*/ /* returned ip value shouldn't be constant */
CASE_GET_REG(PSR);
CASE_GET_REG(TP);
CASE_GET_REG(SP);


@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <asm/page.h>
#include <asm/iommu.h>
dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);


@ -58,7 +58,7 @@ __HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR)
__HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR)
#ifdef CONFIG_IA32_SUPPORT
__HCALL1(xen_get_eflag, HYPERPRIVOP_GET_EFLAG)
__HCALL0(xen_get_eflag, HYPERPRIVOP_GET_EFLAG)
__HCALL1(xen_set_eflag, HYPERPRIVOP_SET_EFLAG) // refer SDM vol1 3.1.8
#endif /* CONFIG_IA32_SUPPORT */


@ -84,5 +84,7 @@ extern void set_434_reg(unsigned reg_offs, unsigned bit, unsigned len, unsigned
extern unsigned get_434_reg(unsigned reg_offs);
extern void set_latch_u5(unsigned char or_mask, unsigned char nand_mask);
extern unsigned char get_latch_u5(void);
extern void rb532_gpio_set_ilevel(int bit, unsigned gpio);
extern void rb532_gpio_set_istat(int bit, unsigned gpio);
#endif /* _RC32434_GPIO_H_ */


@ -40,12 +40,14 @@
#define BTCS 0x010040
#define BTCOMPARE 0x010044
#define GPIOBASE 0x050000
#define GPIOCFG 0x050004
#define GPIOD 0x050008
#define GPIOILEVEL 0x05000C
#define GPIOISTAT 0x050010
#define GPIONMIEN 0x050014
#define IMASK6 0x038038
/* Offsets relative to GPIOBASE */
#define GPIOFUNC 0x00
#define GPIOCFG 0x04
#define GPIOD 0x08
#define GPIOILEVEL 0x0C
#define GPIOISTAT 0x10
#define GPIONMIEN 0x14
#define IMASK6 0x38
#define LO_WPX (1 << 0)
#define LO_ALE (1 << 1)
#define LO_CLE (1 << 2)


@ -63,7 +63,7 @@ static inline int mips_clockevent_init(void)
/*
* Initialize the count register as a clocksource
*/
#ifdef CONFIG_CEVT_R4K
#ifdef CONFIG_CSRC_R4K
extern int init_mips_clocksource(void);
#else
static inline int init_mips_clocksource(void)


@ -27,7 +27,7 @@ int __init init_mips_clocksource(void)
if (!cpu_has_counter || !mips_hpt_frequency)
return -ENXIO;
/* Calclate a somewhat reasonable rating value */
/* Calculate a somewhat reasonable rating value */
clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
clocksource_set_clock(&clocksource_mips, mips_hpt_frequency);


@ -161,7 +161,7 @@ static inline int __init indy_sc_probe(void)
/* XXX Check with wje if the Indy caches can differenciate between
writeback + invalidate and just invalidate. */
struct bcache_ops indy_sc_ops = {
static struct bcache_ops indy_sc_ops = {
.bc_enable = indy_sc_enable,
.bc_disable = indy_sc_disable,
.bc_wback_inv = indy_sc_wback_invalidate,


@ -22,9 +22,9 @@
#include <linux/init.h>
#include <linux/smp.h>
#include <asm-mips/addrspace.h>
#include <asm-mips/mips-boards/launch.h>
#include <asm-mips/mipsmtregs.h>
#include <asm/addrspace.h>
#include <asm/mips-boards/launch.h>
#include <asm/mipsmtregs.h>
int amon_cpu_avail(int cpu)
{


@ -118,7 +118,7 @@ static struct platform_device cf_slot0 = {
/* Resources and device for NAND */
static int rb532_dev_ready(struct mtd_info *mtd)
{
return readl(IDT434_REG_BASE + GPIOD) & GPIO_RDY;
return gpio_get_value(GPIO_RDY);
}
static void rb532_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)


@ -39,10 +39,6 @@
struct rb532_gpio_chip {
struct gpio_chip chip;
void __iomem *regbase;
void (*set_int_level)(struct gpio_chip *chip, unsigned offset, int value);
int (*get_int_level)(struct gpio_chip *chip, unsigned offset);
void (*set_int_status)(struct gpio_chip *chip, unsigned offset, int value);
int (*get_int_status)(struct gpio_chip *chip, unsigned offset);
};
struct mpmc_device dev3;
@ -111,15 +107,47 @@ unsigned char get_latch_u5(void)
}
EXPORT_SYMBOL(get_latch_u5);
/* rb532_set_bit - sanely set a bit
*
* bitval: new value for the bit
* offset: bit index in the 4 byte address range
* ioaddr: 4 byte aligned address being altered
*/
static inline void rb532_set_bit(unsigned bitval,
unsigned offset, void __iomem *ioaddr)
{
unsigned long flags;
u32 val;
bitval = !!bitval; /* map parameter to {0,1} */
local_irq_save(flags);
val = readl(ioaddr);
val &= ~( ~bitval << offset ); /* unset bit if bitval == 0 */
val |= ( bitval << offset ); /* set bit if bitval == 1 */
writel(val, ioaddr);
local_irq_restore(flags);
}
/* rb532_get_bit - read a bit
*
* returns the boolean state of the bit, which may be > 1
*/
static inline int rb532_get_bit(unsigned offset, void __iomem *ioaddr)
{
return (readl(ioaddr) & (1 << offset));
}
/*
* Return GPIO level */
static int rb532_gpio_get(struct gpio_chip *chip, unsigned offset)
{
u32 mask = 1 << offset;
struct rb532_gpio_chip *gpch;
gpch = container_of(chip, struct rb532_gpio_chip, chip);
return readl(gpch->regbase + GPIOD) & mask;
return rb532_get_bit(offset, gpch->regbase + GPIOD);
}
/*
@ -128,23 +156,10 @@ static int rb532_gpio_get(struct gpio_chip *chip, unsigned offset)
static void rb532_gpio_set(struct gpio_chip *chip,
unsigned offset, int value)
{
unsigned long flags;
u32 mask = 1 << offset;
u32 tmp;
struct rb532_gpio_chip *gpch;
void __iomem *gpvr;
gpch = container_of(chip, struct rb532_gpio_chip, chip);
gpvr = gpch->regbase + GPIOD;
local_irq_save(flags);
tmp = readl(gpvr);
if (value)
tmp |= mask;
else
tmp &= ~mask;
writel(tmp, gpvr);
local_irq_restore(flags);
rb532_set_bit(value, offset, gpch->regbase + GPIOD);
}
/*
@ -152,21 +167,14 @@ static void rb532_gpio_set(struct gpio_chip *chip,
*/
static int rb532_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
unsigned long flags;
u32 mask = 1 << offset;
u32 value;
struct rb532_gpio_chip *gpch;
void __iomem *gpdr;
gpch = container_of(chip, struct rb532_gpio_chip, chip);
gpdr = gpch->regbase + GPIOCFG;
local_irq_save(flags);
value = readl(gpdr);
value &= ~mask;
writel(value, gpdr);
local_irq_restore(flags);
if (rb532_get_bit(offset, gpch->regbase + GPIOFUNC))
return 1; /* alternate function, GPIOCFG is ignored */
rb532_set_bit(0, offset, gpch->regbase + GPIOCFG);
return 0;
}
@ -176,99 +184,20 @@ static int rb532_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
static int rb532_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
unsigned long flags;
u32 mask = 1 << offset;
u32 tmp;
struct rb532_gpio_chip *gpch;
void __iomem *gpdr;
gpch = container_of(chip, struct rb532_gpio_chip, chip);
writel(mask, gpch->regbase + GPIOD);
gpdr = gpch->regbase + GPIOCFG;
local_irq_save(flags);
tmp = readl(gpdr);
tmp |= mask;
writel(tmp, gpdr);
local_irq_restore(flags);
if (rb532_get_bit(offset, gpch->regbase + GPIOFUNC))
return 1; /* alternate function, GPIOCFG is ignored */
/* set the initial output value */
rb532_set_bit(value, offset, gpch->regbase + GPIOD);
rb532_set_bit(1, offset, gpch->regbase + GPIOCFG);
return 0;
}
/*
* Set the GPIO interrupt level
*/
static void rb532_gpio_set_int_level(struct gpio_chip *chip,
unsigned offset, int value)
{
unsigned long flags;
u32 mask = 1 << offset;
u32 tmp;
struct rb532_gpio_chip *gpch;
void __iomem *gpil;
gpch = container_of(chip, struct rb532_gpio_chip, chip);
gpil = gpch->regbase + GPIOILEVEL;
local_irq_save(flags);
tmp = readl(gpil);
if (value)
tmp |= mask;
else
tmp &= ~mask;
writel(tmp, gpil);
local_irq_restore(flags);
}
/*
* Get the GPIO interrupt level
*/
static int rb532_gpio_get_int_level(struct gpio_chip *chip, unsigned offset)
{
u32 mask = 1 << offset;
struct rb532_gpio_chip *gpch;
gpch = container_of(chip, struct rb532_gpio_chip, chip);
return readl(gpch->regbase + GPIOILEVEL) & mask;
}
/*
* Set the GPIO interrupt status
*/
static void rb532_gpio_set_int_status(struct gpio_chip *chip,
unsigned offset, int value)
{
unsigned long flags;
u32 mask = 1 << offset;
u32 tmp;
struct rb532_gpio_chip *gpch;
void __iomem *gpis;
gpch = container_of(chip, struct rb532_gpio_chip, chip);
gpis = gpch->regbase + GPIOISTAT;
local_irq_save(flags);
tmp = readl(gpis);
if (value)
tmp |= mask;
else
tmp &= ~mask;
writel(tmp, gpis);
local_irq_restore(flags);
}
/*
* Get the GPIO interrupt status
*/
static int rb532_gpio_get_int_status(struct gpio_chip *chip, unsigned offset)
{
u32 mask = 1 << offset;
struct rb532_gpio_chip *gpch;
gpch = container_of(chip, struct rb532_gpio_chip, chip);
return readl(gpch->regbase + GPIOISTAT) & mask;
}
static struct rb532_gpio_chip rb532_gpio_chip[] = {
[0] = {
.chip = {
@ -280,13 +209,35 @@ static struct rb532_gpio_chip rb532_gpio_chip[] = {
.base = 0,
.ngpio = 32,
},
.get_int_level = rb532_gpio_get_int_level,
.set_int_level = rb532_gpio_set_int_level,
.get_int_status = rb532_gpio_get_int_status,
.set_int_status = rb532_gpio_set_int_status,
},
};
/*
* Set GPIO interrupt level
*/
void rb532_gpio_set_ilevel(int bit, unsigned gpio)
{
rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOILEVEL);
}
EXPORT_SYMBOL(rb532_gpio_set_ilevel);
/*
* Set GPIO interrupt status
*/
void rb532_gpio_set_istat(int bit, unsigned gpio)
{
rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOISTAT);
}
EXPORT_SYMBOL(rb532_gpio_set_istat);
/*
* Configure GPIO alternate function
*/
static void rb532_gpio_set_func(int bit, unsigned gpio)
{
rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOFUNC);
}
int __init rb532_gpio_init(void)
{
struct resource *r;
@ -310,9 +261,11 @@ int __init rb532_gpio_init(void)
return -ENXIO;
}
/* Set the interrupt status and level for the CF pin */
rb532_gpio_set_int_level(&rb532_gpio_chip->chip, CF_GPIO_NUM, 1);
rb532_gpio_set_int_status(&rb532_gpio_chip->chip, CF_GPIO_NUM, 0);
/* configure CF_GPIO_NUM as CFRDY IRQ source */
rb532_gpio_set_func(0, CF_GPIO_NUM);
rb532_gpio_direction_input(&rb532_gpio_chip->chip, CF_GPIO_NUM);
rb532_gpio_set_ilevel(1, CF_GPIO_NUM);
rb532_gpio_set_istat(0, CF_GPIO_NUM);
return 0;
}


@ -183,10 +183,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
* being 64 bit in both cases.
*/
static long translate_usr_offset(long offset)
static compat_ulong_t translate_usr_offset(compat_ulong_t offset)
{
if (offset < 0)
return -1;
return sizeof(struct pt_regs);
else if (offset <= 32*4) /* gr[0..31] */
return offset * 2 + 4;
else if (offset <= 32*4+32*8) /* gr[0..31] + fr[0..31] */
@ -194,7 +194,7 @@ static long translate_usr_offset(long offset)
else if (offset < sizeof(struct pt_regs)/2 + 32*4)
return offset * 2 + 4 - 32*8;
else
return -1;
return sizeof(struct pt_regs);
}
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
@ -209,7 +209,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
if (addr & (sizeof(compat_uint_t)-1))
break;
addr = translate_usr_offset(addr);
if (addr < 0)
if (addr >= sizeof(struct pt_regs))
break;
tmp = *(compat_uint_t *) ((char *) task_regs(child) + addr);
@ -236,7 +236,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
if (addr & (sizeof(compat_uint_t)-1))
break;
addr = translate_usr_offset(addr);
if (addr < 0)
if (addr >= sizeof(struct pt_regs))
break;
if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
/* Special case, fp regs are 64 bits anyway */


@ -338,8 +338,9 @@
#define __NR_dup3 320
#define __NR_pipe2 321
#define __NR_inotify_init1 322
#define __NR_accept4 323
#define NR_SYSCALLS 323
#define NR_SYSCALLS 324
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
* it never had the plain ones and there is no value to adding those


@ -340,8 +340,9 @@
#define __NR_dup3 320
#define __NR_pipe2 321
#define __NR_inotify_init1 322
#define __NR_accept4 323
#define NR_SYSCALLS 323
#define NR_SYSCALLS 324
#ifdef __KERNEL__
#define __ARCH_WANT_IPC_PARSE_VERSION


@ -81,4 +81,4 @@ sys_call_table:
/*305*/ .long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
/*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
/*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1
/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4


@ -150,7 +150,7 @@ sys32_mmap2:
sys32_socketcall: /* %o0=call, %o1=args */
cmp %o0, 1
bl,pn %xcc, do_einval
cmp %o0, 17
cmp %o0, 18
bg,pn %xcc, do_einval
sub %o0, 1, %o0
sllx %o0, 5, %o0
@ -319,6 +319,15 @@ do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int)
nop
nop
nop
do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */
63: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_accept4), %g1
64: lduwa [%o1 + 0x8] %asi, %o2
65: ldswa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_accept4), %g0
66: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
.section __ex_table,"a"
.align 4
@ -353,4 +362,6 @@ do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int)
.word 57b, __retl_efault, 58b, __retl_efault
.word 59b, __retl_efault, 60b, __retl_efault
.word 61b, __retl_efault, 62b, __retl_efault
.word 63b, __retl_efault, 64b, __retl_efault
.word 65b, __retl_efault, 66b, __retl_efault
.previous


@ -82,7 +82,7 @@ sys_call_table32:
.word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait
/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4
#endif /* CONFIG_COMPAT */
@ -156,4 +156,4 @@ sys_call_table:
.word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
.word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4


@ -169,9 +169,12 @@ config GENERIC_PENDING_IRQ
config X86_SMP
bool
depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
select USE_GENERIC_SMP_HELPERS
default y
config USE_GENERIC_SMP_HELPERS
def_bool y
depends on SMP
config X86_32_SMP
def_bool y
depends on X86_32 && SMP
@ -959,7 +962,7 @@ config ARCH_PHYS_ADDR_T_64BIT
config NUMA
bool "Numa Memory Allocation and Scheduler Support (EXPERIMENTAL)"
depends on SMP
depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && BROKEN)
depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL)
default n if X86_PC
default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP)
help


@ -34,10 +34,14 @@ static inline void get_memcfg_numa(void)
extern int early_pfn_to_nid(unsigned long pfn);
extern void resume_map_numa_kva(pgd_t *pgd);
#else /* !CONFIG_NUMA */
#define get_memcfg_numa get_memcfg_numa_flat
static inline void resume_map_numa_kva(pgd_t *pgd) {}
#endif /* CONFIG_NUMA */
#ifdef CONFIG_DISCONTIGMEM


@ -21,6 +21,7 @@ struct task_struct;
struct exec_domain;
#include <asm/processor.h>
#include <asm/ftrace.h>
#include <asm/atomic.h>
struct thread_info {
struct task_struct *task; /* main task structure */
@ -45,6 +46,11 @@ struct thread_info {
int curr_ret_stack;
/* Stack of return addresses for return function tracing */
struct ftrace_ret_stack ret_stack[FTRACE_RET_STACK_SIZE];
/*
* Number of functions that haven't been traced
* because of depth overrun.
*/
atomic_t trace_overrun;
#endif
};
@ -61,6 +67,7 @@ struct thread_info {
.fn = do_no_restart_syscall, \
}, \
.curr_ret_stack = -1,\
.trace_overrun = ATOMIC_INIT(0) \
}
#else
#define INIT_THREAD_INFO(tsk) \


@ -46,7 +46,7 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
return ret;
case 10:
__get_user_asm(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 16);
ret, "q", "", "=r", 10);
if (unlikely(ret))
return ret;
__get_user_asm(*(u16 *)(8 + (char *)dst),


@ -639,8 +639,8 @@ __SYSCALL(__NR_fallocate, sys_fallocate)
__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime)
#define __NR_timerfd_gettime 287
__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime)
#define __NR_paccept 288
__SYSCALL(__NR_paccept, sys_paccept)
#define __NR_accept4 288
__SYSCALL(__NR_accept4, sys_accept4)
#define __NR_signalfd4 289
__SYSCALL(__NR_signalfd4, sys_signalfd4)
#define __NR_eventfd2 290


@ -537,7 +537,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
address >>= PAGE_SHIFT;
iommu_area_free(dom->bitmap, address, pages);
if (address + pages >= dom->next_bit)
if (address >= dom->next_bit)
dom->need_flush = true;
}


@ -121,7 +121,7 @@ u16 amd_iommu_last_bdf; /* largest PCI device id we have
LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
we find in ACPI */
unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
int amd_iommu_isolate; /* if 1, device isolation is enabled */
int amd_iommu_isolate = 1; /* if 1, device isolation is enabled */
bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
@ -1213,7 +1213,9 @@ static int __init parse_amd_iommu_options(char *str)
for (; *str; ++str) {
if (strncmp(str, "isolate", 7) == 0)
amd_iommu_isolate = 1;
if (strncmp(str, "fullflush", 11) == 0)
if (strncmp(str, "share", 5) == 0)
amd_iommu_isolate = 0;
if (strncmp(str, "fullflush", 9) == 0)
amd_iommu_unmap_flush = true;
}


@ -236,17 +236,33 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
struct ds_context *context = *p_context;
if (!context) {
spin_unlock(&ds_lock);
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
if (!context) {
spin_lock(&ds_lock);
return NULL;
}
context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
if (!context->ds) {
kfree(context);
spin_lock(&ds_lock);
return NULL;
}
spin_lock(&ds_lock);
/*
* Check for race - another CPU could have allocated
* it meanwhile:
*/
if (*p_context) {
kfree(context->ds);
kfree(context);
return *p_context;
}
*p_context = context;
context->this = p_context;
@ -384,14 +400,15 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
spin_lock(&ds_lock);
if (!check_tracer(task))
return -EPERM;
error = -ENOMEM;
context = ds_alloc_context(task);
if (!context)
goto out_unlock;
error = -EPERM;
if (!check_tracer(task))
goto out_unlock;
error = -EALREADY;
if (context->owner[qual] == current)
goto out_unlock;


@ -250,31 +250,24 @@ int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
{
struct acpi_table_header *header = NULL;
int i = 0;
acpi_size tbl_size;
while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++, &header, &tbl_size))) {
while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
struct oem_table *t = (struct oem_table *)header;
oem_addrX = t->OEMTableAddr;
oem_size = t->OEMTableSize;
early_acpi_os_unmap_memory(header, tbl_size);
*oem_addr = (unsigned long)__acpi_map_table(oem_addrX,
oem_size);
return 0;
}
early_acpi_os_unmap_memory(header, tbl_size);
}
return -1;
}
void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
{
if (!oem_addr)
return;
__acpi_unmap_table((char *)oem_addr, oem_size);
}
#endif


@ -353,8 +353,10 @@ static int push_return_trace(unsigned long ret, unsigned long long time,
struct thread_info *ti = current_thread_info();
/* The return trace stack is full */
if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1) {
atomic_inc(&ti->trace_overrun);
return -EBUSY;
}
index = ++ti->curr_ret_stack;
barrier();
@ -367,7 +369,7 @@ static int push_return_trace(unsigned long ret, unsigned long long time,
/* Retrieve a function return address to the trace stack on thread info.*/
static void pop_return_trace(unsigned long *ret, unsigned long long *time,
unsigned long *func)
unsigned long *func, unsigned long *overrun)
{
int index;
@ -376,6 +378,7 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time,
*ret = ti->ret_stack[index].ret;
*func = ti->ret_stack[index].func;
*time = ti->ret_stack[index].calltime;
*overrun = atomic_read(&ti->trace_overrun);
ti->curr_ret_stack--;
}
@ -386,7 +389,8 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time,
unsigned long ftrace_return_to_handler(void)
{
struct ftrace_retfunc trace;
pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
pop_return_trace(&trace.ret, &trace.calltime, &trace.func,
&trace.overrun);
trace.rettime = cpu_clock(raw_smp_processor_id());
ftrace_function_return(&trace);


@ -1140,6 +1140,20 @@ static void __clear_irq_vector(int irq)
cfg->vector = 0;
cpus_clear(cfg->domain);
if (likely(!cfg->move_in_progress))
return;
cpus_and(mask, cfg->old_domain, cpu_online_map);
for_each_cpu_mask_nr(cpu, mask) {
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
vector++) {
if (per_cpu(vector_irq, cpu)[vector] != irq)
continue;
per_cpu(vector_irq, cpu)[vector] = -1;
break;
}
}
cfg->move_in_progress = 0;
}
void __setup_vector_irq(int cpu)


@ -169,6 +169,15 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
},
},
{ /* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */
.callback = set_bios_reboot,
.ident = "Dell OptiPlex 330",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"),
DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
},
},
{ /* Handle problems with rebooting on Dell 2400's */
.callback = set_bios_reboot,
.ident = "Dell PowerEdge 2400",


@ -764,7 +764,7 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
.callback = dmi_low_memory_corruption,
.ident = "Phoenix BIOS",
.matches = {
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"),
},
},
#endif


@ -46,7 +46,9 @@ static __cpuinit void check_tsc_warp(void)
cycles_t start, now, prev, end;
int i;
rdtsc_barrier();
start = get_cycles();
rdtsc_barrier();
/*
* The measurement runs for 20 msecs:
*/
@ -61,7 +63,9 @@ static __cpuinit void check_tsc_warp(void)
*/
__raw_spin_lock(&sync_lock);
prev = last_tsc;
rdtsc_barrier();
now = get_cycles();
rdtsc_barrier();
last_tsc = now;
__raw_spin_unlock(&sync_lock);


@ -7,6 +7,7 @@
* This file provides all the same external entries as smp.c but uses
* the voyager hal to provide the functionality
*/
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
@ -1790,6 +1791,17 @@ void __init smp_setup_processor_id(void)
x86_write_percpu(cpu_number, hard_smp_processor_id());
}
static void voyager_send_call_func(cpumask_t callmask)
{
__u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
send_CPI(mask, VIC_CALL_FUNCTION_CPI);
}
static void voyager_send_call_func_single(int cpu)
{
send_CPI(1 << cpu, VIC_CALL_FUNCTION_SINGLE_CPI);
}
struct smp_ops smp_ops = {
.smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
.smp_prepare_cpus = voyager_smp_prepare_cpus,
@ -1799,6 +1811,6 @@ struct smp_ops smp_ops = {
.smp_send_stop = voyager_smp_send_stop,
.smp_send_reschedule = voyager_smp_send_reschedule,
.send_call_func_ipi = native_send_call_func_ipi,
.send_call_func_single_ipi = native_send_call_func_single_ipi,
.send_call_func_ipi = voyager_send_call_func,
.send_call_func_single_ipi = voyager_send_call_func_single,
};


@ -222,6 +222,41 @@ static void __init remap_numa_kva(void)
}
}
#ifdef CONFIG_HIBERNATION
/**
* resume_map_numa_kva - add KVA mapping to the temporary page tables created
* during resume from hibernation
* @pgd_base - temporary resume page directory
*/
void resume_map_numa_kva(pgd_t *pgd_base)
{
int node;
for_each_online_node(node) {
unsigned long start_va, start_pfn, size, pfn;
start_va = (unsigned long)node_remap_start_vaddr[node];
start_pfn = node_remap_start_pfn[node];
size = node_remap_size[node];
printk(KERN_DEBUG "%s: node %d\n", __FUNCTION__, node);
for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) {
unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
pgd_t *pgd = pgd_base + pgd_index(vaddr);
pud_t *pud = pud_offset(pgd, vaddr);
pmd_t *pmd = pmd_offset(pud, vaddr);
set_pmd(pmd, pfn_pmd(start_pfn + pfn,
PAGE_KERNEL_LARGE_EXEC));
printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
__FUNCTION__, vaddr, start_pfn + pfn);
}
}
}
#endif
static unsigned long calculate_numa_remap_pages(void)
{
int nid;


@ -12,6 +12,7 @@
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmzone.h>
/* Defined in hibernate_asm_32.S */
extern int restore_image(void);
@ -127,6 +128,9 @@ static int resume_physical_mapping_init(pgd_t *pgd_base)
}
}
}
resume_map_numa_kva(pgd_base);
return 0;
}


@ -2847,7 +2847,7 @@ static void do_cciss_request(struct request_queue *q)
h->maxSG = seg;
#ifdef CCISS_DEBUG
printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n",
creq->nr_sectors, seg);
#endif /* CCISS_DEBUG */
@ -3197,7 +3197,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
#ifdef CCISS_DEBUG
printk("address 0 = %x\n", c->paddr);
printk("address 0 = %lx\n", c->paddr);
#endif /* CCISS_DEBUG */
c->vaddr = remap_pci_mem(c->paddr, 0x250);
@ -3224,7 +3224,8 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
#endif /* CCISS_DEBUG */
cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
#ifdef CCISS_DEBUG
printk("cfg base address index = %x\n", cfg_base_addr_index);
printk("cfg base address index = %llx\n",
(unsigned long long)cfg_base_addr_index);
#endif /* CCISS_DEBUG */
if (cfg_base_addr_index == -1) {
printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
@ -3234,7 +3235,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
#ifdef CCISS_DEBUG
printk("cfg offset = %x\n", cfg_offset);
printk("cfg offset = %llx\n", (unsigned long long)cfg_offset);
#endif /* CCISS_DEBUG */
c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
cfg_base_addr_index) +


@ -1134,7 +1134,7 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
continue;
is_out = test_bit(FLAG_IS_OUT, &gdesc->flags);
seq_printf(s, " gpio-%-3d (%-12s) %s %s",
seq_printf(s, " gpio-%-3d (%-20.20s) %s %s",
gpio, gdesc->label,
is_out ? "out" : "in ",
chip->get


@ -128,6 +128,9 @@ static const char* temperature_sensors_sets[][36] = {
/* Set 13: iMac 8,1 */
{ "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TG0P", "TH0P",
"TL0P", "TO0P", "TW0P", "Tm0P", "Tp0P", NULL },
/* Set 14: iMac 6,1 */
{ "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TG0P", "TH0P",
"TO0P", "Tp0P", NULL },
};
/* List of keys used to read/write fan speeds */
@ -1296,6 +1299,8 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = {
{ .accelerometer = 1, .light = 1, .temperature_set = 12 },
/* iMac 8: light sensor only, temperature set 13 */
{ .accelerometer = 0, .light = 0, .temperature_set = 13 },
/* iMac 6: light sensor only, temperature set 14 */
{ .accelerometer = 0, .light = 0, .temperature_set = 14 },
};
/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
@ -1349,10 +1354,18 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") },
&applesmc_dmi_data[4]},
{ applesmc_dmi_match, "Apple MacPro", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") },
&applesmc_dmi_data[4]},
{ applesmc_dmi_match, "Apple iMac 8", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") },
&applesmc_dmi_data[13]},
{ applesmc_dmi_match, "Apple iMac 6", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "iMac6") },
&applesmc_dmi_data[14]},
{ applesmc_dmi_match, "Apple iMac 5", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "iMac5") },


@ -1,3 +1,7 @@
ifdef CONFIG_SGI_GRU_DEBUG
EXTRA_CFLAGS += -DDEBUG
endif
obj-$(CONFIG_SGI_GRU) := gru.o
gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o


@ -1690,9 +1690,11 @@ static int atl2_resume(struct pci_dev *pdev)
ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
err = atl2_request_irq(adapter);
if (netif_running(netdev) && err)
return err;
if (netif_running(netdev)) {
err = atl2_request_irq(adapter);
if (err)
return err;
}
atl2_reset_hw(&adapter->hw);


@ -1112,7 +1112,7 @@ static void ipg_nic_rx_free_skb(struct net_device *dev)
struct ipg_rx *rxfd = sp->rxd + entry;
pci_unmap_single(sp->pdev,
le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN),
le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb_irq(sp->rx_buff[entry]);
sp->rx_buff[entry] = NULL;
@ -1179,7 +1179,7 @@ static int ipg_nic_rx_check_error(struct net_device *dev)
*/
if (sp->rx_buff[entry]) {
pci_unmap_single(sp->pdev,
le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN),
le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb_irq(sp->rx_buff[entry]);
@ -1246,7 +1246,7 @@ static void ipg_nic_rx_with_start(struct net_device *dev,
if (jumbo->found_start)
dev_kfree_skb_irq(jumbo->skb);
pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN),
pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
skb_put(skb, sp->rxfrag_size);
@ -1349,7 +1349,7 @@ static int ipg_nic_rx_jumbo(struct net_device *dev)
unsigned int entry = curr % IPG_RFDLIST_LENGTH;
struct ipg_rx *rxfd = sp->rxd + entry;
if (!(rxfd->rfs & le64_to_cpu(IPG_RFS_RFDDONE)))
if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE)))
break;
switch (ipg_nic_rx_check_frame_type(dev)) {


@ -1287,7 +1287,34 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
return;
}
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter);
/**
* ixgbe_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
**/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
int i;
for (i = 0; i < adapter->num_msix_vectors; i++)
synchronize_irq(adapter->msix_entries[i].vector);
} else {
synchronize_irq(adapter->pdev->irq);
}
}
/**
* ixgbe_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
**/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
u32 mask;
mask = IXGBE_EIMS_ENABLE_MASK;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
IXGBE_WRITE_FLUSH(&adapter->hw);
}
/**
* ixgbe_intr - legacy mode Interrupt Handler
@ -1393,35 +1420,6 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
}
}
/**
* ixgbe_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
**/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
int i;
for (i = 0; i < adapter->num_msix_vectors; i++)
synchronize_irq(adapter->msix_entries[i].vector);
} else {
synchronize_irq(adapter->pdev->irq);
}
}
/**
* ixgbe_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
**/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
u32 mask;
mask = IXGBE_EIMS_ENABLE_MASK;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
IXGBE_WRITE_FLUSH(&adapter->hw);
}
/**
* ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
*


@ -912,23 +912,23 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
skb_put(skb, framesize);
skb->protocol = eth_type_trans(skb, jme->dev);
if (jme_rxsum_ok(jme, rxdesc->descwb.flags))
if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb->ip_summed = CHECKSUM_NONE;
if (rxdesc->descwb.flags & RXWBFLAG_TAGON) {
if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
if (jme->vlgrp) {
jme->jme_vlan_rx(skb, jme->vlgrp,
le32_to_cpu(rxdesc->descwb.vlan));
le16_to_cpu(rxdesc->descwb.vlan));
NET_STAT(jme).rx_bytes += 4;
}
} else {
jme->jme_rx(skb);
}
if ((le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST) ==
RXWBFLAG_DEST_MUL)
if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) ==
cpu_to_le16(RXWBFLAG_DEST_MUL))
++(NET_STAT(jme).multicast);
jme->dev->last_rx = jiffies;
@ -961,7 +961,7 @@ jme_process_receive(struct jme_adapter *jme, int limit)
rxdesc = rxring->desc;
rxdesc += i;
if ((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) ||
!(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
goto out;
@ -1763,10 +1763,9 @@ jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
}
static int
jme_tx_tso(struct sk_buff *skb,
u16 *mss, u8 *flags)
jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
{
*mss = skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT;
*mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
if (*mss) {
*flags |= TXFLAG_LSEN;
@ -1826,11 +1825,11 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
}
static inline void
jme_tx_vlan(struct sk_buff *skb, u16 *vlan, u8 *flags)
jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
{
if (vlan_tx_tag_present(skb)) {
*flags |= TXFLAG_TAGON;
*vlan = vlan_tx_tag_get(skb);
*vlan = cpu_to_le16(vlan_tx_tag_get(skb));
}
}


@ -899,7 +899,8 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
if (skb != NULL) {
if (skb_queue_len(&mp->rx_recycle) <
mp->default_rx_ring_size &&
skb_recycle_check(skb, mp->skb_size))
skb_recycle_check(skb, mp->skb_size +
dma_get_cache_alignment() - 1))
__skb_queue_head(&mp->rx_recycle, skb);
else
dev_kfree_skb(skb);
@ -2435,8 +2436,8 @@ static int mv643xx_eth_shared_remove(struct platform_device *pdev)
struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
if (pd == NULL || pd->shared_smi == NULL) {
mdiobus_free(msp->smi_bus);
mdiobus_unregister(msp->smi_bus);
mdiobus_free(msp->smi_bus);
}
if (msp->err_interrupt != NO_IRQ)
free_irq(msp->err_interrupt, msp);


@ -564,20 +564,32 @@ EXPORT_SYMBOL(genphy_restart_aneg);
*/
int genphy_config_aneg(struct phy_device *phydev)
{
int result = 0;
int result;
if (AUTONEG_ENABLE == phydev->autoneg) {
int result = genphy_config_advert(phydev);
if (AUTONEG_ENABLE != phydev->autoneg)
return genphy_setup_forced(phydev);
if (result < 0) /* error */
return result;
result = genphy_config_advert(phydev);
/* Only restart aneg if we are advertising something different
* than we were before. */
if (result > 0)
result = genphy_restart_aneg(phydev);
} else
result = genphy_setup_forced(phydev);
if (result < 0) /* error */
return result;
if (result == 0) {
/* Advertisment hasn't changed, but maybe aneg was never on to
* begin with? Or maybe phy was isolated? */
int ctl = phy_read(phydev, MII_BMCR);
if (ctl < 0)
return ctl;
if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
result = 1; /* do restart aneg */
}
/* Only restart aneg if we are advertising something different
* than we were before. */
if (result > 0)
result = genphy_restart_aneg(phydev);
return result;
}


@ -927,7 +927,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_txdesc *txdesc;
u32 entry;
int flags;
unsigned long flags;
spin_lock_irqsave(&mdp->lock, flags);
if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
@ -1141,7 +1141,7 @@ static int sh_mdio_init(struct net_device *ndev, int id)
/* Hook up MII support for ethtool */
mdp->mii_bus->name = "sh_mii";
mdp->mii_bus->parent = &ndev->dev;
mdp->mii_bus->id[0] = id;
snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);
/* PHY IRQ */
mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);


@ -1813,7 +1813,7 @@ static int __init smc911x_probe(struct net_device *dev)
val = SMC_GET_BYTE_TEST(lp);
DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val);
if (val != 0x87654321) {
printk(KERN_ERR "Invalid chip endian 0x08%x\n",val);
printk(KERN_ERR "Invalid chip endian 0x%08x\n",val);
retval = -ENODEV;
goto err_out;
}


@ -1102,12 +1102,14 @@ static int ax88178_link_reset(struct usbnet *dev)
mode = AX88178_MEDIUM_DEFAULT;
if (ecmd.speed == SPEED_1000)
mode |= AX_MEDIUM_GM | AX_MEDIUM_ENCK;
mode |= AX_MEDIUM_GM;
else if (ecmd.speed == SPEED_100)
mode |= AX_MEDIUM_PS;
else
mode &= ~(AX_MEDIUM_PS | AX_MEDIUM_GM);
mode |= AX_MEDIUM_ENCK;
if (ecmd.duplex == DUPLEX_FULL)
mode |= AX_MEDIUM_FD;
else


@ -1384,7 +1384,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
rxq->queue[i] = NULL;
pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->aligned_dma_addr,
priv->hw_params.rx_buf_size,
PCI_DMA_FROMDEVICE);
pkt = (struct iwl_rx_packet *)rxb->skb->data;
@ -1436,8 +1436,8 @@ void iwl_rx_handle(struct iwl_priv *priv)
rxb->skb = NULL;
}
pci_unmap_single(priv->pci_dev, rxb->dma_addr,
priv->hw_params.rx_buf_size,
pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
priv->hw_params.rx_buf_size + 256,
PCI_DMA_FROMDEVICE);
spin_lock_irqsave(&rxq->lock, flags);
list_add_tail(&rxb->list, &priv->rxq.rx_used);
@ -2341,7 +2341,6 @@ static void iwl_bg_alive_start(struct work_struct *data)
mutex_lock(&priv->mutex);
iwl_alive_start(priv);
mutex_unlock(&priv->mutex);
ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
}
static void iwl4965_bg_rf_kill(struct work_struct *work)


@ -89,7 +89,8 @@ extern struct iwl_cfg iwl5100_abg_cfg;
#define DEFAULT_LONG_RETRY_LIMIT 4U
struct iwl_rx_mem_buffer {
dma_addr_t dma_addr;
dma_addr_t real_dma_addr;
dma_addr_t aligned_dma_addr;
struct sk_buff *skb;
struct list_head list;
};


@ -204,7 +204,7 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
list_del(element);
/* Point to Rx buffer via next RBD in circular buffer */
rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr);
rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->aligned_dma_addr);
rxq->queue[rxq->write] = rxb;
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq->free_count--;
@ -251,7 +251,7 @@ void iwl_rx_allocate(struct iwl_priv *priv)
rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
/* Alloc a new receive buffer */
rxb->skb = alloc_skb(priv->hw_params.rx_buf_size,
rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
__GFP_NOWARN | GFP_ATOMIC);
if (!rxb->skb) {
if (net_ratelimit())
@ -266,9 +266,17 @@ void iwl_rx_allocate(struct iwl_priv *priv)
list_del(element);
/* Get physical address of RB/SKB */
rxb->dma_addr =
pci_map_single(priv->pci_dev, rxb->skb->data,
priv->hw_params.rx_buf_size, PCI_DMA_FROMDEVICE);
rxb->real_dma_addr = pci_map_single(
priv->pci_dev,
rxb->skb->data,
priv->hw_params.rx_buf_size + 256,
PCI_DMA_FROMDEVICE);
/* dma address must be no more than 36 bits */
BUG_ON(rxb->real_dma_addr & ~DMA_BIT_MASK(36));
/* and also 256 byte aligned! */
rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256);
skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
}
@ -300,8 +308,8 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
if (rxq->pool[i].skb != NULL) {
pci_unmap_single(priv->pci_dev,
rxq->pool[i].dma_addr,
priv->hw_params.rx_buf_size,
rxq->pool[i].real_dma_addr,
priv->hw_params.rx_buf_size + 256,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(rxq->pool[i].skb);
}
@ -354,8 +362,8 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
* to an SKB, so we need to unmap and free potential storage */
if (rxq->pool[i].skb != NULL) {
pci_unmap_single(priv->pci_dev,
rxq->pool[i].dma_addr,
priv->hw_params.rx_buf_size,
rxq->pool[i].real_dma_addr,
priv->hw_params.rx_buf_size + 256,
PCI_DMA_FROMDEVICE);
priv->alloc_rxb_skb--;
dev_kfree_skb(rxq->pool[i].skb);


@ -6012,7 +6012,6 @@ static void iwl3945_bg_alive_start(struct work_struct *data)
mutex_lock(&priv->mutex);
iwl3945_alive_start(priv);
mutex_unlock(&priv->mutex);
ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
}
static void iwl3945_bg_rf_kill(struct work_struct *work)


@ -331,7 +331,7 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
/* Fill the receive configuration URB and initialise the Rx call back */
usb_fill_bulk_urb(cardp->rx_urb, cardp->udev,
usb_rcvbulkpipe(cardp->udev, cardp->ep_in),
(void *) (skb->tail),
skb_tail_pointer(skb),
MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp);
cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;


@ -36,7 +36,7 @@ if PARPORT
config PARPORT_PC
tristate "PC-style hardware"
depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && \
(!M68K || ISA) && !MN10300 && !AVR32
(!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN
---help---
You should say Y here if you have a PC-style parallel port. All
IBM PC compatible computers and some Alphas have PC-style


@ -1655,12 +1655,14 @@ int __init init_dmars(void)
iommu->flush.flush_context = __iommu_flush_context;
iommu->flush.flush_iotlb = __iommu_flush_iotlb;
printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
"invalidation\n", drhd->reg_base_addr);
"invalidation\n",
(unsigned long long)drhd->reg_base_addr);
} else {
iommu->flush.flush_context = qi_flush_context;
iommu->flush.flush_iotlb = qi_flush_iotlb;
printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
"invalidation\n", drhd->reg_base_addr);
"invalidation\n",
(unsigned long long)drhd->reg_base_addr);
}
}


@ -1832,7 +1832,7 @@ int pci_reset_function(struct pci_dev *dev)
if (!(cap & PCI_EXP_DEVCAP_FLR))
return -ENOTTY;
if (!dev->msi_enabled && !dev->msix_enabled)
if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
disable_irq(dev->irq);
pci_save_state(dev);
@ -1841,7 +1841,7 @@ int pci_reset_function(struct pci_dev *dev)
r = pci_execute_reset_function(dev);
pci_restore_state(dev);
if (!dev->msi_enabled && !dev->msix_enabled)
if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
enable_irq(dev->irq);
return r;


@ -352,21 +352,21 @@ static int map_dma_buffers(struct driver_data *drv_data)
} else
drv_data->tx_map_len = drv_data->len;
/* Stream map the rx buffer */
drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
drv_data->rx_map_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, drv_data->rx_dma))
/* Stream map the tx buffer. Always do DMA_TO_DEVICE first
* so we flush the cache *before* invalidating it, in case
* the tx and rx buffers overlap.
*/
drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
drv_data->tx_map_len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, drv_data->tx_dma))
return 0;
/* Stream map the tx buffer */
drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
drv_data->tx_map_len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, drv_data->tx_dma)) {
dma_unmap_single(dev, drv_data->rx_dma,
/* Stream map the rx buffer */
drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
drv_data->rx_map_len, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, drv_data->rx_dma)) {
dma_unmap_single(dev, drv_data->tx_dma,
drv_data->tx_map_len, DMA_TO_DEVICE);
return 0;
}


@ -506,20 +506,6 @@ static int map_dma_buffers(struct driver_data *drv_data)
if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
return -1;
/* NULL rx means write-only transfer and no map needed
since rx DMA will not be used */
if (drv_data->rx) {
buf = drv_data->rx;
drv_data->rx_dma = dma_map_single(
dev,
buf,
drv_data->len,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, drv_data->rx_dma))
return -1;
drv_data->rx_dma_needs_unmap = 1;
}
if (drv_data->tx == NULL) {
/* Read only message --> use drv_data->dummy_dma_buf for dummy
writes to achive reads */
@ -533,18 +519,31 @@ static int map_dma_buffers(struct driver_data *drv_data)
buf,
drv_data->tx_map_len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, drv_data->tx_dma)) {
if (drv_data->rx_dma) {
dma_unmap_single(dev,
drv_data->rx_dma,
drv_data->len,
DMA_FROM_DEVICE);
drv_data->rx_dma_needs_unmap = 0;
}
if (dma_mapping_error(dev, drv_data->tx_dma))
return -1;
}
drv_data->tx_dma_needs_unmap = 1;
/* NULL rx means write-only transfer and no map needed
* since rx DMA will not be used */
if (drv_data->rx) {
buf = drv_data->rx;
drv_data->rx_dma = dma_map_single(dev,
buf,
drv_data->len,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, drv_data->rx_dma)) {
if (drv_data->tx_dma) {
dma_unmap_single(dev,
drv_data->tx_dma,
drv_data->tx_map_len,
DMA_TO_DEVICE);
drv_data->tx_dma_needs_unmap = 0;
}
return -1;
}
drv_data->rx_dma_needs_unmap = 1;
}
return 0;
}


@ -172,7 +172,6 @@ static struct usb_interface_descriptor rndis_data_intf __initdata = {
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
.bAlternateSetting = 1,
.bNumEndpoints = 2,
.bInterfaceClass = USB_CLASS_CDC_DATA,
.bInterfaceSubClass = 0,
@ -303,7 +302,7 @@ static void rndis_response_available(void *_rndis)
__le32 *data = req->buf;
int status;
if (atomic_inc_return(&rndis->notify_count))
if (atomic_inc_return(&rndis->notify_count) != 1)
return;
/* Send RNDIS RESPONSE_AVAILABLE notification; a


@ -66,6 +66,8 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
struct pci_dev *p_smbus;
u8 rev;
u32 temp;
int retval;
@ -166,6 +168,25 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
pci_write_config_byte(pdev, 0x4b, tmp | 0x20);
}
break;
case PCI_VENDOR_ID_ATI:
/* SB700 old version has a bug in EHCI controller,
* which causes usb devices lose response in some cases.
*/
if (pdev->device == 0x4396) {
p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
PCI_DEVICE_ID_ATI_SBX00_SMBUS,
NULL);
if (!p_smbus)
break;
rev = p_smbus->revision;
if ((rev == 0x3a) || (rev == 0x3b)) {
u8 tmp;
pci_read_config_byte(pdev, 0x53, &tmp);
pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
}
pci_dev_put(p_smbus);
}
break;
}
ehci_reset(ehci);


@ -687,7 +687,10 @@ static ssize_t mon_bin_read(struct file *file, char __user *buf,
}
if (rp->b_read >= sizeof(struct mon_bin_hdr)) {
step_len = min(nbytes, (size_t)ep->len_cap);
step_len = ep->len_cap;
step_len -= rp->b_read - sizeof(struct mon_bin_hdr);
if (step_len > nbytes)
step_len = nbytes;
offset = rp->b_out + PKT_SIZE;
offset += rp->b_read - sizeof(struct mon_bin_hdr);
if (offset >= rp->b_size)


@ -1757,7 +1757,7 @@ static int musb_schedule(
}
}
/* use bulk reserved ep1 if no other ep is free */
if (best_end > 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
hw_ep = musb->bulk_ep;
if (is_in)
head = &musb->in_bulk;


@ -56,6 +56,7 @@ static void cp2101_shutdown(struct usb_serial *);
static int debug;
static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */


@ -167,6 +167,13 @@ UNUSUAL_DEV( 0x0421, 0x005d, 0x0001, 0x0600,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
/* Patch for Nokia 5310 capacity */
UNUSUAL_DEV( 0x0421, 0x006a, 0x0000, 0x0591,
"Nokia",
"5310",
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
/* Reported by Mario Rettig <mariorettig@web.de> */
UNUSUAL_DEV( 0x0421, 0x042e, 0x0100, 0x0100,
"Nokia",
@ -233,14 +240,14 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
US_FL_MAX_SECTORS_64 ),
/* Reported by Cedric Godin <cedric@belbone.be> */
UNUSUAL_DEV( 0x0421, 0x04b9, 0x0551, 0x0551,
UNUSUAL_DEV( 0x0421, 0x04b9, 0x0500, 0x0551,
"Nokia",
"5300",
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
/* Reported by Richard Nauber <RichardNauber@web.de> */
UNUSUAL_DEV( 0x0421, 0x04fa, 0x0601, 0x0601,
UNUSUAL_DEV( 0x0421, 0x04fa, 0x0550, 0x0660,
"Nokia",
"6300",
US_SC_DEVICE, US_PR_DEVICE, NULL,


@ -132,7 +132,7 @@ static void init_backlight(struct atmel_lcdfb_info *sinfo)
bl = backlight_device_register("backlight", &sinfo->pdev->dev,
sinfo, &atmel_lcdc_bl_ops);
if (IS_ERR(sinfo->backlight)) {
if (IS_ERR(bl)) {
dev_err(&sinfo->pdev->dev, "error %ld on backlight register\n",
PTR_ERR(bl));
return;


@ -119,6 +119,7 @@ static int da903x_backlight_probe(struct platform_device *pdev)
default:
dev_err(&pdev->dev, "invalid backlight device ID(%d)\n",
pdev->id);
kfree(data);
return -EINVAL;
}
@ -130,6 +131,7 @@ static int da903x_backlight_probe(struct platform_device *pdev)
data, &da903x_backlight_ops);
if (IS_ERR(bl)) {
dev_err(&pdev->dev, "failed to register backlight\n");
kfree(data);
return PTR_ERR(bl);
}


@ -42,10 +42,13 @@ static int fb_notifier_callback(struct notifier_block *self,
mutex_lock(&ld->ops_lock);
if (!ld->ops->check_fb || ld->ops->check_fb(ld, evdata->info)) {
if (event == FB_EVENT_BLANK)
ld->ops->set_power(ld, *(int *)evdata->data);
else
ld->ops->set_mode(ld, evdata->data);
if (event == FB_EVENT_BLANK) {
if (ld->ops->set_power)
ld->ops->set_power(ld, *(int *)evdata->data);
} else {
if (ld->ops->set_mode)
ld->ops->set_mode(ld, evdata->data);
}
}
mutex_unlock(&ld->ops_lock);
return 0;


@ -2462,8 +2462,7 @@ static int __init cirrusfb_init(void)
#ifndef MODULE
static int __init cirrusfb_setup(char *options) {
char *this_opt, s[32];
int i;
char *this_opt;
DPRINTK("ENTER\n");


@ -230,7 +230,7 @@ static void fb_set_logo_directpalette(struct fb_info *info,
greenshift = info->var.green.offset;
blueshift = info->var.blue.offset;
for (i = 32; i < logo->clutsize; i++)
for (i = 32; i < 32 + logo->clutsize; i++)
palette[i] = i << redshift | i << greenshift | i << blueshift;
}


@ -804,6 +804,9 @@ static int pxafb_smart_thread(void *arg)
static int pxafb_smart_init(struct pxafb_info *fbi)
{
if (!(fbi->lccr0 | LCCR0_LCDT))
return 0;
fbi->smart_thread = kthread_run(pxafb_smart_thread, fbi,
"lcd_refresh");
if (IS_ERR(fbi->smart_thread)) {
@ -1372,7 +1375,7 @@ static void pxafb_decode_mach_info(struct pxafb_info *fbi,
fbi->cmap_inverse = inf->cmap_inverse;
fbi->cmap_static = inf->cmap_static;
switch (lcd_conn & 0xf) {
switch (lcd_conn & LCD_TYPE_MASK) {
case LCD_TYPE_MONO_STN:
fbi->lccr0 = LCCR0_CMS;
break;


@ -222,6 +222,9 @@ static irqreturn_t tmiofb_irq(int irq, void *__info)
unsigned int bbisc = tmio_ioread16(par->lcr + LCR_BBISC);
tmio_iowrite16(bbisc, par->lcr + LCR_BBISC);
#ifdef CONFIG_FB_TMIO_ACCELL
/*
* We were in polling mode and now we got correct irq.
* Switch back to IRQ-based sync of command FIFO
@ -231,9 +234,6 @@ static irqreturn_t tmiofb_irq(int irq, void *__info)
par->use_polling = false;
}
tmio_iowrite16(bbisc, par->lcr + LCR_BBISC);
#ifdef CONFIG_FB_TMIO_ACCELL
if (bbisc & 1)
wake_up(&par->wait_acc);
#endif
@ -938,7 +938,9 @@ static void tmiofb_dump_regs(struct platform_device *dev)
static int tmiofb_suspend(struct platform_device *dev, pm_message_t state)
{
struct fb_info *info = platform_get_drvdata(dev);
#ifdef CONFIG_FB_TMIO_ACCELL
struct tmiofb_par *par = info->par;
#endif
struct mfd_cell *cell = dev->dev.platform_data;
int retval = 0;
@ -950,12 +952,14 @@ static int tmiofb_suspend(struct platform_device *dev, pm_message_t state)
info->fbops->fb_sync(info);
#ifdef CONFIG_FB_TMIO_ACCELL
/*
* The fb should be usable even if interrupts are disabled (and they are
during suspend/resume). Switch temporarily to forced polling.
*/
printk(KERN_INFO "tmiofb: switching to polling\n");
par->use_polling = true;
#endif
tmiofb_hw_stop(dev);
if (cell->suspend)

View File

@ -2036,30 +2036,30 @@ static int viafb_vt1636_proc_write(struct file *file,
return count;
}
static void viafb_init_proc(struct proc_dir_entry *viafb_entry)
static void viafb_init_proc(struct proc_dir_entry **viafb_entry)
{
struct proc_dir_entry *entry;
viafb_entry = proc_mkdir("viafb", NULL);
*viafb_entry = proc_mkdir("viafb", NULL);
if (viafb_entry) {
entry = create_proc_entry("dvp0", 0, viafb_entry);
entry = create_proc_entry("dvp0", 0, *viafb_entry);
if (entry) {
entry->owner = THIS_MODULE;
entry->read_proc = viafb_dvp0_proc_read;
entry->write_proc = viafb_dvp0_proc_write;
}
entry = create_proc_entry("dvp1", 0, viafb_entry);
entry = create_proc_entry("dvp1", 0, *viafb_entry);
if (entry) {
entry->owner = THIS_MODULE;
entry->read_proc = viafb_dvp1_proc_read;
entry->write_proc = viafb_dvp1_proc_write;
}
entry = create_proc_entry("dfph", 0, viafb_entry);
entry = create_proc_entry("dfph", 0, *viafb_entry);
if (entry) {
entry->owner = THIS_MODULE;
entry->read_proc = viafb_dfph_proc_read;
entry->write_proc = viafb_dfph_proc_write;
}
entry = create_proc_entry("dfpl", 0, viafb_entry);
entry = create_proc_entry("dfpl", 0, *viafb_entry);
if (entry) {
entry->owner = THIS_MODULE;
entry->read_proc = viafb_dfpl_proc_read;
@ -2068,7 +2068,7 @@ static void viafb_init_proc(struct proc_dir_entry *viafb_entry)
if (VT1636_LVDS == viaparinfo->chip_info->lvds_chip_info.
lvds_chip_name || VT1636_LVDS ==
viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name) {
entry = create_proc_entry("vt1636", 0, viafb_entry);
entry = create_proc_entry("vt1636", 0, *viafb_entry);
if (entry) {
entry->owner = THIS_MODULE;
entry->read_proc = viafb_vt1636_proc_read;
@ -2087,6 +2087,7 @@ static void viafb_remove_proc(struct proc_dir_entry *viafb_entry)
remove_proc_entry("dfpl", viafb_entry);
remove_proc_entry("vt1636", viafb_entry);
remove_proc_entry("vt1625", viafb_entry);
remove_proc_entry("viafb", NULL);
}
static int __devinit via_pci_probe(void)
@ -2348,7 +2349,7 @@ static int __devinit via_pci_probe(void)
viafbinfo->node, viafbinfo->fix.id, default_var.xres,
default_var.yres, default_var.bits_per_pixel);
viafb_init_proc(viaparinfo->proc_entry);
viafb_init_proc(&viaparinfo->proc_entry);
viafb_init_dac(IGA2);
return 0;
}

View File

@ -86,8 +86,8 @@ static struct platform_driver omap_hdq_driver = {
static u8 omap_w1_read_byte(void *_hdq);
static void omap_w1_write_byte(void *_hdq, u8 byte);
static u8 omap_w1_reset_bus(void *_hdq);
static void omap_w1_search_bus(void *_hdq, u8 search_type,
w1_slave_found_callback slave_found);
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
u8 search_type, w1_slave_found_callback slave_found);
static struct w1_bus_master omap_w1_master = {
@ -231,8 +231,8 @@ static u8 omap_w1_reset_bus(void *_hdq)
}
/* W1 search callback function */
static void omap_w1_search_bus(void *_hdq, u8 search_type,
w1_slave_found_callback slave_found)
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
u8 search_type, w1_slave_found_callback slave_found)
{
u64 module_id, rn_le, cs, id;
@ -249,7 +249,7 @@ static void omap_w1_search_bus(void *_hdq, u8 search_type,
cs = w1_calc_crc8((u8 *)&rn_le, 7);
id = (cs << 56) | module_id;
slave_found(_hdq, id);
slave_found(master_dev, id);
}
static int _omap_hdq_reset(struct hdq_data *hdq_data)

View File

@ -122,14 +122,7 @@ static struct timer_list balloon_timer;
static void scrub_page(struct page *page)
{
#ifdef CONFIG_XEN_SCRUB_PAGES
if (PageHighMem(page)) {
void *v = kmap(page);
clear_page(v);
kunmap(v);
} else {
void *v = page_address(page);
clear_page(v);
}
clear_highpage(page);
#endif
}

View File

@ -8,7 +8,11 @@ handling fcntl(F_SETLEASE). Convert cifs to using blocking tcp
sends, and also let tcp autotune the socket send and receive buffers.
This reduces the number of EAGAIN errors returned by TCP/IP in
high stress workloads (and the number of retries on socket writes
when sending large SMBWriteX requests).
when sending large SMBWriteX requests). Fix a case in which a portion of
data could fail to be written to the file on the server before the
file was closed. Fix DFS parsing to properly handle the path consumed field,
and to handle certain codepage conversions better. Fix mount and
umount race that can cause oops in mount or umount or reconnect.
Version 1.54
------------

View File

@ -606,7 +606,15 @@ GLOBAL_EXTERN struct list_head cifs_tcp_ses_list;
* changes to the tcon->tidStatus should be done while holding this lock.
*/
GLOBAL_EXTERN rwlock_t cifs_tcp_ses_lock;
GLOBAL_EXTERN rwlock_t GlobalSMBSeslock; /* protects list inserts on 3 above */
/*
* This lock protects the cifs_file->llist and cifs_file->flist
* list operations, and updates to some flags (cifs_file->invalidHandle)
* It will be moved to either use the tcon->stat_lock or equivalent later.
* If cifs_tcp_ses_lock and the lock below are both needed to be held, then
* the cifs_tcp_ses_lock must be grabbed first and released last.
*/
GLOBAL_EXTERN rwlock_t GlobalSMBSeslock;
GLOBAL_EXTERN struct list_head GlobalOplock_Q;
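Not part of the diff: a minimal C sketch of the lock ordering the comment above documents, assuming the declarations from cifsglob.h; the function name and list-walk comment are placeholders.
#include <linux/spinlock.h>
extern rwlock_t cifs_tcp_ses_lock;	/* from cifsglob.h */
extern rwlock_t GlobalSMBSeslock;	/* from cifsglob.h */
static void example_walk_open_files(void)
{
	/* outer lock first: protects the tcp/smb session and tcon lists */
	read_lock(&cifs_tcp_ses_lock);
	/* inner lock: protects cifs_file->llist/flist and invalidHandle */
	write_lock(&GlobalSMBSeslock);
	/* ... walk tcon->openFileList, update cifs_file->invalidHandle ... */
	/* release in reverse order: inner lock first, outer lock last */
	write_unlock(&GlobalSMBSeslock);
	read_unlock(&cifs_tcp_ses_lock);
}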

View File

@ -295,7 +295,7 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
check for tcp and smb session status done differently
for those three - in the calling routine */
if (tcon) {
if (tcon->need_reconnect) {
if (tcon->tidStatus == CifsExiting) {
/* only tree disconnect, open, and write,
(and ulogoff which does not have tcon)
are allowed as we start force umount */

View File

@ -488,12 +488,13 @@ int cifs_close(struct inode *inode, struct file *file)
pTcon = cifs_sb->tcon;
if (pSMBFile) {
struct cifsLockInfo *li, *tmp;
write_lock(&GlobalSMBSeslock);
pSMBFile->closePend = true;
if (pTcon) {
/* no sense reconnecting to close a file that is
already closed */
if (!pTcon->need_reconnect) {
write_unlock(&GlobalSMBSeslock);
timeout = 2;
while ((atomic_read(&pSMBFile->wrtPending) != 0)
&& (timeout <= 2048)) {
@ -510,12 +511,15 @@ int cifs_close(struct inode *inode, struct file *file)
timeout *= 4;
}
if (atomic_read(&pSMBFile->wrtPending))
cERROR(1,
("close with pending writes"));
rc = CIFSSMBClose(xid, pTcon,
cERROR(1, ("close with pending write"));
if (!pTcon->need_reconnect &&
!pSMBFile->invalidHandle)
rc = CIFSSMBClose(xid, pTcon,
pSMBFile->netfid);
}
}
} else
write_unlock(&GlobalSMBSeslock);
} else
write_unlock(&GlobalSMBSeslock);
/* Delete any outstanding lock records.
We'll lose them when the file is closed anyway. */
@ -587,15 +591,18 @@ int cifs_closedir(struct inode *inode, struct file *file)
pTcon = cifs_sb->tcon;
cFYI(1, ("Freeing private data in close dir"));
write_lock(&GlobalSMBSeslock);
if (!pCFileStruct->srch_inf.endOfSearch &&
!pCFileStruct->invalidHandle) {
pCFileStruct->invalidHandle = true;
write_unlock(&GlobalSMBSeslock);
rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
cFYI(1, ("Closing uncompleted readdir with rc %d",
rc));
/* not much we can do if it fails anyway, ignore rc */
rc = 0;
}
} else
write_unlock(&GlobalSMBSeslock);
ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
if (ptmp) {
cFYI(1, ("closedir free smb buf in srch struct"));

View File

@ -555,12 +555,14 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
continue;
cifs_stats_inc(&tcon->num_oplock_brks);
write_lock(&GlobalSMBSeslock);
list_for_each(tmp2, &tcon->openFileList) {
netfile = list_entry(tmp2, struct cifsFileInfo,
tlist);
if (pSMB->Fid != netfile->netfid)
continue;
write_unlock(&GlobalSMBSeslock);
read_unlock(&cifs_tcp_ses_lock);
cFYI(1, ("file id match, oplock break"));
pCifsInode = CIFS_I(netfile->pInode);
@ -576,6 +578,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
return true;
}
write_unlock(&GlobalSMBSeslock);
read_unlock(&cifs_tcp_ses_lock);
cFYI(1, ("No matching file for oplock break"));
return true;

View File

@ -741,11 +741,14 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
(index_to_find < first_entry_in_buffer)) {
/* close and restart search */
cFYI(1, ("search backing up - close and restart search"));
write_lock(&GlobalSMBSeslock);
if (!cifsFile->srch_inf.endOfSearch &&
!cifsFile->invalidHandle) {
cifsFile->invalidHandle = true;
write_unlock(&GlobalSMBSeslock);
CIFSFindClose(xid, pTcon, cifsFile->netfid);
}
} else
write_unlock(&GlobalSMBSeslock);
if (cifsFile->srch_inf.ntwrk_buf_start) {
cFYI(1, ("freeing SMB ff cache buf on search rewind"));
if (cifsFile->srch_inf.smallBuf)

View File

@ -1037,17 +1037,14 @@ static int
decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
struct ecryptfs_crypt_stat *crypt_stat)
{
struct scatterlist dst_sg;
struct scatterlist src_sg;
struct scatterlist dst_sg[2];
struct scatterlist src_sg[2];
struct mutex *tfm_mutex;
struct blkcipher_desc desc = {
.flags = CRYPTO_TFM_REQ_MAY_SLEEP
};
int rc = 0;
sg_init_table(&dst_sg, 1);
sg_init_table(&src_sg, 1);
if (unlikely(ecryptfs_verbosity > 0)) {
ecryptfs_printk(
KERN_DEBUG, "Session key encryption key (size [%d]):\n",
@ -1066,8 +1063,8 @@ decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
}
rc = virt_to_scatterlist(auth_tok->session_key.encrypted_key,
auth_tok->session_key.encrypted_key_size,
&src_sg, 1);
if (rc != 1) {
src_sg, 2);
if (rc < 1 || rc > 2) {
printk(KERN_ERR "Internal error whilst attempting to convert "
"auth_tok->session_key.encrypted_key to scatterlist; "
"expected rc = 1; got rc = [%d]. "
@ -1079,8 +1076,8 @@ decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
auth_tok->session_key.encrypted_key_size;
rc = virt_to_scatterlist(auth_tok->session_key.decrypted_key,
auth_tok->session_key.decrypted_key_size,
&dst_sg, 1);
if (rc != 1) {
dst_sg, 2);
if (rc < 1 || rc > 2) {
printk(KERN_ERR "Internal error whilst attempting to convert "
"auth_tok->session_key.decrypted_key to scatterlist; "
"expected rc = 1; got rc = [%d]\n", rc);
@ -1096,7 +1093,7 @@ decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
rc = -EINVAL;
goto out;
}
rc = crypto_blkcipher_decrypt(&desc, &dst_sg, &src_sg,
rc = crypto_blkcipher_decrypt(&desc, dst_sg, src_sg,
auth_tok->session_key.encrypted_key_size);
mutex_unlock(tfm_mutex);
if (unlikely(rc)) {
@ -1539,8 +1536,8 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
size_t i;
size_t encrypted_session_key_valid = 0;
char session_key_encryption_key[ECRYPTFS_MAX_KEY_BYTES];
struct scatterlist dst_sg;
struct scatterlist src_sg;
struct scatterlist dst_sg[2];
struct scatterlist src_sg[2];
struct mutex *tfm_mutex = NULL;
u8 cipher_code;
size_t packet_size_length;
@ -1619,8 +1616,8 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
ecryptfs_dump_hex(session_key_encryption_key, 16);
}
rc = virt_to_scatterlist(crypt_stat->key, key_rec->enc_key_size,
&src_sg, 1);
if (rc != 1) {
src_sg, 2);
if (rc < 1 || rc > 2) {
ecryptfs_printk(KERN_ERR, "Error generating scatterlist "
"for crypt_stat session key; expected rc = 1; "
"got rc = [%d]. key_rec->enc_key_size = [%d]\n",
@ -1629,8 +1626,8 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
goto out;
}
rc = virt_to_scatterlist(key_rec->enc_key, key_rec->enc_key_size,
&dst_sg, 1);
if (rc != 1) {
dst_sg, 2);
if (rc < 1 || rc > 2) {
ecryptfs_printk(KERN_ERR, "Error generating scatterlist "
"for crypt_stat encrypted session key; "
"expected rc = 1; got rc = [%d]. "
@ -1651,7 +1648,7 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
rc = 0;
ecryptfs_printk(KERN_DEBUG, "Encrypting [%d] bytes of the key\n",
crypt_stat->key_size);
rc = crypto_blkcipher_encrypt(&desc, &dst_sg, &src_sg,
rc = crypto_blkcipher_encrypt(&desc, dst_sg, src_sg,
(*key_rec).enc_key_size);
mutex_unlock(tfm_mutex);
if (rc) {

View File

@ -81,7 +81,7 @@ extern int do_rmdir(const char *file);
extern int do_mknod(const char *file, int mode, unsigned int major,
unsigned int minor);
extern int link_file(const char *from, const char *to);
extern int do_readlink(char *file, char *buf, int size);
extern int hostfs_do_readlink(char *file, char *buf, int size);
extern int rename_file(char *from, char *to);
extern int do_statfs(char *root, long *bsize_out, long long *blocks_out,
long long *bfree_out, long long *bavail_out,

View File

@ -168,7 +168,7 @@ static char *follow_link(char *link)
if (name == NULL)
goto out;
n = do_readlink(link, name, len);
n = hostfs_do_readlink(link, name, len);
if (n < len)
break;
len *= 2;
@ -943,7 +943,7 @@ int hostfs_link_readpage(struct file *file, struct page *page)
name = inode_name(page->mapping->host, 0);
if (name == NULL)
return -ENOMEM;
err = do_readlink(name, buffer, PAGE_CACHE_SIZE);
err = hostfs_do_readlink(name, buffer, PAGE_CACHE_SIZE);
kfree(name);
if (err == PAGE_CACHE_SIZE)
err = -E2BIG;

View File

@ -377,7 +377,7 @@ int link_file(const char *to, const char *from)
return 0;
}
int do_readlink(char *file, char *buf, int size)
int hostfs_do_readlink(char *file, char *buf, int size)
{
int n;

View File

@ -1378,7 +1378,7 @@ static int may_delete(struct inode *dir,struct dentry *victim,int isdir)
if (IS_APPEND(dir))
return -EPERM;
if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)||
IS_IMMUTABLE(victim->d_inode))
IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
return -EPERM;
if (isdir) {
if (!S_ISDIR(victim->d_inode->i_mode))

View File

@ -74,8 +74,6 @@ static inline int cpuset_do_slab_mem_spread(void)
return current->flags & PF_SPREAD_SLAB;
}
extern void cpuset_track_online_nodes(void);
extern int current_cpuset_is_being_rebound(void);
extern void rebuild_sched_domains(void);
@ -151,8 +149,6 @@ static inline int cpuset_do_slab_mem_spread(void)
return 0;
}
static inline void cpuset_track_online_nodes(void) {}
static inline int current_cpuset_is_being_rebound(void)
{
return 0;

View File

@ -318,6 +318,8 @@ struct ftrace_retfunc {
unsigned long func; /* Current function */
unsigned long long calltime;
unsigned long long rettime;
/* Number of functions that overran the depth limit for current task */
unsigned long overrun;
};
#ifdef CONFIG_FUNCTION_RET_TRACER

View File

@ -40,7 +40,7 @@
#define SYS_GETSOCKOPT 15 /* sys_getsockopt(2) */
#define SYS_SENDMSG 16 /* sys_sendmsg(2) */
#define SYS_RECVMSG 17 /* sys_recvmsg(2) */
#define SYS_PACCEPT 18 /* sys_paccept(2) */
#define SYS_ACCEPT4 18 /* sys_accept4(2) */
typedef enum {
SS_FREE = 0, /* not allocated */
@ -100,7 +100,7 @@ enum sock_type {
* remaining bits are used as flags. */
#define SOCK_TYPE_MASK 0xf
/* Flags for socket, socketpair, paccept */
/* Flags for socket, socketpair, accept4 */
#define SOCK_CLOEXEC O_CLOEXEC
#ifndef SOCK_NONBLOCK
#define SOCK_NONBLOCK O_NONBLOCK
@ -223,8 +223,6 @@ extern int sock_map_fd(struct socket *sock, int flags);
extern struct socket *sockfd_lookup(int fd, int *err);
#define sockfd_put(sock) fput(sock->file)
extern int net_ratelimit(void);
extern long do_accept(int fd, struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags);
#define net_random() random32()
#define net_srandom(seed) srandom32((__force u32)seed)
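Not part of the diff: a short userspace sketch of the accept4(2) call these definitions back, assuming the C library exposes it; SOCK_CLOEXEC and SOCK_NONBLOCK are the flags defined above.
#define _GNU_SOURCE
#include <sys/socket.h>
static int accept_conn(int listen_fd)
{
	struct sockaddr_storage peer;
	socklen_t len = sizeof(peer);
	/* Accept and atomically mark the new descriptor close-on-exec and
	 * non-blocking, instead of a racy accept() + fcntl() pair. */
	return accept4(listen_fd, (struct sockaddr *)&peer, &len,
		       SOCK_CLOEXEC | SOCK_NONBLOCK);
}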

Some files were not shown because too many files have changed in this diff