Linux 3.7-rc8

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.18 (GNU/Linux)

iQEcBAABAgAGBQJQvPxHAAoJEHm+PkMAQRiGkFUIAJz761Kp4J4Nj/wrv5ZHGQso
MHRbzMkSfRNz6lGCkgxS61ydYKtrV2vuE6VH8HriGlLkI8Lj7MaQTXvYSdj/O0zy
yV/2H5R3s7n5JZTw3g3eOf3K33tL6xhwd4tYHI7QHjdzSzQyaNhuUuNhxrlT95iv
twNetm0tyhpf76TurRzF14hLUaShVRXT/FrqWK9wgmGjg7Ij0xp+UFNkeUGUwbeF
3HMJ98fdd0VD/W8qF5GZr3USks4C+NKtXEya8zQKc59XumKCiRJZmbE6JsJlp+OP
CsHs7ZaNlInvPcKTFzkNs8ThYWC/NHBqLO5tX5UphW4qFSS39EmHd8igrwXLPaI=
=RS1F
-----END PGP SIGNATURE-----

Merge tag 'v3.7-rc8' into staging/for_v3.8

Linux 3.7-rc8

* tag 'v3.7-rc8': (112 commits)
  Linux 3.7-rc8
  [parisc] open(2) compat bug
  Revert "sched, autogroup: Stop going ahead if autogroup is disabled"
  open*(2) compat fixes (s390, arm64)
  8139cp: fix coherent mapping leak in error path.
  tcp: fix crashes in do_tcp_sendpages()
  workqueue: mod_delayed_work_on() shouldn't queue timer on 0 delay
  workqueue: exit rescuer_thread() as TASK_RUNNING
  x86, fpu: Avoid FPU lazy restore after suspend
  drivers/rtc/rtc-tps65910.c: fix invalid pointer access on _remove()
  mm: soft offline: split thp at the beginning of soft_offline_page()
  mm: avoid waking kswapd for THP allocations when compaction is deferred or contended
  revert "Revert "mm: remove __GFP_NO_KSWAPD""
  mm: vmscan: fix endless loop in kswapd balancing
  mm/vmemmap: fix wrong use of virt_to_page
  mm: compaction: fix return value of capture_free_page()
  fix off-by-one in argument passed by iterate_fd() to callbacks
  lookup_one_len: don't accept . and ..
  cifs: get rid of blind d_drop() in readdir
  nfs_lookup_revalidate(): fix a leak
  ...
commit df5450d519

Changed paths:
  Makefile
  arch/arm: Kconfig, common, mach-dove, mach-ixp4xx, mach-kirkwood, plat-s3c24xx
  arch: arm64/include/asm, c6x, microblaze/kernel, openrisc/kernel, parisc/kernel,
        s390/kernel, score/kernel, sh/kernel, um/kernel, x86
  drivers: atm, char, crypto, edac, md, mtd, remoteproc, rtc, target, vhost
  drivers/gpu/drm: exynos, i915, radeon
  drivers/media/platform: exynos-gsc, s5p-fimc, s5p-mfc
  drivers/net: bonding, can/usb/peak_usb, ethernet, team, usb, wan,
               wireless/iwlwifi/dvm
  fs, include, kernel, mm

Makefile (8 lines changed):
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Terrified Chipmunk
 
 # *DOCUMENTATION*

@@ -1321,10 +1321,12 @@ kernelversion:
 
 # Clear a bunch of variables before executing the submake
 tools/: FORCE
-	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS= -C $(src)/tools/
+	$(Q)mkdir -p $(objtree)/tools
+	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS= O=$(objtree) subdir=tools -C $(src)/tools/
 
 tools/%: FORCE
-	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS= -C $(src)/tools/ $*
+	$(Q)mkdir -p $(objtree)/tools
+	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS= O=$(objtree) subdir=tools -C $(src)/tools/ $*
 
 # Single targets
 # ---------------------------------------------------------------------------
@@ -547,6 +547,7 @@ config ARCH_KIRKWOOD
 	select CPU_FEROCEON
 	select GENERIC_CLOCKEVENTS
 	select PCI
+	select PCI_QUIRKS
 	select PLAT_ORION_LEGACY
 	help
 	  Support for the following Marvell Kirkwood series SoCs:
@@ -162,7 +162,6 @@ static struct clock_event_device sp804_clockevent = {
 	.set_mode	= sp804_set_mode,
 	.set_next_event	= sp804_set_next_event,
 	.rating		= 300,
-	.cpumask	= cpu_all_mask,
 };
 
 static struct irqaction sp804_timer_irq = {

@@ -185,6 +184,7 @@ void __init sp804_clockevents_init(void __iomem *base, unsigned int irq,
 	clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ);
 	evt->name = name;
 	evt->irq = irq;
+	evt->cpumask = cpu_possible_mask;
 
 	setup_irq(irq, &sp804_timer_irq);
 	clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
@@ -63,7 +63,7 @@ static inline int pmu_to_irq(int pin)
 
 static inline int irq_to_pmu(int irq)
 {
-	if (IRQ_DOVE_PMU_START < irq && irq < NR_IRQS)
+	if (IRQ_DOVE_PMU_START <= irq && irq < NR_IRQS)
 		return irq - IRQ_DOVE_PMU_START;
 
 	return -EINVAL;

@@ -46,8 +46,20 @@ static void pmu_irq_ack(struct irq_data *d)
 	int pin = irq_to_pmu(d->irq);
 	u32 u;
 
+	/*
+	 * The PMU mask register is not RW0C: it is RW. This means that
+	 * the bits take whatever value is written to them; if you write
+	 * a '1', you will set the interrupt.
+	 *
+	 * Unfortunately this means there is NO race free way to clear
+	 * these interrupts.
+	 *
+	 * So, let's structure the code so that the window is as small as
+	 * possible.
+	 */
 	u = ~(1 << (pin & 31));
-	writel(u, PMU_INTERRUPT_CAUSE);
+	u &= readl_relaxed(PMU_INTERRUPT_CAUSE);
+	writel_relaxed(u, PMU_INTERRUPT_CAUSE);
 }
 
 static struct irq_chip pmu_irq_chip = {
@@ -410,6 +410,7 @@ void __init ixp4xx_pci_preinit(void)
 		 * Enable the IO window to be way up high, at 0xfffffc00
 		 */
 		local_write_config(PCI_BASE_ADDRESS_5, 4, 0xfffffc01);
+		local_write_config(0x40, 4, 0x000080FF); /* No TRDY time limit */
 	} else {
 		printk("PCI: IXP4xx is target - No bus scan performed\n");
 	}

@@ -67,15 +67,12 @@ static struct map_desc ixp4xx_io_desc[] __initdata = {
 		.pfn		= __phys_to_pfn(IXP4XX_PCI_CFG_BASE_PHYS),
 		.length		= IXP4XX_PCI_CFG_REGION_SIZE,
 		.type		= MT_DEVICE
-	},
-#ifdef CONFIG_DEBUG_LL
-	{	/* Debug UART mapping */
-		.virtual	= (unsigned long)IXP4XX_DEBUG_UART_BASE_VIRT,
-		.pfn		= __phys_to_pfn(IXP4XX_DEBUG_UART_BASE_PHYS),
-		.length		= IXP4XX_DEBUG_UART_REGION_SIZE,
+	}, {	/* Queue Manager */
+		.virtual	= (unsigned long)IXP4XX_QMGR_BASE_VIRT,
+		.pfn		= __phys_to_pfn(IXP4XX_QMGR_BASE_PHYS),
+		.length		= IXP4XX_QMGR_REGION_SIZE,
 		.type		= MT_DEVICE
-	}
-#endif
+	},
 };
 
 void __init ixp4xx_map_io(void)

@@ -15,6 +15,7 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/flash.h>
 #include <asm/mach/pci.h>
+#include <asm/system_info.h>
 
 #define SLOT_ETHA	0x0B	/* IDSEL = AD21 */
 #define SLOT_ETHB	0x0C	/* IDSEL = AD20 */
@@ -329,7 +330,7 @@ static struct platform_device device_hss_tab[] = {
 };
 
 
-static struct platform_device *device_tab[6] __initdata = {
+static struct platform_device *device_tab[7] __initdata = {
 	&device_flash,		/* index 0 */
 };
 

@@ -17,8 +17,8 @@
 #else
 		mov	\rp, #0
 #endif
-		orr	\rv, \rp, #0xff000000	@ virtual
-		orr	\rv, \rv, #0x00b00000
+		orr	\rv, \rp, #0xfe000000	@ virtual
+		orr	\rv, \rv, #0x00f00000
 		orr	\rp, \rp, #0xc8000000	@ physical
 		.endm
 
@@ -30,51 +30,43 @@
 *
 * 0x50000000	0x10000000	ioremap'd	EXP BUS
 *
- * 0x6000000	0x00004000	ioremap'd	QMgr
+ * 0xC8000000	0x00013000	0xFEF00000	On-Chip Peripherals
 *
- * 0xC0000000	0x00001000	0xffbff000	PCI CFG
+ * 0xC0000000	0x00001000	0xFEF13000	PCI CFG
 *
- * 0xC4000000	0x00001000	0xffbfe000	EXP CFG
+ * 0xC4000000	0x00001000	0xFEF14000	EXP CFG
 *
- * 0xC8000000	0x00013000	0xffbeb000	On-Chip Peripherals
+ * 0x60000000	0x00004000	0xFEF15000	QMgr
 */
 
 /*
  * Queue Manager
  */
-#define IXP4XX_QMGR_BASE_PHYS		(0x60000000)
-#define IXP4XX_QMGR_REGION_SIZE		(0x00004000)
+#define IXP4XX_QMGR_BASE_PHYS		0x60000000
+#define IXP4XX_QMGR_BASE_VIRT		IOMEM(0xFEF15000)
+#define IXP4XX_QMGR_REGION_SIZE		0x00004000
 
 /*
- * Expansion BUS Configuration registers
+ * Peripheral space, including debug UART. Must be section-aligned so that
+ * it can be used with the low-level debug code.
  */
-#define IXP4XX_EXP_CFG_BASE_PHYS	(0xC4000000)
-#define IXP4XX_EXP_CFG_BASE_VIRT	IOMEM(0xFFBFE000)
-#define IXP4XX_EXP_CFG_REGION_SIZE	(0x00001000)
+#define IXP4XX_PERIPHERAL_BASE_PHYS	0xC8000000
+#define IXP4XX_PERIPHERAL_BASE_VIRT	IOMEM(0xFEF00000)
+#define IXP4XX_PERIPHERAL_REGION_SIZE	0x00013000
 
 /*
  * PCI Config registers
  */
-#define IXP4XX_PCI_CFG_BASE_PHYS	(0xC0000000)
-#define IXP4XX_PCI_CFG_BASE_VIRT	IOMEM(0xFFBFF000)
-#define IXP4XX_PCI_CFG_REGION_SIZE	(0x00001000)
+#define IXP4XX_PCI_CFG_BASE_PHYS	0xC0000000
+#define IXP4XX_PCI_CFG_BASE_VIRT	IOMEM(0xFEF13000)
+#define IXP4XX_PCI_CFG_REGION_SIZE	0x00001000
 
 /*
- * Peripheral space
+ * Expansion BUS Configuration registers
  */
-#define IXP4XX_PERIPHERAL_BASE_PHYS	(0xC8000000)
-#define IXP4XX_PERIPHERAL_BASE_VIRT	IOMEM(0xFFBEB000)
-#define IXP4XX_PERIPHERAL_REGION_SIZE	(0x00013000)
+#define IXP4XX_EXP_CFG_BASE_PHYS	0xC4000000
+#define IXP4XX_EXP_CFG_BASE_VIRT	0xFEF14000
+#define IXP4XX_EXP_CFG_REGION_SIZE	0x00001000
 
-/*
- * Debug UART
- *
- * This is basically a remap of UART1 into a region that is section
- * aligned so that it * can be used with the low-level debug code.
- */
-#define IXP4XX_DEBUG_UART_BASE_PHYS	(0xC8000000)
-#define IXP4XX_DEBUG_UART_BASE_VIRT	IOMEM(0xffb00000)
-#define IXP4XX_DEBUG_UART_REGION_SIZE	(0x00001000)
-
 #define IXP4XX_EXP_CS0_OFFSET	0x00
 #define IXP4XX_EXP_CS1_OFFSET	0x04
@@ -86,7 +86,7 @@ void qmgr_release_queue(unsigned int queue);
 
 static inline void qmgr_put_entry(unsigned int queue, u32 val)
 {
-	extern struct qmgr_regs __iomem *qmgr_regs;
+	struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
 #if DEBUG_QMGR
 	BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
 

@@ -99,7 +99,7 @@ static inline void qmgr_put_entry(unsigned int queue, u32 val)
 static inline u32 qmgr_get_entry(unsigned int queue)
 {
 	u32 val;
-	extern struct qmgr_regs __iomem *qmgr_regs;
+	const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
 	val = __raw_readl(&qmgr_regs->acc[queue][0]);
 #if DEBUG_QMGR
 	BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */

@@ -112,14 +112,14 @@ static inline u32 qmgr_get_entry(unsigned int queue)
 
 static inline int __qmgr_get_stat1(unsigned int queue)
 {
-	extern struct qmgr_regs __iomem *qmgr_regs;
+	const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
 	return (__raw_readl(&qmgr_regs->stat1[queue >> 3])
 		>> ((queue & 7) << 2)) & 0xF;
 }
 
 static inline int __qmgr_get_stat2(unsigned int queue)
 {
-	extern struct qmgr_regs __iomem *qmgr_regs;
+	const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
 	BUG_ON(queue >= HALF_QUEUES);
 	return (__raw_readl(&qmgr_regs->stat2[queue >> 4])
 		>> ((queue & 0xF) << 1)) & 0x3;

@@ -145,7 +145,7 @@ static inline int qmgr_stat_empty(unsigned int queue)
 */
 static inline int qmgr_stat_below_low_watermark(unsigned int queue)
 {
-	extern struct qmgr_regs __iomem *qmgr_regs;
+	const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
 	if (queue >= HALF_QUEUES)
 		return (__raw_readl(&qmgr_regs->statne_h) >>
 			(queue - HALF_QUEUES)) & 0x01;

@@ -172,7 +172,7 @@ static inline int qmgr_stat_above_high_watermark(unsigned int queue)
 */
 static inline int qmgr_stat_full(unsigned int queue)
 {
-	extern struct qmgr_regs __iomem *qmgr_regs;
+	const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
 	if (queue >= HALF_QUEUES)
 		return (__raw_readl(&qmgr_regs->statf_h) >>
 			(queue - HALF_QUEUES)) & 0x01;
@@ -116,7 +116,11 @@
 /* NPE mailbox_status value for reset */
 #define RESET_MBOX_STAT			0x0000F0F0
 
-const char *npe_names[] = { "NPE-A", "NPE-B", "NPE-C" };
+#define NPE_A_FIRMWARE "NPE-A"
+#define NPE_B_FIRMWARE "NPE-B"
+#define NPE_C_FIRMWARE "NPE-C"
+
+const char *npe_names[] = { NPE_A_FIRMWARE, NPE_B_FIRMWARE, NPE_C_FIRMWARE };
 
 #define print_npe(pri, npe, fmt, ...)					\
 	printk(pri "%s: " fmt, npe_name(npe), ## __VA_ARGS__)

@@ -724,6 +728,9 @@ module_exit(npe_cleanup_module);
 
 MODULE_AUTHOR("Krzysztof Halasa");
 MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(NPE_A_FIRMWARE);
+MODULE_FIRMWARE(NPE_B_FIRMWARE);
+MODULE_FIRMWARE(NPE_C_FIRMWARE);
 
 EXPORT_SYMBOL(npe_names);
 EXPORT_SYMBOL(npe_running);
@@ -14,7 +14,7 @@
 #include <linux/module.h>
 #include <mach/qmgr.h>
 
-struct qmgr_regs __iomem *qmgr_regs;
+static struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
 static struct resource *mem_res;
 static spinlock_t qmgr_lock;
 static u32 used_sram_bitmap[4]; /* 128 16-dword pages */

@@ -293,12 +293,6 @@ static int qmgr_init(void)
 	if (mem_res == NULL)
 		return -EBUSY;
 
-	qmgr_regs = ioremap(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
-	if (qmgr_regs == NULL) {
-		err = -ENOMEM;
-		goto error_map;
-	}
-
 	/* reset qmgr registers */
 	for (i = 0; i < 4; i++) {
 		__raw_writel(0x33333333, &qmgr_regs->stat1[i]);

@@ -347,8 +341,6 @@ static int qmgr_init(void)
 error_irq2:
 	free_irq(IRQ_IXP4XX_QM1, NULL);
 error_irq:
-	iounmap(qmgr_regs);
-error_map:
 	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
 	return err;
 }

@@ -359,7 +351,6 @@ static void qmgr_remove(void)
 	free_irq(IRQ_IXP4XX_QM2, NULL);
 	synchronize_irq(IRQ_IXP4XX_QM1);
 	synchronize_irq(IRQ_IXP4XX_QM2);
-	iounmap(qmgr_regs);
 	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
 }
 

@@ -369,7 +360,6 @@ module_exit(qmgr_remove);
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Krzysztof Halasa");
 
-EXPORT_SYMBOL(qmgr_regs);
 EXPORT_SYMBOL(qmgr_set_irq);
 EXPORT_SYMBOL(qmgr_enable_irq);
 EXPORT_SYMBOL(qmgr_disable_irq);
@@ -207,14 +207,19 @@ static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys)
 	return 1;
 }
 
+/*
+ * The root complex has a hardwired class of PCI_CLASS_MEMORY_OTHER, when it
+ * is operating as a root complex this needs to be switched to
+ * PCI_CLASS_BRIDGE_HOST or Linux will errantly try to process the BAR's on
+ * the device. Decoding setup is handled by the orion code.
+ */
 static void __devinit rc_pci_fixup(struct pci_dev *dev)
 {
-	/*
-	 * Prevent enumeration of root complex.
-	 */
 	if (dev->bus->parent == NULL && dev->devfn == 0) {
 		int i;
 
+		dev->class &= 0xff;
+		dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
 		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
 			dev->resource[i].start = 0;
 			dev->resource[i].end   = 0;
@@ -473,12 +473,13 @@ int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
 		pr_debug("dma%d: %s: buffer %p queued onto non-empty channel\n",
			 chan->number, __func__, buf);
 
-		if (chan->end == NULL)
+		if (chan->end == NULL) {
 			pr_debug("dma%d: %s: %p not empty, and chan->end==NULL?\n",
				 chan->number, __func__, chan);
-
+		} else {
 			chan->end->next = buf;
 			chan->end = buf;
+		}
 	}
 
 	/* if necessary, update the next buffer field */
@@ -392,7 +392,7 @@ __SYSCALL(367, sys_fanotify_init)
 __SYSCALL(368, compat_sys_fanotify_mark_wrapper)
 __SYSCALL(369, sys_prlimit64)
 __SYSCALL(370, sys_name_to_handle_at)
-__SYSCALL(371, sys_open_by_handle_at)
+__SYSCALL(371, compat_sys_open_by_handle_at)
 __SYSCALL(372, sys_clock_adjtime)
 __SYSCALL(373, sys_syncfs)
arch/c6x/include/asm/setup.h (new file, 33 lines):

@@ -0,0 +1,33 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_SETUP_H
+#define _ASM_C6X_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+#ifndef __ASSEMBLY__
+extern char c6x_command_line[COMMAND_LINE_SIZE];
+
+extern int c6x_add_memory(phys_addr_t start, unsigned long size);
+
+extern unsigned long ram_start;
+extern unsigned long ram_end;
+
+extern int c6x_num_cores;
+extern unsigned int c6x_silicon_rev;
+extern unsigned int c6x_devstat;
+extern unsigned char c6x_fuse_mac[6];
+
+extern void machine_init(unsigned long dt_ptr);
+extern void time_init(void);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_C6X_SETUP_H */
@@ -1,6 +1,8 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += kvm_para.h
+
 header-y += byteorder.h
 header-y += kvm_para.h
 header-y += ptrace.h

@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
@@ -1,33 +1,6 @@
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef _ASM_C6X_SETUP_H
-#define _ASM_C6X_SETUP_H
+#ifndef _UAPI_ASM_C6X_SETUP_H
+#define _UAPI_ASM_C6X_SETUP_H
 
 #define COMMAND_LINE_SIZE	1024
 
-#ifndef __ASSEMBLY__
-extern char c6x_command_line[COMMAND_LINE_SIZE];
-
-extern int c6x_add_memory(phys_addr_t start, unsigned long size);
-
-extern unsigned long ram_start;
-extern unsigned long ram_end;
-
-extern int c6x_num_cores;
-extern unsigned int c6x_silicon_rev;
-extern unsigned int c6x_devstat;
-extern unsigned char c6x_fuse_mac[6];
-
-extern void machine_init(unsigned long dt_ptr);
-extern void time_init(void);
-
-#endif /* !__ASSEMBLY__ */
-#endif /* _ASM_C6X_SETUP_H */
+#endif /* _UAPI_ASM_C6X_SETUP_H */
@@ -277,6 +277,8 @@ work_rescheduled:
  [A1]	BNOP	.S1	work_resched,5
 
 work_notifysig:
+	;; enable interrupts for do_notify_resume()
+	UNMASK_INT B2
 	B	.S2	do_notify_resume
 	LDW	.D2T1	*+SP(REGS__END+8),A6 ; syscall flag
 	ADDKPC	.S2	resume_userspace,B3,1

@@ -427,8 +429,7 @@ ENTRY(ret_from_kernel_execve)
 ENDPROC(ret_from_kernel_execve)
 
 	;;
-	;; These are the interrupt handlers, responsible for calling __do_IRQ()
-	;; int6 is used for syscalls (see _system_call entry)
+	;; These are the interrupt handlers, responsible for calling c6x_do_IRQ()
 	;;
 	.macro SAVE_ALL_INT
 	SAVE_ALL IRP,ITSR
@@ -111,7 +111,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
 
 	/* It is more difficult to avoid calling this function than to
 	   call it and ignore errors. */
-	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->r1))
+	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->r1) == -EFAULT)
 		goto badframe;
 
 	return rval;

@@ -84,7 +84,6 @@ asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
 {
 	struct rt_sigframe *frame = (struct rt_sigframe __user *)regs->sp;
 	sigset_t set;
-	stack_t st;
 
 	/*
 	 * Since we stacked the signal on a dword boundary,

@@ -104,11 +103,10 @@ asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 
-	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
-		goto badframe;
 	/* It is more difficult to avoid calling this function than to
 	   call it and ignore errors. */
-	do_sigaltstack(&st, NULL, regs->sp);
+	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
+		goto badframe;
 
 	return regs->gpr[11];
@@ -60,7 +60,7 @@
 	ENTRY_SAME(fork_wrapper)
 	ENTRY_SAME(read)
 	ENTRY_SAME(write)
-	ENTRY_SAME(open)		/* 5 */
+	ENTRY_COMP(open)		/* 5 */
 	ENTRY_SAME(close)
 	ENTRY_SAME(waitpid)
 	ENTRY_SAME(creat)

@@ -28,7 +28,7 @@ ENTRY(sys32_open_wrapper)
 	llgtr	%r2,%r2			# const char *
 	lgfr	%r3,%r3			# int
 	lgfr	%r4,%r4			# int
-	jg	sys_open		# branch to system call
+	jg	compat_sys_open		# branch to system call
 
 ENTRY(sys32_close_wrapper)
 	llgfr	%r2,%r2			# unsigned int
@@ -148,7 +148,6 @@ score_rt_sigreturn(struct pt_regs *regs)
 {
 	struct rt_sigframe __user *frame;
 	sigset_t set;
-	stack_t st;
 	int sig;
 
 	/* Always make any pending restarted system calls return -EINTR */

@@ -168,12 +167,10 @@ score_rt_sigreturn(struct pt_regs *regs)
 	else if (sig)
 		force_sig(sig, current);
 
-	if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st)))
-		goto badframe;
-
 	/* It is more difficult to avoid calling this function than to
 	   call it and ignore errors. */
-	do_sigaltstack((stack_t __user *)&st, NULL, regs->regs[0]);
+	if (do_sigaltstack(&frame->rs_uc.uc_stack, NULL, regs->regs[0]) == -EFAULT)
+		goto badframe;
 	regs->is_syscall = 0;
 
 	__asm__ __volatile__(
@@ -347,7 +347,6 @@ asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
 {
 	struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP;
 	sigset_t set;
-	stack_t __user st;
 	long long ret;
 
 	/* Always make any pending restarted system calls return -EINTR */

@@ -365,11 +364,10 @@ asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
 		goto badframe;
 	regs->pc -= 4;
 
-	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
-		goto badframe;
 	/* It is more difficult to avoid calling this function than to
 	   call it and ignore errors. */
-	do_sigaltstack(&st, NULL, REF_REG_SP);
+	if (do_sigaltstack(&frame->uc.uc_stack, NULL, REF_REG_SP) == -EFAULT)
+		goto badframe;
 
 	return (int) ret;
@@ -32,13 +32,14 @@ void flush_thread(void)
 			    "err = %d\n", ret);
 		force_sig(SIGKILL, current);
 	}
+	get_safe_registers(current_pt_regs()->regs.gp,
+			   current_pt_regs()->regs.fp);
 
 	__switch_mm(&current->mm->context.id);
 }
 
 void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
 {
-	get_safe_registers(regs->regs.gp, regs->regs.fp);
 	PT_REGS_IP(regs) = eip;
 	PT_REGS_SP(regs) = esp;
 	current->ptrace &= ~PT_DTRACE;
@@ -12,6 +12,7 @@ header-y += mce.h
 header-y += msr-index.h
 header-y += msr.h
 header-y += mtrr.h
+header-y += perf_regs.h
 header-y += posix_types_32.h
 header-y += posix_types_64.h
 header-y += posix_types_x32.h

@@ -19,8 +20,10 @@ header-y += prctl.h
 header-y += processor-flags.h
 header-y += ptrace-abi.h
 header-y += sigcontext32.h
+header-y += svm.h
 header-y += ucontext.h
 header-y += vm86.h
+header-y += vmx.h
 header-y += vsyscall.h
 
 genhdr-y += unistd_32.h
@@ -399,14 +399,17 @@ static inline void drop_init_fpu(struct task_struct *tsk)
 typedef struct { int preload; } fpu_switch_t;
 
 /*
- * FIXME! We could do a totally lazy restore, but we need to
- * add a per-cpu "this was the task that last touched the FPU
- * on this CPU" variable, and the task needs to have a "I last
- * touched the FPU on this CPU" and check them.
+ * Must be run with preemption disabled: this clears the fpu_owner_task,
+ * on this CPU.
  *
- * We don't do that yet, so "fpu_lazy_restore()" always returns
- * false, but some day..
+ * This will disable any lazy FPU state restore of the current FPU state,
+ * but if the current thread owns the FPU, it will still be saved by.
  */
+static inline void __cpu_disable_lazy_restore(unsigned int cpu)
+{
+	per_cpu(fpu_owner_task, cpu) = NULL;
+}
 
 static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
 {
 	return new == this_cpu_read_stable(fpu_owner_task) &&
@@ -292,8 +292,8 @@ default_entry:
  * be using the global pages.
  *
  * NOTE! If we are on a 486 we may have no cr4 at all!
- * Specifically, cr4 exists if and only if CPUID exists,
- * which in turn exists if and only if EFLAGS.ID exists.
+ * Specifically, cr4 exists if and only if CPUID exists
+ * and has flags other than the FPU flag set.
  */
 	movl $X86_EFLAGS_ID,%ecx
 	pushl %ecx

@@ -308,6 +308,11 @@ default_entry:
 	testl %ecx,%eax
 	jz 6f			# No ID flag = no CPUID = no CR4
 
+	movl $1,%eax
+	cpuid
+	andl $~1,%edx		# Ignore CPUID.FPU
+	jz 6f			# No flags or only CPUID.FPU = no CR4
+
 	movl pa(mmu_cr4_features),%eax
 	movl %eax,%cr4
 
@@ -1541,6 +1541,13 @@ void syscall_trace_leave(struct pt_regs *regs)
 {
 	bool step;
 
+	/*
+	 * We may come here right after calling schedule_user()
+	 * or do_notify_resume(), in which case we can be in RCU
+	 * user mode.
+	 */
+	rcu_user_exit();
+
 	audit_syscall_exit(regs);
 
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
@@ -68,6 +68,8 @@
 #include <asm/mwait.h>
 #include <asm/apic.h>
 #include <asm/io_apic.h>
+#include <asm/i387.h>
+#include <asm/fpu-internal.h>
 #include <asm/setup.h>
 #include <asm/uv/uv.h>
 #include <linux/mc146818rtc.h>

@@ -818,6 +820,9 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
+	/* the FPU context is blank, nobody can own it */
+	__cpu_disable_lazy_restore(cpu);
+
 	err = do_boot_cpu(apicid, cpu, tidle);
 	if (err) {
 		pr_debug("do_boot_cpu failed %d\n", err);
@@ -426,8 +426,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
 			_ASM_EXTABLE(1b, 3b)				\
 			: "=m" ((ctxt)->eflags), "=&r" (_tmp),		\
 			  "+a" (*rax), "+d" (*rdx), "+qm"(_ex)		\
-			: "i" (EFLAGS_MASK), "m" ((ctxt)->src.val),	\
-			  "a" (*rax), "d" (*rdx));			\
+			: "i" (EFLAGS_MASK), "m" ((ctxt)->src.val));	\
 	} while (0)
 
 /* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
@@ -1961,6 +1961,7 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
     res = loader_verify(lb, dev, rec);
     if (res)
       break;
+    rec = ihex_next_binrec(rec);
   }
   release_firmware(fw);
   if (!res)
@@ -127,12 +127,12 @@ config HW_RANDOM_VIA
 	  If unsure, say Y.
 
 config HW_RANDOM_IXP4XX
-	tristate "Intel IXP4xx NPU HW Random Number Generator support"
+	tristate "Intel IXP4xx NPU HW Pseudo-Random Number Generator support"
 	depends on HW_RANDOM && ARCH_IXP4XX
 	default HW_RANDOM
 	---help---
-	  This driver provides kernel-side support for the Random
-	  Number Generator hardware found on the Intel IXP4xx NPU.
+	  This driver provides kernel-side support for the Pseudo-Random
+	  Number Generator hardware found on the Intel IXP45x/46x NPU.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called ixp4xx-rng.

@@ -45,6 +45,9 @@ static int __init ixp4xx_rng_init(void)
 	void __iomem * rng_base;
 	int err;
 
+	if (!cpu_is_ixp46x()) /* includes IXP455 */
+		return -ENOSYS;
+
 	rng_base = ioremap(0x70002100, 4);
 	if (!rng_base)
 		return -ENOMEM;

@@ -68,5 +71,5 @@ module_init(ixp4xx_rng_init);
 module_exit(ixp4xx_rng_exit);
 
 MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
-MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver for IXP4xx");
+MODULE_DESCRIPTION("H/W Pseudo-Random Number Generator (RNG) driver for IXP45x/46x");
 MODULE_LICENSE("GPL");
@@ -285,7 +285,7 @@ static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
 
 static const struct file_operations raw_fops = {
 	.read		= do_sync_read,
-	.aio_read	= blkdev_aio_read,
+	.aio_read	= generic_file_aio_read,
 	.write		= do_sync_write,
 	.aio_write	= blkdev_aio_write,
 	.fsync		= blkdev_fsync,
@@ -224,7 +224,7 @@ config CRYPTO_DEV_TALITOS
 
 config CRYPTO_DEV_IXP4XX
 	tristate "Driver for IXP4xx crypto hardware acceleration"
-	depends on ARCH_IXP4XX
+	depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE
 	select CRYPTO_DES
 	select CRYPTO_ALGAPI
 	select CRYPTO_AUTHENC

@@ -750,12 +750,12 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
 	}
 	if (cipher_cfg & MOD_AES) {
 		switch (key_len) {
-		case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
-		case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
-		case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
+		case 16: keylen_cfg = MOD_AES128; break;
+		case 24: keylen_cfg = MOD_AES192; break;
+		case 32: keylen_cfg = MOD_AES256; break;
 		default:
 			*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 			return -EINVAL;
 		}
 		cipher_cfg |= keylen_cfg;
 	} else if (cipher_cfg & MOD_3DES) {
@@ -416,10 +416,18 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
 		dimm->cschannel = chn;
 
 		/* Increment csrow location */
-		row++;
-		if (row == tot_csrows) {
-			row = 0;
+		if (layers[0].is_virt_csrow) {
 			chn++;
+			if (chn == tot_channels) {
+				chn = 0;
+				row++;
+			}
+		} else {
+			row++;
+			if (row == tot_csrows) {
+				row = 0;
+				chn++;
+			}
 		}
 
 		/* Increment dimm location */
@@ -197,8 +197,8 @@ static const char *ferr_fat_fbd_name[] = {
 	[0]  = "Memory Write error on non-redundant retry or "
 	       "FBD configuration Write error on retry",
 };
-#define GET_FBD_FAT_IDX(fbderr)	(fbderr & (3 << 28))
-#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 3))
+#define GET_FBD_FAT_IDX(fbderr)	(((fbderr) >> 28) & 3)
+#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22))
 
 #define FERR_NF_FBD	0xa0
 static const char *ferr_nf_fbd_name[] = {

@@ -225,7 +225,7 @@ static const char *ferr_nf_fbd_name[] = {
 	[1]  = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
 	[0]  = "Uncorrectable Data ECC on Replay",
 };
-#define GET_FBD_NF_IDX(fbderr)	(fbderr & (3 << 28))
+#define GET_FBD_NF_IDX(fbderr)	(((fbderr) >> 28) & 3)
 #define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\
 			      (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\
 			      (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\

@@ -464,7 +464,7 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
 		errnum = find_first_bit(&errors,
 					ARRAY_SIZE(ferr_nf_fbd_name));
 		specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum);
-		branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;
+		branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0;
 
 		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 				      REDMEMA, &syndrome);
@@ -816,7 +816,7 @@ static ssize_t i7core_inject_store_##param(		\
 		struct device_attribute *mattr,			\
 		const char *data, size_t count)			\
 {								\
-	struct mem_ctl_info *mci = to_mci(dev);			\
+	struct mem_ctl_info *mci = dev_get_drvdata(dev);	\
 	struct i7core_pvt *pvt;					\
 	long value;						\
 	int rc;							\

@@ -845,7 +845,7 @@ static ssize_t i7core_inject_show_##param(		\
 		struct device_attribute *mattr,			\
 		char *data)					\
 {								\
-	struct mem_ctl_info *mci = to_mci(dev);			\
+	struct mem_ctl_info *mci = dev_get_drvdata(dev);	\
 	struct i7core_pvt *pvt;					\
 								\
 	pvt = mci->pvt_info;					\

@@ -1052,7 +1052,7 @@ static ssize_t i7core_show_counter_##param(		\
 		struct device_attribute *mattr,			\
 		char *data)					\
 {								\
-	struct mem_ctl_info *mci = to_mci(dev);			\
+	struct mem_ctl_info *mci = dev_get_drvdata(dev);	\
 	struct i7core_pvt *pvt = mci->pvt_info;			\
 								\
 	edac_dbg(1, "\n");					\
@@ -370,10 +370,6 @@ static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
 static void i82975x_init_csrows(struct mem_ctl_info *mci,
 		struct pci_dev *pdev, void __iomem *mch_window)
 {
-	static const char *labels[4] = {
-		"DIMM A1", "DIMM A2",
-		"DIMM B1", "DIMM B2"
-	};
 	struct csrow_info *csrow;
 	unsigned long last_cumul_size;
 	u8 value;

@@ -423,9 +419,10 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
 		dimm = mci->csrows[index]->channels[chan]->dimm;
 
 		dimm->nr_pages = nr_pages / csrow->nr_channels;
-		strncpy(csrow->channels[chan]->dimm->label,
-			labels[(index >> 1) + (chan * 2)],
-			EDAC_MC_LABEL_LEN);
+		snprintf(csrow->channels[chan]->dimm->label, EDAC_MC_LABEL_LEN, "DIMM %c%d",
+			 (chan == 0) ? 'A' : 'B',
+			 index);
 		dimm->grain = 1 << 7;	/* 128Byte cache-line resolution */
 		dimm->dtype = i82975x_dram_type(mch_window, index);
 		dimm->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
@@ -226,6 +226,12 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
 	 * already updated or not by exynos_drm_encoder_dpms function.
 	 */
 	exynos_encoder->updated = true;
+
+	/*
+	 * In case of setcrtc, there is no way to update encoder's dpms
+	 * so update it here.
+	 */
+	exynos_encoder->dpms = DRM_MODE_DPMS_ON;
 }
 
 static void exynos_drm_encoder_disable(struct drm_encoder *encoder)

@@ -507,6 +513,6 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
 	 * because the setting for disabling the overlay will be updated
 	 * at vsync.
 	 */
-	if (overlay_ops->wait_for_vblank)
+	if (overlay_ops && overlay_ops->wait_for_vblank)
 		overlay_ops->wait_for_vblank(manager->dev);
 }

@@ -87,7 +87,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 
 	dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
 	fbi->screen_base = buffer->kvaddr + offset;
-	fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
+	fbi->fix.smem_start = (unsigned long)(page_to_phys(buffer->pages[0]) +
+				offset);
 	fbi->screen_size = size;
 	fbi->fix.smem_len = size;
 
@@ -61,11 +61,11 @@ struct fimd_driver_data {
 	unsigned int timing_base;
 };
 
-struct fimd_driver_data exynos4_fimd_driver_data = {
+static struct fimd_driver_data exynos4_fimd_driver_data = {
 	.timing_base = 0x0,
 };
 
-struct fimd_driver_data exynos5_fimd_driver_data = {
+static struct fimd_driver_data exynos5_fimd_driver_data = {
 	.timing_base = 0x20000,
 };
 

@@ -204,7 +204,6 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 		return ret;
 
 	plane->crtc = crtc;
-	plane->fb = crtc->fb;
 
 	exynos_plane_commit(plane);
 	exynos_plane_dpms(plane, DRM_MODE_DPMS_ON);
@@ -499,12 +499,8 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 
 	edp = find_section(bdb, BDB_EDP);
 	if (!edp) {
-		if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
-			DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
-				      "supported, assume %dbpp panel color "
-				      "depth.\n",
-				      dev_priv->edp.bpp);
-		}
+		if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support)
+			DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
 		return;
 	}
 

@@ -657,9 +653,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
 	dev_priv->lvds_use_ssc = 1;
 	dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
 	DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
-
-	/* eDP data */
-	dev_priv->edp.bpp = 18;
 }
 
 static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)

@@ -3845,7 +3845,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
 		/* Use VBT settings if we have an eDP panel */
 		unsigned int edp_bpc = dev_priv->edp.bpp / 3;
 
-		if (edp_bpc < display_bpc) {
+		if (edp_bpc && edp_bpc < display_bpc) {
 			DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
 			display_bpc = edp_bpc;
 		}
@@ -2373,15 +2373,9 @@ int intel_enable_rc6(const struct drm_device *dev)
 	if (i915_enable_rc6 >= 0)
 		return i915_enable_rc6;
 
-	if (INTEL_INFO(dev)->gen == 5) {
-#ifdef CONFIG_INTEL_IOMMU
-		/* Disable rc6 on ilk if VT-d is on. */
-		if (intel_iommu_gfx_mapped)
-			return false;
-#endif
-		DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
-		return INTEL_RC6_ENABLE;
-	}
+	/* Disable RC6 on Ironlake */
+	if (INTEL_INFO(dev)->gen == 5)
+		return 0;
 
 	if (IS_HASWELL(dev)) {
 		DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
@@ -2201,7 +2201,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
 		intel_sdvo->is_hdmi = true;
 	}
-	intel_sdvo->base.cloneable = true;
 
 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 	if (intel_sdvo->is_hdmi)

@@ -2232,7 +2231,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
 
 	intel_sdvo->is_tv = true;
 	intel_sdvo->base.needs_tv_clock = true;
-	intel_sdvo->base.cloneable = false;
 
 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 

@@ -2275,8 +2273,6 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
 	}
 
-	intel_sdvo->base.cloneable = true;
-
 	intel_sdvo_connector_init(intel_sdvo_connector,
 				  intel_sdvo);
 	return true;

@@ -2307,9 +2303,6 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
 	}
 
-	/* SDVO LVDS is not cloneable because the input mode gets adjusted by the encoder */
-	intel_sdvo->base.cloneable = false;
-
 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
 		goto err;

@@ -2721,6 +2714,16 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
 		goto err_output;
 	}
 
+	/*
+	 * Cloning SDVO with anything is often impossible, since the SDVO
+	 * encoder can request a special input timing mode. And even if that's
+	 * not the case we have evidence that cloning a plain unscaled mode with
+	 * VGA doesn't really work. Furthermore the cloning flags are way too
+	 * simplistic anyway to express such constraints, so just give up on
+	 * cloning for SDVO encoders.
+	 */
+	intel_sdvo->base.cloneable = false;
+
 	/* Only enable the hotplug irq if we need it, to work around noisy
 	 * hotplug lines.
 	 */
@@ -1696,42 +1696,22 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
 			return ATOM_PPLL2;
 		DRM_ERROR("unable to allocate a PPLL\n");
 		return ATOM_PPLL_INVALID;
-	} else if (ASIC_IS_AVIVO(rdev)) {
-		/* in DP mode, the DP ref clock can come from either PPLL
-		 * depending on the asic:
-		 * DCE3: PPLL1 or PPLL2
-		 */
-		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
-			/* use the same PPLL for all DP monitors */
-			pll = radeon_get_shared_dp_ppll(crtc);
-			if (pll != ATOM_PPLL_INVALID)
-				return pll;
-		} else {
-			/* use the same PPLL for all monitors with the same clock */
-			pll = radeon_get_shared_nondp_ppll(crtc);
-			if (pll != ATOM_PPLL_INVALID)
-				return pll;
-		}
-		/* all other cases */
-		pll_in_use = radeon_get_pll_use_mask(crtc);
-		/* the order shouldn't matter here, but we probably
-		 * need this until we have atomic modeset
-		 */
-		if (rdev->flags & RADEON_IS_IGP) {
-			if (!(pll_in_use & (1 << ATOM_PPLL1)))
-				return ATOM_PPLL1;
-			if (!(pll_in_use & (1 << ATOM_PPLL2)))
-				return ATOM_PPLL2;
-		} else {
-			if (!(pll_in_use & (1 << ATOM_PPLL2)))
-				return ATOM_PPLL2;
-			if (!(pll_in_use & (1 << ATOM_PPLL1)))
-				return ATOM_PPLL1;
-		}
-		DRM_ERROR("unable to allocate a PPLL\n");
-		return ATOM_PPLL_INVALID;
 	} else {
 		/* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
+		/* some atombios (observed in some DCE2/DCE3) code have a bug,
+		 * the matching btw pll and crtc is done through
+		 * PCLK_CRTC[1|2]_CNTL (0x480/0x484) but atombios code use the
+		 * pll (1 or 2) to select which register to write. ie if using
+		 * pll1 it will use PCLK_CRTC1_CNTL (0x480) and if using pll2
+		 * it will use PCLK_CRTC2_CNTL (0x484), it then use crtc id to
+		 * choose which value to write. Which is reverse order from
+		 * register logic. So only case that works is when pllid is
+		 * same as crtcid or when both pll and crtc are enabled and
+		 * both use same clock.
+		 *
+		 * So just return crtc id as if crtc and pll were hard linked
+		 * together even if they aren't
+		 */
 		return radeon_crtc->crtc_id;
 	}
 }
drivers/md/raid1.c
@@ -963,7 +963,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
 	struct r1conf *conf = mddev->private;
 	struct bio *bio;
 
-	if (from_schedule) {
+	if (from_schedule || current->bio_list) {
 		spin_lock_irq(&conf->device_lock);
 		bio_list_merge(&conf->pending_bio_list, &plug->pending);
 		conf->pending_count += plug->pending_cnt;
drivers/md/raid10.c
@@ -1069,7 +1069,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
 	struct r10conf *conf = mddev->private;
 	struct bio *bio;
 
-	if (from_schedule) {
+	if (from_schedule || current->bio_list) {
 		spin_lock_irq(&conf->device_lock);
 		bio_list_merge(&conf->pending_bio_list, &plug->pending);
 		conf->pending_count += plug->pending_cnt;
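The two md hunks above widen the "defer to a worker" condition: if the unplug callback fires while current->bio_list is active, the caller is already inside the bio submission path and submitting more bios directly could deadlock. Below is a minimal userspace sketch of that guard, not the kernel code; current_bio_list and the two helpers are stand-ins invented for illustration.

#include <stdio.h>
#include <stdbool.h>

/* Thread-local marker playing the role of current->bio_list: non-NULL
 * means "we are inside the request submission path right now". */
static __thread void *current_bio_list;

static void queue_to_daemon(const char *what)  { printf("deferred %s\n", what); }
static void submit_directly(const char *what)  { printf("submitted %s\n", what); }

/* Mirrors the widened guard in raid1_unplug()/raid10_unplug(). */
static void unplug(bool from_schedule, const char *what)
{
	if (from_schedule || current_bio_list) {
		queue_to_daemon(what);		/* let the raid daemon submit it */
		return;
	}
	submit_directly(what);
}

int main(void)
{
	int marker;

	unplug(false, "bio A");			/* safe to submit in place */
	current_bio_list = &marker;		/* pretend we are inside make_request */
	unplug(false, "bio B");			/* would deadlock if submitted here */
	current_bio_list = NULL;
	unplug(true, "bio C");			/* called from schedule(): defer */
	return 0;
}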
drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -657,8 +657,7 @@ static int gsc_m2m_release(struct file *file)
 	pr_debug("pid: %d, state: 0x%lx, refcnt= %d",
 		task_pid_nr(current), gsc->state, gsc->m2m.refcnt);
 
-	if (mutex_lock_interruptible(&gsc->lock))
-		return -ERESTARTSYS;
+	mutex_lock(&gsc->lock);
 
 	v4l2_m2m_ctx_release(ctx->m2m_ctx);
 	gsc_ctrls_delete(ctx);
@@ -732,6 +731,7 @@ int gsc_register_m2m_device(struct gsc_dev *gsc)
 	gsc->vdev.ioctl_ops = &gsc_m2m_ioctl_ops;
 	gsc->vdev.release = video_device_release_empty;
 	gsc->vdev.lock = &gsc->lock;
+	gsc->vdev.vfl_dir = VFL_DIR_M2M;
 	snprintf(gsc->vdev.name, sizeof(gsc->vdev.name), "%s.%d:m2m",
 					GSC_MODULE_NAME, gsc->id);
 
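This hunk (and the matching fimc ones further down) replaces mutex_lock_interruptible() with mutex_lock() in release handlers: a release callback has no caller that can retry, so bailing out with -ERESTARTSYS would leave the context allocated forever. A hedged userspace sketch of the pattern, using pthreads as a stand-in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static int refcnt = 1;

/* A release path must run to completion: nobody restarts the syscall,
 * so take the lock unconditionally instead of an interruptible acquire
 * that can return early and leak the context. */
static int device_release(void)
{
	pthread_mutex_lock(&dev_lock);	/* was: interruptible lock + early return */
	if (--refcnt == 0)
		printf("tearing down context\n");
	pthread_mutex_unlock(&dev_lock);
	return 0;
}

int main(void)
{
	return device_release();
}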
drivers/media/platform/exynos-gsc/gsc-regs.h
@@ -40,10 +40,10 @@
 #define GSC_IN_ROT_YFLIP		(2 << 16)
 #define GSC_IN_ROT_XFLIP		(1 << 16)
 #define GSC_IN_RGB_TYPE_MASK		(3 << 14)
-#define GSC_IN_RGB_HD_WIDE		(3 << 14)
-#define GSC_IN_RGB_HD_NARROW		(2 << 14)
-#define GSC_IN_RGB_SD_WIDE		(1 << 14)
-#define GSC_IN_RGB_SD_NARROW		(0 << 14)
+#define GSC_IN_RGB_HD_NARROW		(3 << 14)
+#define GSC_IN_RGB_HD_WIDE		(2 << 14)
+#define GSC_IN_RGB_SD_NARROW		(1 << 14)
+#define GSC_IN_RGB_SD_WIDE		(0 << 14)
 #define GSC_IN_YUV422_1P_ORDER_MASK	(1 << 13)
 #define GSC_IN_YUV422_1P_ORDER_LSB_Y	(0 << 13)
 #define GSC_IN_YUV422_1P_OEDER_LSB_C	(1 << 13)
@@ -85,10 +85,10 @@
 #define GSC_OUT_GLOBAL_ALPHA_MASK	(0xff << 24)
 #define GSC_OUT_GLOBAL_ALPHA(x)		((x) << 24)
 #define GSC_OUT_RGB_TYPE_MASK		(3 << 10)
-#define GSC_OUT_RGB_HD_NARROW		(3 << 10)
-#define GSC_OUT_RGB_HD_WIDE		(2 << 10)
-#define GSC_OUT_RGB_SD_NARROW		(1 << 10)
-#define GSC_OUT_RGB_SD_WIDE		(0 << 10)
+#define GSC_OUT_RGB_HD_WIDE		(3 << 10)
+#define GSC_OUT_RGB_HD_NARROW		(2 << 10)
+#define GSC_OUT_RGB_SD_WIDE		(1 << 10)
+#define GSC_OUT_RGB_SD_NARROW		(0 << 10)
 #define GSC_OUT_YUV422_1P_ORDER_MASK	(1 << 9)
 #define GSC_OUT_YUV422_1P_ORDER_LSB_Y	(0 << 9)
 #define GSC_OUT_YUV422_1P_OEDER_LSB_C	(1 << 9)
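The header hunks swap the wide/narrow RGB colour-range encodings for the GSC input and output registers. The corrected field values, taken directly from the + lines above, compile down to the table below (note the input and output fields end up with opposite orderings); the enum names mirror the macros only for illustration:

#include <stdio.h>

/* Corrected GSC colour-range encodings: bits 15:14 on the input side,
 * bits 11:10 on the output side, values straight from the hunk. */
enum gsc_in_rgb_type {
	IN_RGB_HD_NARROW = 3 << 14,
	IN_RGB_HD_WIDE   = 2 << 14,
	IN_RGB_SD_NARROW = 1 << 14,
	IN_RGB_SD_WIDE   = 0 << 14,
};

enum gsc_out_rgb_type {
	OUT_RGB_HD_WIDE   = 3 << 10,
	OUT_RGB_HD_NARROW = 2 << 10,
	OUT_RGB_SD_WIDE   = 1 << 10,
	OUT_RGB_SD_NARROW = 0 << 10,
};

int main(void)
{
	printf("IN HD narrow field:  0x%x\n", IN_RGB_HD_NARROW);
	printf("OUT HD wide field:   0x%x\n", OUT_RGB_HD_WIDE);
	return 0;
}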
drivers/media/platform/s5p-fimc/fimc-capture.c
@@ -556,8 +556,7 @@ static int fimc_capture_close(struct file *file)
 
 	dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
 
-	if (mutex_lock_interruptible(&fimc->lock))
-		return -ERESTARTSYS;
+	mutex_lock(&fimc->lock);
 
 	if (--fimc->vid_cap.refcnt == 0) {
 		clear_bit(ST_CAPT_BUSY, &fimc->state);
@@ -1783,9 +1782,13 @@ static int fimc_capture_subdev_registered(struct v4l2_subdev *sd)
 	if (ret)
 		return ret;
 
+	fimc->pipeline_ops = v4l2_get_subdev_hostdata(sd);
+
 	ret = fimc_register_capture_device(fimc, sd->v4l2_dev);
-	if (ret)
+	if (ret) {
 		fimc_unregister_m2m_device(fimc);
+		fimc->pipeline_ops = NULL;
+	}
 
 	return ret;
 }
@@ -1802,6 +1805,7 @@ static void fimc_capture_subdev_unregistered(struct v4l2_subdev *sd)
 	if (video_is_registered(&fimc->vid_cap.vfd)) {
 		video_unregister_device(&fimc->vid_cap.vfd);
 		media_entity_cleanup(&fimc->vid_cap.vfd.entity);
+		fimc->pipeline_ops = NULL;
 	}
 	kfree(fimc->vid_cap.ctx);
 	fimc->vid_cap.ctx = NULL;
drivers/media/platform/s5p-fimc/fimc-lite.c
@@ -491,8 +491,7 @@ static int fimc_lite_close(struct file *file)
 	struct fimc_lite *fimc = video_drvdata(file);
 	int ret;
 
-	if (mutex_lock_interruptible(&fimc->lock))
-		return -ERESTARTSYS;
+	mutex_lock(&fimc->lock);
 
 	if (--fimc->ref_count == 0 && fimc->out_path == FIMC_IO_DMA) {
 		clear_bit(ST_FLITE_IN_USE, &fimc->state);
@@ -1263,10 +1262,12 @@ static int fimc_lite_subdev_registered(struct v4l2_subdev *sd)
 		return ret;
 
 	video_set_drvdata(vfd, fimc);
+	fimc->pipeline_ops = v4l2_get_subdev_hostdata(sd);
 
 	ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
 	if (ret < 0) {
 		media_entity_cleanup(&vfd->entity);
+		fimc->pipeline_ops = NULL;
 		return ret;
 	}
 
@@ -1285,6 +1286,7 @@ static void fimc_lite_subdev_unregistered(struct v4l2_subdev *sd)
 	if (video_is_registered(&fimc->vfd)) {
 		video_unregister_device(&fimc->vfd);
 		media_entity_cleanup(&fimc->vfd.entity);
+		fimc->pipeline_ops = NULL;
 	}
 }
 
drivers/media/platform/s5p-fimc/fimc-m2m.c
@@ -728,8 +728,7 @@ static int fimc_m2m_release(struct file *file)
 	dbg("pid: %d, state: 0x%lx, refcnt= %d",
 		task_pid_nr(current), fimc->state, fimc->m2m.refcnt);
 
-	if (mutex_lock_interruptible(&fimc->lock))
-		return -ERESTARTSYS;
+	mutex_lock(&fimc->lock);
 
 	v4l2_m2m_ctx_release(ctx->m2m_ctx);
 	fimc_ctrls_delete(ctx);
drivers/media/platform/s5p-fimc/fimc-mdev.c
@@ -352,6 +352,7 @@ static int fimc_register_callback(struct device *dev, void *p)
 
 	sd = &fimc->vid_cap.subdev;
 	sd->grp_id = FIMC_GROUP_ID;
+	v4l2_set_subdev_hostdata(sd, (void *)&fimc_pipeline_ops);
 
 	ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
 	if (ret) {
@@ -360,7 +361,6 @@ static int fimc_register_callback(struct device *dev, void *p)
 		return ret;
 	}
 
-	fimc->pipeline_ops = &fimc_pipeline_ops;
 	fmd->fimc[fimc->id] = fimc;
 	return 0;
 }
@@ -375,6 +375,7 @@ static int fimc_lite_register_callback(struct device *dev, void *p)
 		return 0;
 
 	fimc->subdev.grp_id = FLITE_GROUP_ID;
+	v4l2_set_subdev_hostdata(&fimc->subdev, (void *)&fimc_pipeline_ops);
 
 	ret = v4l2_device_register_subdev(&fmd->v4l2_dev, &fimc->subdev);
 	if (ret) {
@@ -384,7 +385,6 @@ static int fimc_lite_register_callback(struct device *dev, void *p)
 		return ret;
 	}
 
-	fimc->pipeline_ops = &fimc_pipeline_ops;
 	fmd->fimc_lite[fimc->index] = fimc;
 	return 0;
 }
drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -381,11 +381,8 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
 		ctx->consumed_stream += s5p_mfc_hw_call(dev->mfc_ops,
 						get_consumed_stream, dev);
 		if (ctx->codec_mode != S5P_MFC_CODEC_H264_DEC &&
-			s5p_mfc_hw_call(dev->mfc_ops,
-				get_dec_frame_type, dev) ==
-				S5P_FIMV_DECODE_FRAME_P_FRAME
-					&& ctx->consumed_stream + STUFF_BYTE <
-					src_buf->b->v4l2_planes[0].bytesused) {
+			ctx->consumed_stream + STUFF_BYTE <
+			src_buf->b->v4l2_planes[0].bytesused) {
 			/* Run MFC again on the same buffer */
 			mfc_debug(2, "Running again the same buffer\n");
 			ctx->after_packed_pb = 1;
drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -1762,7 +1762,7 @@ int s5p_mfc_get_dspl_y_adr_v6(struct s5p_mfc_dev *dev)
 
 int s5p_mfc_get_dec_y_adr_v6(struct s5p_mfc_dev *dev)
 {
-	return mfc_read(dev, S5P_FIMV_D_DISPLAY_LUMA_ADDR_V6);
+	return mfc_read(dev, S5P_FIMV_D_DECODED_LUMA_ADDR_V6);
 }
 
 int s5p_mfc_get_dspl_status_v6(struct s5p_mfc_dev *dev)
drivers/mtd/mtdcore.c
@@ -1077,8 +1077,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
 * until the request succeeds or until the allocation size falls below
 * the system page size. This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
- * ask the memory allocator to avoid re-trying, swapping, writing back
- * or performing I/O.
+ * ask the memory allocator to avoid re-trying.
 *
 * Note, this function also makes sure that the allocated buffer is aligned to
 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
@@ -1092,8 +1091,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
 */
 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
 {
-	gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
-		       __GFP_NORETRY | __GFP_NO_KSWAPD;
+	gfp_t flags = __GFP_NOWARN | __GFP_WAIT | __GFP_NORETRY;
 	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
 	void *kbuf;
 
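The mtdcore hunk drops __GFP_NO_KSWAPD (removed from the allocator by another commit in this merge) and keeps only the fail-fast flags; the function's strategy of retreating to smaller buffers stays intact. Below is a userspace analog of that try-big-then-halve loop, a sketch only: malloc stands in for kmalloc and the alignment arithmetic approximates the kernel's ALIGN() on mtd->writesize.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

/* Userspace analog of mtd_kmalloc_up_to(): try the full request, halve
 * on failure, never dropping below max(writesize, PAGE_SIZE).  The
 * kernel version passes __GFP_NORETRY so each attempt fails fast
 * instead of forcing reclaim. */
static void *kmalloc_up_to(size_t writesize, size_t *size)
{
	size_t min_alloc = writesize > PAGE_SIZE ? writesize : PAGE_SIZE;
	void *buf;

	while (*size > min_alloc) {
		buf = malloc(*size);
		if (buf)
			return buf;
		*size >>= 1;					/* retreat */
		*size = (*size + writesize - 1) / writesize * writesize;
	}
	return malloc(*size);				/* smallest acceptable chunk */
}

int main(void)
{
	size_t size = 1u << 20;				/* ask for 1 MiB */
	void *buf = kmalloc_up_to(2048, &size);

	if (buf) {
		printf("got %zu bytes\n", size);
		free(buf);
	}
	return 0;
}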
drivers/net/bonding/bond_main.c
@@ -3459,6 +3459,28 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
 
 /*-------------------------- Device entry points ----------------------------*/
 
+static void bond_work_init_all(struct bonding *bond)
+{
+	INIT_DELAYED_WORK(&bond->mcast_work,
+			  bond_resend_igmp_join_requests_delayed);
+	INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
+	INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
+	if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+		INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
+	else
+		INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
+	INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
+}
+
+static void bond_work_cancel_all(struct bonding *bond)
+{
+	cancel_delayed_work_sync(&bond->mii_work);
+	cancel_delayed_work_sync(&bond->arp_work);
+	cancel_delayed_work_sync(&bond->alb_work);
+	cancel_delayed_work_sync(&bond->ad_work);
+	cancel_delayed_work_sync(&bond->mcast_work);
+}
+
 static int bond_open(struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
@@ -3481,41 +3503,27 @@ static int bond_open(struct net_device *bond_dev)
 	}
 	read_unlock(&bond->lock);
 
-	INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed);
+	bond_work_init_all(bond);
 
 	if (bond_is_lb(bond)) {
 		/* bond_alb_initialize must be called before the timer
 		 * is started.
 		 */
-		if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) {
-			/* something went wrong - fail the open operation */
+		if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB)))
 			return -ENOMEM;
-		}
-
-		INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
 		queue_delayed_work(bond->wq, &bond->alb_work, 0);
 	}
 
-	if (bond->params.miimon) {  /* link check interval, in milliseconds. */
-		INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
+	if (bond->params.miimon)  /* link check interval, in milliseconds. */
 		queue_delayed_work(bond->wq, &bond->mii_work, 0);
-	}
 
 	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
-		if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
-			INIT_DELAYED_WORK(&bond->arp_work,
-					  bond_activebackup_arp_mon);
-		else
-			INIT_DELAYED_WORK(&bond->arp_work,
-					  bond_loadbalance_arp_mon);
-
 		queue_delayed_work(bond->wq, &bond->arp_work, 0);
 		if (bond->params.arp_validate)
 			bond->recv_probe = bond_arp_rcv;
 	}
 
 	if (bond->params.mode == BOND_MODE_8023AD) {
-		INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
 		queue_delayed_work(bond->wq, &bond->ad_work, 0);
 		/* register to receive LACPDUs */
 		bond->recv_probe = bond_3ad_lacpdu_recv;
@@ -3530,34 +3538,10 @@ static int bond_close(struct net_device *bond_dev)
 	struct bonding *bond = netdev_priv(bond_dev);
 
 	write_lock_bh(&bond->lock);
-
 	bond->send_peer_notif = 0;
-
 	write_unlock_bh(&bond->lock);
 
-	if (bond->params.miimon) {  /* link check interval, in milliseconds. */
-		cancel_delayed_work_sync(&bond->mii_work);
-	}
-
-	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
-		cancel_delayed_work_sync(&bond->arp_work);
-	}
-
-	switch (bond->params.mode) {
-	case BOND_MODE_8023AD:
-		cancel_delayed_work_sync(&bond->ad_work);
-		break;
-	case BOND_MODE_TLB:
-	case BOND_MODE_ALB:
-		cancel_delayed_work_sync(&bond->alb_work);
-		break;
-	default:
-		break;
-	}
-
-	if (delayed_work_pending(&bond->mcast_work))
-		cancel_delayed_work_sync(&bond->mcast_work);
-
+	bond_work_cancel_all(bond);
 	if (bond_is_lb(bond)) {
 		/* Must be called only after all
 		 * slaves have been released
@@ -4436,26 +4420,6 @@ static void bond_setup(struct net_device *bond_dev)
 	bond_dev->features |= bond_dev->hw_features;
 }
 
-static void bond_work_cancel_all(struct bonding *bond)
-{
-	if (bond->params.miimon && delayed_work_pending(&bond->mii_work))
-		cancel_delayed_work_sync(&bond->mii_work);
-
-	if (bond->params.arp_interval && delayed_work_pending(&bond->arp_work))
-		cancel_delayed_work_sync(&bond->arp_work);
-
-	if (bond->params.mode == BOND_MODE_ALB &&
-	    delayed_work_pending(&bond->alb_work))
-		cancel_delayed_work_sync(&bond->alb_work);
-
-	if (bond->params.mode == BOND_MODE_8023AD &&
-	    delayed_work_pending(&bond->ad_work))
-		cancel_delayed_work_sync(&bond->ad_work);
-
-	if (delayed_work_pending(&bond->mcast_work))
-		cancel_delayed_work_sync(&bond->mcast_work);
-}
-
 /*
 * Destroy a bonding device.
 * Must be under rtnl_lock when this function is called.
@@ -4706,12 +4670,13 @@ static int bond_check_params(struct bond_params *params)
 		     arp_ip_count++) {
 			/* not complete check, but should be good enough to
 			   catch mistakes */
-			if (!isdigit(arp_ip_target[arp_ip_count][0])) {
+			__be32 ip = in_aton(arp_ip_target[arp_ip_count]);
+			if (!isdigit(arp_ip_target[arp_ip_count][0]) ||
+			    ip == 0 || ip == htonl(INADDR_BROADCAST)) {
 				pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
 					   arp_ip_target[arp_ip_count]);
 				arp_interval = 0;
 			} else {
-				__be32 ip = in_aton(arp_ip_target[arp_ip_count]);
 				arp_target[arp_ip_count] = ip;
 			}
 		}
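The bond_check_params hunk converts the arp_ip_target string before validating, so 0.0.0.0 and the broadcast address can be rejected along with non-numeric input. A userspace sketch of the same check, with inet_addr() standing in for the kernel's in_aton():

#include <stdio.h>
#include <ctype.h>
#include <arpa/inet.h>

/* Mirror of the new arp_ip_target validation: parse first, then reject
 * non-numeric strings, 0.0.0.0 and 255.255.255.255. */
static int valid_arp_target(const char *s)
{
	in_addr_t ip = inet_addr(s);

	if (!isdigit((unsigned char)s[0]) ||
	    ip == 0 || ip == htonl(INADDR_BROADCAST))
		return 0;
	return 1;
}

int main(void)
{
	const char *targets[] = { "192.168.0.1", "0.0.0.0", "255.255.255.255", "bogus" };

	for (unsigned i = 0; i < sizeof(targets) / sizeof(targets[0]); i++)
		printf("%-16s -> %s\n", targets[i],
		       valid_arp_target(targets[i]) ? "ok" : "rejected");
	return 0;
}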
drivers/net/bonding/bond_sysfs.c
@@ -513,6 +513,8 @@ static ssize_t bonding_store_arp_interval(struct device *d,
 	int new_value, ret = count;
 	struct bonding *bond = to_bond(d);
 
+	if (!rtnl_trylock())
+		return restart_syscall();
 	if (sscanf(buf, "%d", &new_value) != 1) {
 		pr_err("%s: no arp_interval value specified.\n",
 		       bond->dev->name);
@@ -539,10 +541,6 @@ static ssize_t bonding_store_arp_interval(struct device *d,
 		pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
 			bond->dev->name, bond->dev->name);
 		bond->params.miimon = 0;
-		if (delayed_work_pending(&bond->mii_work)) {
-			cancel_delayed_work(&bond->mii_work);
-			flush_workqueue(bond->wq);
-		}
 	}
 	if (!bond->params.arp_targets[0]) {
 		pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
@@ -554,19 +552,12 @@ static ssize_t bonding_store_arp_interval(struct device *d,
 		 * timer will get fired off when the open function
 		 * is called.
 		 */
-		if (!delayed_work_pending(&bond->arp_work)) {
-			if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
-				INIT_DELAYED_WORK(&bond->arp_work,
-						  bond_activebackup_arp_mon);
-			else
-				INIT_DELAYED_WORK(&bond->arp_work,
-						  bond_loadbalance_arp_mon);
-
-			queue_delayed_work(bond->wq, &bond->arp_work, 0);
-		}
+		cancel_delayed_work_sync(&bond->mii_work);
+		queue_delayed_work(bond->wq, &bond->arp_work, 0);
 	}
 
 out:
+	rtnl_unlock();
 	return ret;
 }
 static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
@@ -962,6 +953,8 @@ static ssize_t bonding_store_miimon(struct device *d,
 	int new_value, ret = count;
 	struct bonding *bond = to_bond(d);
 
+	if (!rtnl_trylock())
+		return restart_syscall();
 	if (sscanf(buf, "%d", &new_value) != 1) {
 		pr_err("%s: no miimon value specified.\n",
 		       bond->dev->name);
@@ -993,10 +986,6 @@ static ssize_t bonding_store_miimon(struct device *d,
 				bond->params.arp_validate =
 					BOND_ARP_VALIDATE_NONE;
 			}
-			if (delayed_work_pending(&bond->arp_work)) {
-				cancel_delayed_work(&bond->arp_work);
-				flush_workqueue(bond->wq);
-			}
 		}
 
 		if (bond->dev->flags & IFF_UP) {
@@ -1005,15 +994,12 @@ static ssize_t bonding_store_miimon(struct device *d,
 			 * timer will get fired off when the open function
 			 * is called.
 			 */
-			if (!delayed_work_pending(&bond->mii_work)) {
-				INIT_DELAYED_WORK(&bond->mii_work,
-						  bond_mii_monitor);
-				queue_delayed_work(bond->wq,
-						   &bond->mii_work, 0);
-			}
+			cancel_delayed_work_sync(&bond->arp_work);
+			queue_delayed_work(bond->wq, &bond->mii_work, 0);
 		}
 	}
 out:
+	rtnl_unlock();
 	return ret;
 }
 static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
@@ -1582,6 +1568,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
 		goto out;
 	}
 
+	read_lock(&bond->lock);
 	bond_for_each_slave(bond, slave, i) {
 		if (!bond_is_active_slave(slave)) {
 			if (new_value)
@@ -1590,6 +1577,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
 				slave->inactive = 1;
 		}
 	}
+	read_unlock(&bond->lock);
 out:
 	return ret;
 }
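Both store handlers above now take the RTNL with rtnl_trylock() and bounce contended writers back with restart_syscall(), instead of blocking while a sysfs reference is held. A hedged userspace sketch of the trylock-or-restart shape; ERESTART_PSEUDO is an invented stand-in for the kernel's restart mechanism:

#include <pthread.h>
#include <stdio.h>

#define ERESTART_PSEUDO 512	/* stand-in for restart_syscall() */

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

/* Never block on the big lock from a store callback; let the caller
 * retry the whole operation instead. */
static int store_param(int new_value, int *param)
{
	if (pthread_mutex_trylock(&rtnl) != 0)
		return -ERESTART_PSEUDO;

	*param = new_value;		/* reconfigure under the lock */

	pthread_mutex_unlock(&rtnl);
	return 0;
}

int main(void)
{
	int miimon = 0;

	printf("store: %d, miimon=%d\n", store_param(100, &miimon), miimon);
	return 0;
}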
drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -519,8 +519,10 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
 	mc->pdev->dev.can.state = new_state;
 
 	if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
+		struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
+
 		peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
-		skb->tstamp = timeval_to_ktime(tv);
+		hwts->hwtstamp = timeval_to_ktime(tv);
 	}
 
 	netif_rx(skb);
@@ -605,6 +607,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
 	struct sk_buff *skb;
 	struct can_frame *cf;
 	struct timeval tv;
+	struct skb_shared_hwtstamps *hwts;
 
 	skb = alloc_can_skb(mc->netdev, &cf);
 	if (!skb)
@@ -652,7 +655,8 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
 
 	/* convert timestamp into kernel time */
 	peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
-	skb->tstamp = timeval_to_ktime(tv);
+	hwts = skb_hwtstamps(skb);
+	hwts->hwtstamp = timeval_to_ktime(tv);
 
 	/* push the skb */
 	netif_rx(skb);
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -532,6 +532,7 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
 	struct can_frame *can_frame;
 	struct sk_buff *skb;
 	struct timeval tv;
+	struct skb_shared_hwtstamps *hwts;
 
 	skb = alloc_can_skb(netdev, &can_frame);
 	if (!skb)
@@ -549,7 +550,8 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
 	memcpy(can_frame->data, rx->data, can_frame->can_dlc);
 
 	peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(rx->ts32), &tv);
-	skb->tstamp = timeval_to_ktime(tv);
+	hwts = skb_hwtstamps(skb);
+	hwts->hwtstamp = timeval_to_ktime(tv);
 
 	netif_rx(skb);
 	netdev->stats.rx_packets++;
@@ -570,6 +572,7 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
 	u8 err_mask = 0;
 	struct sk_buff *skb;
 	struct timeval tv;
+	struct skb_shared_hwtstamps *hwts;
 
 	/* nothing should be sent while in BUS_OFF state */
 	if (dev->can.state == CAN_STATE_BUS_OFF)
@@ -664,7 +667,8 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
 	dev->can.state = new_state;
 
 	peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv);
-	skb->tstamp = timeval_to_ktime(tv);
+	hwts = skb_hwtstamps(skb);
+	hwts->hwtstamp = timeval_to_ktime(tv);
 	netif_rx(skb);
 	netdev->stats.rx_packets++;
 	netdev->stats.rx_bytes += can_frame->can_dlc;
drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -237,7 +237,7 @@ static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
 	if (err)
 		return err;
 
-	memcpy(priv->maxrate, tmp, sizeof(*priv->maxrate));
+	memcpy(priv->maxrate, tmp, sizeof(priv->maxrate));
 
 	return 0;
 }
drivers/net/ethernet/realtek/8139cp.c
@@ -1060,17 +1060,22 @@ static int cp_init_rings (struct cp_private *cp)
 
 static int cp_alloc_rings (struct cp_private *cp)
 {
+	struct device *d = &cp->pdev->dev;
 	void *mem;
+	int rc;
 
-	mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
-				 &cp->ring_dma, GFP_KERNEL);
+	mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL);
 	if (!mem)
 		return -ENOMEM;
 
 	cp->rx_ring = mem;
 	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
 
-	return cp_init_rings(cp);
+	rc = cp_init_rings(cp);
+	if (rc < 0)
+		dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
+
+	return rc;
 }
 
 static void cp_clean_rings (struct cp_private *cp)
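The 8139cp hunk plugs a leak: when ring initialization fails after the coherent DMA buffer has been allocated, the buffer is now freed on the error path instead of being abandoned. A malloc-based sketch of the unwind-on-failure pattern (the helper names are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

#define RING_BYTES 4096

/* Stand-in for cp_init_rings(): pretend a later setup step failed. */
static int init_rings(void *mem)
{
	(void)mem;
	return -1;			/* e.g. -ENOMEM from an rx refill */
}

/* The allocation's owner unwinds it when a later step fails, so the
 * error path leaks nothing. */
static int alloc_rings(void **ring)
{
	int rc;

	*ring = malloc(RING_BYTES);	/* dma_alloc_coherent() stand-in */
	if (!*ring)
		return -1;

	rc = init_rings(*ring);
	if (rc < 0) {
		free(*ring);		/* dma_free_coherent() stand-in */
		*ring = NULL;
	}
	return rc;
}

int main(void)
{
	void *ring;

	printf("alloc_rings: %d, ring=%p\n", alloc_rings(&ring), ring);
	return 0;
}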
drivers/net/team/team.c
@@ -1794,10 +1794,12 @@ static void team_setup(struct net_device *dev)
 
 	dev->features |= NETIF_F_LLTX;
 	dev->features |= NETIF_F_GRO;
-	dev->hw_features = NETIF_F_HW_VLAN_TX |
+	dev->hw_features = TEAM_VLAN_FEATURES |
+			   NETIF_F_HW_VLAN_TX |
 			   NETIF_F_HW_VLAN_RX |
 			   NETIF_F_HW_VLAN_FILTER;
 
+	dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
 	dev->features |= dev->hw_features;
 }
 
drivers/net/usb/qmi_wwan.c
@@ -385,6 +385,7 @@ static const struct usb_device_id products[] = {
 	},
 
 	/* 3. Combined interface devices matching on interface number */
+	{QMI_FIXED_INTF(0x12d1, 0x140c, 1)},	/* Huawei E173 */
 	{QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
 	{QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
 	{QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
drivers/net/wan/ixp4xx_hss.c
@@ -1365,7 +1365,7 @@ static int __devinit hss_init_one(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, port);
 
-	netdev_info(dev, "HSS-%i\n", port->id);
+	netdev_info(dev, "initialized\n");
 	return 0;
 
 err_free_netdev:
drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1012,12 +1012,12 @@ static void iwl_calc_basic_rates(struct iwl_priv *priv,
 	 * As a consequence, it's not as complicated as it sounds, just add
 	 * any lower rates to the ACK rate bitmap.
 	 */
-	if (IWL_RATE_11M_INDEX < lowest_present_ofdm)
-		ofdm |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE;
-	if (IWL_RATE_5M_INDEX < lowest_present_ofdm)
-		ofdm |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE;
-	if (IWL_RATE_2M_INDEX < lowest_present_ofdm)
-		ofdm |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE;
+	if (IWL_RATE_11M_INDEX < lowest_present_cck)
+		cck |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE;
+	if (IWL_RATE_5M_INDEX < lowest_present_cck)
+		cck |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE;
+	if (IWL_RATE_2M_INDEX < lowest_present_cck)
+		cck |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE;
 	/* 1M already there or needed so always add */
 	cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE;
 
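The rxon hunk fixes a copy-paste slip: the CCK branch of the ACK-rate calculation was testing and updating the OFDM variables. A sketch of the corrected accumulation, with simplified rate indices that only illustrate the shape (the real driver's index layout differs):

#include <stdio.h>

/* Simplified CCK rate indices: 1M..11M, lowest first. */
enum { R_1M, R_2M, R_5M, R_11M, FIRST_CCK = R_1M };

/* Fold every CCK rate strictly below the lowest basic CCK rate present
 * into the ACK bitmap -- operating on the CCK mask, which is the fix. */
static unsigned basic_cck(int lowest_present_cck)
{
	unsigned cck = 0;

	if (R_11M < lowest_present_cck)
		cck |= 1u << (R_11M - FIRST_CCK);
	if (R_5M < lowest_present_cck)
		cck |= 1u << (R_5M - FIRST_CCK);
	if (R_2M < lowest_present_cck)
		cck |= 1u << (R_2M - FIRST_CCK);
	/* 1M is always usable for ACKs */
	cck |= 1u << (R_1M - FIRST_CCK);
	return cck;
}

int main(void)
{
	printf("lowest=11M -> cck bitmap 0x%x\n", basic_cck(R_11M));
	return 0;
}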
drivers/remoteproc/remoteproc_virtio.c
@@ -120,15 +120,11 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
 	return vq;
 }
 
-static void rproc_virtio_del_vqs(struct virtio_device *vdev)
+static void __rproc_virtio_del_vqs(struct virtio_device *vdev)
 {
 	struct virtqueue *vq, *n;
-	struct rproc *rproc = vdev_to_rproc(vdev);
 	struct rproc_vring *rvring;
 
-	/* power down the remote processor before deleting vqs */
-	rproc_shutdown(rproc);
-
 	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
 		rvring = vq->priv;
 		rvring->vq = NULL;
@@ -137,6 +133,16 @@ static void rproc_virtio_del_vqs(struct virtio_device *vdev)
 	}
 }
 
+static void rproc_virtio_del_vqs(struct virtio_device *vdev)
+{
+	struct rproc *rproc = vdev_to_rproc(vdev);
+
+	/* power down the remote processor before deleting vqs */
+	rproc_shutdown(rproc);
+
+	__rproc_virtio_del_vqs(vdev);
+}
+
 static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 		       struct virtqueue *vqs[],
 		       vq_callback_t *callbacks[],
@@ -163,7 +169,7 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 	return 0;
 
 error:
-	rproc_virtio_del_vqs(vdev);
+	__rproc_virtio_del_vqs(vdev);
 	return ret;
 }
 
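The remoteproc hunk splits vring teardown out of rproc_virtio_del_vqs(): the error path of find_vqs was previously shutting down a remote processor that had never been booted. A minimal C sketch of the helper-split shape (all names here are illustrative stand-ins, not the driver's API):

#include <stdio.h>

static void shutdown_remote(void) { printf("remote processor down\n"); }
static void delete_vrings(void)   { printf("vrings deleted\n"); }

/* Teardown helper usable from error paths: no shutdown side effect. */
static void del_vqs_only(void)
{
	delete_vrings();
}

/* Full teardown for the normal path: power down first, then reuse the
 * helper -- the same split as __rproc_virtio_del_vqs()/rproc_virtio_del_vqs(). */
static void del_vqs(void)
{
	shutdown_remote();
	del_vqs_only();
}

static int find_vqs(int fail)
{
	if (fail) {
		del_vqs_only();		/* undo partial setup only */
		return -1;
	}
	return 0;
}

int main(void)
{
	find_vqs(1);			/* error path: no shutdown */
	del_vqs();			/* normal teardown */
	return 0;
}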
drivers/rtc/rtc-tps65910.c
@@ -288,11 +288,11 @@ static int __devinit tps65910_rtc_probe(struct platform_device *pdev)
 static int __devexit tps65910_rtc_remove(struct platform_device *pdev)
 {
 	/* leave rtc running, but disable irqs */
-	struct rtc_device *rtc = platform_get_drvdata(pdev);
+	struct tps65910_rtc *tps_rtc = platform_get_drvdata(pdev);
 
-	tps65910_rtc_alarm_irq_enable(&rtc->dev, 0);
+	tps65910_rtc_alarm_irq_enable(&pdev->dev, 0);
 
-	rtc_device_unregister(rtc);
+	rtc_device_unregister(tps_rtc->rtc);
 	return 0;
 }
 
drivers/target/target_core_transport.c
@@ -1819,8 +1819,10 @@ void target_execute_cmd(struct se_cmd *cmd)
 	/*
 	 * If the received CDB has aleady been aborted stop processing it here.
 	 */
-	if (transport_check_aborted_status(cmd, 1))
+	if (transport_check_aborted_status(cmd, 1)) {
+		complete(&cmd->t_transport_stop_comp);
 		return;
+	}
 
 	/*
 	 * Determine if IOCTL context caller in requesting the stopping of this
@@ -3067,7 +3069,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
 	unsigned long flags;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
 	}
drivers/vhost/vhost.c
@@ -1076,7 +1076,7 @@ static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
 		}
 		_iov = iov + ret;
 		size = reg->memory_size - addr + reg->guest_phys_addr;
-		_iov->iov_len = min((u64)len, size);
+		_iov->iov_len = min((u64)len - s, size);
 		_iov->iov_base = (void __user *)(unsigned long)
 			(reg->userspace_addr + addr - reg->guest_phys_addr);
 		s += size;
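The vhost fix caps each iovec entry at the length still remaining (len - s) rather than the total length, so a descriptor that spans several memory regions cannot produce oversized segments. A self-contained sketch of splitting a guest range across regions with that remaining-length bound (the struct layout is simplified for illustration):

#include <stdio.h>
#include <stdint.h>

struct region { uint64_t start, size; };
struct iovec64 { uint64_t base, len; };

/* Split [addr, addr+len) across fixed regions; each piece is bounded
 * by what is LEFT (len - s), which is the fix in the hunk above. */
static int translate(uint64_t addr, uint64_t len,
		     const struct region *regs, int nregs,
		     struct iovec64 *iov, int max)
{
	uint64_t s = 0;
	int n = 0;

	while (s < len && n < max) {
		const struct region *r = NULL;
		for (int i = 0; i < nregs; i++)
			if (addr >= regs[i].start &&
			    addr < regs[i].start + regs[i].size)
				r = &regs[i];
		if (!r)
			return -1;			/* address not covered */

		uint64_t room = r->start + r->size - addr;
		iov[n].base = addr;
		iov[n].len  = (len - s) < room ? (len - s) : room;
		s    += iov[n].len;
		addr += iov[n].len;
		n++;
	}
	return n;
}

int main(void)
{
	struct region regs[] = { { 0, 0x1000 }, { 0x1000, 0x1000 } };
	struct iovec64 iov[4];
	int n = translate(0x0ff0, 0x30, regs, 2, iov, 4);

	for (int i = 0; i < n; i++)
		printf("iov[%d]: base=0x%llx len=0x%llx\n", i,
		       (unsigned long long)iov[i].base,
		       (unsigned long long)iov[i].len);
	return 0;
}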
160	fs/block_dev.c
@@ -70,19 +70,6 @@ static void bdev_inode_switch_bdi(struct inode *inode,
 	spin_unlock(&dst->wb.list_lock);
 }
 
-sector_t blkdev_max_block(struct block_device *bdev)
-{
-	sector_t retval = ~((sector_t)0);
-	loff_t sz = i_size_read(bdev->bd_inode);
-
-	if (sz) {
-		unsigned int size = block_size(bdev);
-		unsigned int sizebits = blksize_bits(size);
-		retval = (sz >> sizebits);
-	}
-	return retval;
-}
-
 /* Kill _all_ buffers and pagecache , dirty or not.. */
 void kill_bdev(struct block_device *bdev)
 {
@@ -116,8 +103,6 @@ EXPORT_SYMBOL(invalidate_bdev);
 
 int set_blocksize(struct block_device *bdev, int size)
 {
-	struct address_space *mapping;
-
 	/* Size must be a power of two, and between 512 and PAGE_SIZE */
 	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
 		return -EINVAL;
@@ -126,19 +111,6 @@ int set_blocksize(struct block_device *bdev, int size)
 	if (size < bdev_logical_block_size(bdev))
 		return -EINVAL;
 
-	/* Prevent starting I/O or mapping the device */
-	percpu_down_write(&bdev->bd_block_size_semaphore);
-
-	/* Check that the block device is not memory mapped */
-	mapping = bdev->bd_inode->i_mapping;
-	mutex_lock(&mapping->i_mmap_mutex);
-	if (mapping_mapped(mapping)) {
-		mutex_unlock(&mapping->i_mmap_mutex);
-		percpu_up_write(&bdev->bd_block_size_semaphore);
-		return -EBUSY;
-	}
-	mutex_unlock(&mapping->i_mmap_mutex);
-
 	/* Don't change the size if it is same as current */
 	if (bdev->bd_block_size != size) {
 		sync_blockdev(bdev);
@@ -146,9 +118,6 @@ int set_blocksize(struct block_device *bdev, int size)
 		bdev->bd_inode->i_blkbits = blksize_bits(size);
 		kill_bdev(bdev);
 	}
-
-	percpu_up_write(&bdev->bd_block_size_semaphore);
-
 	return 0;
 }
 
@@ -181,52 +150,12 @@ static int
 blkdev_get_block(struct inode *inode, sector_t iblock,
 		struct buffer_head *bh, int create)
 {
-	if (iblock >= blkdev_max_block(I_BDEV(inode))) {
-		if (create)
-			return -EIO;
-
-		/*
-		 * for reads, we're just trying to fill a partial page.
-		 * return a hole, they will have to call get_block again
-		 * before they can fill it, and they will get -EIO at that
-		 * time
-		 */
-		return 0;
-	}
 	bh->b_bdev = I_BDEV(inode);
 	bh->b_blocknr = iblock;
 	set_buffer_mapped(bh);
 	return 0;
 }
 
-static int
-blkdev_get_blocks(struct inode *inode, sector_t iblock,
-		struct buffer_head *bh, int create)
-{
-	sector_t end_block = blkdev_max_block(I_BDEV(inode));
-	unsigned long max_blocks = bh->b_size >> inode->i_blkbits;
-
-	if ((iblock + max_blocks) > end_block) {
-		max_blocks = end_block - iblock;
-		if ((long)max_blocks <= 0) {
-			if (create)
-				return -EIO;	/* write fully beyond EOF */
-			/*
-			 * It is a read which is fully beyond EOF. We return
-			 * a !buffer_mapped buffer
-			 */
-			max_blocks = 0;
-		}
-	}
-
-	bh->b_bdev = I_BDEV(inode);
-	bh->b_blocknr = iblock;
-	bh->b_size = max_blocks << inode->i_blkbits;
-	if (max_blocks)
-		set_buffer_mapped(bh);
-	return 0;
-}
-
 static ssize_t
 blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 			loff_t offset, unsigned long nr_segs)
@@ -235,7 +164,7 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 	struct inode *inode = file->f_mapping->host;
 
 	return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iov, offset,
-				    nr_segs, blkdev_get_blocks, NULL, NULL, 0);
+				    nr_segs, blkdev_get_block, NULL, NULL, 0);
 }
 
 int __sync_blockdev(struct block_device *bdev, int wait)
@@ -459,12 +388,6 @@ static struct inode *bdev_alloc_inode(struct super_block *sb)
 	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
-
-	if (unlikely(percpu_init_rwsem(&ei->bdev.bd_block_size_semaphore))) {
-		kmem_cache_free(bdev_cachep, ei);
-		return NULL;
-	}
-
 	return &ei->vfs_inode;
 }
 
@@ -473,8 +396,6 @@ static void bdev_i_callback(struct rcu_head *head)
 	struct inode *inode = container_of(head, struct inode, i_rcu);
 	struct bdev_inode *bdi = BDEV_I(inode);
 
-	percpu_free_rwsem(&bdi->bdev.bd_block_size_semaphore);
-
 	kmem_cache_free(bdev_cachep, bdi);
 }
 
@@ -1593,22 +1514,6 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	return blkdev_ioctl(bdev, mode, cmd, arg);
 }
 
-ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
-			unsigned long nr_segs, loff_t pos)
-{
-	ssize_t ret;
-	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
-
-	percpu_down_read(&bdev->bd_block_size_semaphore);
-
-	ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
-
-	percpu_up_read(&bdev->bd_block_size_semaphore);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(blkdev_aio_read);
-
 /*
 * Write data to the block device. Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
@@ -1620,16 +1525,12 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
 			 unsigned long nr_segs, loff_t pos)
 {
 	struct file *file = iocb->ki_filp;
-	struct block_device *bdev = I_BDEV(file->f_mapping->host);
 	struct blk_plug plug;
 	ssize_t ret;
 
 	BUG_ON(iocb->ki_pos != pos);
 
 	blk_start_plug(&plug);
-
-	percpu_down_read(&bdev->bd_block_size_semaphore);
-
 	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
 	if (ret > 0 || ret == -EIOCBQUEUED) {
 		ssize_t err;
@@ -1638,62 +1539,11 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
 		if (err < 0 && ret > 0)
 			ret = err;
 	}
-
-	percpu_up_read(&bdev->bd_block_size_semaphore);
-
 	blk_finish_plug(&plug);
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(blkdev_aio_write);
 
-static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	int ret;
-	struct block_device *bdev = I_BDEV(file->f_mapping->host);
-
-	percpu_down_read(&bdev->bd_block_size_semaphore);
-
-	ret = generic_file_mmap(file, vma);
-
-	percpu_up_read(&bdev->bd_block_size_semaphore);
-
-	return ret;
-}
-
-static ssize_t blkdev_splice_read(struct file *file, loff_t *ppos,
-				  struct pipe_inode_info *pipe, size_t len,
-				  unsigned int flags)
-{
-	ssize_t ret;
-	struct block_device *bdev = I_BDEV(file->f_mapping->host);
-
-	percpu_down_read(&bdev->bd_block_size_semaphore);
-
-	ret = generic_file_splice_read(file, ppos, pipe, len, flags);
-
-	percpu_up_read(&bdev->bd_block_size_semaphore);
-
-	return ret;
-}
-
-static ssize_t blkdev_splice_write(struct pipe_inode_info *pipe,
-				   struct file *file, loff_t *ppos, size_t len,
-				   unsigned int flags)
-{
-	ssize_t ret;
-	struct block_device *bdev = I_BDEV(file->f_mapping->host);
-
-	percpu_down_read(&bdev->bd_block_size_semaphore);
-
-	ret = generic_file_splice_write(pipe, file, ppos, len, flags);
-
-	percpu_up_read(&bdev->bd_block_size_semaphore);
-
-	return ret;
-}
-
-
 /*
 * Try to release a page associated with block device when the system
 * is under memory pressure.
@@ -1724,16 +1574,16 @@ const struct file_operations def_blk_fops = {
 	.llseek		= block_llseek,
 	.read		= do_sync_read,
 	.write		= do_sync_write,
-	.aio_read	= blkdev_aio_read,
+	.aio_read	= generic_file_aio_read,
 	.aio_write	= blkdev_aio_write,
-	.mmap		= blkdev_mmap,
+	.mmap		= generic_file_mmap,
 	.fsync		= blkdev_fsync,
 	.unlocked_ioctl	= block_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= compat_blkdev_ioctl,
 #endif
-	.splice_read	= blkdev_splice_read,
-	.splice_write	= blkdev_splice_write,
+	.splice_read	= generic_file_splice_read,
+	.splice_write	= generic_file_splice_write,
 };
 
 int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
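With the bd_block_size_semaphore wrappers gone, fs/block_dev.c stops second-guessing the device size in blkdev_get_block(); the bounds check moves into fs/buffer.c (next file), where blkdev_max_block() now takes the block size as an explicit parameter so the computation cannot race with a concurrent blocksize change. A standalone sketch of that computation, not the kernel function itself:

#include <stdio.h>
#include <stdint.h>

/* log2 of a power-of-two block size (512..PAGE_SIZE). */
static unsigned blksize_bits(unsigned size)
{
	unsigned bits = 0;
	while ((1u << bits) < size)
		bits++;
	return bits;
}

/* Mirrors the helper added to fs/buffer.c below: number of blocks on a
 * device of byte size sz at the given block size; ~0 when the size is
 * unknown (zero). */
static uint64_t blkdev_max_block(uint64_t sz, unsigned size)
{
	return sz ? (sz >> blksize_bits(size)) : ~UINT64_C(0);
}

int main(void)
{
	printf("1 MiB @ 512B  -> %llu blocks\n",
	       (unsigned long long)blkdev_max_block(1 << 20, 512));
	printf("1 MiB @ 4096B -> %llu blocks\n",
	       (unsigned long long)blkdev_max_block(1 << 20, 4096));
	return 0;
}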
93	fs/buffer.c
@@ -911,6 +911,18 @@ link_dev_buffers(struct page *page, struct buffer_head *head)
 	attach_page_buffers(page, head);
 }
 
+static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
+{
+	sector_t retval = ~((sector_t)0);
+	loff_t sz = i_size_read(bdev->bd_inode);
+
+	if (sz) {
+		unsigned int sizebits = blksize_bits(size);
+		retval = (sz >> sizebits);
+	}
+	return retval;
+}
+
 /*
 * Initialise the state of a blockdev page's buffers.
 */
@@ -921,7 +933,7 @@ init_page_buffers(struct page *page, struct block_device *bdev,
 	struct buffer_head *head = page_buffers(page);
 	struct buffer_head *bh = head;
 	int uptodate = PageUptodate(page);
-	sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode));
+	sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode), size);
 
 	do {
 		if (!buffer_mapped(bh)) {
@@ -1552,6 +1564,28 @@ void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
 }
 EXPORT_SYMBOL(unmap_underlying_metadata);
 
+/*
+ * Size is a power-of-two in the range 512..PAGE_SIZE,
+ * and the case we care about most is PAGE_SIZE.
+ *
+ * So this *could* possibly be written with those
+ * constraints in mind (relevant mostly if some
+ * architecture has a slow bit-scan instruction)
+ */
+static inline int block_size_bits(unsigned int blocksize)
+{
+	return ilog2(blocksize);
+}
+
+static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
+{
+	BUG_ON(!PageLocked(page));
+
+	if (!page_has_buffers(page))
+		create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state);
+	return page_buffers(page);
+}
+
 /*
 * NOTE! All mapped/uptodate combinations are valid:
 *
@@ -1589,19 +1623,13 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	sector_t block;
 	sector_t last_block;
 	struct buffer_head *bh, *head;
-	const unsigned blocksize = 1 << inode->i_blkbits;
+	unsigned int blocksize, bbits;
 	int nr_underway = 0;
 	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
 			WRITE_SYNC : WRITE);
 
-	BUG_ON(!PageLocked(page));
-
-	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
-
-	if (!page_has_buffers(page)) {
-		create_empty_buffers(page, blocksize,
+	head = create_page_buffers(page, inode,
 					(1 << BH_Dirty)|(1 << BH_Uptodate));
-	}
 
 	/*
 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
@@ -1613,9 +1641,12 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	 * handle that here by just cleaning them.
 	 */
 
-	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-	head = page_buffers(page);
 	bh = head;
+	blocksize = bh->b_size;
+	bbits = block_size_bits(blocksize);
+
+	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+	last_block = (i_size_read(inode) - 1) >> bbits;
 
 	/*
 	 * Get all the dirty buffers mapped to disk addresses and
@@ -1806,12 +1837,10 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
 	BUG_ON(to > PAGE_CACHE_SIZE);
 	BUG_ON(from > to);
 
-	blocksize = 1 << inode->i_blkbits;
-	if (!page_has_buffers(page))
-		create_empty_buffers(page, blocksize, 0);
-	head = page_buffers(page);
+	head = create_page_buffers(page, inode, 0);
+	blocksize = head->b_size;
+	bbits = block_size_bits(blocksize);
 
-	bbits = inode->i_blkbits;
 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
 
 	for(bh = head, block_start = 0; bh != head || !block_start;
@@ -1881,11 +1910,11 @@ static int __block_commit_write(struct inode *inode, struct page *page,
 	unsigned blocksize;
 	struct buffer_head *bh, *head;
 
-	blocksize = 1 << inode->i_blkbits;
+	bh = head = page_buffers(page);
+	blocksize = bh->b_size;
 
-	for(bh = head = page_buffers(page), block_start = 0;
-	    bh != head || !block_start;
-	    block_start=block_end, bh = bh->b_this_page) {
+	block_start = 0;
+	do {
 		block_end = block_start + blocksize;
 		if (block_end <= from || block_start >= to) {
 			if (!buffer_uptodate(bh))
@@ -1895,7 +1924,10 @@ static int __block_commit_write(struct inode *inode, struct page *page,
 			mark_buffer_dirty(bh);
 		}
 		clear_buffer_new(bh);
-	}
+
+		block_start = block_end;
+		bh = bh->b_this_page;
+	} while (bh != head);
 
 	/*
 	 * If this is a partial write which happened to make all buffers
@@ -2020,7 +2052,6 @@ EXPORT_SYMBOL(generic_write_end);
 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
 					unsigned long from)
 {
-	struct inode *inode = page->mapping->host;
 	unsigned block_start, block_end, blocksize;
 	unsigned to;
 	struct buffer_head *bh, *head;
@@ -2029,13 +2060,13 @@ int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
 	if (!page_has_buffers(page))
 		return 0;
 
-	blocksize = 1 << inode->i_blkbits;
+	head = page_buffers(page);
+	blocksize = head->b_size;
 	to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
 	to = from + to;
 	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
 		return 0;
 
-	head = page_buffers(page);
 	bh = head;
 	block_start = 0;
||||||
do {
|
do {
|
||||||
@ -2068,18 +2099,16 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
|
|||||||
struct inode *inode = page->mapping->host;
|
struct inode *inode = page->mapping->host;
|
||||||
sector_t iblock, lblock;
|
sector_t iblock, lblock;
|
||||||
struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
|
struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
|
||||||
unsigned int blocksize;
|
unsigned int blocksize, bbits;
|
||||||
int nr, i;
|
int nr, i;
|
||||||
int fully_mapped = 1;
|
int fully_mapped = 1;
|
||||||
|
|
||||||
BUG_ON(!PageLocked(page));
|
head = create_page_buffers(page, inode, 0);
|
||||||
blocksize = 1 << inode->i_blkbits;
|
blocksize = head->b_size;
|
||||||
if (!page_has_buffers(page))
|
bbits = block_size_bits(blocksize);
|
||||||
create_empty_buffers(page, blocksize, 0);
|
|
||||||
head = page_buffers(page);
|
|
||||||
|
|
||||||
iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
|
iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
|
||||||
lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
|
lblock = (i_size_read(inode)+blocksize-1) >> bbits;
|
||||||
bh = head;
|
bh = head;
|
||||||
nr = 0;
|
nr = 0;
|
||||||
i = 0;
|
i = 0;
|
||||||
|
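The buffer.c hunks above all follow one pattern: the block size is read once from the buffer_head (head->b_size) instead of being re-derived from inode->i_blkbits at every use, and the corresponding shift is recovered with the new block_size_bits() helper, i.e. ilog2(). A minimal userspace sketch of that arithmetic, with __builtin_ctz standing in for the kernel's ilog2() and a 4 KiB page size assumed:

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_CACHE_SHIFT 12             /* assumes 4 KiB pages, as on x86 */

    /* userspace stand-in for the kernel's ilog2() on a power of two */
    static inline int block_size_bits(unsigned int blocksize)
    {
            return __builtin_ctz(blocksize);
    }

    int main(void)
    {
            unsigned long page_index = 7;   /* hypothetical page-cache index */
            unsigned int blocksize;

            /* blocksize is a power of two in the range 512..PAGE_SIZE */
            for (blocksize = 512; blocksize <= 4096; blocksize <<= 1) {
                    int bbits = block_size_bits(blocksize);

                    /* first block of this page, as in __block_write_full_page() */
                    unsigned long long block =
                            (unsigned long long)page_index << (PAGE_CACHE_SHIFT - bbits);

                    assert(1U << bbits == blocksize);
                    printf("blocksize %4u -> bbits %2d, first block %llu\n",
                           blocksize, bbits, block);
            }
            return 0;
    }

Taking one snapshot of the size matters because i_blkbits can change under a racing set_blocksize(); every value derived from the same buffer_head stays mutually consistent.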
fs/cifs/file.c
@@ -1794,7 +1794,6 @@ static int cifs_writepages(struct address_space *mapping,
 	struct TCP_Server_Info *server;
 	struct page *page;
 	int rc = 0;
-	loff_t isize = i_size_read(mapping->host);

 	/*
 	 * If wsize is smaller than the page cache size, default to writing
@@ -1899,7 +1898,7 @@ retry:
 		 */
 		set_page_writeback(page);

-		if (page_offset(page) >= isize) {
+		if (page_offset(page) >= i_size_read(mapping->host)) {
 			done = true;
 			unlock_page(page);
 			end_page_writeback(page);
@@ -1932,7 +1931,8 @@ retry:
 		wdata->offset = page_offset(wdata->pages[0]);
 		wdata->pagesz = PAGE_CACHE_SIZE;
 		wdata->tailsz =
-			min(isize - page_offset(wdata->pages[nr_pages - 1]),
+			min(i_size_read(mapping->host) -
+			    page_offset(wdata->pages[nr_pages - 1]),
 			    (loff_t)PAGE_CACHE_SIZE);
 		wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
 			       wdata->tailsz;
fs/cifs/readdir.c
@@ -86,14 +86,17 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,

 	dentry = d_lookup(parent, name);
 	if (dentry) {
+		int err;
 		inode = dentry->d_inode;
 		/* update inode in place if i_ino didn't change */
 		if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
 			cifs_fattr_to_inode(inode, fattr);
 			return dentry;
 		}
-		d_drop(dentry);
+		err = d_invalidate(dentry);
 		dput(dentry);
+		if (err)
+			return NULL;
 	}

 	dentry = d_alloc(parent, name);
fs/cifs/smb1ops.c
@@ -766,7 +766,6 @@ smb_set_file_info(struct inode *inode, const char *full_path,
 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
 	struct tcon_link *tlink = NULL;
 	struct cifs_tcon *tcon;
-	FILE_BASIC_INFO info_buf;

 	/* if the file is already open for write, just use that fileid */
 	open_file = find_writable_file(cinode, true);
@@ -817,7 +816,7 @@ smb_set_file_info(struct inode *inode, const char *full_path,
 		netpid = current->tgid;

 set_via_filehandle:
-	rc = CIFSSMBSetFileInfo(xid, tcon, &info_buf, netfid, netpid);
+	rc = CIFSSMBSetFileInfo(xid, tcon, buf, netfid, netpid);
 	if (!rc)
 		cinode->cifsAttrs = le32_to_cpu(buf->Attributes);

fs/direct-io.c
@@ -540,6 +540,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
 	sector_t fs_endblk;	/* Into file, in filesystem-sized blocks */
 	unsigned long fs_count;	/* Number of filesystem-sized blocks */
 	int create;
+	unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;

 	/*
 	 * If there was a memory error and we've overwritten all the
@@ -554,7 +555,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
 		fs_count = fs_endblk - fs_startblk + 1;

 		map_bh->b_state = 0;
-		map_bh->b_size = fs_count << dio->inode->i_blkbits;
+		map_bh->b_size = fs_count << i_blkbits;

 		/*
 		 * For writes inside i_size on a DIO_SKIP_HOLES filesystem we
@@ -1053,7 +1054,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	int seg;
 	size_t size;
 	unsigned long addr;
-	unsigned blkbits = inode->i_blkbits;
+	unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
+	unsigned blkbits = i_blkbits;
 	unsigned blocksize_mask = (1 << blkbits) - 1;
 	ssize_t retval = -EINVAL;
 	loff_t end = offset;
@@ -1149,7 +1151,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	dio->inode = inode;
 	dio->rw = rw;
 	sdio.blkbits = blkbits;
-	sdio.blkfactor = inode->i_blkbits - blkbits;
+	sdio.blkfactor = i_blkbits - blkbits;
 	sdio.block_in_file = offset >> blkbits;

 	sdio.get_block = get_block;
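The direct-io changes are the same idea: i_blkbits is sampled once with ACCESS_ONCE() and every later shift is computed from that snapshot, with sdio->blkfactor bridging the filesystem block shift and the (possibly smaller) I/O block shift. A small runnable model of that arithmetic, with illustrative shift values:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned blkbits = 9;           /* 512-byte I/O blocks (sector-sized) */
            unsigned i_blkbits = 12;        /* assumed 4 KiB filesystem blocks */
            unsigned blkfactor = i_blkbits - blkbits;

            unsigned long fs_count = 3;     /* number of filesystem-sized blocks */
            unsigned long bytes = fs_count << (blkbits + blkfactor);

            /* identical to fs_count << i_blkbits, the expression the fix uses */
            assert(bytes == fs_count << i_blkbits);
            printf("%lu fs blocks = %lu bytes (blkfactor %u)\n",
                   fs_count, bytes, blkfactor);
            return 0;
    }

If get_more_blocks() recomputed dio->inode->i_blkbits while blkfactor had been derived from an older value, the two shifts could disagree and b_size would be wrong; one snapshot removes that window.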
14
fs/file.c
@@ -994,16 +994,18 @@ int iterate_fd(struct files_struct *files, unsigned n,
 		const void *p)
 {
 	struct fdtable *fdt;
-	struct file *file;
 	int res = 0;
 	if (!files)
 		return 0;
 	spin_lock(&files->file_lock);
-	fdt = files_fdtable(files);
-	while (!res && n < fdt->max_fds) {
-		file = rcu_dereference_check_fdtable(files, fdt->fd[n++]);
-		if (file)
-			res = f(p, file, n);
+	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
+		struct file *file;
+		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
+		if (!file)
+			continue;
+		res = f(p, file, n);
+		if (res)
+			break;
 	}
 	spin_unlock(&files->file_lock);
 	return res;
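The iterate_fd() rewrite fixes an off-by-one: the old loop post-incremented n before invoking the callback, so a callback inspecting its fd argument saw the descriptor number plus one. It also stops the walk as soon as a callback returns non-zero. A runnable userspace model of the fixed loop, with a hypothetical table standing in for the fd array:

    #include <stdio.h>

    /* hypothetical stand-ins for the fdtable and the file type */
    static const char *fd_table[] = { "stdin", NULL, "socket", "log" };
    #define MAX_FDS (sizeof(fd_table) / sizeof(fd_table[0]))

    static int print_fd(const void *p, const char *file, unsigned fd)
    {
            (void)p;
            printf("fd %u -> %s\n", fd, file);
            return 0;       /* non-zero stops the walk, as in the fixed loop */
    }

    /* models the fixed iterate_fd(): the callback sees the fd being visited */
    static int iterate_fd_model(unsigned n,
                                int (*f)(const void *, const char *, unsigned),
                                const void *p)
    {
            int res = 0;

            for (; n < MAX_FDS; n++) {
                    if (!fd_table[n])       /* skip empty slots */
                            continue;
                    res = f(p, fd_table[n], n);     /* n, not n + 1 */
                    if (res)
                            break;
            }
            return res;
    }

    int main(void)
    {
            return iterate_fd_model(0, print_fd, NULL);
    }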
fs/namei.c
@@ -2131,6 +2131,11 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
 	if (!len)
 		return ERR_PTR(-EACCES);

+	if (unlikely(name[0] == '.')) {
+		if (len < 2 || (len == 2 && name[1] == '.'))
+			return ERR_PTR(-EACCES);
+	}
+
 	while (len--) {
 		c = *(const unsigned char *)name++;
 		if (c == '/' || c == '\0')
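The lookup_one_len() hunk rejects "." and ".." outright: filesystems calling this helper never expect a lookup of the current or parent directory, which is the bug fixed by the "lookup_one_len: don't accept . and .." commit in this merge. The added condition reads a little tersely; a runnable restatement:

    #include <assert.h>
    #include <stdbool.h>

    /* mirrors the check added to lookup_one_len(): "." and ".." are rejected */
    static bool name_is_dot_or_dotdot(const char *name, int len)
    {
            if (name[0] == '.')
                    return len < 2 || (len == 2 && name[1] == '.');
            return false;
    }

    int main(void)
    {
            assert(name_is_dot_or_dotdot(".", 1));          /* len < 2 */
            assert(name_is_dot_or_dotdot("..", 2));         /* len == 2, second '.' */
            assert(!name_is_dot_or_dotdot(".hidden", 7));   /* ordinary dotfile */
            assert(!name_is_dot_or_dotdot("file", 4));
            return 0;
    }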
fs/nfs/dir.c
@@ -450,7 +450,8 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
 			nfs_refresh_inode(dentry->d_inode, entry->fattr);
 			goto out;
 		} else {
-			d_drop(dentry);
+			if (d_invalidate(dentry) != 0)
+				goto out;
 			dput(dentry);
 		}
 	}
@@ -1100,6 +1101,8 @@ out_set_verifier:
 out_zap_parent:
 	nfs_zap_caches(dir);
 out_bad:
+	nfs_free_fattr(fattr);
+	nfs_free_fhandle(fhandle);
 	nfs_mark_for_revalidate(dir);
 	if (inode && S_ISDIR(inode->i_mode)) {
 		/* Purge readdir caches. */
@@ -1112,8 +1115,6 @@ out_zap_parent:
 		shrink_dcache_parent(dentry);
 	}
 	d_drop(dentry);
-	nfs_free_fattr(fattr);
-	nfs_free_fhandle(fhandle);
 	dput(parent);
 	dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is invalid\n",
 			__func__, dentry->d_parent->d_name.name,
include/linux/fs.h
@@ -462,8 +462,6 @@ struct block_device {
 	int			bd_fsfreeze_count;
 	/* Mutex for freeze */
 	struct mutex		bd_fsfreeze_mutex;
-	/* A semaphore that prevents I/O while block size is being changed */
-	struct percpu_rw_semaphore	bd_block_size_semaphore;
 };

 /*
@@ -2049,7 +2047,6 @@ extern void unregister_blkdev(unsigned int, const char *);
 extern struct block_device *bdget(dev_t);
 extern struct block_device *bdgrab(struct block_device *bdev);
 extern void bd_set_size(struct block_device *, loff_t size);
-extern sector_t blkdev_max_block(struct block_device *bdev);
 extern void bd_forget(struct inode *inode);
 extern void bdput(struct block_device *);
 extern void invalidate_bdev(struct block_device *);
@@ -2379,8 +2376,6 @@ extern int generic_segment_checks(const struct iovec *iov,
 			unsigned long *nr_segs, size_t *count, int access_flags);

 /* fs/block_dev.c */
-extern ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
-				unsigned long nr_segs, loff_t pos);
 extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
 				unsigned long nr_segs, loff_t pos);
 extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
include/linux/gfp.h
@@ -30,10 +30,9 @@ struct vm_area_struct;
 #define ___GFP_HARDWALL		0x20000u
 #define ___GFP_THISNODE		0x40000u
 #define ___GFP_RECLAIMABLE	0x80000u
-#define ___GFP_NOTRACK		0x200000u
-#define ___GFP_NO_KSWAPD	0x400000u
-#define ___GFP_OTHER_NODE	0x800000u
-#define ___GFP_WRITE		0x1000000u
+#define ___GFP_NOTRACK		0x100000u
+#define ___GFP_OTHER_NODE	0x200000u
+#define ___GFP_WRITE		0x400000u

 /*
  * GFP bitmasks..
@@ -86,7 +85,6 @@ struct vm_area_struct;
 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
 #define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)  /* Don't track with kmemcheck */

-#define __GFP_NO_KSWAPD	((__force gfp_t)___GFP_NO_KSWAPD)
 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
 #define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)	/* Allocator intends to dirty page */

@@ -96,7 +94,7 @@ struct vm_area_struct;
  */
 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)

-#define __GFP_BITS_SHIFT 25	/* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 23	/* Room for N __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

 /* This equals 0, but use constants in case they ever change */
@@ -116,8 +114,7 @@ struct vm_area_struct;
 			 __GFP_MOVABLE)
 #define GFP_IOFS	(__GFP_IO | __GFP_FS)
 #define GFP_TRANSHUGE	(GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
-			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
-			 __GFP_NO_KSWAPD)
+			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN)

 #ifdef CONFIG_NUMA
 #define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
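With ___GFP_NO_KSWAPD gone, the remaining high bits are renumbered to close the gap and __GFP_BITS_SHIFT drops from 25 to 23. The invariants are that each ___GFP_* value is a distinct single bit and that every bit stays below the shift; a compile-time check of the new values (constants copied from the hunk above):

    #include <stdio.h>

    #define ___GFP_RECLAIMABLE      0x80000u
    #define ___GFP_NOTRACK          0x100000u
    #define ___GFP_OTHER_NODE       0x200000u
    #define ___GFP_WRITE            0x400000u
    #define __GFP_BITS_SHIFT        23

    /* every flag must be a single bit that still fits under the shift */
    _Static_assert((___GFP_WRITE & (___GFP_WRITE - 1)) == 0, "single bit");
    _Static_assert(___GFP_WRITE < (1u << __GFP_BITS_SHIFT), "fits in mask");
    _Static_assert((___GFP_NOTRACK | ___GFP_OTHER_NODE | ___GFP_WRITE)
                   == 0x700000u, "renumbered bits are contiguous");

    int main(void)
    {
            printf("__GFP_BITS_MASK = %#x\n", (1u << __GFP_BITS_SHIFT) - 1);
            return 0;
    }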
include/linux/hw_breakpoint.h
@@ -1,35 +1,8 @@
 #ifndef _LINUX_HW_BREAKPOINT_H
 #define _LINUX_HW_BREAKPOINT_H

-enum {
-	HW_BREAKPOINT_LEN_1 = 1,
-	HW_BREAKPOINT_LEN_2 = 2,
-	HW_BREAKPOINT_LEN_4 = 4,
-	HW_BREAKPOINT_LEN_8 = 8,
-};
-
-enum {
-	HW_BREAKPOINT_EMPTY	= 0,
-	HW_BREAKPOINT_R		= 1,
-	HW_BREAKPOINT_W		= 2,
-	HW_BREAKPOINT_RW	= HW_BREAKPOINT_R | HW_BREAKPOINT_W,
-	HW_BREAKPOINT_X		= 4,
-	HW_BREAKPOINT_INVALID	= HW_BREAKPOINT_RW | HW_BREAKPOINT_X,
-};
-
-enum bp_type_idx {
-	TYPE_INST	= 0,
-#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
-	TYPE_DATA	= 0,
-#else
-	TYPE_DATA	= 1,
-#endif
-	TYPE_MAX
-};
-
-#ifdef __KERNEL__
-
 #include <linux/perf_event.h>
+#include <uapi/linux/hw_breakpoint.h>

 #ifdef CONFIG_HAVE_HW_BREAKPOINT

@@ -151,6 +124,4 @@ static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp)
 }

 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
-#endif /* __KERNEL__ */
-
 #endif /* _LINUX_HW_BREAKPOINT_H */
include/linux/percpu-rwsem.h
@@ -13,7 +13,7 @@ struct percpu_rw_semaphore {
 };

 #define light_mb()	barrier()
-#define heavy_mb()	synchronize_sched()
+#define heavy_mb()	synchronize_sched_expedited()

 static inline void percpu_down_read(struct percpu_rw_semaphore *p)
 {
@@ -51,7 +51,7 @@ static inline void percpu_down_write(struct percpu_rw_semaphore *p)
 {
 	mutex_lock(&p->mtx);
 	p->locked = true;
-	synchronize_sched(); /* make sure that all readers exit the rcu_read_lock_sched region */
+	synchronize_sched_expedited(); /* make sure that all readers exit the rcu_read_lock_sched region */
 	while (__percpu_count(p->counters))
 		msleep(1);
 	heavy_mb(); /* C, between read of p->counter and write to data, paired with B */
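Both substitutions point the same way: the writer side of this early percpu-rwsem pays for a full scheduler grace period, and the expedited variant shortens that stall. For context, the intended usage pattern, as a kernel-style sketch that will not build outside the tree, with a hypothetical semaphore and payload:

    #include <linux/percpu-rwsem.h>

    static struct percpu_rw_semaphore demo_sem;     /* hypothetical */
    static unsigned long protected_state;

    static unsigned long read_state(void)
    {
            unsigned long v;

            percpu_down_read(&demo_sem);    /* fast path: per-cpu counter */
            v = protected_state;
            percpu_up_read(&demo_sem);
            return v;
    }

    static void write_state(unsigned long v)
    {
            percpu_down_write(&demo_sem);   /* slow path: waits out all readers */
            protected_state = v;
            percpu_up_write(&demo_sem);
    }

Readers stay nearly free; all the latency is concentrated in percpu_down_write(), which is exactly where synchronize_sched_expedited() now helps.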
include/trace/events/gfpflags.h
@@ -36,7 +36,6 @@
 	{(unsigned long)__GFP_RECLAIMABLE,	"GFP_RECLAIMABLE"},	\
 	{(unsigned long)__GFP_MOVABLE,		"GFP_MOVABLE"},		\
 	{(unsigned long)__GFP_NOTRACK,		"GFP_NOTRACK"},		\
-	{(unsigned long)__GFP_NO_KSWAPD,	"GFP_NO_KSWAPD"},	\
 	{(unsigned long)__GFP_OTHER_NODE,	"GFP_OTHER_NODE"}	\
 	) : "GFP_NOWAIT"

include/uapi/linux/Kbuild
@@ -415,3 +415,4 @@ header-y += wireless.h
 header-y += x25.h
 header-y += xattr.h
 header-y += xfrm.h
+header-y += hw_breakpoint.h
30
include/uapi/linux/hw_breakpoint.h
Normal file
@@ -0,0 +1,30 @@
+#ifndef _UAPI_LINUX_HW_BREAKPOINT_H
+#define _UAPI_LINUX_HW_BREAKPOINT_H
+
+enum {
+	HW_BREAKPOINT_LEN_1 = 1,
+	HW_BREAKPOINT_LEN_2 = 2,
+	HW_BREAKPOINT_LEN_4 = 4,
+	HW_BREAKPOINT_LEN_8 = 8,
+};
+
+enum {
+	HW_BREAKPOINT_EMPTY	= 0,
+	HW_BREAKPOINT_R		= 1,
+	HW_BREAKPOINT_W		= 2,
+	HW_BREAKPOINT_RW	= HW_BREAKPOINT_R | HW_BREAKPOINT_W,
+	HW_BREAKPOINT_X		= 4,
+	HW_BREAKPOINT_INVALID	= HW_BREAKPOINT_RW | HW_BREAKPOINT_X,
+};
+
+enum bp_type_idx {
+	TYPE_INST	= 0,
+#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
+	TYPE_DATA	= 0,
+#else
+	TYPE_DATA	= 1,
+#endif
+	TYPE_MAX
+};
+
+#endif /* _UAPI_LINUX_HW_BREAKPOINT_H */
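Moving these constants to include/uapi makes them part of the exported ABI, which is how userspace arms hardware watchpoints through perf. A hedged userspace sketch (error handling kept minimal; the syscall and attr fields are the standard perf API):

    #include <linux/hw_breakpoint.h>        /* HW_BREAKPOINT_* from the uapi header */
    #include <linux/perf_event.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static long watched;    /* variable to watch for writes */

    int main(void)
    {
            struct perf_event_attr attr;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.type = PERF_TYPE_BREAKPOINT;
            attr.size = sizeof(attr);
            attr.bp_type = HW_BREAKPOINT_W;         /* write-only watchpoint */
            attr.bp_addr = (unsigned long)&watched;
            attr.bp_len = HW_BREAKPOINT_LEN_8;
            attr.sample_period = 1;

            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0)
                    perror("perf_event_open");
            else
                    printf("write watchpoint armed on %p (fd %d)\n",
                           (void *)&watched, fd);
            return fd < 0;
    }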
kernel/events/hw_breakpoint.c
@@ -111,14 +111,16 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
  * Count the number of breakpoints of the same type and same task.
  * The given event must be not on the list.
  */
-static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
+static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
 {
 	struct task_struct *tsk = bp->hw.bp_target;
 	struct perf_event *iter;
 	int count = 0;

 	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
-		if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type)
+		if (iter->hw.bp_target == tsk &&
+		    find_slot_idx(iter) == type &&
+		    cpu == iter->cpu)
 			count += hw_breakpoint_weight(iter);
 	}

@@ -141,7 +143,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		if (!tsk)
 			slots->pinned += max_task_bp_pinned(cpu, type);
 		else
-			slots->pinned += task_bp_pinned(bp, type);
+			slots->pinned += task_bp_pinned(cpu, bp, type);
 		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);

 		return;
@@ -154,7 +156,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		if (!tsk)
 			nr += max_task_bp_pinned(cpu, type);
 		else
-			nr += task_bp_pinned(bp, type);
+			nr += task_bp_pinned(cpu, bp, type);

 		if (nr > slots->pinned)
 			slots->pinned = nr;
@@ -188,7 +190,7 @@ static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
 	int old_idx = 0;
 	int idx = 0;

-	old_count = task_bp_pinned(bp, type);
+	old_count = task_bp_pinned(cpu, bp, type);
 	old_idx = old_count - 1;
 	idx = old_idx + weight;

kernel/sched/auto_group.c
@@ -143,15 +143,11 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)

 	p->signal->autogroup = autogroup_kref_get(ag);

-	if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
-		goto out;
-
 	t = p;
 	do {
 		sched_move_task(t);
 	} while_each_thread(p, t);

-out:
 	unlock_task_sighand(p, &flags);
 	autogroup_kref_put(prev);
 }
kernel/sched/auto_group.h
@@ -4,11 +4,6 @@
 #include <linux/rwsem.h>

 struct autogroup {
-	/*
-	 * reference doesn't mean how many thread attach to this
-	 * autogroup now. It just stands for the number of task
-	 * could use this autogroup.
-	 */
 	struct kref		kref;
 	struct task_group	*tg;
 	struct rw_semaphore	lock;
kernel/workqueue.c
@@ -1364,6 +1364,17 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
 	BUG_ON(timer_pending(timer));
 	BUG_ON(!list_empty(&work->entry));

+	/*
+	 * If @delay is 0, queue @dwork->work immediately.  This is for
+	 * both optimization and correctness.  The earliest @timer can
+	 * expire is on the closest next tick and delayed_work users depend
+	 * on that there's no such delay when @delay is 0.
+	 */
+	if (!delay) {
+		__queue_work(cpu, wq, &dwork->work);
+		return;
+	}
+
 	timer_stats_timer_set_start_info(&dwork->timer);

 	/*
@@ -1417,9 +1428,6 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	bool ret = false;
 	unsigned long flags;

-	if (!delay)
-		return queue_work_on(cpu, wq, &dwork->work);
-
 	/* read the comment in __queue_work() */
 	local_irq_save(flags);

@@ -2407,8 +2415,10 @@ static int rescuer_thread(void *__wq)
 repeat:
 	set_current_state(TASK_INTERRUPTIBLE);

-	if (kthread_should_stop())
+	if (kthread_should_stop()) {
+		__set_current_state(TASK_RUNNING);
 		return 0;
+	}

 	/*
 	 * See whether any cpu is asking for help.  Unbounded
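The workqueue change moves the delay == 0 special case from queue_delayed_work_on() down into __queue_delayed_work(), so every entry point, including mod_delayed_work_on(), now queues immediately instead of arming a timer that cannot fire before the next tick. What that guarantees to callers, as a kernel-style sketch with a hypothetical work item (not buildable outside the tree):

    #include <linux/workqueue.h>

    static void flush_caches_fn(struct work_struct *work)
    {
            /* ... the actual deferred work ... */
    }

    static DECLARE_DELAYED_WORK(flush_caches, flush_caches_fn);

    static void kick_flush(bool urgent)
    {
            /*
             * delay == 0 now goes straight through __queue_work() for
             * queue_delayed_work() and mod_delayed_work() alike, so an
             * urgent request is not parked until the next tick.
             */
            mod_delayed_work(system_wq, &flush_caches,
                             urgent ? 0 : msecs_to_jiffies(100));
    }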
mm/memory-failure.c
@@ -1476,9 +1476,17 @@ int soft_offline_page(struct page *page, int flags)
 {
 	int ret;
 	unsigned long pfn = page_to_pfn(page);
+	struct page *hpage = compound_trans_head(page);

 	if (PageHuge(page))
 		return soft_offline_huge_page(page, flags);
+	if (PageTransHuge(hpage)) {
+		if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
+			pr_info("soft offline: %#lx: failed to split THP\n",
+				pfn);
+			return -EBUSY;
+		}
+	}

 	ret = get_any_page(page, pfn, flags);
 	if (ret < 0)
mm/page_alloc.c
@@ -1422,7 +1422,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 		}
 	}

-	return 1UL << order;
+	return 1UL << alloc_order;
 }

 /*
@@ -2378,6 +2378,15 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
 	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
 }

+/* Returns true if the allocation is likely for THP */
+static bool is_thp_alloc(gfp_t gfp_mask, unsigned int order)
+{
+	if (order == pageblock_order &&
+	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
+		return true;
+	return false;
+}
+
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
@@ -2416,9 +2425,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto nopage;

 restart:
-	if (!(gfp_mask & __GFP_NO_KSWAPD))
+	/* The decision whether to wake kswapd for THP is made later */
+	if (!is_thp_alloc(gfp_mask, order))
 		wake_all_kswapd(order, zonelist, high_zoneidx,
 						zone_idx(preferred_zone));

 	/*
 	 * OK, we're below the kswapd watermark and have kicked background
@@ -2488,15 +2498,21 @@ rebalance:
 		goto got_pg;
 	sync_migration = true;

-	/*
-	 * If compaction is deferred for high-order allocations, it is because
-	 * sync compaction recently failed. In this is the case and the caller
-	 * requested a movable allocation that does not heavily disrupt the
-	 * system then fail the allocation instead of entering direct reclaim.
-	 */
-	if ((deferred_compaction || contended_compaction) &&
-						(gfp_mask & __GFP_NO_KSWAPD))
-		goto nopage;
+	if (is_thp_alloc(gfp_mask, order)) {
+		/*
+		 * If compaction is deferred for high-order allocations, it is
+		 * because sync compaction recently failed. If this is the case
+		 * and the caller requested a movable allocation that does not
+		 * heavily disrupt the system then fail the allocation instead
+		 * of entering direct reclaim.
+		 */
+		if (deferred_compaction || contended_compaction)
+			goto nopage;
+
+		/* If process is willing to reclaim/compact then wake kswapd */
+		wake_all_kswapd(order, zonelist, high_zoneidx,
+						zone_idx(preferred_zone));
+	}

 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
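With __GFP_NO_KSWAPD removed, THP allocations are recognized structurally instead of by flag: an allocation of exactly pageblock_order that asked for __GFP_MOVABLE but not __GFP_REPEAT is treated as a likely THP fault. A runnable userspace model of the predicate (flag values are illustrative stand-ins; pageblock order assumed to be 9, as on x86 with 4 KiB pages and 2 MiB huge pages):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define __GFP_MOVABLE   0x08u   /* stand-in values for illustration */
    #define __GFP_REPEAT    0x400u
    #define PAGEBLOCK_ORDER 9

    static bool is_thp_alloc(unsigned gfp_mask, unsigned order)
    {
            return order == PAGEBLOCK_ORDER &&
                   (gfp_mask & (__GFP_MOVABLE | __GFP_REPEAT)) == __GFP_MOVABLE;
    }

    int main(void)
    {
            /* a movable pageblock-order request looks like THP ... */
            assert(is_thp_alloc(__GFP_MOVABLE, PAGEBLOCK_ORDER));
            /* ... but __GFP_REPEAT (hugetlbfs-style) or other orders do not */
            assert(!is_thp_alloc(__GFP_MOVABLE | __GFP_REPEAT, PAGEBLOCK_ORDER));
            assert(!is_thp_alloc(__GFP_MOVABLE, 2));
            printf("is_thp_alloc model OK\n");
            return 0;
    }

The payoff is in the rebalance path: a THP allocation whose compaction was recently deferred fails fast instead of entering direct reclaim, and kswapd is woken only once the caller has shown it is willing to reclaim.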
Some files were not shown because too many files have changed in this diff.