diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 274b32d12532..492e81df2968 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -387,26 +387,6 @@ Who: Tejun Heo ---------------------------- -What: Support for lcd_switch and display_get in asus-laptop driver -When: March 2010 -Why: These two features use non-standard interfaces. There are the - only features that really need multiple path to guess what's - the right method name on a specific laptop. - - Removing them will allow to remove a lot of code an significantly - clean the drivers. - - This will affect the backlight code which won't be able to know - if the backlight is on or off. The platform display file will also be - write only (like the one in eeepc-laptop). - - This should'nt affect a lot of user because they usually know - when their display is on or off. - -Who: Corentin Chary - ----------------------------- - What: sysfs-class-rfkill state file When: Feb 2014 Files: net/rfkill/core.c diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt new file mode 100644 index 000000000000..23fcb05175be --- /dev/null +++ b/Documentation/input/event-codes.txt @@ -0,0 +1,262 @@ +The input protocol uses a map of types and codes to express input device values +to userspace. This document describes the types and codes and how and when they +may be used. + +A single hardware event generates multiple input events. Each input event +contains the new value of a single data item. A special event type, EV_SYN, is +used to separate input events into packets of input data changes occurring at +the same moment in time. In the following, the term "event" refers to a single +input event encompassing a type, code, and value. + +The input protocol is a stateful protocol. Events are emitted only when values +of event codes have changed. However, the state is maintained within the Linux +input subsystem; drivers do not need to maintain the state and may attempt to +emit unchanged values without harm. Userspace may obtain the current state of +event code values using the EVIOCG* ioctls defined in linux/input.h. The event +reports supported by a device are also provided by sysfs in +class/input/event*/device/capabilities/, and the properties of a device are +provided in class/input/event*/device/properties. + +Types: +========== +Types are groupings of codes under a logical input construct. Each type has a +set of applicable codes to be used in generating events. See the Codes section +for details on valid codes for each type. + +* EV_SYN: + - Used as markers to separate events. Events may be separated in time or in + space, such as with the multitouch protocol. + +* EV_KEY: + - Used to describe state changes of keyboards, buttons, or other key-like + devices. + +* EV_REL: + - Used to describe relative axis value changes, e.g. moving the mouse 5 units + to the left. + +* EV_ABS: + - Used to describe absolute axis value changes, e.g. describing the + coordinates of a touch on a touchscreen. + +* EV_MSC: + - Used to describe miscellaneous input data that do not fit into other types. + +* EV_SW: + - Used to describe binary state input switches. + +* EV_LED: + - Used to turn LEDs on devices on and off. + +* EV_SND: + - Used to output sound to devices. + +* EV_REP: + - Used for autorepeating devices. + +* EV_FF: + - Used to send force feedback commands to an input device. 
+
+* EV_PWR:
+  - A special type for power button and switch input.
+
+* EV_FF_STATUS:
+  - Used to receive force feedback device status.
+
+Codes:
+==========
+Codes define the precise type of event.
+
+EV_SYN:
+----------
+EV_SYN event values are undefined. Their usage is defined only by when they are
+sent in the evdev event stream.
+
+* SYN_REPORT:
+  - Used to synchronize and separate events into packets of input data changes
+    occurring at the same moment in time. For example, motion of a mouse may set
+    the REL_X and REL_Y values for one motion, then emit a SYN_REPORT. The next
+    motion will emit more REL_X and REL_Y values and send another SYN_REPORT.
+
+* SYN_CONFIG:
+  - TBD
+
+* SYN_MT_REPORT:
+  - Used to synchronize and separate touch events. See the
+    multi-touch-protocol.txt document for more information.
+
+* SYN_DROPPED:
+  - Used to indicate buffer overrun in the evdev client's event queue.
+    Client should ignore all events up to and including next SYN_REPORT
+    event and query the device (using EVIOCG* ioctls) to obtain its
+    current state.
+
+EV_KEY:
+----------
+EV_KEY events take the form KEY_<name> or BTN_<name>. For example, KEY_A is used
+to represent the 'A' key on a keyboard. When a key is depressed, an event with
+the key's code is emitted with value 1. When the key is released, an event is
+emitted with value 0. Some hardware sends events when a key is repeated. These
+events have a value of 2. In general, KEY_<name> is used for keyboard keys, and
+BTN_<name> is used for other types of momentary switch events.
+
+A few EV_KEY codes have special meanings:
+
+* BTN_TOOL_<name>:
+  - These codes are used in conjunction with input trackpads, tablets, and
+    touchscreens. These devices may be used with fingers, pens, or other tools.
+    When an event occurs and a tool is used, the corresponding BTN_TOOL_<name>
+    code should be set to a value of 1. When the tool is no longer interacting
+    with the input device, the BTN_TOOL_<name> code should be reset to 0. All
+    trackpads, tablets, and touchscreens should use at least one BTN_TOOL_<name>
+    code when events are generated.
+
+* BTN_TOUCH:
+    BTN_TOUCH is used for touch contact. While an input tool is determined to be
+    within meaningful physical contact, the value of this property must be set
+    to 1. Meaningful physical contact may mean any contact, or it may mean
+    contact conditioned by an implementation defined property. For example, a
+    touchpad may set the value to 1 only when the touch pressure rises above a
+    certain value. BTN_TOUCH may be combined with BTN_TOOL_<name> codes. For
+    example, a pen tablet may set BTN_TOOL_PEN to 1 and BTN_TOUCH to 0 while the
+    pen is hovering over but not touching the tablet surface.
+
+Note: For appropriate function of the legacy mousedev emulation driver,
+BTN_TOUCH must be the first evdev code emitted in a synchronization frame.
+
+Note: Historically a touch device with BTN_TOOL_FINGER and BTN_TOUCH was
+interpreted as a touchpad by userspace, while a similar device without
+BTN_TOOL_FINGER was interpreted as a touchscreen. For backwards compatibility
+with current userspace it is recommended to follow this distinction. In the
+future, this distinction will be deprecated and the device properties ioctl
+EVIOCGPROP, defined in linux/input.h, will be used to convey the device type.
+
+* BTN_TOOL_FINGER, BTN_TOOL_DOUBLETAP, BTN_TOOL_TRIPLETAP, BTN_TOOL_QUADTAP:
+  - These codes denote one, two, three, and four finger interaction on a
+    trackpad or touchscreen. For example, if the user uses two fingers and moves
+    them on the touchpad in an effort to scroll content on screen,
+    BTN_TOOL_DOUBLETAP should be set to value 1 for the duration of the motion.
+    Note that all BTN_TOOL_<name> codes and the BTN_TOUCH code are orthogonal in
+    purpose. A trackpad event generated by finger touches should generate events
+    for one code from each group. At most only one of these BTN_TOOL_<name>
+    codes should have a value of 1 during any synchronization frame.
+
+Note: Historically some drivers emitted multiple of the finger count codes with
+a value of 1 in the same synchronization frame. This usage is deprecated.
+
+Note: In multitouch drivers, the input_mt_report_finger_count() function should
+be used to emit these codes. Please see multi-touch-protocol.txt for details.
+
+EV_REL:
+----------
+EV_REL events describe relative changes in a property. For example, a mouse may
+move to the left by a certain number of units, but its absolute position in
+space is unknown. If the absolute position is known, EV_ABS codes should be used
+instead of EV_REL codes.
+
+A few EV_REL codes have special meanings:
+
+* REL_WHEEL, REL_HWHEEL:
+  - These codes are used for vertical and horizontal scroll wheels,
+    respectively.
+
+EV_ABS:
+----------
+EV_ABS events describe absolute changes in a property. For example, a touchpad
+may emit coordinates for a touch location.
+
+A few EV_ABS codes have special meanings:
+
+* ABS_DISTANCE:
+  - Used to describe the distance of a tool from an interaction surface. This
+    event should only be emitted while the tool is hovering, meaning in close
+    proximity of the device and while the value of the BTN_TOUCH code is 0. If
+    the input device may be used freely in three dimensions, consider ABS_Z
+    instead.
+
+* ABS_MT_<name>:
+  - Used to describe multitouch input events. Please see
+    multi-touch-protocol.txt for details.
+
+EV_SW:
+----------
+EV_SW events describe stateful binary switches. For example, the SW_LID code is
+used to denote when a laptop lid is closed.
+
+Upon binding to a device or resuming from suspend, a driver must report
+the current switch state. This ensures that the device, kernel, and userspace
+state is in sync.
+
+Upon resume, if the switch state is the same as before suspend, then the input
+subsystem will filter out the duplicate switch state reports. The driver does
+not need to keep the state of the switch at any time.
+
+EV_MSC:
+----------
+EV_MSC events are used for input and output events that do not fall under other
+categories.
+
+EV_LED:
+----------
+EV_LED events are used for input and output to set and query the state of
+various LEDs on devices.
+
+EV_REP:
+----------
+EV_REP events are used for specifying autorepeating events.
+
+EV_SND:
+----------
+EV_SND events are used for sending sound commands to simple sound output
+devices.
+
+EV_FF:
+----------
+EV_FF events are used to initialize a force feedback capable device and to cause
+such device to feedback.
+
+EV_PWR:
+----------
+EV_PWR events are a special type of event used specifically for power
+management. Its usage is not well defined. To be addressed later.
+
+Guidelines:
+==========
+The guidelines below ensure proper single-touch and multi-finger functionality.
+For multi-touch functionality, see the multi-touch-protocol.txt document for
+more information.
+
+Mice:
+----------
+REL_{X,Y} must be reported when the mouse moves. BTN_LEFT must be used to report
+the primary button press. BTN_{MIDDLE,RIGHT,4,5,etc.} should be used to report
+further buttons of the device. REL_WHEEL and REL_HWHEEL should be used to report
+scroll wheel events where available.
+
+Touchscreens:
+----------
+ABS_{X,Y} must be reported with the location of the touch. BTN_TOUCH must be
+used to report when a touch is active on the screen.
+BTN_{MOUSE,LEFT,MIDDLE,RIGHT} must not be reported as the result of touch
+contact. BTN_TOOL_<name> events should be reported where possible.
+
+Trackpads:
+----------
+Legacy trackpads that only provide relative position information must report
+events like mice described above.
+
+Trackpads that provide absolute touch position must report ABS_{X,Y} for the
+location of the touch. BTN_TOUCH should be used to report when a touch is active
+on the trackpad. Where multi-finger support is available, BTN_TOOL_<name> should
+be used to report the number of touches active on the trackpad.
+
+Tablets:
+----------
+BTN_TOOL_<name> events must be reported when a stylus or other tool is active on
+the tablet. ABS_{X,Y} must be reported with the location of the tool. BTN_TOUCH
+should be used to report when the tool is in contact with the tablet.
+BTN_{STYLUS,STYLUS2} should be used to report buttons on the tool itself. Any
+button may be used for buttons on the tablet except BTN_{MOUSE,LEFT}.
+BTN_{0,1,2,etc} are good generic codes for unlabeled buttons. Do not use
+meaningful buttons, like BTN_FORWARD, unless the button is labeled for that
+purpose on the device.
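
As a rough userspace illustration of the protocol documented above (not part of
the patch itself), the sketch below reads struct input_event records from an
evdev node, treats SYN_REPORT as the end of one packet of simultaneous changes,
and treats SYN_DROPPED as a cue to re-query device state with the EVIOCG*
ioctls; the /dev/input/event0 path is only an assumed example.

/* Minimal evdev reader sketch; illustrative only, not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
	struct input_event ev;
	int fd = open("/dev/input/event0", O_RDONLY);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev.type == EV_SYN && ev.code == SYN_REPORT) {
			/* One packet of simultaneous changes is complete. */
			printf("packet boundary\n");
			continue;
		}
		if (ev.type == EV_SYN && ev.code == SYN_DROPPED) {
			/* Queue overran: discard events up to the next
			 * SYN_REPORT and re-read state via EVIOCG* ioctls. */
			printf("buffer overrun, resync needed\n");
			continue;
		}
		/* e.g. type=EV_KEY, code=KEY_A, value=1 on key press */
		printf("type %u code %u value %d\n",
		       (unsigned)ev.type, (unsigned)ev.code, (int)ev.value);
	}

	close(fd);
	return 0;
}

Real clients typically also query the supported event types and codes with the
EVIOCGBIT ioctl before deciding how to interpret a device.
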
diff --git a/MAINTAINERS b/MAINTAINERS
index 649600cb8ec9..ec3600306289 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -184,10 +184,9 @@ F: Documentation/filesystems/9p.txt
 F: fs/9p/
 
 A2232 SERIAL BOARD DRIVER
-M: Enver Haase
 L: linux-m68k@lists.linux-m68k.org
-S: Maintained
-F: drivers/char/ser_a2232*
+S: Orphan
+F: drivers/staging/generic_serial/ser_a2232*
 
 AACRAID SCSI RAID DRIVER
 M: Adaptec OEM Raid Solutions
@@ -877,6 +876,13 @@ F: arch/arm/mach-mv78xx0/
 F: arch/arm/mach-orion5x/
 F: arch/arm/plat-orion/
 
+ARM/Orion SoC/Technologic Systems TS-78xx platform support
+M: Alexander Clouter
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+W: http://www.digriz.org.uk/ts78xx/kernel
+S: Maintained
+F: arch/arm/mach-orion5x/ts78xx-*
+
 ARM/MIOA701 MACHINE SUPPORT
 M: Robert Jarzmik
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1063,7 +1069,7 @@ F: arch/arm/mach-shmobile/
 F: drivers/sh/
 
 ARM/TELECHIPS ARM ARCHITECTURE
-M: "Hans J. Koch"
+M: "Hans J. Koch"
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 F: arch/arm/plat-tcc/
@@ -1823,11 +1829,10 @@ S: Maintained
 F: drivers/platform/x86/compal-laptop.c
 
 COMPUTONE INTELLIPORT MULTIPORT CARD
-M: "Michael H.
Warfield" W: http://www.wittsend.com/computone.html -S: Maintained +S: Orphan F: Documentation/serial/computone.txt -F: drivers/char/ip2/ +F: drivers/staging/tty/ip2/ CONEXANT ACCESSRUNNER USB DRIVER M: Simon Arlott @@ -2010,7 +2015,7 @@ F: drivers/net/wan/cycx* CYCLADES ASYNC MUX DRIVER W: http://www.cyclades.com/ S: Orphan -F: drivers/char/cyclades.c +F: drivers/tty/cyclades.c F: include/linux/cyclades.h CYCLADES PC300 DRIVER @@ -2124,8 +2129,8 @@ L: Eng.Linux@digi.com W: http://www.digi.com S: Orphan F: Documentation/serial/digiepca.txt -F: drivers/char/epca* -F: drivers/char/digi* +F: drivers/staging/tty/epca* +F: drivers/staging/tty/digi* DIOLAN U2C-12 I2C DRIVER M: Guenter Roeck @@ -4077,7 +4082,7 @@ F: drivers/video/matrox/matroxfb_* F: include/linux/matroxfb.h MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER -M: "Hans J. Koch" +M: "Hans J. Koch" L: lm-sensors@lm-sensors.org S: Maintained F: Documentation/hwmon/max6650 @@ -4192,7 +4197,7 @@ MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD M: Jiri Slaby S: Maintained F: Documentation/serial/moxa-smartio -F: drivers/char/mxser.* +F: drivers/tty/mxser.* MSI LAPTOP SUPPORT M: "Lee, Chun-Yi" @@ -4234,7 +4239,7 @@ F: sound/oss/msnd* MULTITECH MULTIPORT CARD (ISICOM) S: Orphan -F: drivers/char/isicom.c +F: drivers/tty/isicom.c F: include/linux/isicom.h MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER @@ -5273,14 +5278,14 @@ F: drivers/memstick/host/r592.* RISCOM8 DRIVER S: Orphan F: Documentation/serial/riscom8.txt -F: drivers/char/riscom8* +F: drivers/staging/tty/riscom8* ROCKETPORT DRIVER P: Comtrol Corp. W: http://www.comtrol.com S: Maintained F: Documentation/serial/rocket.txt -F: drivers/char/rocket* +F: drivers/tty/rocket* ROSE NETWORK LAYER M: Ralf Baechle @@ -5916,10 +5921,9 @@ F: arch/arm/mach-spear6xx/spear600.c F: arch/arm/mach-spear6xx/spear600_evb.c SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER -M: Roger Wolff -S: Supported +S: Orphan F: Documentation/serial/specialix.txt -F: drivers/char/specialix* +F: drivers/staging/tty/specialix* SPI SUBSYSTEM M: David Brownell @@ -5964,7 +5968,6 @@ F: arch/alpha/kernel/srm_env.c STABLE BRANCH M: Greg Kroah-Hartman -M: Chris Wright L: stable@kernel.org S: Maintained @@ -6248,7 +6251,8 @@ M: Greg Ungerer W: http://www.uclinux.org/ L: uclinux-dev@uclinux.org (subscribers-only) S: Maintained -F: arch/m68knommu/ +F: arch/m68k/*/*_no.* +F: arch/m68k/include/asm/*_no.* UCLINUX FOR RENESAS H8/300 (H8300) M: Yoshinori Sato @@ -6618,7 +6622,7 @@ F: fs/hostfs/ F: fs/hppfs/ USERSPACE I/O (UIO) -M: "Hans J. Koch" +M: "Hans J. 
Koch" M: Greg Kroah-Hartman S: Maintained F: Documentation/DocBook/uio-howto.tmpl diff --git a/Makefile b/Makefile index 322e7334ccb9..b967b967572b 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 39 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc4 NAME = Flesh-Eating Bats with Fangs # *DOCUMENTATION* diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile index 9bb7b858ed23..7a6d908bb865 100644 --- a/arch/alpha/kernel/Makefile +++ b/arch/alpha/kernel/Makefile @@ -4,7 +4,7 @@ extra-y := head.o vmlinux.lds asflags-y := $(KBUILD_CFLAGS) -ccflags-y := -Werror -Wno-sign-compare +ccflags-y := -Wno-sign-compare obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \ irq_alpha.o signal.o setup.o ptrace.o time.o \ diff --git a/arch/alpha/kernel/core_mcpcia.c b/arch/alpha/kernel/core_mcpcia.c index 381fec0af52e..da7bcc372f16 100644 --- a/arch/alpha/kernel/core_mcpcia.c +++ b/arch/alpha/kernel/core_mcpcia.c @@ -88,7 +88,7 @@ conf_read(unsigned long addr, unsigned char type1, { unsigned long flags; unsigned long mid = MCPCIA_HOSE2MID(hose->index); - unsigned int stat0, value, temp, cpu; + unsigned int stat0, value, cpu; cpu = smp_processor_id(); @@ -101,7 +101,7 @@ conf_read(unsigned long addr, unsigned char type1, stat0 = *(vuip)MCPCIA_CAP_ERR(mid); *(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb(); - temp = *(vuip)MCPCIA_CAP_ERR(mid); + *(vuip)MCPCIA_CAP_ERR(mid); DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%d) was 0x%x\n", mid, stat0)); mb(); @@ -136,7 +136,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1, { unsigned long flags; unsigned long mid = MCPCIA_HOSE2MID(hose->index); - unsigned int stat0, temp, cpu; + unsigned int stat0, cpu; cpu = smp_processor_id(); @@ -145,7 +145,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1, /* Reset status register to avoid losing errors. 
*/ stat0 = *(vuip)MCPCIA_CAP_ERR(mid); *(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb(); - temp = *(vuip)MCPCIA_CAP_ERR(mid); + *(vuip)MCPCIA_CAP_ERR(mid); DBG_CFG(("conf_write: MCPCIA CAP_ERR(%d) was 0x%x\n", mid, stat0)); draina(); @@ -157,7 +157,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1, *((vuip)addr) = value; mb(); mb(); /* magic */ - temp = *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */ + *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */ mcheck_expected(cpu) = 0; mb(); @@ -572,12 +572,10 @@ mcpcia_print_system_area(unsigned long la_ptr) void mcpcia_machine_check(unsigned long vector, unsigned long la_ptr) { - struct el_common *mchk_header; struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout; unsigned int cpu = smp_processor_id(); int expected; - mchk_header = (struct el_common *)la_ptr; mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr; expected = mcheck_expected(cpu); diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c index c3b3781a03de..14b26c466c89 100644 --- a/arch/alpha/kernel/err_titan.c +++ b/arch/alpha/kernel/err_titan.c @@ -533,8 +533,6 @@ static struct el_subpacket_annotation el_titan_annotations[] = { static struct el_subpacket * el_process_regatta_subpacket(struct el_subpacket *header) { - int status; - if (header->class != EL_CLASS__REGATTA_FAMILY) { printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n", err_print_prefix, @@ -551,7 +549,7 @@ el_process_regatta_subpacket(struct el_subpacket *header) printk("%s ** Occurred on CPU %d:\n", err_print_prefix, (int)header->by_type.regatta_frame.cpuid); - status = privateer_process_logout_frame((struct el_common *) + privateer_process_logout_frame((struct el_common *) header->by_type.regatta_frame.data_start, 1); break; default: diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c index 1479dc6ebd97..51b7fbd9e4c1 100644 --- a/arch/alpha/kernel/irq_alpha.c +++ b/arch/alpha/kernel/irq_alpha.c @@ -228,7 +228,7 @@ struct irqaction timer_irqaction = { void __init init_rtc_irq(void) { - irq_set_chip_and_handler_name(RTC_IRQ, &no_irq_chip, + irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip, handle_simple_irq, "RTC"); setup_irq(RTC_IRQ, &timer_irqaction); } diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index d2634e4476b4..edbddcbd5bc6 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c @@ -1404,8 +1404,6 @@ determine_cpu_caches (unsigned int cpu_type) case PCA56_CPU: case PCA57_CPU: { - unsigned long cbox_config, size; - if (cpu_type == PCA56_CPU) { L1I = CSHAPE(16*1024, 6, 1); L1D = CSHAPE(8*1024, 5, 1); @@ -1415,10 +1413,12 @@ determine_cpu_caches (unsigned int cpu_type) } L3 = -1; +#if 0 + unsigned long cbox_config, size; + cbox_config = *(vulp) phys_to_virt (0xfffff00008UL); size = 512*1024 * (1 << ((cbox_config >> 12) & 3)); -#if 0 L2 = ((cbox_config >> 31) & 1 ? 
CSHAPE (size, 6, 1) : -1); #else L2 = external_cache_probe(512*1024, 6); diff --git a/arch/alpha/kernel/smc37c93x.c b/arch/alpha/kernel/smc37c93x.c index 3e6a2893af9f..6886b834f487 100644 --- a/arch/alpha/kernel/smc37c93x.c +++ b/arch/alpha/kernel/smc37c93x.c @@ -79,7 +79,6 @@ static unsigned long __init SMCConfigState(unsigned long baseAddr) { unsigned char devId; - unsigned char devRev; unsigned long configPort; unsigned long indexPort; @@ -100,7 +99,7 @@ static unsigned long __init SMCConfigState(unsigned long baseAddr) devId = inb(dataPort); if (devId == VALID_DEVICE_ID) { outb(DEVICE_REV, indexPort); - devRev = inb(dataPort); + /* unsigned char devRev = */ inb(dataPort); break; } else diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c index d3cb28bb8eb0..d92cdc715c65 100644 --- a/arch/alpha/kernel/sys_wildfire.c +++ b/arch/alpha/kernel/sys_wildfire.c @@ -156,7 +156,6 @@ static void __init wildfire_init_irq_per_pca(int qbbno, int pcano) { int i, irq_bias; - unsigned long io_bias; static struct irqaction isa_enable = { .handler = no_action, .name = "isa_enable", @@ -165,10 +164,12 @@ wildfire_init_irq_per_pca(int qbbno, int pcano) irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA) + pcano * WILDFIRE_IRQ_PER_PCA; +#if 0 + unsigned long io_bias; + /* Only need the following for first PCI bus per PCA. */ io_bias = WILDFIRE_IO(qbbno, pcano<<1) - WILDFIRE_IO_BIAS; -#if 0 outb(0, DMA1_RESET_REG + io_bias); outb(0, DMA2_RESET_REG + io_bias); outb(DMA_MODE_CASCADE, DMA2_MODE_REG + io_bias); diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index a58e84f1a63b..918e8e0b72ff 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c @@ -153,6 +153,7 @@ void read_persistent_clock(struct timespec *ts) year += 100; ts->tv_sec = mktime(year, mon, day, hour, min, sec); + ts->tv_nsec = 0; } diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index fdc9d4dbf85b..377a7a595b08 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1540,7 +1540,6 @@ config HIGHMEM config HIGHPTE bool "Allocate 2nd-level pagetables from highmem" depends on HIGHMEM - depends on !OUTER_CACHE config HW_PERF_EVENTS bool "Enable hardware performance counter support for perf events" @@ -2012,6 +2011,8 @@ source "kernel/power/Kconfig" config ARCH_SUSPEND_POSSIBLE depends on !ARCH_S5P64X0 && !ARCH_S5P6442 + depends on CPU_ARM920T || CPU_ARM926T || CPU_SA1100 || \ + CPU_V6 || CPU_V6K || CPU_V7 || CPU_XSC3 || CPU_XSCALE def_bool y endmenu diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index 494224a9b459..03d01d783e3b 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -63,17 +63,6 @@ config DEBUG_USER 8 - SIGSEGV faults 16 - SIGBUS faults -config DEBUG_ERRORS - bool "Verbose kernel error messages" - depends on DEBUG_KERNEL - help - This option controls verbose debugging information which can be - printed when the kernel detects an internal error. This debugging - information is useful to kernel hackers when tracking down problems, - but mostly meaningless to other people. It's safe to say Y unless - you are concerned with the code size or don't want to see these - messages. 
- config DEBUG_STACK_USAGE bool "Enable stack utilization instrumentation" depends on DEBUG_KERNEL diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile index e7521bca2c35..6ea9b6f3607a 100644 --- a/arch/arm/common/Makefile +++ b/arch/arm/common/Makefile @@ -16,5 +16,4 @@ obj-$(CONFIG_SHARP_SCOOP) += scoop.o obj-$(CONFIG_ARCH_IXP2000) += uengine.o obj-$(CONFIG_ARCH_IXP23XX) += uengine.o obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o -obj-$(CONFIG_COMMON_CLKDEV) += clkdev.o obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o diff --git a/arch/arm/include/asm/thread_notify.h b/arch/arm/include/asm/thread_notify.h index c4391ba20350..1dc980675894 100644 --- a/arch/arm/include/asm/thread_notify.h +++ b/arch/arm/include/asm/thread_notify.h @@ -43,6 +43,7 @@ static inline void thread_notify(unsigned long rc, struct thread_info *thread) #define THREAD_NOTIFY_FLUSH 0 #define THREAD_NOTIFY_EXIT 1 #define THREAD_NOTIFY_SWITCH 2 +#define THREAD_NOTIFY_COPY 3 #endif #endif diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 74554f1742d7..8d95446150a3 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -29,7 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o obj-$(CONFIG_ARTHUR) += arthur.o obj-$(CONFIG_ISA_DMA) += dma-isa.o obj-$(CONFIG_PCI) += bios32.o isa.o -obj-$(CONFIG_PM) += sleep.o +obj-$(CONFIG_PM_SLEEP) += sleep.o obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o obj-$(CONFIG_SMP) += smp.o smp_tlb.o obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c index d4a0da1e48f4..9b05c6a0dcea 100644 --- a/arch/arm/kernel/elf.c +++ b/arch/arm/kernel/elf.c @@ -40,15 +40,22 @@ EXPORT_SYMBOL(elf_check_arch); void elf_set_personality(const struct elf32_hdr *x) { unsigned int eflags = x->e_flags; - unsigned int personality = PER_LINUX_32BIT; + unsigned int personality = current->personality & ~PER_MASK; + + /* + * We only support Linux ELF executables, so always set the + * personality to LINUX. + */ + personality |= PER_LINUX; /* * APCS-26 is only valid for OABI executables */ - if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) { - if (eflags & EF_ARM_APCS_26) - personality = PER_LINUX; - } + if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN && + (eflags & EF_ARM_APCS_26)) + personality &= ~ADDR_LIMIT_32BIT; + else + personality |= ADDR_LIMIT_32BIT; set_personality(personality); diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 8dbc126f7152..87acc25d7a3e 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -868,6 +868,13 @@ static void reset_ctrl_regs(void *info) */ asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0)); isb(); + + /* + * Clear any configured vector-catch events before + * enabling monitor mode. 
+ */ + asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0)); + isb(); } if (enable_monitor_mode()) diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 69cfee0fe00f..979da3947f42 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -221,7 +221,7 @@ again: prev_raw_count &= armpmu->max_period; if (overflow) - delta = armpmu->max_period - prev_raw_count + new_raw_count; + delta = armpmu->max_period - prev_raw_count + new_raw_count + 1; else delta = new_raw_count - prev_raw_count; diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 94bbedbed639..5e1e54197227 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -372,6 +372,8 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, if (clone_flags & CLONE_SETTLS) thread->tp_value = regs->ARM_r3; + thread_notify(THREAD_NOTIFY_COPY, thread); + return 0; } diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index f0000e188c8c..3b54ad19d489 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -410,8 +410,7 @@ static int bad_syscall(int n, struct pt_regs *regs) struct thread_info *thread = current_thread_info(); siginfo_t info; - if (current->personality != PER_LINUX && - current->personality != PER_LINUX_32BIT && + if ((current->personality & PER_MASK) != PER_LINUX && thread->exec_domain->handler) { thread->exec_domain->handler(n, regs); return regs->ARM_r0; diff --git a/arch/arm/mach-mmp/include/mach/gpio.h b/arch/arm/mach-mmp/include/mach/gpio.h index ee8b02ed8011..7bfb827f3fe3 100644 --- a/arch/arm/mach-mmp/include/mach/gpio.h +++ b/arch/arm/mach-mmp/include/mach/gpio.h @@ -10,7 +10,7 @@ #define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2)) #define GPIO_REG(x) (*((volatile u32 *)(GPIO_REGS_VIRT + (x)))) -#define NR_BUILTIN_GPIO (192) +#define NR_BUILTIN_GPIO IRQ_GPIO_NUM #define gpio_to_bank(gpio) ((gpio) >> 5) #define gpio_to_irq(gpio) (IRQ_GPIO_START + (gpio)) diff --git a/arch/arm/mach-mmp/include/mach/mfp-pxa168.h b/arch/arm/mach-mmp/include/mach/mfp-pxa168.h index 4621067c7720..713be155a44d 100644 --- a/arch/arm/mach-mmp/include/mach/mfp-pxa168.h +++ b/arch/arm/mach-mmp/include/mach/mfp-pxa168.h @@ -8,6 +8,15 @@ #define MFP_DRIVE_MEDIUM (0x2 << 13) #define MFP_DRIVE_FAST (0x3 << 13) +#undef MFP_CFG +#undef MFP_CFG_DRV + +#define MFP_CFG(pin, af) \ + (MFP_LPM_INPUT | MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_DRIVE_MEDIUM) + +#define MFP_CFG_DRV(pin, af, drv) \ + (MFP_LPM_INPUT | MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_DRIVE_##drv) + /* GPIO */ #define GPIO0_GPIO MFP_CFG(GPIO0, AF5) #define GPIO1_GPIO MFP_CFG(GPIO1, AF5) diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c index 7f568611547e..6a96911b0ad5 100644 --- a/arch/arm/mach-msm/board-qsd8x50.c +++ b/arch/arm/mach-msm/board-qsd8x50.c @@ -160,10 +160,7 @@ static struct msm_mmc_platform_data qsd8x50_sdc1_data = { static void __init qsd8x50_init_mmc(void) { - if (machine_is_qsd8x50_ffa() || machine_is_qsd8x50a_ffa()) - vreg_mmc = vreg_get(NULL, "gp6"); - else - vreg_mmc = vreg_get(NULL, "gp5"); + vreg_mmc = vreg_get(NULL, "gp5"); if (IS_ERR(vreg_mmc)) { pr_err("vreg get for vreg_mmc failed (%ld)\n", diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index 56f920c55b6a..38b95e949d13 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c @@ -269,7 +269,7 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt) /* Use existing clock_event for cpu 0 */ if 
(!smp_processor_id()) - return; + return 0; writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL); diff --git a/arch/arm/mach-pxa/include/mach/gpio.h b/arch/arm/mach-pxa/include/mach/gpio.h index b024a8b37439..c4639502efca 100644 --- a/arch/arm/mach-pxa/include/mach/gpio.h +++ b/arch/arm/mach-pxa/include/mach/gpio.h @@ -99,11 +99,24 @@ #define GAFR(x) GPIO_REG(0x54 + (((x) & 0x70) >> 2)) -#define NR_BUILTIN_GPIO 128 +#define NR_BUILTIN_GPIO PXA_GPIO_IRQ_NUM #define gpio_to_bank(gpio) ((gpio) >> 5) #define gpio_to_irq(gpio) IRQ_GPIO(gpio) -#define irq_to_gpio(irq) IRQ_TO_GPIO(irq) + +static inline int irq_to_gpio(unsigned int irq) +{ + int gpio; + + if (irq == IRQ_GPIO0 || irq == IRQ_GPIO1) + return irq - IRQ_GPIO0; + + gpio = irq - PXA_GPIO_IRQ_BASE; + if (gpio >= 2 && gpio < NR_BUILTIN_GPIO) + return gpio; + + return -1; +} #ifdef CONFIG_CPU_PXA26x /* GPIO86/87/88/89 on PXA26x have their direction bits in GPDR2 inverted, diff --git a/arch/arm/mach-pxa/include/mach/irqs.h b/arch/arm/mach-pxa/include/mach/irqs.h index a4285fc00878..038402404e39 100644 --- a/arch/arm/mach-pxa/include/mach/irqs.h +++ b/arch/arm/mach-pxa/include/mach/irqs.h @@ -93,9 +93,6 @@ #define GPIO_2_x_TO_IRQ(x) (PXA_GPIO_IRQ_BASE + (x)) #define IRQ_GPIO(x) (((x) < 2) ? (IRQ_GPIO0 + (x)) : GPIO_2_x_TO_IRQ(x)) -#define IRQ_TO_GPIO_2_x(i) ((i) - PXA_GPIO_IRQ_BASE) -#define IRQ_TO_GPIO(i) (((i) < IRQ_GPIO(2)) ? ((i) - IRQ_GPIO0) : IRQ_TO_GPIO_2_x(i)) - /* * The following interrupts are for board specific purposes. Since * the kernel can only run on one machine at a time, we can re-use diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c index 6bde5956358d..a4af8c52d7ee 100644 --- a/arch/arm/mach-pxa/pxa25x.c +++ b/arch/arm/mach-pxa/pxa25x.c @@ -285,7 +285,7 @@ static inline void pxa25x_init_pm(void) {} static int pxa25x_set_wake(struct irq_data *d, unsigned int on) { - int gpio = IRQ_TO_GPIO(d->irq); + int gpio = irq_to_gpio(d->irq); uint32_t mask = 0; if (gpio >= 0 && gpio < 85) diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c index 1cb5d0f9723f..909756eaf4b7 100644 --- a/arch/arm/mach-pxa/pxa27x.c +++ b/arch/arm/mach-pxa/pxa27x.c @@ -345,7 +345,7 @@ static inline void pxa27x_init_pm(void) {} */ static int pxa27x_set_wake(struct irq_data *d, unsigned int on) { - int gpio = IRQ_TO_GPIO(d->irq); + int gpio = irq_to_gpio(d->irq); uint32_t mask; if (gpio >= 0 && gpio < 128) diff --git a/arch/arm/mach-tegra/gpio.c b/arch/arm/mach-tegra/gpio.c index 76a3f654220f..65a1aba6823d 100644 --- a/arch/arm/mach-tegra/gpio.c +++ b/arch/arm/mach-tegra/gpio.c @@ -257,7 +257,8 @@ static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) void tegra_gpio_resume(void) { unsigned long flags; - int b, p, i; + int b; + int p; local_irq_save(flags); @@ -280,7 +281,8 @@ void tegra_gpio_resume(void) void tegra_gpio_suspend(void) { unsigned long flags; - int b, p, i; + int b; + int p; local_irq_save(flags); for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) { diff --git a/arch/arm/mach-tegra/tegra2_clocks.c b/arch/arm/mach-tegra/tegra2_clocks.c index 6d7c4eea4dcb..4459470c052d 100644 --- a/arch/arm/mach-tegra/tegra2_clocks.c +++ b/arch/arm/mach-tegra/tegra2_clocks.c @@ -1362,14 +1362,15 @@ static int tegra_clk_shared_bus_set_rate(struct clk *c, unsigned long rate) { unsigned long flags; int ret; + long new_rate = rate; - rate = clk_round_rate(c->parent, rate); - if (rate < 0) - return rate; + new_rate = clk_round_rate(c->parent, new_rate); + if (new_rate < 0) + return new_rate; 
spin_lock_irqsave(&c->parent->spinlock, flags); - c->u.shared_bus_user.rate = rate; + c->u.shared_bus_user.rate = new_rate; ret = tegra_clk_shared_bus_update(c->parent); spin_unlock_irqrestore(&c->parent->spinlock, flags); diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index afe209e1e1f8..74be05f3e03a 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -82,7 +83,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, mm->cached_hole_size = 0; } /* 8 bits of randomness in 20 address space bits */ - if (current->flags & PF_RANDOMIZE) + if ((current->flags & PF_RANDOMIZE) && + !(current->personality & ADDR_NO_RANDOMIZE)) addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT; full_search: diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index b46eb21f05c7..bf8a1d1cccb6 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -390,7 +390,7 @@ ENTRY(cpu_arm920_set_pte_ext) /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ .globl cpu_arm920_suspend_size .equ cpu_arm920_suspend_size, 4 * 3 -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_arm920_do_suspend) stmfd sp!, {r4 - r7, lr} mrc p15, 0, r4, c13, c0, 0 @ PID diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 6a4bdb2c94a7..0ed85d930c09 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -404,7 +404,7 @@ ENTRY(cpu_arm926_set_pte_ext) /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ .globl cpu_arm926_suspend_size .equ cpu_arm926_suspend_size, 4 * 3 -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_arm926_do_suspend) stmfd sp!, {r4 - r7, lr} mrc p15, 0, r4, c13, c0, 0 @ PID diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 74483d1977fe..184a9c997e36 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -171,7 +171,7 @@ ENTRY(cpu_sa1100_set_pte_ext) .globl cpu_sa1100_suspend_size .equ cpu_sa1100_suspend_size, 4*4 -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_sa1100_do_suspend) stmfd sp!, {r4 - r7, lr} mrc p15, 0, r4, c3, c0, 0 @ domain ID diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index bfa0c9f611c5..7c99cb4c8e4f 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -124,7 +124,7 @@ ENTRY(cpu_v6_set_pte_ext) /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ .globl cpu_v6_suspend_size .equ cpu_v6_suspend_size, 4 * 8 -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_v6_do_suspend) stmfd sp!, {r4 - r11, lr} mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index c35618e42f6f..babfba09c89f 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -211,7 +211,7 @@ cpu_v7_name: /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ .globl cpu_v7_suspend_size .equ cpu_v7_suspend_size, 4 * 8 -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_v7_do_suspend) stmfd sp!, {r4 - r11, lr} mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 63d8b2044e84..596213699f37 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -417,7 +417,7 @@ ENTRY(cpu_xsc3_set_pte_ext) .globl cpu_xsc3_suspend_size .equ cpu_xsc3_suspend_size, 4 * 8 -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_xsc3_do_suspend) stmfd sp!, {r4 - r10, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode diff --git 
a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 086038cd86ab..ce233bcbf506 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -518,7 +518,7 @@ ENTRY(cpu_xscale_set_pte_ext) .globl cpu_xscale_suspend_size .equ cpu_xscale_suspend_size, 4 * 7 -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_xscale_do_suspend) stmfd sp!, {r4 - r10, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode diff --git a/arch/arm/plat-s5p/pm.c b/arch/arm/plat-s5p/pm.c index d592b6304b48..d15dc47b0e3d 100644 --- a/arch/arm/plat-s5p/pm.c +++ b/arch/arm/plat-s5p/pm.c @@ -19,17 +19,6 @@ #define PFX "s5p pm: " -/* s3c_pm_check_resume_pin - * - * check to see if the pin is configured correctly for sleep mode, and - * make any necessary adjustments if it is not -*/ - -static void s3c_pm_check_resume_pin(unsigned int pin, unsigned int irqoffs) -{ - /* nothing here yet */ -} - /* s3c_pm_configure_extint * * configure all external interrupt pins diff --git a/arch/arm/plat-samsung/pm-check.c b/arch/arm/plat-samsung/pm-check.c index e4baf76f374a..6b733fafe7cd 100644 --- a/arch/arm/plat-samsung/pm-check.c +++ b/arch/arm/plat-samsung/pm-check.c @@ -164,7 +164,6 @@ static inline int in_region(void *ptr, int size, void *what, size_t whatsz) */ static u32 *s3c_pm_runcheck(struct resource *res, u32 *val) { - void *save_at = phys_to_virt(s3c_sleep_save_phys); unsigned long addr; unsigned long left; void *stkpage; @@ -192,11 +191,6 @@ static u32 *s3c_pm_runcheck(struct resource *res, u32 *val) goto skip_check; } - if (in_region(ptr, left, save_at, 32*4 )) { - S3C_PMDBG("skipping %08lx, has save block in\n", addr); - goto skip_check; - } - /* calculate and check the checksum */ calc = crc32_le(~0, ptr, left); diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c index d5b58d31903c..5c0a440d6e16 100644 --- a/arch/arm/plat-samsung/pm.c +++ b/arch/arm/plat-samsung/pm.c @@ -214,8 +214,9 @@ void s3c_pm_do_restore_core(struct sleep_save *ptr, int count) * * print any IRQs asserted at resume time (ie, we woke from) */ -static void s3c_pm_show_resume_irqs(int start, unsigned long which, - unsigned long mask) +static void __maybe_unused s3c_pm_show_resume_irqs(int start, + unsigned long which, + unsigned long mask) { int i; diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index bbf3da012afd..f74695075e64 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -78,6 +78,14 @@ static void vfp_thread_exit(struct thread_info *thread) put_cpu(); } +static void vfp_thread_copy(struct thread_info *thread) +{ + struct thread_info *parent = current_thread_info(); + + vfp_sync_hwstate(parent); + thread->vfpstate = parent->vfpstate; +} + /* * When this function is called with the following 'cmd's, the following * is true while this function is being run: @@ -104,12 +112,17 @@ static void vfp_thread_exit(struct thread_info *thread) static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) { struct thread_info *thread = v; + u32 fpexc; +#ifdef CONFIG_SMP + unsigned int cpu; +#endif - if (likely(cmd == THREAD_NOTIFY_SWITCH)) { - u32 fpexc = fmrx(FPEXC); + switch (cmd) { + case THREAD_NOTIFY_SWITCH: + fpexc = fmrx(FPEXC); #ifdef CONFIG_SMP - unsigned int cpu = thread->cpu; + cpu = thread->cpu; /* * On SMP, if VFP is enabled, save the old state in @@ -134,13 +147,20 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) * old state. 
*/ fmxr(FPEXC, fpexc & ~FPEXC_EN); - return NOTIFY_DONE; - } + break; - if (cmd == THREAD_NOTIFY_FLUSH) + case THREAD_NOTIFY_FLUSH: vfp_thread_flush(thread); - else + break; + + case THREAD_NOTIFY_EXIT: vfp_thread_exit(thread); + break; + + case THREAD_NOTIFY_COPY: + vfp_thread_copy(thread); + break; + } return NOTIFY_DONE; } diff --git a/arch/avr32/include/asm/setup.h b/arch/avr32/include/asm/setup.h index ff5b7cf6be4d..160543dbec7e 100644 --- a/arch/avr32/include/asm/setup.h +++ b/arch/avr32/include/asm/setup.h @@ -94,6 +94,13 @@ struct tag_ethernet { #define ETH_INVALID_PHY 0xff +/* board information */ +#define ATAG_BOARDINFO 0x54410008 + +struct tag_boardinfo { + u32 board_number; +}; + struct tag { struct tag_header hdr; union { @@ -102,6 +109,7 @@ struct tag { struct tag_cmdline cmdline; struct tag_clock clock; struct tag_ethernet ethernet; + struct tag_boardinfo boardinfo; } u; }; @@ -128,6 +136,7 @@ extern struct tag *bootloader_tags; extern resource_size_t fbmem_start; extern resource_size_t fbmem_size; +extern u32 board_number; void setup_processor(void); diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c index 5c7083916c33..bb0974cce4ac 100644 --- a/arch/avr32/kernel/setup.c +++ b/arch/avr32/kernel/setup.c @@ -390,6 +390,21 @@ static int __init parse_tag_clock(struct tag *tag) } __tagtable(ATAG_CLOCK, parse_tag_clock); +/* + * The board_number correspond to the bd->bi_board_number in U-Boot. This + * parameter is only available during initialisation and can be used in some + * kind of board identification. + */ +u32 __initdata board_number; + +static int __init parse_tag_boardinfo(struct tag *tag) +{ + board_number = tag->u.boardinfo.board_number; + + return 0; +} +__tagtable(ATAG_BOARDINFO, parse_tag_boardinfo); + /* * Scan the tag table for this tag, and call its parse function. The * tag table is built by the linker from all the __tagtable diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c index b91b2044af9c..7aa25756412f 100644 --- a/arch/avr32/kernel/traps.c +++ b/arch/avr32/kernel/traps.c @@ -95,28 +95,6 @@ void _exception(long signr, struct pt_regs *regs, int code, info.si_code = code; info.si_addr = (void __user *)addr; force_sig_info(signr, &info, current); - - /* - * Init gets no signals that it doesn't have a handler for. - * That's all very well, but if it has caused a synchronous - * exception and we ignore the resulting signal, it will just - * generate the same exception over and over again and we get - * nowhere. Better to kill it and let the kernel panic. 
- */ - if (is_global_init(current)) { - __sighandler_t handler; - - spin_lock_irq(¤t->sighand->siglock); - handler = current->sighand->action[signr-1].sa.sa_handler; - spin_unlock_irq(¤t->sighand->siglock); - if (handler == SIG_DFL) { - /* init has generated a synchronous exception - and it doesn't have a handler for the signal */ - printk(KERN_CRIT "init has generated signal %ld " - "but has no handler for it\n", signr); - do_exit(signr); - } - } } asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs) diff --git a/arch/avr32/mach-at32ap/clock.c b/arch/avr32/mach-at32ap/clock.c index 442f08c5e641..86925fd6ea5b 100644 --- a/arch/avr32/mach-at32ap/clock.c +++ b/arch/avr32/mach-at32ap/clock.c @@ -35,22 +35,30 @@ void at32_clk_register(struct clk *clk) spin_unlock(&clk_list_lock); } +static struct clk *__clk_get(struct device *dev, const char *id) +{ + struct clk *clk; + + list_for_each_entry(clk, &at32_clock_list, list) { + if (clk->dev == dev && strcmp(id, clk->name) == 0) { + return clk; + } + } + + return ERR_PTR(-ENOENT); +} + struct clk *clk_get(struct device *dev, const char *id) { struct clk *clk; spin_lock(&clk_list_lock); - - list_for_each_entry(clk, &at32_clock_list, list) { - if (clk->dev == dev && strcmp(id, clk->name) == 0) { - spin_unlock(&clk_list_lock); - return clk; - } - } - + clk = __clk_get(dev, id); spin_unlock(&clk_list_lock); - return ERR_PTR(-ENOENT); + + return clk; } + EXPORT_SYMBOL(clk_get); void clk_put(struct clk *clk) @@ -257,15 +265,15 @@ static int clk_show(struct seq_file *s, void *unused) spin_lock(&clk_list_lock); /* show clock tree as derived from the three oscillators */ - clk = clk_get(NULL, "osc32k"); + clk = __clk_get(NULL, "osc32k"); dump_clock(clk, &r); clk_put(clk); - clk = clk_get(NULL, "osc0"); + clk = __clk_get(NULL, "osc0"); dump_clock(clk, &r); clk_put(clk); - clk = clk_get(NULL, "osc1"); + clk = __clk_get(NULL, "osc1"); dump_clock(clk, &r); clk_put(clk); diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c index 47ba4b9b6db1..fbc2aeaebddb 100644 --- a/arch/avr32/mach-at32ap/extint.c +++ b/arch/avr32/mach-at32ap/extint.c @@ -61,34 +61,34 @@ struct eic { static struct eic *nmi_eic; static bool nmi_enabled; -static void eic_ack_irq(struct irq_chip *d) +static void eic_ack_irq(struct irq_data *d) { - struct eic *eic = irq_data_get_irq_chip_data(data); + struct eic *eic = irq_data_get_irq_chip_data(d); eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq)); } -static void eic_mask_irq(struct irq_chip *d) +static void eic_mask_irq(struct irq_data *d) { - struct eic *eic = irq_data_get_irq_chip_data(data); + struct eic *eic = irq_data_get_irq_chip_data(d); eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq)); } -static void eic_mask_ack_irq(struct irq_chip *d) +static void eic_mask_ack_irq(struct irq_data *d) { - struct eic *eic = irq_data_get_irq_chip_data(data); + struct eic *eic = irq_data_get_irq_chip_data(d); eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq)); eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq)); } -static void eic_unmask_irq(struct irq_chip *d) +static void eic_unmask_irq(struct irq_data *d) { - struct eic *eic = irq_data_get_irq_chip_data(data); + struct eic *eic = irq_data_get_irq_chip_data(d); eic_writel(eic, IER, 1 << (d->irq - eic->first_irq)); } -static int eic_set_irq_type(struct irq_chip *d, unsigned int flow_type) +static int eic_set_irq_type(struct irq_data *d, unsigned int flow_type) { - struct eic *eic = irq_data_get_irq_chip_data(data); + struct eic *eic = 
irq_data_get_irq_chip_data(d); unsigned int irq = d->irq; unsigned int i = irq - eic->first_irq; u32 mode, edge, level; @@ -191,7 +191,7 @@ static int __init eic_probe(struct platform_device *pdev) regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); int_irq = platform_get_irq(pdev, 0); - if (!regs || !int_irq) { + if (!regs || (int)int_irq <= 0) { dev_dbg(&pdev->dev, "missing regs and/or irq resource\n"); return -ENXIO; } diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c index f308e1ddc629..2e0aa853a4bc 100644 --- a/arch/avr32/mach-at32ap/pio.c +++ b/arch/avr32/mach-at32ap/pio.c @@ -257,7 +257,7 @@ static void gpio_irq_mask(struct irq_data *d) pio_writel(pio, IDR, 1 << (gpio & 0x1f)); } -static void gpio_irq_unmask(struct irq_data *d)) +static void gpio_irq_unmask(struct irq_data *d) { unsigned gpio = irq_to_gpio(d->irq); struct pio_device *pio = &pio_dev[gpio >> 5]; diff --git a/arch/avr32/mach-at32ap/pm-at32ap700x.S b/arch/avr32/mach-at32ap/pm-at32ap700x.S index 17503b0ed6c9..f868f4ce761b 100644 --- a/arch/avr32/mach-at32ap/pm-at32ap700x.S +++ b/arch/avr32/mach-at32ap/pm-at32ap700x.S @@ -53,7 +53,7 @@ cpu_enter_idle: st.w r8[TI_flags], r9 unmask_interrupts sleep CPU_SLEEP_IDLE - .size cpu_idle_sleep, . - cpu_idle_sleep + .size cpu_enter_idle, . - cpu_enter_idle /* * Common return path for PM functions that don't run from diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h index 19e2c7c3e63a..44bd0cced725 100644 --- a/arch/blackfin/include/asm/system.h +++ b/arch/blackfin/include/asm/system.h @@ -19,11 +19,11 @@ * Force strict CPU ordering. */ #define nop() __asm__ __volatile__ ("nop;\n\t" : : ) -#define mb() __asm__ __volatile__ ("" : : : "memory") -#define rmb() __asm__ __volatile__ ("" : : : "memory") -#define wmb() __asm__ __volatile__ ("" : : : "memory") -#define set_mb(var, value) do { (void) xchg(&var, value); } while (0) -#define read_barrier_depends() do { } while(0) +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() +#define set_mb(var, value) do { var = value; mb(); } while (0) +#define smp_read_barrier_depends() read_barrier_depends() #ifdef CONFIG_SMP asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value); @@ -37,16 +37,16 @@ asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr, unsigned long new, unsigned long old); #ifdef __ARCH_SYNC_CORE_DCACHE -# define smp_mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0) -# define smp_rmb() do { barrier(); smp_check_barrier(); } while (0) -# define smp_wmb() do { barrier(); smp_mark_barrier(); } while (0) -#define smp_read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0) - +/* Force Core data cache coherence */ +# define mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0) +# define rmb() do { barrier(); smp_check_barrier(); } while (0) +# define wmb() do { barrier(); smp_mark_barrier(); } while (0) +# define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0) #else -# define smp_mb() barrier() -# define smp_rmb() barrier() -# define smp_wmb() barrier() -#define smp_read_barrier_depends() barrier() +# define mb() barrier() +# define rmb() barrier() +# define wmb() barrier() +# define read_barrier_depends() do { } while (0) #endif static inline unsigned long __xchg(unsigned long x, volatile void *ptr, @@ -99,10 +99,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, #else /* !CONFIG_SMP */ -#define smp_mb() 
barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while(0) +#define mb() barrier() +#define rmb() barrier() +#define wmb() barrier() +#define read_barrier_depends() do { } while (0) struct __xchg_dummy { unsigned long a[100]; diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c index cdbe075de1dc..8b81dc04488a 100644 --- a/arch/blackfin/kernel/gptimers.c +++ b/arch/blackfin/kernel/gptimers.c @@ -268,7 +268,7 @@ void disable_gptimers(uint16_t mask) _disable_gptimers(mask); for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i) if (mask & (1 << i)) - group_regs[BFIN_TIMER_OCTET(i)]->status |= trun_mask[i]; + group_regs[BFIN_TIMER_OCTET(i)]->status = trun_mask[i]; SSYNC(); } EXPORT_SYMBOL(disable_gptimers); diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c index 8c9a43daf80f..cdb4beb6bc8f 100644 --- a/arch/blackfin/kernel/time-ts.c +++ b/arch/blackfin/kernel/time-ts.c @@ -206,8 +206,14 @@ irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id) { struct clock_event_device *evt = dev_id; smp_mb(); - evt->event_handler(evt); + /* + * We want to ACK before we handle so that we can handle smaller timer + * intervals. This way if the timer expires again while we're handling + * things, we're more likely to see that 2nd int rather than swallowing + * it by ACKing the int at the end of this handler. + */ bfin_gptmr0_ack(); + evt->event_handler(evt); return IRQ_HANDLED; } diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c index 6e17a265c4d3..8bce5ed031e4 100644 --- a/arch/blackfin/mach-common/smp.c +++ b/arch/blackfin/mach-common/smp.c @@ -109,10 +109,23 @@ static void ipi_flush_icache(void *info) struct blackfin_flush_data *fdata = info; /* Invalidate the memory holding the bounds of the flushed region. */ - invalidate_dcache_range((unsigned long)fdata, - (unsigned long)fdata + sizeof(*fdata)); + blackfin_dcache_invalidate_range((unsigned long)fdata, + (unsigned long)fdata + sizeof(*fdata)); - flush_icache_range(fdata->start, fdata->end); + /* Make sure all write buffers in the data side of the core + * are flushed before trying to invalidate the icache. This + * needs to be after the data flush and before the icache + * flush so that the SSYNC does the right thing in preventing + * the instruction prefetcher from hitting things in cached + * memory at the wrong time -- it runs much further ahead than + * the pipeline. + */ + SSYNC(); + + /* ipi_flaush_icache is invoked by generic flush_icache_range, + * so call blackfin arch icache flush directly here. 
+ */ + blackfin_icache_flush_range(fdata->start, fdata->end); } static void ipi_call_function(unsigned int cpu, struct ipi_message *msg) diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 26d851d385bb..29e17907d9f2 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -343,10 +343,14 @@ #define __NR_fanotify_init 337 #define __NR_fanotify_mark 338 #define __NR_prlimit64 339 +#define __NR_name_to_handle_at 340 +#define __NR_open_by_handle_at 341 +#define __NR_clock_adjtime 342 +#define __NR_syncfs 343 #ifdef __KERNEL__ -#define NR_syscalls 340 +#define NR_syscalls 344 #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR diff --git a/arch/m68k/kernel/entry_mm.S b/arch/m68k/kernel/entry_mm.S index 1559dea36e55..1359ee659574 100644 --- a/arch/m68k/kernel/entry_mm.S +++ b/arch/m68k/kernel/entry_mm.S @@ -750,4 +750,8 @@ sys_call_table: .long sys_fanotify_init .long sys_fanotify_mark .long sys_prlimit64 + .long sys_name_to_handle_at /* 340 */ + .long sys_open_by_handle_at + .long sys_clock_adjtime + .long sys_syncfs diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index 79b1ed198c07..9b8393d8adb8 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S @@ -358,6 +358,10 @@ ENTRY(sys_call_table) .long sys_fanotify_init .long sys_fanotify_mark .long sys_prlimit64 + .long sys_name_to_handle_at /* 340 */ + .long sys_open_by_handle_at + .long sys_clock_adjtime + .long sys_syncfs .rept NR_syscalls-(.-sys_call_table)/4 .long sys_ni_syscall diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 851b3bf6e962..eccdefe70d4e 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -6,7 +6,6 @@ config MICROBLAZE select HAVE_FUNCTION_GRAPH_TRACER select HAVE_DYNAMIC_FTRACE select HAVE_FTRACE_MCOUNT_RECORD - select USB_ARCH_HAS_EHCI select ARCH_WANT_OPTIONAL_GPIOLIB select HAVE_OPROFILE select HAVE_ARCH_KGDB diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index b6ff882f695b..8f4d50b0adfa 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -209,7 +209,7 @@ config ARCH_HIBERNATION_POSSIBLE config ARCH_SUSPEND_POSSIBLE def_bool y depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \ - PPC_85xx || PPC_86xx || PPC_PSERIES || 44x || 40x + (PPC_85xx && !SMP) || PPC_86xx || PPC_PSERIES || 44x || 40x config PPC_DCR_NATIVE bool diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index be3cdf9134ce..1833d1a07e79 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -382,10 +382,12 @@ extern const char *powerpc_base_platform; #define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) -#define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ - CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \ +#define CPU_FTRS_E500MC (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ CPU_FTR_DBELL) +#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ + CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ + CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD) #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) /* 64-bit CPUs */ @@ -435,11 +437,15 @@ extern const char *powerpc_base_platform; #define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2) #ifdef __powerpc64__ +#ifdef 
CONFIG_PPC_BOOK3E +#define CPU_FTRS_POSSIBLE (CPU_FTRS_E5500) +#else #define CPU_FTRS_POSSIBLE \ (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \ CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \ CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \ CPU_FTR_1T_SEGMENT | CPU_FTR_VSX) +#endif #else enum { CPU_FTRS_POSSIBLE = @@ -473,16 +479,21 @@ enum { #endif #ifdef CONFIG_E500 CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC | + CPU_FTRS_E5500 | #endif 0, }; #endif /* __powerpc64__ */ #ifdef __powerpc64__ +#ifdef CONFIG_PPC_BOOK3E +#define CPU_FTRS_ALWAYS (CPU_FTRS_E5500) +#else #define CPU_FTRS_ALWAYS \ (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \ CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \ CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE) +#endif #else enum { CPU_FTRS_ALWAYS = @@ -513,6 +524,7 @@ enum { #endif #ifdef CONFIG_E500 CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC & + CPU_FTRS_E5500 & #endif CPU_FTRS_POSSIBLE, }; diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h index 811f04ac3660..8d1569c29042 100644 --- a/arch/powerpc/include/asm/pte-common.h +++ b/arch/powerpc/include/asm/pte-common.h @@ -162,7 +162,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void); * on platforms where such control is possible. */ #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ - defined(CONFIG_KPROBES) + defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE) #define PAGE_KERNEL_TEXT PAGE_KERNEL_X #else #define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index c9b68d07ac4f..b9602ee06deb 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -1973,7 +1973,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .pvr_mask = 0xffff0000, .pvr_value = 0x80240000, .cpu_name = "e5500", - .cpu_features = CPU_FTRS_E500MC, + .cpu_features = CPU_FTRS_E5500, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX, diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index 3d3d416339dd..5b5e1f002a8e 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c @@ -163,7 +163,7 @@ static void crash_kexec_prepare_cpus(int cpu) } /* wait for all the CPUs to hit real mode but timeout if they don't come in */ -#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP) +#ifdef CONFIG_PPC_STD_MMU_64 static void crash_kexec_wait_realmode(int cpu) { unsigned int msecs; @@ -188,9 +188,7 @@ static void crash_kexec_wait_realmode(int cpu) } mb(); } -#else -static inline void crash_kexec_wait_realmode(int cpu) {} -#endif +#endif /* CONFIG_PPC_STD_MMU_64 */ /* * This function will be called by secondary cpus or by kexec cpu @@ -235,7 +233,9 @@ void crash_kexec_secondary(struct pt_regs *regs) crash_ipi_callback(regs); } -#else +#else /* ! CONFIG_SMP */ +static inline void crash_kexec_wait_realmode(int cpu) {} + static void crash_kexec_prepare_cpus(int cpu) { /* @@ -255,7 +255,7 @@ void crash_kexec_secondary(struct pt_regs *regs) { cpus_in_sr = CPU_MASK_NONE; } -#endif +#endif /* CONFIG_SMP */ /* * Register a function to be called on shutdown. 
Only use this if you diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index c00d4ca1ee15..28581f1ad2c0 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -527,7 +527,7 @@ static int ibmebus_bus_pm_resume_noirq(struct device *dev) #endif /* !CONFIG_SUSPEND */ -#ifdef CONFIG_HIBERNATION +#ifdef CONFIG_HIBERNATE_CALLBACKS static int ibmebus_bus_pm_freeze(struct device *dev) { @@ -665,7 +665,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev) return ret; } -#else /* !CONFIG_HIBERNATION */ +#else /* !CONFIG_HIBERNATE_CALLBACKS */ #define ibmebus_bus_pm_freeze NULL #define ibmebus_bus_pm_thaw NULL @@ -676,7 +676,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev) #define ibmebus_bus_pm_poweroff_noirq NULL #define ibmebus_bus_pm_restore_noirq NULL -#endif /* !CONFIG_HIBERNATION */ +#endif /* !CONFIG_HIBERNATE_CALLBACKS */ static struct dev_pm_ops ibmebus_bus_dev_pm_ops = { .prepare = ibmebus_bus_pm_prepare, diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index c834757bebc0..2b97b80d6d7d 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c @@ -330,9 +330,11 @@ void __init find_legacy_serial_ports(void) if (!parent) continue; if (of_match_node(legacy_serial_parents, parent) != NULL) { - index = add_legacy_soc_port(np, np); - if (index >= 0 && np == stdout) - legacy_serial_console = index; + if (of_device_is_available(np)) { + index = add_legacy_soc_port(np, np); + if (index >= 0 && np == stdout) + legacy_serial_console = index; + } } of_node_put(parent); } diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index c4063b7f49a0..822f63008ae1 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -398,6 +398,25 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[], return 0; } +static u64 check_and_compute_delta(u64 prev, u64 val) +{ + u64 delta = (val - prev) & 0xfffffffful; + + /* + * POWER7 can roll back counter values; if the new value is smaller + * than the previous value it will cause the delta and the counter to + * have bogus values unless we rolled a counter over. If a counter is + * rolled back, it will be smaller, but within 256, which is the maximum + * number of events to roll back at once. If we detect a rollback, + * return 0. This can lead to a small lack of precision in the + * counters. + */ + if (prev > val && (prev - val) < 256) + delta = 0; + + return delta; +} + static void power_pmu_read(struct perf_event *event) { s64 val, delta, prev; @@ -416,10 +435,11 @@ static void power_pmu_read(struct perf_event *event) prev = local64_read(&event->hw.prev_count); barrier(); val = read_pmc(event->hw.idx); + delta = check_and_compute_delta(prev, val); + if (!delta) + return; } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); - /* The counters are only 32 bits wide */ - delta = (val - prev) & 0xfffffffful; local64_add(delta, &event->count); local64_sub(delta, &event->hw.period_left); } @@ -449,8 +469,9 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw, val = (event->hw.idx == 5) ?
pmc5 : pmc6; prev = local64_read(&event->hw.prev_count); event->hw.idx = 0; - delta = (val - prev) & 0xfffffffful; - local64_add(delta, &event->count); + delta = check_and_compute_delta(prev, val); + if (delta) + local64_add(delta, &event->count); } } @@ -458,14 +479,16 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw, unsigned long pmc5, unsigned long pmc6) { struct perf_event *event; - u64 val; + u64 val, prev; int i; for (i = 0; i < cpuhw->n_limited; ++i) { event = cpuhw->limited_counter[i]; event->hw.idx = cpuhw->limited_hwidx[i]; val = (event->hw.idx == 5) ? pmc5 : pmc6; - local64_set(&event->hw.prev_count, val); + prev = local64_read(&event->hw.prev_count); + if (check_and_compute_delta(prev, val)) + local64_set(&event->hw.prev_count, val); perf_event_update_userpage(event); } } @@ -1197,7 +1220,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, /* we don't have to worry about interrupts here */ prev = local64_read(&event->hw.prev_count); - delta = (val - prev) & 0xfffffffful; + delta = check_and_compute_delta(prev, val); local64_add(delta, &event->count); /* diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 375480c56eb9..f33acfd872ad 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -229,6 +229,9 @@ static u64 scan_dispatch_log(u64 stop_tb) u64 stolen = 0; u64 dtb; + if (!dtl) + return 0; + if (i == vpa->dtl_idx) return 0; while (i < vpa->dtl_idx) { diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index a830c5e80657..bc5f0dc6ae1e 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -842,6 +842,7 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr) mpic_setup_this_cpu(); } +#ifdef CONFIG_PPC64 #ifdef CONFIG_HOTPLUG_CPU static int smp_core99_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) @@ -879,7 +880,6 @@ static struct notifier_block __cpuinitdata smp_core99_cpu_nb = { static void __init smp_core99_bringup_done(void) { -#ifdef CONFIG_PPC64 extern void g5_phy_disable_cpu1(void); /* Close i2c bus if it was used for tb sync */ @@ -894,14 +894,14 @@ static void __init smp_core99_bringup_done(void) set_cpu_present(1, false); g5_phy_disable_cpu1(); } -#endif /* CONFIG_PPC64 */ - #ifdef CONFIG_HOTPLUG_CPU register_cpu_notifier(&smp_core99_cpu_nb); #endif + if (ppc_md.progress) ppc_md.progress("smp_core99_bringup_done", 0x349); } +#endif /* CONFIG_PPC64 */ #ifdef CONFIG_HOTPLUG_CPU @@ -975,7 +975,9 @@ static void pmac_cpu_die(void) struct smp_ops_t core99_smp_ops = { .message_pass = smp_mpic_message_pass, .probe = smp_core99_probe, +#ifdef CONFIG_PPC64 .bringup_done = smp_core99_bringup_done, +#endif .kick_cpu = smp_core99_kick_cpu, .setup_cpu = smp_core99_setup_cpu, .give_timebase = smp_core99_give_timebase, diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 000724149089..6c42cfde8415 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -287,14 +287,22 @@ static int alloc_dispatch_logs(void) int cpu, ret; struct paca_struct *pp; struct dtl_entry *dtl; + struct kmem_cache *dtl_cache; if (!firmware_has_feature(FW_FEATURE_SPLPAR)) return 0; + dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES, + DISPATCH_LOG_BYTES, 0, NULL); + if (!dtl_cache) { + pr_warn("Failed to create dispatch trace log buffer cache\n"); + pr_warn("Stolen time statistics will be unreliable\n"); + return 0; + } + 
for_each_possible_cpu(cpu) { pp = &paca[cpu]; - dtl = kmalloc_node(DISPATCH_LOG_BYTES, GFP_KERNEL, - cpu_to_node(cpu)); + dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL); if (!dtl) { pr_warn("Failed to allocate dispatch trace log for cpu %d\n", cpu); diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index f8f7f28c6343..68ca9290df94 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -324,6 +324,11 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary) struct resource rsrc; const int *bus_range; + if (!of_device_is_available(dev)) { + pr_warning("%s: disabled\n", dev->full_name); + return -ENODEV; + } + pr_debug("Adding PCI host bridge %s\n", dev->full_name); /* Fetch host bridge registers address */ diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 14232d57369c..49798532b477 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c @@ -1457,7 +1457,6 @@ int fsl_rio_setup(struct platform_device *dev) port->ops = ops; port->priv = priv; port->phys_efptr = 0x100; - rio_register_mport(port); priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1); rio_regs_win = priv->regs_win; @@ -1504,6 +1503,9 @@ int fsl_rio_setup(struct platform_device *dev) dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", port->sys_size ? 65536 : 256); + if (rio_register_mport(port)) + goto err; + if (port->host_deviceid >= 0) out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED); diff --git a/arch/um/Kconfig.x86 b/arch/um/Kconfig.x86 index 02fb017fed47..a9da516a5274 100644 --- a/arch/um/Kconfig.x86 +++ b/arch/um/Kconfig.x86 @@ -4,6 +4,10 @@ menu "UML-specific options" menu "Host processor type and features" +config CMPXCHG_LOCAL + bool + default n + source "arch/x86/Kconfig.cpu" endmenu diff --git a/arch/um/include/asm/bug.h b/arch/um/include/asm/bug.h new file mode 100644 index 000000000000..9e33b864c359 --- /dev/null +++ b/arch/um/include/asm/bug.h @@ -0,0 +1,6 @@ +#ifndef __UM_BUG_H +#define __UM_BUG_H + +#include + +#endif diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index fd5a1f365c95..3cce71413d0b 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -96,11 +96,15 @@ #define MSR_IA32_MC0_ADDR 0x00000402 #define MSR_IA32_MC0_MISC 0x00000403 +#define MSR_AMD64_MC0_MASK 0xc0010044 + #define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) #define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x)) #define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x)) #define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x)) +#define MSR_AMD64_MCx_MASK(x) (MSR_AMD64_MC0_MASK + (x)) + /* These are consecutive and not in the normal 4er MCE bank block */ #define MSR_IA32_MC0_CTL2 0x00000280 #define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x)) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 3ecece0217ef..3532d3bf8105 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -615,6 +615,25 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) /* As a rule processors have APIC timer running in deep C states */ if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400)) set_cpu_cap(c, X86_FEATURE_ARAT); + + /* + * Disable GART TLB Walk Errors on Fam10h. We do this here + * because this is always needed when GART is enabled, even in a + * kernel which has no MCE support built in. 
+ */ + if (c->x86 == 0x10) { + /* + * The BIOS should disable GartTlbWlk Errors itself. If + * it doesn't, do it here as suggested by the BKDG. + * + * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012 + */ + u64 mask; + + rdmsrl(MSR_AMD64_MCx_MASK(4), mask); + mask |= (1 << 10); + wrmsrl(MSR_AMD64_MCx_MASK(4), mask); + } } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index c2871d3c71b6..8ed8908cc9f7 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -312,6 +312,26 @@ void __cpuinit smp_store_cpu_info(int id) identify_secondary_cpu(c); } +static void __cpuinit check_cpu_siblings_on_same_node(int cpu1, int cpu2) +{ + int node1 = early_cpu_to_node(cpu1); + int node2 = early_cpu_to_node(cpu2); + + /* + * Our CPU scheduler assumes all logical cpus in the same physical cpu + * share the same node. But buggy ACPI or NUMA emulation might assign + * them to different nodes. Fix it. + */ + if (node1 != node2) { + pr_warning("CPU %d in node %d and CPU %d in node %d are in the same physical CPU. forcing same node %d\n", + cpu1, node1, cpu2, node2, node2); + + numa_remove_cpu(cpu1); + numa_set_node(cpu1, node2); + numa_add_cpu(cpu1); + } +} + static void __cpuinit link_thread_siblings(int cpu1, int cpu2) { cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2)); @@ -320,6 +340,7 @@ static void __cpuinit link_thread_siblings(int cpu1, int cpu2) cpumask_set_cpu(cpu2, cpu_core_mask(cpu1)); cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2)); cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1)); + check_cpu_siblings_on_same_node(cpu1, cpu2); } @@ -361,10 +382,12 @@ void __cpuinit set_cpu_sibling_map(int cpu) per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { cpumask_set_cpu(i, cpu_llc_shared_mask(cpu)); cpumask_set_cpu(cpu, cpu_llc_shared_mask(i)); + check_cpu_siblings_on_same_node(cpu, i); } if (c->phys_proc_id == cpu_data(i).phys_proc_id) { cpumask_set_cpu(i, cpu_core_mask(cpu)); cpumask_set_cpu(cpu, cpu_core_mask(i)); + check_cpu_siblings_on_same_node(cpu, i); /* * Does this new cpu bringup a new core?
*/ diff --git a/arch/x86/platform/ce4100/falconfalls.dts b/arch/x86/platform/ce4100/falconfalls.dts index dc701ea58546..2d6d226f2b10 100644 --- a/arch/x86/platform/ce4100/falconfalls.dts +++ b/arch/x86/platform/ce4100/falconfalls.dts @@ -74,6 +74,7 @@ compatible = "intel,ce4100-pci", "pci"; device_type = "pci"; bus-range = <1 1>; + reg = <0x0800 0x0 0x0 0x0 0x0>; ranges = <0x2000000 0 0xdffe0000 0x2000000 0 0xdffe0000 0 0x1000>; interrupt-parent = <&ioapic2>; @@ -412,6 +413,7 @@ #address-cells = <2>; #size-cells = <1>; compatible = "isa"; + reg = <0xf800 0x0 0x0 0x0 0x0>; ranges = <1 0 0 0 0 0x100>; rtc@70 { diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index 5c0207bf959b..275dbc19e2cf 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c @@ -97,11 +97,11 @@ static int __init sfi_parse_mtmr(struct sfi_table_header *table) pentry->freq_hz, pentry->irq); if (!pentry->irq) continue; - mp_irq.type = MP_IOAPIC; + mp_irq.type = MP_INTSRC; mp_irq.irqtype = mp_INT; /* triggering mode edge bit 2-3, active high polarity bit 0-1 */ mp_irq.irqflag = 5; - mp_irq.srcbus = 0; + mp_irq.srcbus = MP_BUS_ISA; mp_irq.srcbusirq = pentry->irq; /* IRQ */ mp_irq.dstapic = MP_APIC_ALL; mp_irq.dstirq = pentry->irq; @@ -168,10 +168,10 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table) for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) { pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n", totallen, (u32)pentry->phys_addr, pentry->irq); - mp_irq.type = MP_IOAPIC; + mp_irq.type = MP_INTSRC; mp_irq.irqtype = mp_INT; mp_irq.irqflag = 0xf; /* level trigger and active low */ - mp_irq.srcbus = 0; + mp_irq.srcbus = MP_BUS_ISA; mp_irq.srcbusirq = pentry->irq; /* IRQ */ mp_irq.dstapic = MP_APIC_ALL; mp_irq.dstirq = pentry->irq; @@ -282,7 +282,7 @@ void __init x86_mrst_early_setup(void) /* Avoid searching for BIOS MP tables */ x86_init.mpparse.find_smp_config = x86_init_noop; x86_init.mpparse.get_smp_config = x86_init_uint_noop; - + set_bit(MP_BUS_ISA, mp_bus_not_pci); } /* diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig index 1c7121ba18ff..5cc821cb2e09 100644 --- a/arch/x86/xen/Kconfig +++ b/arch/x86/xen/Kconfig @@ -39,6 +39,7 @@ config XEN_MAX_DOMAIN_MEMORY config XEN_SAVE_RESTORE bool depends on XEN + select HIBERNATE_CALLBACKS default y config XEN_DEBUG_FS diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 49dbd78ec3cb..e3c6a06cf725 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -238,6 +238,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx, static __init void xen_init_cpuid_mask(void) { unsigned int ax, bx, cx, dx; + unsigned int xsave_mask; cpuid_leaf1_edx_mask = ~((1 << X86_FEATURE_MCE) | /* disable MCE */ @@ -249,24 +250,16 @@ static __init void xen_init_cpuid_mask(void) cpuid_leaf1_edx_mask &= ~((1 << X86_FEATURE_APIC) | /* disable local APIC */ (1 << X86_FEATURE_ACPI)); /* disable ACPI */ - ax = 1; - cx = 0; xen_cpuid(&ax, &bx, &cx, &dx); - /* cpuid claims we support xsave; try enabling it to see what happens */ - if (cx & (1 << (X86_FEATURE_XSAVE % 32))) { - unsigned long cr4; + xsave_mask = + (1 << (X86_FEATURE_XSAVE % 32)) | + (1 << (X86_FEATURE_OSXSAVE % 32)); - set_in_cr4(X86_CR4_OSXSAVE); - - cr4 = read_cr4(); - - if ((cr4 & X86_CR4_OSXSAVE) == 0) - cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32)); - - clear_in_cr4(X86_CR4_OSXSAVE); - } + /* Xen will set CR4.OSXSAVE if supported and not disabled by force */ + if ((cx & xsave_mask) != xsave_mask) + 
cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */ } static void xen_set_debugreg(int reg, unsigned long val) diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index c82df6c9c0f0..a991b57f91fe 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -565,13 +565,13 @@ pte_t xen_make_pte_debug(pteval_t pte) if (io_page && (xen_initial_domain() || addr >= ISA_END_ADDRESS)) { other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT; - WARN(addr != other_addr, + WARN_ONCE(addr != other_addr, "0x%lx is using VM_IO, but it is 0x%lx!\n", (unsigned long)addr, (unsigned long)other_addr); } else { pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP; other_addr = (_pte.pte & PTE_PFN_MASK); - WARN((addr == other_addr) && (!io_page) && (!iomap_set), + WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set), "0x%lx is missing VM_IO (and wasn't fixed)!\n", (unsigned long)addr); } diff --git a/block/blk-core.c b/block/blk-core.c index 90f22cc30799..5fa3dd2705c6 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -198,26 +198,13 @@ void blk_dump_rq_flags(struct request *rq, char *msg) } EXPORT_SYMBOL(blk_dump_rq_flags); -/* - * Make sure that plugs that were pending when this function was entered, - * are now complete and requests pushed to the queue. -*/ -static inline void queue_sync_plugs(struct request_queue *q) -{ - /* - * If the current process is plugged and has barriers submitted, - * we will livelock if we don't unplug first. - */ - blk_flush_plug(current); -} - static void blk_delay_work(struct work_struct *work) { struct request_queue *q; q = container_of(work, struct request_queue, delay_work.work); spin_lock_irq(q->queue_lock); - __blk_run_queue(q, false); + __blk_run_queue(q); spin_unlock_irq(q->queue_lock); } @@ -233,7 +220,8 @@ static void blk_delay_work(struct work_struct *work) */ void blk_delay_queue(struct request_queue *q, unsigned long msecs) { - schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs)); + queue_delayed_work(kblockd_workqueue, &q->delay_work, + msecs_to_jiffies(msecs)); } EXPORT_SYMBOL(blk_delay_queue); @@ -251,7 +239,7 @@ void blk_start_queue(struct request_queue *q) WARN_ON(!irqs_disabled()); queue_flag_clear(QUEUE_FLAG_STOPPED, q); - __blk_run_queue(q, false); + __blk_run_queue(q); } EXPORT_SYMBOL(blk_start_queue); @@ -298,7 +286,6 @@ void blk_sync_queue(struct request_queue *q) { del_timer_sync(&q->timeout); cancel_delayed_work_sync(&q->delay_work); - queue_sync_plugs(q); } EXPORT_SYMBOL(blk_sync_queue); @@ -310,9 +297,8 @@ EXPORT_SYMBOL(blk_sync_queue); * Description: * See @blk_run_queue. This variant must be called with the queue lock * held and interrupts disabled. - * */ -void __blk_run_queue(struct request_queue *q, bool force_kblockd) +void __blk_run_queue(struct request_queue *q) { if (unlikely(blk_queue_stopped(q))) return; @@ -321,7 +307,7 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd) * Only recurse once to avoid overrunning the stack, let the unplug * handling reinvoke the handler shortly if we already got there. 
*/ - if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { + if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { q->request_fn(q); queue_flag_clear(QUEUE_FLAG_REENTER, q); } else @@ -329,6 +315,20 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd) } EXPORT_SYMBOL(__blk_run_queue); +/** + * blk_run_queue_async - run a single device queue in workqueue context + * @q: The queue to run + * + * Description: + * Tells kblockd to perform the equivalent of @blk_run_queue on behalf + * of us. + */ +void blk_run_queue_async(struct request_queue *q) +{ + if (likely(!blk_queue_stopped(q))) + queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); +} + /** * blk_run_queue - run a single device queue * @q: The queue to run @@ -342,7 +342,7 @@ void blk_run_queue(struct request_queue *q) unsigned long flags; spin_lock_irqsave(q->queue_lock, flags); - __blk_run_queue(q, false); + __blk_run_queue(q); spin_unlock_irqrestore(q->queue_lock, flags); } EXPORT_SYMBOL(blk_run_queue); @@ -991,7 +991,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq, blk_queue_end_tag(q, rq); add_acct_request(q, rq, where); - __blk_run_queue(q, false); + __blk_run_queue(q); spin_unlock_irqrestore(q->queue_lock, flags); } EXPORT_SYMBOL(blk_insert_request); @@ -1311,7 +1311,15 @@ get_rq: plug = current->plug; if (plug) { - if (!plug->should_sort && !list_empty(&plug->list)) { + /* + * If this is the first request added after a plug, fire + * off a plug trace. If others have been added before, check + * if we have multiple devices in this plug. If so, make a + * note to sort the list before dispatch. + */ + if (list_empty(&plug->list)) + trace_block_plug(q); + else if (!plug->should_sort) { struct request *__rq; __rq = list_entry_rq(plug->list.prev); @@ -1327,7 +1335,7 @@ get_rq: } else { spin_lock_irq(q->queue_lock); add_acct_request(q, req, where); - __blk_run_queue(q, false); + __blk_run_queue(q); out_unlock: spin_unlock_irq(q->queue_lock); } @@ -2644,6 +2652,7 @@ void blk_start_plug(struct blk_plug *plug) plug->magic = PLUG_MAGIC; INIT_LIST_HEAD(&plug->list); + INIT_LIST_HEAD(&plug->cb_list); plug->should_sort = 0; /* @@ -2668,33 +2677,93 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) return !(rqa->q <= rqb->q); } -static void flush_plug_list(struct blk_plug *plug) +/* + * If 'from_schedule' is true, then postpone the dispatch of requests + * until a safe kblockd context. We do this to avoid unexpectedly large + * additional stack usage in driver dispatch, in places where the original + * plugger did not intend it. + */ +static void queue_unplugged(struct request_queue *q, unsigned int depth, + bool from_schedule) + __releases(q->queue_lock) +{ + trace_block_unplug(q, depth, !from_schedule); + + /* + * If we are punting this to kblockd, then we can safely drop + * the queue_lock before waking kblockd (which needs to take + * this lock).
+ */ + if (from_schedule) { + spin_unlock(q->queue_lock); + blk_run_queue_async(q); + } else { + __blk_run_queue(q); + spin_unlock(q->queue_lock); + } + +} + +static void flush_plug_callbacks(struct blk_plug *plug) +{ + LIST_HEAD(callbacks); + + if (list_empty(&plug->cb_list)) + return; + + list_splice_init(&plug->cb_list, &callbacks); + + while (!list_empty(&callbacks)) { + struct blk_plug_cb *cb = list_first_entry(&callbacks, + struct blk_plug_cb, + list); + list_del(&cb->list); + cb->callback(cb); + } +} + +void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) { struct request_queue *q; unsigned long flags; struct request *rq; + LIST_HEAD(list); + unsigned int depth; BUG_ON(plug->magic != PLUG_MAGIC); + flush_plug_callbacks(plug); if (list_empty(&plug->list)) return; - if (plug->should_sort) - list_sort(NULL, &plug->list, plug_rq_cmp); + list_splice_init(&plug->list, &list); + + if (plug->should_sort) { + list_sort(NULL, &list, plug_rq_cmp); + plug->should_sort = 0; + } q = NULL; + depth = 0; + + /* + * Save and disable interrupts here, to avoid doing it for every + * queue lock we have to take. + */ local_irq_save(flags); - while (!list_empty(&plug->list)) { - rq = list_entry_rq(plug->list.next); + while (!list_empty(&list)) { + rq = list_entry_rq(list.next); list_del_init(&rq->queuelist); BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG)); BUG_ON(!rq->q); if (rq->q != q) { - if (q) { - __blk_run_queue(q, false); - spin_unlock(q->queue_lock); - } + /* + * This drops the queue lock + */ + if (q) + queue_unplugged(q, depth, from_schedule); q = rq->q; + depth = 0; spin_lock(q->queue_lock); } rq->cmd_flags &= ~REQ_ON_PLUG; @@ -2706,39 +2775,29 @@ static void flush_plug_list(struct blk_plug *plug) __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); else __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); + + depth++; } - if (q) { - __blk_run_queue(q, false); - spin_unlock(q->queue_lock); - } + /* + * This drops the queue lock + */ + if (q) + queue_unplugged(q, depth, from_schedule); - BUG_ON(!list_empty(&plug->list)); local_irq_restore(flags); } - -static void __blk_finish_plug(struct task_struct *tsk, struct blk_plug *plug) -{ - flush_plug_list(plug); - - if (plug == tsk->plug) - tsk->plug = NULL; -} +EXPORT_SYMBOL(blk_flush_plug_list); void blk_finish_plug(struct blk_plug *plug) { - if (plug) - __blk_finish_plug(current, plug); + blk_flush_plug_list(plug, false); + + if (plug == current->plug) + current->plug = NULL; } EXPORT_SYMBOL(blk_finish_plug); -void __blk_flush_plug(struct task_struct *tsk, struct blk_plug *plug) -{ - __blk_finish_plug(tsk, plug); - tsk->plug = plug; -} -EXPORT_SYMBOL(__blk_flush_plug); - int __init blk_dev_init(void) { BUILD_BUG_ON(__REQ_NR_BITS > 8 * diff --git a/block/blk-exec.c b/block/blk-exec.c index 7482b7fa863b..81e31819a597 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c @@ -55,7 +55,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, WARN_ON(irqs_disabled()); spin_lock_irq(q->queue_lock); __elv_add_request(q, rq, where); - __blk_run_queue(q, false); + __blk_run_queue(q); /* the queue is stopped so it won't be plugged+unplugged */ if (rq->cmd_type == REQ_TYPE_PM_RESUME) q->request_fn(q); diff --git a/block/blk-flush.c b/block/blk-flush.c index eba4a2790c6c..6c9b5e189e62 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -218,7 +218,7 @@ static void flush_end_io(struct request *flush_rq, int error) * request_fn may confuse the driver. Always use kblockd. 
*/ if (queued) - __blk_run_queue(q, true); + blk_run_queue_async(q); } /** @@ -274,7 +274,7 @@ static void flush_data_end_io(struct request *rq, int error) * the comment in flush_end_io(). */ if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error)) - __blk_run_queue(q, true); + blk_run_queue_async(q); } /** diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 261c75c665ae..6d735122bc59 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -498,7 +498,6 @@ int blk_register_queue(struct gendisk *disk) { int ret; struct device *dev = disk_to_dev(disk); - struct request_queue *q = disk->queue; if (WARN_ON(!q)) @@ -521,7 +520,7 @@ int blk_register_queue(struct gendisk *disk) if (ret) { kobject_uevent(&q->kobj, KOBJ_REMOVE); kobject_del(&q->kobj); - blk_trace_remove_sysfs(disk_to_dev(disk)); + blk_trace_remove_sysfs(dev); kobject_put(&dev->kobj); return ret; } diff --git a/block/blk.h b/block/blk.h index 61263463e38e..c9df8fc3c999 100644 --- a/block/blk.h +++ b/block/blk.h @@ -22,6 +22,7 @@ void blk_rq_timed_out_timer(unsigned long data); void blk_delete_timer(struct request *); void blk_add_timer(struct request *); void __generic_unplug_device(struct request_queue *); +void blk_run_queue_async(struct request_queue *q); /* * Internal atomic flags for request handling diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 3be881ec95ad..46b0a1d1d925 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -3368,7 +3368,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfqd->busy_queues > 1) { cfq_del_timer(cfqd, cfqq); cfq_clear_cfqq_wait_request(cfqq); - __blk_run_queue(cfqd->queue, false); + __blk_run_queue(cfqd->queue); } else { cfq_blkiocg_update_idle_time_stats( &cfqq->cfqg->blkg); @@ -3383,7 +3383,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, * this new queue is RT and the current one is BE */ cfq_preempt_queue(cfqd, cfqq); - __blk_run_queue(cfqd->queue, false); + __blk_run_queue(cfqd->queue); } } @@ -3743,7 +3743,7 @@ static void cfq_kick_queue(struct work_struct *work) struct request_queue *q = cfqd->queue; spin_lock_irq(q->queue_lock); - __blk_run_queue(cfqd->queue, false); + __blk_run_queue(cfqd->queue); spin_unlock_irq(q->queue_lock); } diff --git a/block/elevator.c b/block/elevator.c index 0cdb4e7ebab4..6f6abc08bb56 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -642,7 +642,7 @@ void elv_quiesce_start(struct request_queue *q) */ elv_drain_elevator(q); while (q->rq.elvpriv) { - __blk_run_queue(q, false); + __blk_run_queue(q); spin_unlock_irq(q->queue_lock); msleep(10); spin_lock_irq(q->queue_lock); @@ -695,7 +695,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where) * with anything. There's no point in delaying queue * processing. 
*/ - __blk_run_queue(q, false); + __blk_run_queue(q); break; case ELEVATOR_INSERT_SORT_MERGE: diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 821040503154..7025593a58c8 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -214,7 +214,7 @@ static int amba_pm_resume_noirq(struct device *dev) #endif /* !CONFIG_SUSPEND */ -#ifdef CONFIG_HIBERNATION +#ifdef CONFIG_HIBERNATE_CALLBACKS static int amba_pm_freeze(struct device *dev) { @@ -352,7 +352,7 @@ static int amba_pm_restore_noirq(struct device *dev) return ret; } -#else /* !CONFIG_HIBERNATION */ +#else /* !CONFIG_HIBERNATE_CALLBACKS */ #define amba_pm_freeze NULL #define amba_pm_thaw NULL @@ -363,7 +363,7 @@ static int amba_pm_restore_noirq(struct device *dev) #define amba_pm_poweroff_noirq NULL #define amba_pm_restore_noirq NULL -#endif /* !CONFIG_HIBERNATION */ +#endif /* !CONFIG_HIBERNATE_CALLBACKS */ #ifdef CONFIG_PM diff --git a/drivers/base/platform.c b/drivers/base/platform.c index f051cfff18af..9e0e4fc24c46 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -149,6 +149,7 @@ static void platform_device_release(struct device *dev) of_device_node_put(&pa->pdev.dev); kfree(pa->pdev.dev.platform_data); + kfree(pa->pdev.mfd_cell); kfree(pa->pdev.resource); kfree(pa); } @@ -771,7 +772,7 @@ int __weak platform_pm_resume_noirq(struct device *dev) #endif /* !CONFIG_SUSPEND */ -#ifdef CONFIG_HIBERNATION +#ifdef CONFIG_HIBERNATE_CALLBACKS static int platform_pm_freeze(struct device *dev) { @@ -909,7 +910,7 @@ static int platform_pm_restore_noirq(struct device *dev) return ret; } -#else /* !CONFIG_HIBERNATION */ +#else /* !CONFIG_HIBERNATE_CALLBACKS */ #define platform_pm_freeze NULL #define platform_pm_thaw NULL @@ -920,7 +921,7 @@ static int platform_pm_restore_noirq(struct device *dev) #define platform_pm_poweroff_noirq NULL #define platform_pm_restore_noirq NULL -#endif /* !CONFIG_HIBERNATION */ +#endif /* !CONFIG_HIBERNATE_CALLBACKS */ #ifdef CONFIG_PM_RUNTIME diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 052dc53eef38..fbc5b6e7c591 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -233,7 +233,7 @@ static int pm_op(struct device *dev, } break; #endif /* CONFIG_SUSPEND */ -#ifdef CONFIG_HIBERNATION +#ifdef CONFIG_HIBERNATE_CALLBACKS case PM_EVENT_FREEZE: case PM_EVENT_QUIESCE: if (ops->freeze) { @@ -260,7 +260,7 @@ static int pm_op(struct device *dev, suspend_report_result(ops->restore, error); } break; -#endif /* CONFIG_HIBERNATION */ +#endif /* CONFIG_HIBERNATE_CALLBACKS */ default: error = -EINVAL; } @@ -308,7 +308,7 @@ static int pm_noirq_op(struct device *dev, } break; #endif /* CONFIG_SUSPEND */ -#ifdef CONFIG_HIBERNATION +#ifdef CONFIG_HIBERNATE_CALLBACKS case PM_EVENT_FREEZE: case PM_EVENT_QUIESCE: if (ops->freeze_noirq) { @@ -335,7 +335,7 @@ static int pm_noirq_op(struct device *dev, suspend_report_result(ops->restore_noirq, error); } break; -#endif /* CONFIG_HIBERNATION */ +#endif /* CONFIG_HIBERNATE_CALLBACKS */ default: error = -EINVAL; } diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index a6feb78c404c..c58f691ec3ce 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -96,6 +96,7 @@ config DRM_I915 # i915 depends on ACPI_VIDEO when ACPI is enabled # but for select to work, need to select ACPI_VIDEO's dependencies, ick select BACKLIGHT_CLASS_DEVICE if ACPI + select VIDEO_OUTPUT_CONTROL if ACPI select INPUT if ACPI select ACPI_VIDEO if ACPI select ACPI_BUTTON if ACPI diff --git 
a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 8314a49b6b9a..90aef64b76f2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -269,7 +269,7 @@ struct init_tbl_entry { int (*handler)(struct nvbios *, uint16_t, struct init_exec *); }; -static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *); +static int parse_init_table(struct nvbios *, uint16_t, struct init_exec *); #define MACRO_INDEX_SIZE 2 #define MACRO_SIZE 8 @@ -2010,6 +2010,27 @@ init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) return 3; } +static int +init_jump(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_JUMP opcode: 0x5C ('\') + * + * offset (8 bit): opcode + * offset + 1 (16 bit): offset (in bios) + * + * Continue execution of init table from 'offset' + */ + + uint16_t jmp_offset = ROM16(bios->data[offset + 1]); + + if (!iexec->execute) + return 3; + + BIOSLOG(bios, "0x%04X: Jump to 0x%04X\n", offset, jmp_offset); + return jmp_offset - offset; +} + static int init_i2c_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) { @@ -3659,6 +3680,7 @@ static struct init_tbl_entry itbl_entry[] = { { "INIT_ZM_REG_SEQUENCE" , 0x58, init_zm_reg_sequence }, /* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */ { "INIT_SUB_DIRECT" , 0x5B, init_sub_direct }, + { "INIT_JUMP" , 0x5C, init_jump }, { "INIT_I2C_IF" , 0x5E, init_i2c_if }, { "INIT_COPY_NV_REG" , 0x5F, init_copy_nv_reg }, { "INIT_ZM_INDEX_IO" , 0x62, init_zm_index_io }, @@ -3700,8 +3722,7 @@ static struct init_tbl_entry itbl_entry[] = { #define MAX_TABLE_OPS 1000 static int -parse_init_table(struct nvbios *bios, unsigned int offset, - struct init_exec *iexec) +parse_init_table(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) { /* * Parses all commands in an init table. @@ -6333,6 +6354,32 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) } } + /* XFX GT-240X-YA + * + * So many things wrong here, replace the entire encoder table.. 
+ */ + if (nv_match_device(dev, 0x0ca3, 0x1682, 0x3003)) { + if (idx == 0) { + *conn = 0x02001300; /* VGA, connector 1 */ + *conf = 0x00000028; + } else + if (idx == 1) { + *conn = 0x01010312; /* DVI, connector 0 */ + *conf = 0x00020030; + } else + if (idx == 2) { + *conn = 0x01010310; /* VGA, connector 0 */ + *conf = 0x00000028; + } else + if (idx == 3) { + *conn = 0x02022362; /* HDMI, connector 2 */ + *conf = 0x00020010; + } else { + *conn = 0x0000000e; /* EOL */ + *conf = 0x00000000; + } + } + return true; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 57e5302503db..856d56a98d1e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -1190,7 +1190,7 @@ extern int nv50_graph_load_context(struct nouveau_channel *); extern int nv50_graph_unload_context(struct drm_device *); extern int nv50_grctx_init(struct nouveau_grctx *); extern void nv50_graph_tlb_flush(struct drm_device *dev); -extern void nv86_graph_tlb_flush(struct drm_device *dev); +extern void nv84_graph_tlb_flush(struct drm_device *dev); extern struct nouveau_enum nv50_data_error_names[]; /* nvc0_graph.c */ diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 2683377f4131..78f467fe30be 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -552,6 +552,7 @@ nouveau_mem_timing_init(struct drm_device *dev) u8 tRC; /* Byte 9 */ u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14; u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21; + u8 magic_number = 0; /* Yeah... sorry*/ u8 *mem = NULL, *entry; int i, recordlen, entries; @@ -596,6 +597,12 @@ nouveau_mem_timing_init(struct drm_device *dev) if (!memtimings->timing) return; + /* Get "some number" from the timing reg for NV_40 + * Used in calculations later */ + if(dev_priv->card_type == NV_40) { + magic_number = (nv_rd32(dev,0x100228) & 0x0f000000) >> 24; + } + entry = mem + mem[1]; for (i = 0; i < entries; i++, entry += recordlen) { struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i]; @@ -635,36 +642,51 @@ nouveau_mem_timing_init(struct drm_device *dev) /* XXX: I don't trust the -1's and +1's... they must come * from somewhere! */ - timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 | + timing->reg_100224 = (tUNK_0 + tUNK_19 + 1 + magic_number) << 24 | tUNK_18 << 16 | - (tUNK_1 + tUNK_19 + 1) << 8 | - (tUNK_2 - 1)); + (tUNK_1 + tUNK_19 + 1 + magic_number) << 8; + if(dev_priv->chipset == 0xa8) { + timing->reg_100224 |= (tUNK_2 - 1); + } else { + timing->reg_100224 |= (tUNK_2 + 2 - magic_number); + } timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10); - if(recordlen > 19) { - timing->reg_100228 += (tUNK_19 - 1) << 24; - }/* I cannot back-up this else-statement right now - else { - timing->reg_100228 += tUNK_12 << 24; - }*/ + if(dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa) { + timing->reg_100228 |= (tUNK_19 - 1) << 24; + } - /* XXX: reg_10022c */ - timing->reg_10022c = tUNK_2 - 1; + if(dev_priv->card_type == NV_40) { + /* NV40: don't know what the rest of the regs are.. + * And don't need to know either */ + timing->reg_100228 |= 0x20200000 | magic_number << 24; + } else if(dev_priv->card_type >= NV_50) { + /* XXX: reg_10022c */ + timing->reg_10022c = tUNK_2 - 1; - timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | - tUNK_13 << 8 | tUNK_13); + timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | + tUNK_13 << 8 | tUNK_13); - /* XXX: +6? 
*/ - timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC); - timing->reg_100234 += max(tUNK_10,tUNK_11) << 16; + timing->reg_100234 = (tRAS << 24 | tRC); + timing->reg_100234 += max(tUNK_10,tUNK_11) << 16; - /* XXX; reg_100238, reg_10023c - * reg: 0x00?????? - * reg_10023c: - * 0 for pre-NV50 cards - * 0x????0202 for NV50+ cards (empirical evidence) */ - if(dev_priv->card_type >= NV_50) { + if(dev_priv->chipset < 0xa3) { + timing->reg_100234 |= (tUNK_2 + 2) << 8; + } else { + /* XXX: +6? */ + timing->reg_100234 |= (tUNK_19 + 6) << 8; + } + + /* XXX; reg_100238, reg_10023c + * reg_100238: 0x00?????? + * reg_10023c: 0x!!??0202 for NV50+ cards (empirical evidence) */ timing->reg_10023c = 0x202; + if(dev_priv->chipset < 0xa3) { + timing->reg_10023c |= 0x4000000 | (tUNK_2 - 1) << 16; + } else { + /* currently unknown + * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */ + } } NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i, @@ -675,7 +697,7 @@ nouveau_mem_timing_init(struct drm_device *dev) timing->reg_100238, timing->reg_10023c); } - memtimings->nr_timing = entries; + memtimings->nr_timing = entries; memtimings->supported = true; } diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c index ac62a1b8c4fc..670e3cb697ec 100644 --- a/drivers/gpu/drm/nouveau/nouveau_perf.c +++ b/drivers/gpu/drm/nouveau/nouveau_perf.c @@ -134,7 +134,7 @@ nouveau_perf_init(struct drm_device *dev) case 0x13: case 0x15: perflvl->fanspeed = entry[55]; - perflvl->voltage = entry[56]; + perflvl->voltage = (recordlen > 56) ? entry[56] : 0; perflvl->core = ROM32(entry[1]) * 10; perflvl->memory = ROM32(entry[5]) * 20; break; diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 5bb2859001e2..6e2b1a6caa2d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -376,15 +376,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->graph.destroy_context = nv50_graph_destroy_context; engine->graph.load_context = nv50_graph_load_context; engine->graph.unload_context = nv50_graph_unload_context; - if (dev_priv->chipset != 0x86) + if (dev_priv->chipset == 0x50 || + dev_priv->chipset == 0xac) engine->graph.tlb_flush = nv50_graph_tlb_flush; - else { - /* from what i can see nvidia do this on every - * pre-NVA3 board except NVAC, but, we've only - * ever seen problems on NV86 - */ - engine->graph.tlb_flush = nv86_graph_tlb_flush; - } + else + engine->graph.tlb_flush = nv84_graph_tlb_flush; engine->fifo.channels = 128; engine->fifo.init = nv50_fifo_init; engine->fifo.takedown = nv50_fifo_takedown; diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index c82db37d9f41..12098bf839c4 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c @@ -581,12 +581,13 @@ static void nv04_dfp_restore(struct drm_encoder *encoder) int head = nv_encoder->restore.head; if (nv_encoder->dcb->type == OUTPUT_LVDS) { - struct drm_display_mode *native_mode = nouveau_encoder_connector_get(nv_encoder)->native_mode; - if (native_mode) - call_lvds_script(dev, nv_encoder->dcb, head, LVDS_PANEL_ON, - native_mode->clock); - else - NV_ERROR(dev, "Not restoring LVDS without native mode\n"); + struct nouveau_connector *connector = + nouveau_encoder_connector_get(nv_encoder); + + if (connector && connector->native_mode) + call_lvds_script(dev, nv_encoder->dcb, head, + LVDS_PANEL_ON, + connector->native_mode->clock); } else if 
(nv_encoder->dcb->type == OUTPUT_TMDS) { int clock = nouveau_hw_pllvals_to_clk diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 2b9984027f41..a19ccaa025b3 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -469,9 +469,6 @@ nv50_crtc_wait_complete(struct drm_crtc *crtc) start = ptimer->read(dev); do { - nv_wr32(dev, 0x61002c, 0x370); - nv_wr32(dev, 0x000140, 1); - if (nv_ro32(disp->ntfy, 0x000)) return 0; } while (ptimer->read(dev) - start < 2000000000ULL); diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c index a2cfaa691e9b..c8e83c1a4de8 100644 --- a/drivers/gpu/drm/nouveau/nv50_evo.c +++ b/drivers/gpu/drm/nouveau/nv50_evo.c @@ -186,6 +186,7 @@ nv50_evo_channel_init(struct nouveau_channel *evo) nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id); evo->dma.max = (4096/4) - 2; + evo->dma.max &= ~7; evo->dma.put = 0; evo->dma.cur = evo->dma.put; evo->dma.free = evo->dma.max - evo->dma.cur; diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 8675b00caf18..b02a5b1e7d37 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -503,7 +503,7 @@ nv50_graph_tlb_flush(struct drm_device *dev) } void -nv86_graph_tlb_flush(struct drm_device *dev) +nv84_graph_tlb_flush(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c index 69af0ba7edd3..a0a2a0277f73 100644 --- a/drivers/gpu/drm/nouveau/nvc0_vm.c +++ b/drivers/gpu/drm/nouveau/nvc0_vm.c @@ -104,20 +104,26 @@ nvc0_vm_flush(struct nouveau_vm *vm) struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; struct drm_device *dev = vm->dev; struct nouveau_vm_pgd *vpgd; - u32 r100c80, engine; + u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5; pinstmem->flush(vm->dev); - if (vm == dev_priv->chan_vm) - engine = 1; - else - engine = 5; - + spin_lock(&dev_priv->ramin_lock); list_for_each_entry(vpgd, &vm->pgd_list, head) { - r100c80 = nv_rd32(dev, 0x100c80); + /* looks like maybe a "free flush slots" counter, the + * faster you write to 0x100cbc the more it decreases + */ + if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) { + NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n", + nv_rd32(dev, 0x100c80), engine); + } nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8); nv_wr32(dev, 0x100cbc, 0x80000000 | engine); - if (!nv_wait(dev, 0x100c80, 0xffffffff, r100c80)) - NV_ERROR(dev, "vm flush timeout eng %d\n", engine); + /* wait for flush to be queued?
*/ + if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) { + NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n", + nv_rd32(dev, 0x100c80), engine); + } } + spin_unlock(&dev_priv->ramin_lock); } diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 258fa5e7a2d9..d71d375149f8 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -32,6 +32,7 @@ #include "atom.h" #include "atom-names.h" #include "atom-bits.h" +#include "radeon.h" #define ATOM_COND_ABOVE 0 #define ATOM_COND_ABOVEOREQUAL 1 @@ -101,7 +102,9 @@ static void debug_print_spaces(int n) static uint32_t atom_iio_execute(struct atom_context *ctx, int base, uint32_t index, uint32_t data) { + struct radeon_device *rdev = ctx->card->dev->dev_private; uint32_t temp = 0xCDCDCDCD; + while (1) switch (CU8(base)) { case ATOM_IIO_NOP: @@ -112,7 +115,8 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, base += 3; break; case ATOM_IIO_WRITE: - (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1)); + if (rdev->family == CHIP_RV515) + (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1)); ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); base += 3; break; diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index b41ec59c7100..9d516a8c4dfa 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -531,6 +531,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; else pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; + + if ((rdev->family == CHIP_R600) || + (rdev->family == CHIP_RV610) || + (rdev->family == CHIP_RV630) || + (rdev->family == CHIP_RV670)) + pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; } else { pll->flags |= RADEON_PLL_LEGACY; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 0b0cc74c08c0..3453910ee0f3 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -120,11 +120,16 @@ void evergreen_pm_misc(struct radeon_device *rdev) struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; - if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { - if (voltage->voltage != rdev->pm.current_vddc) { - radeon_atom_set_voltage(rdev, voltage->voltage); + if (voltage->type == VOLTAGE_SW) { + if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) { + radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); rdev->pm.current_vddc = voltage->voltage; - DRM_DEBUG("Setting: v: %d\n", voltage->voltage); + DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage); + } + if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) { + radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI); + rdev->pm.current_vddci = voltage->vddci; + DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci); } } } @@ -3036,9 +3041,6 @@ int evergreen_init(struct radeon_device *rdev) { int r; - r = radeon_dummy_page_init(rdev); - if (r) - return r; /* This don't do much */ r = radeon_gem_init(rdev); if (r) @@ -3150,7 +3152,6 @@ void evergreen_fini(struct radeon_device *rdev) radeon_atombios_fini(rdev); kfree(rdev->bios); rdev->bios = NULL; - radeon_dummy_page_fini(rdev); } static void evergreen_pcie_gen2_enable(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index be271c42de4d..15d58292677a 100644 --- 
a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -587,7 +587,7 @@ void r600_pm_misc(struct radeon_device *rdev) if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { if (voltage->voltage != rdev->pm.current_vddc) { - radeon_atom_set_voltage(rdev, voltage->voltage); + radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); rdev->pm.current_vddc = voltage->voltage; DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage); } @@ -2509,9 +2509,6 @@ int r600_init(struct radeon_device *rdev) { int r; - r = radeon_dummy_page_init(rdev); - if (r) - return r; if (r600_debugfs_mc_info_init(rdev)) { DRM_ERROR("Failed to register debugfs file for mc !\n"); } @@ -2625,7 +2622,6 @@ void r600_fini(struct radeon_device *rdev) radeon_atombios_fini(rdev); kfree(rdev->bios); rdev->bios = NULL; - radeon_dummy_page_fini(rdev); } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 93f536594c73..ba643b576054 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -177,7 +177,7 @@ void radeon_pm_suspend(struct radeon_device *rdev); void radeon_pm_resume(struct radeon_device *rdev); void radeon_combios_get_power_modes(struct radeon_device *rdev); void radeon_atombios_get_power_modes(struct radeon_device *rdev); -void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); +void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type); void rs690_pm_info(struct radeon_device *rdev); extern int rv6xx_get_temp(struct radeon_device *rdev); extern int rv770_get_temp(struct radeon_device *rdev); @@ -767,7 +767,9 @@ struct radeon_voltage { u8 vddci_id; /* index into vddci voltage table */ bool vddci_enabled; /* r6xx+ sw */ - u32 voltage; + u16 voltage; + /* evergreen+ vddci */ + u16 vddci; }; /* clock mode flags */ @@ -835,10 +837,12 @@ struct radeon_pm { int default_power_state_index; u32 current_sclk; u32 current_mclk; - u32 current_vddc; + u16 current_vddc; + u16 current_vddci; u32 default_sclk; u32 default_mclk; - u32 default_vddc; + u16 default_vddc; + u16 default_vddci; struct radeon_i2c_chan *i2c_bus; /* selected pm method */ enum radeon_pm_method pm_method; diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index eb888ee5f674..ca576191d058 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -94,7 +94,7 @@ static void radeon_register_accessor_init(struct radeon_device *rdev) rdev->mc_rreg = &rs600_mc_rreg; rdev->mc_wreg = &rs600_mc_wreg; } - if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_HEMLOCK)) { + if (rdev->family >= CHIP_R600) { rdev->pciep_rreg = &r600_pciep_rreg; rdev->pciep_wreg = &r600_pciep_wreg; } diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 99768d9d91da..f5d12fb103fa 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -2176,24 +2176,27 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r } } -static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev) +static void radeon_atombios_get_default_voltages(struct radeon_device *rdev, + u16 *vddc, u16 *vddci) { struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); u8 frev, crev; u16 data_offset; union firmware_info *firmware_info; - u16 vddc = 0; + + *vddc = 0; + *vddci = 0; if 
(atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) { firmware_info = (union firmware_info *)(mode_info->atom_context->bios + data_offset); - vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage); + *vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage); + if ((frev == 2) && (crev >= 2)) + *vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage); } - - return vddc; } static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev, @@ -2203,7 +2206,9 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde int j; u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings); u32 misc2 = le16_to_cpu(non_clock_info->usClassification); - u16 vddc = radeon_atombios_get_default_vddc(rdev); + u16 vddc, vddci; + + radeon_atombios_get_default_voltages(rdev, &vddc, &vddci); rdev->pm.power_state[state_index].misc = misc; rdev->pm.power_state[state_index].misc2 = misc2; @@ -2244,6 +2249,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage; + rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci; } else { /* patch the table values with the default slck/mclk from firmware info */ for (j = 0; j < mode_index; j++) { @@ -2286,6 +2292,8 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev, VOLTAGE_SW; rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = le16_to_cpu(clock_info->evergreen.usVDDC); + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci = + le16_to_cpu(clock_info->evergreen.usVDDCI); } else { sclk = le16_to_cpu(clock_info->r600.usEngineClockLow); sclk |= clock_info->r600.ucEngineClockHigh << 16; @@ -2577,25 +2585,25 @@ union set_voltage { struct _SET_VOLTAGE_PARAMETERS_V2 v2; }; -void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level) +void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type) { union set_voltage args; int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); - u8 frev, crev, volt_index = level; + u8 frev, crev, volt_index = voltage_level; if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) return; switch (crev) { case 1: - args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; + args.v1.ucVoltageType = voltage_type; args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE; args.v1.ucVoltageIndex = volt_index; break; case 2: - args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; + args.v2.ucVoltageType = voltage_type; args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE; - args.v2.usVoltageLevel = cpu_to_le16(level); + args.v2.usVoltageLevel = cpu_to_le16(voltage_level); break; default: DRM_ERROR("Unknown table version %d, %d\n", frev, crev); diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 9e59868d354e..bbcd1dd7bac0 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -79,7 +79,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev) scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; else scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; 
- seq = rdev->wb.wb[scratch_index/4]; + seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]); } else seq = RREG32(rdev->fence_drv.scratch_reg); if (seq != rdev->fence_drv.last_seq) { diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index f0534ef2f331..8a955bbdb608 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -285,4 +285,6 @@ void radeon_gart_fini(struct radeon_device *rdev) rdev->gart.pages = NULL; rdev->gart.pages_addr = NULL; rdev->gart.ttm_alloced = NULL; + + radeon_dummy_page_fini(rdev); } diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index ded2a45bc95c..ccbabf734a61 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -1062,7 +1062,7 @@ void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus, *val = in_buf[0]; DRM_DEBUG("val = 0x%02x\n", *val); } else { - DRM_ERROR("i2c 0x%02x 0x%02x read failed\n", + DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n", addr, *val); } } @@ -1084,7 +1084,7 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus, out_buf[1] = val; if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1) - DRM_ERROR("i2c 0x%02x 0x%02x write failed\n", + DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n", addr, val); } diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 5b54268ed6b2..2f46e0c8df53 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -269,7 +269,7 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = { .disable = radeon_legacy_encoder_disable, }; -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE +#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) #define MAX_RADEON_LEVEL 0xFF diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 08de669e025a..86eda1ea94df 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -23,6 +23,7 @@ #include "drmP.h" #include "radeon.h" #include "avivod.h" +#include "atom.h" #ifdef CONFIG_ACPI #include #endif @@ -535,7 +536,11 @@ void radeon_pm_resume(struct radeon_device *rdev) /* set up the default clocks if the MC ucode is loaded */ if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { if (rdev->pm.default_vddc) - radeon_atom_set_voltage(rdev, rdev->pm.default_vddc); + radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, + SET_VOLTAGE_TYPE_ASIC_VDDC); + if (rdev->pm.default_vddci) + radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, + SET_VOLTAGE_TYPE_ASIC_VDDCI); if (rdev->pm.default_sclk) radeon_set_engine_clock(rdev, rdev->pm.default_sclk); if (rdev->pm.default_mclk) @@ -548,6 +553,7 @@ void radeon_pm_resume(struct radeon_device *rdev) rdev->pm.current_sclk = rdev->pm.default_sclk; rdev->pm.current_mclk = rdev->pm.default_mclk; rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; + rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci; if (rdev->pm.pm_method == PM_METHOD_DYNPM && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; @@ -585,7 +591,8 @@ int radeon_pm_init(struct radeon_device *rdev) /* set up the default clocks if the MC ucode is loaded */ if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { if (rdev->pm.default_vddc) - radeon_atom_set_voltage(rdev, rdev->pm.default_vddc); + 
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, + SET_VOLTAGE_TYPE_ASIC_VDDC); if (rdev->pm.default_sclk) radeon_set_engine_clock(rdev, rdev->pm.default_sclk); if (rdev->pm.default_mclk) diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index bbc9cd823334..c6776e48fdde 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -248,7 +248,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev) void radeon_ring_free_size(struct radeon_device *rdev) { if (rdev->wb.enabled) - rdev->cp.rptr = rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]; + rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]); else { if (rdev->family >= CHIP_R600) rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 876cebc4b8ba..6e3b11e5abbe 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -114,7 +114,7 @@ void rs600_pm_misc(struct radeon_device *rdev) udelay(voltage->delay); } } else if (voltage->type == VOLTAGE_VDDC) - radeon_atom_set_voltage(rdev, voltage->vddc_id); + radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC); dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH); dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index b974ac7df8df..ef8a5babe9f7 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -106,7 +106,7 @@ void rv770_pm_misc(struct radeon_device *rdev) if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { if (voltage->voltage != rdev->pm.current_vddc) { - radeon_atom_set_voltage(rdev, voltage->voltage); + radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); rdev->pm.current_vddc = voltage->voltage; DRM_DEBUG("Setting: v: %d\n", voltage->voltage); } @@ -1255,9 +1255,6 @@ int rv770_init(struct radeon_device *rdev) { int r; - r = radeon_dummy_page_init(rdev); - if (r) - return r; /* This don't do much */ r = radeon_gem_init(rdev); if (r) @@ -1372,7 +1369,6 @@ void rv770_fini(struct radeon_device *rdev) radeon_atombios_fini(rdev); kfree(rdev->bios); rdev->bios = NULL; - radeon_dummy_page_fini(rdev); } static void rv770_pcie_gen2_enable(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 737a2a2e46a5..9d9d92945f8c 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -683,22 +683,14 @@ int ttm_get_pages(struct list_head *pages, int flags, gfp_flags |= GFP_HIGHUSER; for (r = 0; r < count; ++r) { - if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) { - void *addr; - addr = dma_alloc_coherent(NULL, PAGE_SIZE, - &dma_address[r], - gfp_flags); - if (addr == NULL) - return -ENOMEM; - p = virt_to_page(addr); - } else - p = alloc_page(gfp_flags); + p = alloc_page(gfp_flags); if (!p) { printk(KERN_ERR TTM_PFX "Unable to allocate page."); return -ENOMEM; } + list_add(&p->lru, pages); } return 0; @@ -746,24 +738,12 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags, unsigned long irq_flags; struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); struct page *p, *tmp; - unsigned r; if (pool == NULL) { /* No pool for this memory type so free the pages */ - r = page_count-1; list_for_each_entry_safe(p, tmp, pages, lru) { - if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) { - void *addr = page_address(p); - WARN_ON(!addr 
|| !dma_address[r]); - if (addr) - dma_free_coherent(NULL, PAGE_SIZE, - addr, - dma_address[r]); - dma_address[r] = 0; - } else - __free_page(p); - r--; + __free_page(p); } /* Make the pages list empty */ INIT_LIST_HEAD(pages); diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig index 70e60a4bb678..419917955bf6 100644 --- a/drivers/gpu/stub/Kconfig +++ b/drivers/gpu/stub/Kconfig @@ -5,6 +5,7 @@ config STUB_POULSBO # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled # but for select to work, need to select ACPI_VIDEO's dependencies, ick select BACKLIGHT_CLASS_DEVICE if ACPI + select VIDEO_OUTPUT_CONTROL if ACPI select INPUT if ACPI select ACPI_VIDEO if ACPI select THERMAL if ACPI diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c index 38319a69bd0a..d6d58684712b 100644 --- a/drivers/i2c/algos/i2c-algo-bit.c +++ b/drivers/i2c/algos/i2c-algo-bit.c @@ -232,9 +232,17 @@ static int i2c_inb(struct i2c_adapter *i2c_adap) * Sanity check for the adapter hardware - check the reaction of * the bus lines only if it seems to be idle. */ -static int test_bus(struct i2c_algo_bit_data *adap, char *name) +static int test_bus(struct i2c_adapter *i2c_adap) { - int scl, sda; + struct i2c_algo_bit_data *adap = i2c_adap->algo_data; + const char *name = i2c_adap->name; + int scl, sda, ret; + + if (adap->pre_xfer) { + ret = adap->pre_xfer(i2c_adap); + if (ret < 0) + return -ENODEV; + } if (adap->getscl == NULL) pr_info("%s: Testing SDA only, SCL is not readable\n", name); @@ -297,11 +305,19 @@ static int test_bus(struct i2c_algo_bit_data *adap, char *name) "while pulling SCL high!\n", name); goto bailout; } + + if (adap->post_xfer) + adap->post_xfer(i2c_adap); + pr_info("%s: Test OK\n", name); return 0; bailout: sdahi(adap); sclhi(adap); + + if (adap->post_xfer) + adap->post_xfer(i2c_adap); + return -ENODEV; } @@ -607,7 +623,7 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap, int ret; if (bit_test) { - ret = test_bus(bit_adap, adap->name); + ret = test_bus(adap); if (ret < 0) return -ENODEV; } diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 70c30e6bce0b..9a58994ff7ea 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -797,7 +797,8 @@ static int i2c_do_add_adapter(struct i2c_driver *driver, /* Let legacy drivers scan this bus for matching devices */ if (driver->attach_adapter) { - dev_warn(&adap->dev, "attach_adapter method is deprecated\n"); + dev_warn(&adap->dev, "%s: attach_adapter method is deprecated\n", + driver->driver.name); dev_warn(&adap->dev, "Please use another way to instantiate " "your i2c_client\n"); /* We ignore the return code; if it fails, too bad */ @@ -984,7 +985,8 @@ static int i2c_do_del_adapter(struct i2c_driver *driver, if (!driver->detach_adapter) return 0; - dev_warn(&adapter->dev, "detach_adapter method is deprecated\n"); + dev_warn(&adapter->dev, "%s: detach_adapter method is deprecated\n", + driver->driver.name); res = driver->detach_adapter(adapter); if (res) dev_err(&adapter->dev, "detach_adapter failed (%d) " diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index 7f42d3a454d2..88d8e4cb419a 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -39,13 +39,13 @@ struct evdev { }; struct evdev_client { - int head; - int tail; + unsigned int head; + unsigned int tail; spinlock_t buffer_lock; /* protects access to buffer, head and tail */ struct fasync_struct *fasync; struct evdev *evdev; struct list_head node; - int bufsize; + unsigned int bufsize; struct input_event 
buffer[]; }; @@ -55,16 +55,25 @@ static DEFINE_MUTEX(evdev_table_mutex); static void evdev_pass_event(struct evdev_client *client, struct input_event *event) { - /* - * Interrupts are disabled, just acquire the lock. - * Make sure we don't leave with the client buffer - * "empty" by having client->head == client->tail. - */ + /* Interrupts are disabled, just acquire the lock. */ spin_lock(&client->buffer_lock); - do { - client->buffer[client->head++] = *event; - client->head &= client->bufsize - 1; - } while (client->head == client->tail); + + client->buffer[client->head++] = *event; + client->head &= client->bufsize - 1; + + if (unlikely(client->head == client->tail)) { + /* + * This effectively "drops" all unconsumed events, leaving + * EV_SYN/SYN_DROPPED plus the newest event in the queue. + */ + client->tail = (client->head - 2) & (client->bufsize - 1); + + client->buffer[client->tail].time = event->time; + client->buffer[client->tail].type = EV_SYN; + client->buffer[client->tail].code = SYN_DROPPED; + client->buffer[client->tail].value = 0; + } + spin_unlock(&client->buffer_lock); if (event->type == EV_SYN) diff --git a/drivers/input/input.c b/drivers/input/input.c index d6e8bd8a851c..ebbceedc92f4 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -1746,6 +1746,42 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int } EXPORT_SYMBOL(input_set_capability); +static unsigned int input_estimate_events_per_packet(struct input_dev *dev) +{ + int mt_slots; + int i; + unsigned int events; + + if (dev->mtsize) { + mt_slots = dev->mtsize; + } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) { + mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum - + dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1; + mt_slots = clamp(mt_slots, 2, 32); + } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { + mt_slots = 2; + } else { + mt_slots = 0; + } + + events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */ + + for (i = 0; i < ABS_CNT; i++) { + if (test_bit(i, dev->absbit)) { + if (input_is_mt_axis(i)) + events += mt_slots; + else + events++; + } + } + + for (i = 0; i < REL_CNT; i++) + if (test_bit(i, dev->relbit)) + events++; + + return events; +} + #define INPUT_CLEANSE_BITMASK(dev, type, bits) \ do { \ if (!test_bit(EV_##type, dev->evbit)) \ @@ -1793,6 +1829,10 @@ int input_register_device(struct input_dev *dev) /* Make sure that bitmasks not mentioned in dev->evbit are clean. */ input_cleanse_bitmasks(dev); + if (!dev->hint_events_per_packet) + dev->hint_events_per_packet = + input_estimate_events_per_packet(dev); + /* * If delay and period are pre-set by the driver, then autorepeating * is handled by the driver itself and we don't do it in input.c.
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c index 09bef79d9da1..a26922cf0e84 100644 --- a/drivers/input/keyboard/twl4030_keypad.c +++ b/drivers/input/keyboard/twl4030_keypad.c @@ -332,18 +332,20 @@ static int __devinit twl4030_kp_program(struct twl4030_keypad *kp) static int __devinit twl4030_kp_probe(struct platform_device *pdev) { struct twl4030_keypad_data *pdata = pdev->dev.platform_data; - const struct matrix_keymap_data *keymap_data = pdata->keymap_data; + const struct matrix_keymap_data *keymap_data; struct twl4030_keypad *kp; struct input_dev *input; u8 reg; int error; - if (!pdata || !pdata->rows || !pdata->cols || + if (!pdata || !pdata->rows || !pdata->cols || !pdata->keymap_data || pdata->rows > TWL4030_MAX_ROWS || pdata->cols > TWL4030_MAX_COLS) { dev_err(&pdev->dev, "Invalid platform_data\n"); return -EINVAL; } + keymap_data = pdata->keymap_data; + kp = kzalloc(sizeof(*kp), GFP_KERNEL); input = input_allocate_device(); if (!kp || !input) { diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c index 7077f9bf5ead..62bae99424e6 100644 --- a/drivers/input/misc/xen-kbdfront.c +++ b/drivers/input/misc/xen-kbdfront.c @@ -303,7 +303,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct xenkbd_info *info = dev_get_drvdata(&dev->dev); - int val; + int ret, val; switch (backend_state) { case XenbusStateInitialising: @@ -316,6 +316,17 @@ static void xenkbd_backend_changed(struct xenbus_device *dev, case XenbusStateInitWait: InitWait: + ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend, + "feature-abs-pointer", "%d", &val); + if (ret < 0) + val = 0; + if (val) { + ret = xenbus_printf(XBT_NIL, info->xbdev->nodename, + "request-abs-pointer", "1"); + if (ret) + pr_warning("xenkbd: can't request abs-pointer"); + } + xenbus_switch_state(dev, XenbusStateConnected); break; diff --git a/drivers/input/touchscreen/h3600_ts_input.c b/drivers/input/touchscreen/h3600_ts_input.c index efa06882de00..45f93d0f5592 100644 --- a/drivers/input/touchscreen/h3600_ts_input.c +++ b/drivers/input/touchscreen/h3600_ts_input.c @@ -399,31 +399,34 @@ static int h3600ts_connect(struct serio *serio, struct serio_driver *drv) IRQF_SHARED | IRQF_DISABLED, "h3600_action", &ts->dev)) { printk(KERN_ERR "h3600ts.c: Could not allocate Action Button IRQ!\n"); err = -EBUSY; - goto fail2; + goto fail1; } if (request_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, npower_button_handler, IRQF_SHARED | IRQF_DISABLED, "h3600_suspend", &ts->dev)) { printk(KERN_ERR "h3600ts.c: Could not allocate Power Button IRQ!\n"); err = -EBUSY; - goto fail3; + goto fail2; } serio_set_drvdata(serio, ts); err = serio_open(serio, drv); if (err) - return err; + goto fail3; //h3600_flite_control(1, 25); /* default brightness */ - input_register_device(ts->dev); + err = input_register_device(ts->dev); + if (err) + goto fail4; return 0; -fail3: free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev); +fail4: serio_close(serio); +fail3: serio_set_drvdata(serio, NULL); + free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev); fail2: free_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, ts->dev); -fail1: serio_set_drvdata(serio, NULL); - input_free_device(input_dev); +fail1: input_free_device(input_dev); kfree(ts); return err; } diff --git a/drivers/leds/leds-regulator.c b/drivers/leds/leds-regulator.c index 3790816643be..8497f56f8e46 100644 --- a/drivers/leds/leds-regulator.c +++ b/drivers/leds/leds-regulator.c @@ -178,6 +178,10 @@ static int __devinit 
regulator_led_probe(struct platform_device *pdev) led->cdev.flags |= LED_CORE_SUSPENDRESUME; led->vcc = vcc; + /* correctly handle an already enabled regulator */ + if (regulator_is_enabled(led->vcc)) + led->enabled = 1; + mutex_init(&led->mutex); INIT_WORK(&led->work, led_work); diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 5ef136cdba91..e5d8904fc8f6 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -390,13 +390,6 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits) return md_raid5_congested(&rs->md, bits); } -static void raid_unplug(struct dm_target_callbacks *cb) -{ - struct raid_set *rs = container_of(cb, struct raid_set, callbacks); - - md_raid5_kick_device(rs->md.private); -} - /* * Construct a RAID4/5/6 mapping: * Args: @@ -487,7 +480,6 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) } rs->callbacks.congested_fn = raid_is_congested; - rs->callbacks.unplug_fn = raid_unplug; dm_table_add_target_callbacks(ti->table, &rs->callbacks); return 0; diff --git a/drivers/md/md.c b/drivers/md/md.c index b12b3776c0c0..6e853c61d87e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -447,48 +447,59 @@ EXPORT_SYMBOL(md_flush_request); /* Support for plugging. * This mirrors the plugging support in request_queue, but does not - * require having a whole queue + * require having a whole queue or request structures. + * We allocate an md_plug_cb for each md device and each thread it gets + * plugged on. This links to the mddev, where we keep a count of the + * number of outstanding plugs (plug_cnt) so other code can see if a + * plug is active. */ -static void plugger_work(struct work_struct *work) -{ - struct plug_handle *plug = - container_of(work, struct plug_handle, unplug_work); - plug->unplug_fn(plug); -} -static void plugger_timeout(unsigned long data) -{ - struct plug_handle *plug = (void *)data; - kblockd_schedule_work(NULL, &plug->unplug_work); -} -void plugger_init(struct plug_handle *plug, - void (*unplug_fn)(struct plug_handle *)) -{ - plug->unplug_flag = 0; - plug->unplug_fn = unplug_fn; - init_timer(&plug->unplug_timer); - plug->unplug_timer.function = plugger_timeout; - plug->unplug_timer.data = (unsigned long)plug; - INIT_WORK(&plug->unplug_work, plugger_work); -} -EXPORT_SYMBOL_GPL(plugger_init); +struct md_plug_cb { + struct blk_plug_cb cb; + mddev_t *mddev; +}; -void plugger_set_plug(struct plug_handle *plug) +static void plugger_unplug(struct blk_plug_cb *cb) { - if (!test_and_set_bit(PLUGGED_FLAG, &plug->unplug_flag)) - mod_timer(&plug->unplug_timer, jiffies + msecs_to_jiffies(3)+1); + struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb); + if (atomic_dec_and_test(&mdcb->mddev->plug_cnt)) + md_wakeup_thread(mdcb->mddev->thread); + kfree(mdcb); } -EXPORT_SYMBOL_GPL(plugger_set_plug); -int plugger_remove_plug(struct plug_handle *plug) +/* Check that an unplug wakeup will come shortly.
+ * If not, wakeup the md thread immediately + */ +int mddev_check_plugged(mddev_t *mddev) { - if (test_and_clear_bit(PLUGGED_FLAG, &plug->unplug_flag)) { - del_timer(&plug->unplug_timer); - return 1; - } else + struct blk_plug *plug = current->plug; + struct md_plug_cb *mdcb; + + if (!plug) return 0; -} -EXPORT_SYMBOL_GPL(plugger_remove_plug); + list_for_each_entry(mdcb, &plug->cb_list, cb.list) { + if (mdcb->cb.callback == plugger_unplug && + mdcb->mddev == mddev) { + /* Already on the list, move to top */ + if (mdcb != list_first_entry(&plug->cb_list, + struct md_plug_cb, + cb.list)) + list_move(&mdcb->cb.list, &plug->cb_list); + return 1; + } + } + /* Not currently on the callback list */ + mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC); + if (!mdcb) + return 0; + + mdcb->mddev = mddev; + mdcb->cb.callback = plugger_unplug; + atomic_inc(&mddev->plug_cnt); + list_add(&mdcb->cb.list, &plug->cb_list); + return 1; +} +EXPORT_SYMBOL_GPL(mddev_check_plugged); static inline mddev_t *mddev_get(mddev_t *mddev) { @@ -538,6 +549,7 @@ void mddev_init(mddev_t *mddev) atomic_set(&mddev->active, 1); atomic_set(&mddev->openers, 0); atomic_set(&mddev->active_io, 0); + atomic_set(&mddev->plug_cnt, 0); spin_lock_init(&mddev->write_lock); atomic_set(&mddev->flush_pending, 0); init_waitqueue_head(&mddev->sb_wait); @@ -4723,7 +4735,6 @@ static void md_clean(mddev_t *mddev) mddev->bitmap_info.chunksize = 0; mddev->bitmap_info.daemon_sleep = 0; mddev->bitmap_info.max_write_behind = 0; - mddev->plug = NULL; } static void __md_stop_writes(mddev_t *mddev) @@ -6688,12 +6699,6 @@ int md_allow_write(mddev_t *mddev) } EXPORT_SYMBOL_GPL(md_allow_write); -void md_unplug(mddev_t *mddev) -{ - if (mddev->plug) - mddev->plug->unplug_fn(mddev->plug); -} - #define SYNC_MARKS 10 #define SYNC_MARK_STEP (3*HZ) void md_do_sync(mddev_t *mddev) diff --git a/drivers/md/md.h b/drivers/md/md.h index 52b407369e13..0b1fd3f1d85b 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -29,26 +29,6 @@ typedef struct mddev_s mddev_t; typedef struct mdk_rdev_s mdk_rdev_t; -/* generic plugging support - like that provided with request_queue, - * but does not require a request_queue - */ -struct plug_handle { - void (*unplug_fn)(struct plug_handle *); - struct timer_list unplug_timer; - struct work_struct unplug_work; - unsigned long unplug_flag; -}; -#define PLUGGED_FLAG 1 -void plugger_init(struct plug_handle *plug, - void (*unplug_fn)(struct plug_handle *)); -void plugger_set_plug(struct plug_handle *plug); -int plugger_remove_plug(struct plug_handle *plug); -static inline void plugger_flush(struct plug_handle *plug) -{ - del_timer_sync(&plug->unplug_timer); - cancel_work_sync(&plug->unplug_work); -} - /* * MD's 'extended' device */ @@ -199,6 +179,9 @@ struct mddev_s int delta_disks, new_level, new_layout; int new_chunk_sectors; + atomic_t plug_cnt; /* If device is expecting + * more bios soon. 
+ */ struct mdk_thread_s *thread; /* management thread */ struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */ sector_t curr_resync; /* last block scheduled */ @@ -336,7 +319,6 @@ struct mddev_s struct list_head all_mddevs; struct attribute_group *to_remove; - struct plug_handle *plug; /* if used by personality */ struct bio_set *bio_set; @@ -516,7 +498,6 @@ extern int md_integrity_register(mddev_t *mddev); extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev); extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale); extern void restore_bitmap_write_access(struct file *file); -extern void md_unplug(mddev_t *mddev); extern void mddev_init(mddev_t *mddev); extern int md_run(mddev_t *mddev); @@ -530,4 +511,5 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, mddev_t *mddev); extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, mddev_t *mddev); +extern int mddev_check_plugged(mddev_t *mddev); #endif /* _MD_MD_H */ diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index c2a21ae56d97..2b7a7ff401dc 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -565,12 +565,6 @@ static void flush_pending_writes(conf_t *conf) spin_unlock_irq(&conf->device_lock); } -static void md_kick_device(mddev_t *mddev) -{ - blk_flush_plug(current); - md_wakeup_thread(mddev->thread); -} - /* Barriers.... * Sometimes we need to suspend IO while we do something else, * either some resync/recovery, or reconfigure the array. @@ -600,7 +594,7 @@ static void raise_barrier(conf_t *conf) /* Wait until no block IO is waiting */ wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, - conf->resync_lock, md_kick_device(conf->mddev)); + conf->resync_lock, ); /* block any new IO from starting */ conf->barrier++; @@ -608,7 +602,7 @@ static void raise_barrier(conf_t *conf) /* Now wait for all pending IO to complete */ wait_event_lock_irq(conf->wait_barrier, !conf->nr_pending && conf->barrier < RESYNC_DEPTH, - conf->resync_lock, md_kick_device(conf->mddev)); + conf->resync_lock, ); spin_unlock_irq(&conf->resync_lock); } @@ -630,7 +624,7 @@ static void wait_barrier(conf_t *conf) conf->nr_waiting++; wait_event_lock_irq(conf->wait_barrier, !conf->barrier, conf->resync_lock, - md_kick_device(conf->mddev)); + ); conf->nr_waiting--; } conf->nr_pending++; @@ -666,8 +660,7 @@ static void freeze_array(conf_t *conf) wait_event_lock_irq(conf->wait_barrier, conf->nr_pending == conf->nr_queued+1, conf->resync_lock, - ({ flush_pending_writes(conf); - md_kick_device(conf->mddev); })); + flush_pending_writes(conf)); spin_unlock_irq(&conf->resync_lock); } static void unfreeze_array(conf_t *conf) @@ -729,6 +722,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); mdk_rdev_t *blocked_rdev; + int plugged; /* * Register the new request and wait if the reconstruction @@ -820,6 +814,8 @@ static int make_request(mddev_t *mddev, struct bio * bio) * inc refcount on their rdev. 
Record them by setting * bios[x] to bio */ + plugged = mddev_check_plugged(mddev); + disks = conf->raid_disks; retry_write: blocked_rdev = NULL; @@ -925,7 +921,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) /* In case raid1d snuck in to freeze_array */ wake_up(&conf->wait_barrier); - if (do_sync || !bitmap) + if (do_sync || !bitmap || !plugged) md_wakeup_thread(mddev->thread); return 0; @@ -1516,13 +1512,16 @@ static void raid1d(mddev_t *mddev) conf_t *conf = mddev->private; struct list_head *head = &conf->retry_list; mdk_rdev_t *rdev; + struct blk_plug plug; md_check_recovery(mddev); - + + blk_start_plug(&plug); for (;;) { char b[BDEVNAME_SIZE]; - flush_pending_writes(conf); + if (atomic_read(&mddev->plug_cnt) == 0) + flush_pending_writes(conf); spin_lock_irqsave(&conf->device_lock, flags); if (list_empty(head)) { @@ -1593,6 +1592,7 @@ static void raid1d(mddev_t *mddev) } cond_resched(); } + blk_finish_plug(&plug); } @@ -2039,7 +2039,6 @@ static int stop(mddev_t *mddev) md_unregister_thread(mddev->thread); mddev->thread = NULL; - blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ if (conf->r1bio_pool) mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 2da83d566592..8e9462626ec5 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -634,12 +634,6 @@ static void flush_pending_writes(conf_t *conf) spin_unlock_irq(&conf->device_lock); } -static void md_kick_device(mddev_t *mddev) -{ - blk_flush_plug(current); - md_wakeup_thread(mddev->thread); -} - /* Barriers.... * Sometimes we need to suspend IO while we do something else, * either some resync/recovery, or reconfigure the array. @@ -669,15 +663,15 @@ static void raise_barrier(conf_t *conf, int force) /* Wait until no block IO is waiting (unless 'force') */ wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, - conf->resync_lock, md_kick_device(conf->mddev)); + conf->resync_lock, ); /* block any new IO from starting */ conf->barrier++; - /* No wait for all pending IO to complete */ + /* Now wait for all pending IO to complete */ wait_event_lock_irq(conf->wait_barrier, !conf->nr_pending && conf->barrier < RESYNC_DEPTH, - conf->resync_lock, md_kick_device(conf->mddev)); + conf->resync_lock, ); spin_unlock_irq(&conf->resync_lock); } @@ -698,7 +692,7 @@ static void wait_barrier(conf_t *conf) conf->nr_waiting++; wait_event_lock_irq(conf->wait_barrier, !conf->barrier, conf->resync_lock, - md_kick_device(conf->mddev)); + ); conf->nr_waiting--; } conf->nr_pending++; @@ -734,8 +728,8 @@ static void freeze_array(conf_t *conf) wait_event_lock_irq(conf->wait_barrier, conf->nr_pending == conf->nr_queued+1, conf->resync_lock, - ({ flush_pending_writes(conf); - md_kick_device(conf->mddev); })); + flush_pending_writes(conf)); + spin_unlock_irq(&conf->resync_lock); } @@ -762,6 +756,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) const unsigned long do_fua = (bio->bi_rw & REQ_FUA); unsigned long flags; mdk_rdev_t *blocked_rdev; + int plugged; if (unlikely(bio->bi_rw & REQ_FLUSH)) { md_flush_request(mddev, bio); @@ -870,6 +865,8 @@ static int make_request(mddev_t *mddev, struct bio * bio) * inc refcount on their rdev. 
Record them by setting * bios[x] to bio */ + plugged = mddev_check_plugged(mddev); + raid10_find_phys(conf, r10_bio); retry_write: blocked_rdev = NULL; @@ -946,9 +943,8 @@ static int make_request(mddev_t *mddev, struct bio * bio) /* In case raid10d snuck in to freeze_array */ wake_up(&conf->wait_barrier); - if (do_sync || !mddev->bitmap) + if (do_sync || !mddev->bitmap || !plugged) md_wakeup_thread(mddev->thread); - return 0; } @@ -1640,9 +1636,11 @@ static void raid10d(mddev_t *mddev) conf_t *conf = mddev->private; struct list_head *head = &conf->retry_list; mdk_rdev_t *rdev; + struct blk_plug plug; md_check_recovery(mddev); + blk_start_plug(&plug); for (;;) { char b[BDEVNAME_SIZE]; @@ -1716,6 +1714,7 @@ static void raid10d(mddev_t *mddev) } cond_resched(); } + blk_finish_plug(&plug); } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index e867ee42b152..f301e6ae220c 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -27,12 +27,12 @@ * * We group bitmap updates into batches. Each batch has a number. * We may write out several batches at once, but that isn't very important. - * conf->bm_write is the number of the last batch successfully written. - * conf->bm_flush is the number of the last batch that was closed to + * conf->seq_write is the number of the last batch successfully written. + * conf->seq_flush is the number of the last batch that was closed to * new additions. * When we discover that we will need to write to any block in a stripe * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq - * the number of the batch it will be in. This is bm_flush+1. + * the number of the batch it will be in. This is seq_flush+1. * When we are ready to do a write, if that batch hasn't been written yet, * we plug the array and queue the stripe for later. 
* When an unplug happens, we increment bm_flush, thus closing the current @@ -199,14 +199,12 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) BUG_ON(!list_empty(&sh->lru)); BUG_ON(atomic_read(&conf->active_stripes)==0); if (test_bit(STRIPE_HANDLE, &sh->state)) { - if (test_bit(STRIPE_DELAYED, &sh->state)) { + if (test_bit(STRIPE_DELAYED, &sh->state)) list_add_tail(&sh->lru, &conf->delayed_list); - plugger_set_plug(&conf->plug); - } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && - sh->bm_seq - conf->seq_write > 0) { + else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && + sh->bm_seq - conf->seq_write > 0) list_add_tail(&sh->lru, &conf->bitmap_list); - plugger_set_plug(&conf->plug); - } else { + else { clear_bit(STRIPE_BIT_DELAY, &sh->state); list_add_tail(&sh->lru, &conf->handle_list); } @@ -461,7 +459,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector, < (conf->max_nr_stripes *3/4) || !conf->inactive_blocked), conf->device_lock, - md_raid5_kick_device(conf)); + ); conf->inactive_blocked = 0; } else init_stripe(sh, sector, previous); @@ -1470,7 +1468,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize) wait_event_lock_irq(conf->wait_for_stripe, !list_empty(&conf->inactive_list), conf->device_lock, - blk_flush_plug(current)); + ); osh = get_free_stripe(conf); spin_unlock_irq(&conf->device_lock); atomic_set(&nsh->count, 1); @@ -3623,8 +3621,7 @@ static void raid5_activate_delayed(raid5_conf_t *conf) atomic_inc(&conf->preread_active_stripes); list_add_tail(&sh->lru, &conf->hold_list); } - } else - plugger_set_plug(&conf->plug); + } } static void activate_bit_delay(raid5_conf_t *conf) @@ -3641,21 +3638,6 @@ static void activate_bit_delay(raid5_conf_t *conf) } } -void md_raid5_kick_device(raid5_conf_t *conf) -{ - blk_flush_plug(current); - raid5_activate_delayed(conf); - md_wakeup_thread(conf->mddev->thread); -} -EXPORT_SYMBOL_GPL(md_raid5_kick_device); - -static void raid5_unplug(struct plug_handle *plug) -{ - raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug); - - md_raid5_kick_device(conf); -} - int md_raid5_congested(mddev_t *mddev, int bits) { raid5_conf_t *conf = mddev->private; @@ -3945,6 +3927,7 @@ static int make_request(mddev_t *mddev, struct bio * bi) struct stripe_head *sh; const int rw = bio_data_dir(bi); int remaining; + int plugged; if (unlikely(bi->bi_rw & REQ_FLUSH)) { md_flush_request(mddev, bi); @@ -3963,6 +3946,7 @@ static int make_request(mddev_t *mddev, struct bio * bi) bi->bi_next = NULL; bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ + plugged = mddev_check_plugged(mddev); for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { DEFINE_WAIT(w); int disks, data_disks; @@ -4057,7 +4041,7 @@ static int make_request(mddev_t *mddev, struct bio * bi) * add failed due to overlap. 
Flush everything * and wait a while */ - md_raid5_kick_device(conf); + md_wakeup_thread(mddev->thread); release_stripe(sh); schedule(); goto retry; @@ -4077,6 +4061,9 @@ static int make_request(mddev_t *mddev, struct bio * bi) } } + if (!plugged) + md_wakeup_thread(mddev->thread); + spin_lock_irq(&conf->device_lock); remaining = raid5_dec_bi_phys_segments(bi); spin_unlock_irq(&conf->device_lock); @@ -4478,24 +4465,30 @@ static void raid5d(mddev_t *mddev) struct stripe_head *sh; raid5_conf_t *conf = mddev->private; int handled; + struct blk_plug plug; pr_debug("+++ raid5d active\n"); md_check_recovery(mddev); + blk_start_plug(&plug); handled = 0; spin_lock_irq(&conf->device_lock); while (1) { struct bio *bio; - if (conf->seq_flush != conf->seq_write) { - int seq = conf->seq_flush; + if (atomic_read(&mddev->plug_cnt) == 0 && + !list_empty(&conf->bitmap_list)) { + /* Now is a good time to flush some bitmap updates */ + conf->seq_flush++; spin_unlock_irq(&conf->device_lock); bitmap_unplug(mddev->bitmap); spin_lock_irq(&conf->device_lock); - conf->seq_write = seq; + conf->seq_write = conf->seq_flush; activate_bit_delay(conf); } + if (atomic_read(&mddev->plug_cnt) == 0) + raid5_activate_delayed(conf); while ((bio = remove_bio_from_retry(conf))) { int ok; @@ -4525,6 +4518,7 @@ static void raid5d(mddev_t *mddev) spin_unlock_irq(&conf->device_lock); async_tx_issue_pending_all(); + blk_finish_plug(&plug); pr_debug("--- raid5d inactive\n"); } @@ -5141,8 +5135,6 @@ static int run(mddev_t *mddev) mdname(mddev)); md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); - plugger_init(&conf->plug, raid5_unplug); - mddev->plug = &conf->plug; if (mddev->queue) { int chunk_size; /* read-ahead size must cover two whole stripes, which @@ -5192,7 +5184,6 @@ static int stop(mddev_t *mddev) mddev->thread = NULL; if (mddev->queue) mddev->queue->backing_dev_info.congested_fn = NULL; - plugger_flush(&conf->plug); /* the unplug fn references 'conf'*/ free_conf(conf); mddev->private = NULL; mddev->to_remove = &raid5_attrs_group; diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 8d563a4f022a..3ca77a2613ba 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -400,8 +400,6 @@ struct raid5_private_data { * Cleared when a sync completes. 
*/ - struct plug_handle plug; - /* per cpu variables */ struct raid5_percpu { struct page *spare_page; /* Used when checking P/Q in raid6 */ diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index d01574d98870..f4c8c844b913 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -55,6 +55,19 @@ int mfd_cell_disable(struct platform_device *pdev) } EXPORT_SYMBOL(mfd_cell_disable); +static int mfd_platform_add_cell(struct platform_device *pdev, + const struct mfd_cell *cell) +{ + if (!cell) + return 0; + + pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL); + if (!pdev->mfd_cell) + return -ENOMEM; + + return 0; +} + static int mfd_add_device(struct device *parent, int id, const struct mfd_cell *cell, struct resource *mem_base, @@ -75,7 +88,7 @@ static int mfd_add_device(struct device *parent, int id, pdev->dev.parent = parent; - ret = platform_device_add_data(pdev, cell, sizeof(*cell)); + ret = mfd_platform_add_cell(pdev, cell); if (ret) goto fail_res; @@ -123,7 +136,6 @@ static int mfd_add_device(struct device *parent, int id, return 0; -/* platform_device_del(pdev); */ fail_res: kfree(res); fail_device: diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c index 20e4e9395b61..ecafa4ba238b 100644 --- a/drivers/misc/sgi-gru/grufile.c +++ b/drivers/misc/sgi-gru/grufile.c @@ -348,15 +348,15 @@ static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep) static int gru_irq_count[GRU_CHIPLETS_PER_BLADE]; -static void gru_noop(unsigned int irq) +static void gru_noop(struct irq_data *d) { } static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = { [0 ... GRU_CHIPLETS_PER_BLADE - 1] { - .mask = gru_noop, - .unmask = gru_noop, - .ack = gru_noop + .irq_mask = gru_noop, + .irq_unmask = gru_noop, + .irq_ack = gru_noop } }; diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index d86ea8b01137..135df164a4c1 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -781,7 +781,7 @@ static int pci_pm_resume(struct device *dev) #endif /* !CONFIG_SUSPEND */ -#ifdef CONFIG_HIBERNATION +#ifdef CONFIG_HIBERNATE_CALLBACKS static int pci_pm_freeze(struct device *dev) { @@ -970,7 +970,7 @@ static int pci_pm_restore(struct device *dev) return error; } -#else /* !CONFIG_HIBERNATION */ +#else /* !CONFIG_HIBERNATE_CALLBACKS */ #define pci_pm_freeze NULL #define pci_pm_freeze_noirq NULL @@ -981,7 +981,7 @@ static int pci_pm_restore(struct device *dev) #define pci_pm_restore NULL #define pci_pm_restore_noirq NULL -#endif /* !CONFIG_HIBERNATION */ +#endif /* !CONFIG_HIBERNATE_CALLBACKS */ #ifdef CONFIG_PM_RUNTIME diff --git a/drivers/pcmcia/pxa2xx_balloon3.c b/drivers/pcmcia/pxa2xx_balloon3.c index 453c54c97612..4c3e94c0ae85 100644 --- a/drivers/pcmcia/pxa2xx_balloon3.c +++ b/drivers/pcmcia/pxa2xx_balloon3.c @@ -25,6 +25,8 @@ #include +#include + #include "soc_common.h" /* @@ -127,6 +129,9 @@ static int __init balloon3_pcmcia_init(void) { int ret; + if (!machine_is_balloon3()) + return -ENODEV; + balloon3_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); if (!balloon3_pcmcia_device) return -ENOMEM; diff --git a/drivers/pcmcia/pxa2xx_trizeps4.c b/drivers/pcmcia/pxa2xx_trizeps4.c index b7e596620db1..b829e655457b 100644 --- a/drivers/pcmcia/pxa2xx_trizeps4.c +++ b/drivers/pcmcia/pxa2xx_trizeps4.c @@ -69,15 +69,15 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt) for (i = 0; i < ARRAY_SIZE(irqs); i++) { if (irqs[i].sock != skt->nr) continue; - if (gpio_request(IRQ_TO_GPIO(irqs[i].irq), irqs[i].str) < 
0) { + if (gpio_request(irq_to_gpio(irqs[i].irq), irqs[i].str) < 0) { pr_err("%s: sock %d unable to request gpio %d\n", - __func__, skt->nr, IRQ_TO_GPIO(irqs[i].irq)); + __func__, skt->nr, irq_to_gpio(irqs[i].irq)); ret = -EBUSY; goto error; } - if (gpio_direction_input(IRQ_TO_GPIO(irqs[i].irq)) < 0) { + if (gpio_direction_input(irq_to_gpio(irqs[i].irq)) < 0) { pr_err("%s: sock %d unable to set input gpio %d\n", - __func__, skt->nr, IRQ_TO_GPIO(irqs[i].irq)); + __func__, skt->nr, irq_to_gpio(irqs[i].irq)); ret = -EINVAL; goto error; } @@ -86,7 +86,7 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt) error: for (; i >= 0; i--) { - gpio_free(IRQ_TO_GPIO(irqs[i].irq)); + gpio_free(irq_to_gpio(irqs[i].irq)); } return (ret); } @@ -97,7 +97,7 @@ static void trizeps_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) /* free allocated gpio's */ gpio_free(GPIO_PRDY); for (i = 0; i < ARRAY_SIZE(irqs); i++) - gpio_free(IRQ_TO_GPIO(irqs[i].irq)); + gpio_free(irq_to_gpio(irqs[i].irq)); } static unsigned long trizeps_pcmcia_status[2]; @@ -226,6 +226,9 @@ static int __init trizeps_pcmcia_init(void) { int ret; + if (!machine_is_trizeps4() && !machine_is_trizeps4wl()) + return -ENODEV; + trizeps_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); if (!trizeps_pcmcia_device) return -ENOMEM; diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 2ee442c2a5db..0485e394712a 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -187,7 +187,8 @@ config MSI_LAPTOP depends on ACPI depends on BACKLIGHT_CLASS_DEVICE depends on RFKILL - depends on SERIO_I8042 + depends on INPUT && SERIO_I8042 + select INPUT_SPARSEKMAP ---help--- This is a driver for laptops built by MSI (MICRO-STAR INTERNATIONAL): diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 5ea6c3477d17..ac4e7f83ce6c 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -89,7 +89,7 @@ MODULE_LICENSE("GPL"); #define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026" MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB"); -MODULE_ALIAS("wmi:6AF4F258-B401-42Fd-BE91-3D4AC2D7C0D3"); +MODULE_ALIAS("wmi:6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"); MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026"); enum acer_wmi_event_ids { diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index efc776cb0c66..832a3fd7c1c8 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -201,8 +201,8 @@ static int asus_wmi_input_init(struct asus_wmi *asus) if (!asus->inputdev) return -ENOMEM; - asus->inputdev->name = asus->driver->input_phys; - asus->inputdev->phys = asus->driver->input_name; + asus->inputdev->name = asus->driver->input_name; + asus->inputdev->phys = asus->driver->input_phys; asus->inputdev->id.bustype = BUS_HOST; asus->inputdev->dev.parent = &asus->platform_device->dev; diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c index 0ddc434fb93b..649dcadd8ea3 100644 --- a/drivers/platform/x86/eeepc-wmi.c +++ b/drivers/platform/x86/eeepc-wmi.c @@ -67,9 +67,11 @@ static const struct key_entry eeepc_wmi_keymap[] = { { KE_KEY, 0x82, { KEY_CAMERA } }, { KE_KEY, 0x83, { KEY_CAMERA_ZOOMIN } }, { KE_KEY, 0x88, { KEY_WLAN } }, + { KE_KEY, 0xbd, { KEY_CAMERA } }, { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } }, { KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */ { KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */ + { KE_KEY, 0xe8, { KEY_SCREENLOCK } }, { 
KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } }, { KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } }, { KE_KEY, 0xec, { KEY_CAMERA_UP } }, diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c index d653104b59cb..464bb3fc4d88 100644 --- a/drivers/platform/x86/intel_pmic_gpio.c +++ b/drivers/platform/x86/intel_pmic_gpio.c @@ -74,6 +74,19 @@ struct pmic_gpio { u32 trigger_type; }; +static void pmic_program_irqtype(int gpio, int type) +{ + if (type & IRQ_TYPE_EDGE_RISING) + intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20); + else + intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20); + + if (type & IRQ_TYPE_EDGE_FALLING) + intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10); + else + intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10); +}; + static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) { if (offset > 8) { @@ -166,16 +179,38 @@ static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset) return pg->irq_base + offset; } +static void pmic_bus_lock(struct irq_data *data) +{ + struct pmic_gpio *pg = irq_data_get_irq_chip_data(data); + + mutex_lock(&pg->buslock); +} + +static void pmic_bus_sync_unlock(struct irq_data *data) +{ + struct pmic_gpio *pg = irq_data_get_irq_chip_data(data); + + if (pg->update_type) { + unsigned int gpio = pg->update_type & ~GPIO_UPDATE_TYPE; + + pmic_program_irqtype(gpio, pg->trigger_type); + pg->update_type = 0; + } + mutex_unlock(&pg->buslock); +} + /* the gpiointr register is read-clear, so just do nothing. */ static void pmic_irq_unmask(struct irq_data *data) { } static void pmic_irq_mask(struct irq_data *data) { } static struct irq_chip pmic_irqchip = { - .name = "PMIC-GPIO", - .irq_mask = pmic_irq_mask, - .irq_unmask = pmic_irq_unmask, - .irq_set_type = pmic_irq_type, + .name = "PMIC-GPIO", + .irq_mask = pmic_irq_mask, + .irq_unmask = pmic_irq_unmask, + .irq_set_type = pmic_irq_type, + .irq_bus_lock = pmic_bus_lock, + .irq_bus_sync_unlock = pmic_bus_sync_unlock, }; static irqreturn_t pmic_irq_handler(int irq, void *data) diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index de434c6dc2d6..d347116d150e 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -570,6 +570,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = { }, .callback = dmi_check_cb, }, + { + .ident = "R410 Plus", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, + "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "R410P"), + DMI_MATCH(DMI_BOARD_NAME, "R460"), + }, + .callback = dmi_check_cb, + }, { .ident = "R518", .matches = { @@ -591,12 +601,12 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = { .callback = dmi_check_cb, }, { - .ident = "N150/N210/N220", + .ident = "N150/N210/N220/N230", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"), - DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"), + DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"), + DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"), }, .callback = dmi_check_cb, }, @@ -771,6 +781,7 @@ static int __init samsung_init(void) /* create a backlight device to talk to this one */ memset(&props, 0, sizeof(struct backlight_properties)); + props.type = BACKLIGHT_PLATFORM; props.max_brightness = sabi_config->max_brightness; backlight_device = backlight_device_register("samsung", &sdev->dev, NULL, &backlight_ops, diff --git a/drivers/platform/x86/sony-laptop.c 
b/drivers/platform/x86/sony-laptop.c index e642f5f29504..8f709aec4da0 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -138,6 +138,8 @@ MODULE_PARM_DESC(kbd_backlight_timeout, "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout " "(default: 0)"); +static void sony_nc_kbd_backlight_resume(void); + enum sony_nc_rfkill { SONY_WIFI, SONY_BLUETOOTH, @@ -771,11 +773,6 @@ static int sony_nc_handles_setup(struct platform_device *pd) if (!handles) return -ENOMEM; - sysfs_attr_init(&handles->devattr.attr); - handles->devattr.attr.name = "handles"; - handles->devattr.attr.mode = S_IRUGO; - handles->devattr.show = sony_nc_handles_show; - for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { if (!acpi_callsetfunc(sony_nc_acpi_handle, "SN00", i + 0x20, &result)) { @@ -785,11 +782,18 @@ static int sony_nc_handles_setup(struct platform_device *pd) } } - /* allow reading capabilities via sysfs */ - if (device_create_file(&pd->dev, &handles->devattr)) { - kfree(handles); - handles = NULL; - return -1; + if (debug) { + sysfs_attr_init(&handles->devattr.attr); + handles->devattr.attr.name = "handles"; + handles->devattr.attr.mode = S_IRUGO; + handles->devattr.show = sony_nc_handles_show; + + /* allow reading capabilities via sysfs */ + if (device_create_file(&pd->dev, &handles->devattr)) { + kfree(handles); + handles = NULL; + return -1; + } } return 0; @@ -798,7 +802,8 @@ static int sony_nc_handles_setup(struct platform_device *pd) static int sony_nc_handles_cleanup(struct platform_device *pd) { if (handles) { - device_remove_file(&pd->dev, &handles->devattr); + if (debug) + device_remove_file(&pd->dev, &handles->devattr); kfree(handles); handles = NULL; } @@ -808,6 +813,11 @@ static int sony_nc_handles_cleanup(struct platform_device *pd) static int sony_find_snc_handle(int handle) { int i; + + /* not initialized yet, return early */ + if (!handles) + return -1; + for (i = 0; i < 0x10; i++) { if (handles->cap[i] == handle) { dprintk("found handle 0x%.4x (offset: 0x%.2x)\n", @@ -1168,6 +1178,9 @@ static int sony_nc_resume(struct acpi_device *device) /* re-read rfkill state */ sony_nc_rfkill_update(); + /* restore kbd backlight states */ + sony_nc_kbd_backlight_resume(); + return 0; } @@ -1355,6 +1368,7 @@ out_no_enum: #define KBDBL_HANDLER 0x137 #define KBDBL_PRESENT 0xB00 #define SET_MODE 0xC00 +#define SET_STATE 0xD00 #define SET_TIMEOUT 0xE00 struct kbd_backlight { @@ -1377,6 +1391,10 @@ static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value) (value << 0x10) | SET_MODE, &result)) return -EIO; + /* Try to turn the light on/off immediately */ + sony_call_snc_handle(KBDBL_HANDLER, (value << 0x10) | SET_STATE, + &result); + kbdbl_handle->mode = value; return 0; @@ -1458,7 +1476,7 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd) { int result; - if (sony_call_snc_handle(0x137, KBDBL_PRESENT, &result)) + if (sony_call_snc_handle(KBDBL_HANDLER, KBDBL_PRESENT, &result)) return 0; if (!(result & 0x02)) return 0; @@ -1501,13 +1519,36 @@ outkzalloc: static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd) { if (kbdbl_handle) { + int result; + device_remove_file(&pd->dev, &kbdbl_handle->mode_attr); device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr); + + /* restore the default hw behaviour */ + sony_call_snc_handle(KBDBL_HANDLER, 0x1000 | SET_MODE, &result); + sony_call_snc_handle(KBDBL_HANDLER, SET_TIMEOUT, &result); + kfree(kbdbl_handle); } return 0; } +static void sony_nc_kbd_backlight_resume(void) +{ + int ignore = 0; + + if 
(!kbdbl_handle) + return; + + if (kbdbl_handle->mode == 0) + sony_call_snc_handle(KBDBL_HANDLER, SET_MODE, &ignore); + + if (kbdbl_handle->timeout != 0) + sony_call_snc_handle(KBDBL_HANDLER, + (kbdbl_handle->timeout << 0x10) | SET_TIMEOUT, + &ignore); +} + static void sony_nc_backlight_setup(void) { acpi_handle unused; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index a08561f5349e..efb3b6b9bcdb 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -8618,8 +8618,7 @@ static bool __pure __init tpacpi_is_valid_fw_id(const char* const s, tpacpi_is_fw_digit(s[1]) && s[2] == t && s[3] == 'T' && tpacpi_is_fw_digit(s[4]) && - tpacpi_is_fw_digit(s[5]) && - s[6] == 'W' && s[7] == 'W'; + tpacpi_is_fw_digit(s[5]); } /* returns 0 - probe ok, or < 0 - probe error. diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index c29719cacbca..86c9a091a2ff 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c @@ -1171,16 +1171,17 @@ static int rio_hdid_setup(char *str) __setup("riohdid=", rio_hdid_setup); -void rio_register_mport(struct rio_mport *port) +int rio_register_mport(struct rio_mport *port) { if (next_portid >= RIO_MAX_MPORTS) { pr_err("RIO: reached specified max number of mports\n"); - return; + return 1; } port->id = next_portid++; port->host_deviceid = rio_get_hdid(port->id); list_add_tail(&port->node, &rio_mports); + return 0; } EXPORT_SYMBOL_GPL(rio_local_get_device_id); diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c index 095016a9dec1..ac2701b22e71 100644 --- a/drivers/rapidio/switches/idt_gen2.c +++ b/drivers/rapidio/switches/idt_gen2.c @@ -418,3 +418,4 @@ DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1848, idtg2_switch_init); DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init); DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTVPS1616, idtg2_switch_init); DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTSPS1616, idtg2_switch_init); +DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1432, idtg2_switch_init); diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index 09b4437b3e61..39013867cbd6 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c @@ -171,7 +171,7 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev, err = __rtc_read_alarm(rtc, &alrm); if (!err && !rtc_valid_tm(&alrm.time)) - rtc_set_alarm(rtc, &alrm); + rtc_initialize_alarm(rtc, &alrm); strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE); dev_set_name(&rtc->dev, "rtc%d", id); diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 23719f0acbf6..ef6316acec43 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -375,6 +375,32 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) } EXPORT_SYMBOL_GPL(rtc_set_alarm); +/* Called once per device from rtc_device_register */ +int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) +{ + int err; + + err = rtc_valid_tm(&alarm->time); + if (err != 0) + return err; + + err = mutex_lock_interruptible(&rtc->ops_lock); + if (err) + return err; + + rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); + rtc->aie_timer.period = ktime_set(0, 0); + if (alarm->enabled) { + rtc->aie_timer.enabled = 1; + timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node); + } + mutex_unlock(&rtc->ops_lock); + return err; +} +EXPORT_SYMBOL_GPL(rtc_initialize_alarm); + + + int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled) { int err 
= mutex_lock_interruptible(&rtc->ops_lock); diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c index a0fc4cf42abf..90d866272c8e 100644 --- a/drivers/rtc/rtc-bfin.c +++ b/drivers/rtc/rtc-bfin.c @@ -250,6 +250,8 @@ static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) bfin_rtc_int_set_alarm(rtc); else bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)); + + return 0; } static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm) diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c index c42006469559..c5ac03793e79 100644 --- a/drivers/rtc/rtc-mc13xxx.c +++ b/drivers/rtc/rtc-mc13xxx.c @@ -401,6 +401,7 @@ const struct platform_device_id mc13xxx_rtc_idtable[] = { }, { .name = "mc13892-rtc", }, + { } }; static struct platform_driver mc13xxx_rtc_driver = { diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 714964913e5e..b3466c491cd3 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -336,7 +336,6 @@ static void s3c_rtc_release(struct device *dev) /* do not clear AIE here, it may be needed for wake */ - s3c_rtc_setpie(dev, 0); free_irq(s3c_rtc_alarmno, rtc_dev); free_irq(s3c_rtc_tickno, rtc_dev); } @@ -408,7 +407,6 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev) platform_set_drvdata(dev, NULL); rtc_device_unregister(rtc); - s3c_rtc_setpie(&dev->dev, 0); s3c_rtc_setaie(&dev->dev, 0); clk_disable(rtc_clk); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 6d5c7ff43f5b..ab55c2fa7ce2 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q) &sdev->request_queue->queue_flags); if (flagset) queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); - __blk_run_queue(sdev->request_queue, false); + __blk_run_queue(sdev->request_queue); if (flagset) queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); spin_unlock(sdev->request_queue->queue_lock); diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index fdf3fa639056..28c33506e4ad 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport) !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); if (flagset) queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q); - __blk_run_queue(rport->rqst_q, false); + __blk_run_queue(rport->rqst_q); if (flagset) queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags); diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index dca4a0bb6ca9..e3786f161bc3 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -131,8 +131,6 @@ source "drivers/staging/wlags49_h2/Kconfig" source "drivers/staging/wlags49_h25/Kconfig" -source "drivers/staging/samsung-laptop/Kconfig" - source "drivers/staging/sm7xx/Kconfig" source "drivers/staging/dt3155v4l/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index eb93012b6f59..f0d5c5315612 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -48,7 +48,6 @@ obj-$(CONFIG_XVMALLOC) += zram/ obj-$(CONFIG_ZCACHE) += zcache/ obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/ obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/ -obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop/ obj-$(CONFIG_FB_SM7XX) += sm7xx/ obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/ obj-$(CONFIG_CRYSTALHD) += crystalhd/ diff --git a/drivers/staging/samsung-laptop/Kconfig 
b/drivers/staging/samsung-laptop/Kconfig deleted file mode 100644 index f27c60864c26..000000000000 --- a/drivers/staging/samsung-laptop/Kconfig +++ /dev/null @@ -1,10 +0,0 @@ -config SAMSUNG_LAPTOP - tristate "Samsung Laptop driver" - default n - depends on RFKILL && BACKLIGHT_CLASS_DEVICE && X86 - help - This module implements a driver for the N128 Samsung Laptop - providing control over the Wireless LED and the LCD backlight - - To compile this driver as a module, choose - M here: the module will be called samsung-laptop. diff --git a/drivers/staging/samsung-laptop/Makefile b/drivers/staging/samsung-laptop/Makefile deleted file mode 100644 index 3c6f42045211..000000000000 --- a/drivers/staging/samsung-laptop/Makefile +++ /dev/null @@ -1 +0,0 @@ -obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o diff --git a/drivers/staging/samsung-laptop/TODO b/drivers/staging/samsung-laptop/TODO deleted file mode 100644 index f7a6d589916e..000000000000 --- a/drivers/staging/samsung-laptop/TODO +++ /dev/null @@ -1,5 +0,0 @@ -TODO: - - review from other developers - - figure out ACPI video issues - -Please send patches to Greg Kroah-Hartman diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c deleted file mode 100644 index 25294462b8b6..000000000000 --- a/drivers/staging/samsung-laptop/samsung-laptop.c +++ /dev/null @@ -1,843 +0,0 @@ -/* - * Samsung Laptop driver - * - * Copyright (C) 2009,2011 Greg Kroah-Hartman (gregkh@suse.de) - * Copyright (C) 2009,2011 Novell Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * This driver is needed because a number of Samsung laptops do not hook - * their control settings through ACPI. So we have to poke around in the - * BIOS to do things like brightness values, and "special" key controls. - */ - -/* - * We have 0 - 8 as valid brightness levels. The specs say that level 0 should - * be reserved by the BIOS (which really doesn't make much sense), we tell - * userspace that the value is 0 - 7 and then just tell the hardware 1 - 8 - */ -#define MAX_BRIGHT 0x07 - - -#define SABI_IFACE_MAIN 0x00 -#define SABI_IFACE_SUB 0x02 -#define SABI_IFACE_COMPLETE 0x04 -#define SABI_IFACE_DATA 0x05 - -/* Structure to get data back to the calling function */ -struct sabi_retval { - u8 retval[20]; -}; - -struct sabi_header_offsets { - u8 port; - u8 re_mem; - u8 iface_func; - u8 en_mem; - u8 data_offset; - u8 data_segment; -}; - -struct sabi_commands { - /* - * Brightness is 0 - 8, as described above. - * Value 0 is for the BIOS to use - */ - u8 get_brightness; - u8 set_brightness; - - /* - * first byte: - * 0x00 - wireless is off - * 0x01 - wireless is on - * second byte: - * 0x02 - 3G is off - * 0x03 - 3G is on - * TODO, verify 3G is correct, that doesn't seem right... 
- */ - u8 get_wireless_button; - u8 set_wireless_button; - - /* 0 is off, 1 is on */ - u8 get_backlight; - u8 set_backlight; - - /* - * 0x80 or 0x00 - no action - * 0x81 - recovery key pressed - */ - u8 get_recovery_mode; - u8 set_recovery_mode; - - /* - * on seclinux: 0 is low, 1 is high, - * on swsmi: 0 is normal, 1 is silent, 2 is turbo - */ - u8 get_performance_level; - u8 set_performance_level; - - /* - * Tell the BIOS that Linux is running on this machine. - * 81 is on, 80 is off - */ - u8 set_linux; -}; - -struct sabi_performance_level { - const char *name; - u8 value; -}; - -struct sabi_config { - const char *test_string; - u16 main_function; - const struct sabi_header_offsets header_offsets; - const struct sabi_commands commands; - const struct sabi_performance_level performance_levels[4]; - u8 min_brightness; - u8 max_brightness; -}; - -static const struct sabi_config sabi_configs[] = { - { - .test_string = "SECLINUX", - - .main_function = 0x4c49, - - .header_offsets = { - .port = 0x00, - .re_mem = 0x02, - .iface_func = 0x03, - .en_mem = 0x04, - .data_offset = 0x05, - .data_segment = 0x07, - }, - - .commands = { - .get_brightness = 0x00, - .set_brightness = 0x01, - - .get_wireless_button = 0x02, - .set_wireless_button = 0x03, - - .get_backlight = 0x04, - .set_backlight = 0x05, - - .get_recovery_mode = 0x06, - .set_recovery_mode = 0x07, - - .get_performance_level = 0x08, - .set_performance_level = 0x09, - - .set_linux = 0x0a, - }, - - .performance_levels = { - { - .name = "silent", - .value = 0, - }, - { - .name = "normal", - .value = 1, - }, - { }, - }, - .min_brightness = 1, - .max_brightness = 8, - }, - { - .test_string = "SwSmi@", - - .main_function = 0x5843, - - .header_offsets = { - .port = 0x00, - .re_mem = 0x04, - .iface_func = 0x02, - .en_mem = 0x03, - .data_offset = 0x05, - .data_segment = 0x07, - }, - - .commands = { - .get_brightness = 0x10, - .set_brightness = 0x11, - - .get_wireless_button = 0x12, - .set_wireless_button = 0x13, - - .get_backlight = 0x2d, - .set_backlight = 0x2e, - - .get_recovery_mode = 0xff, - .set_recovery_mode = 0xff, - - .get_performance_level = 0x31, - .set_performance_level = 0x32, - - .set_linux = 0xff, - }, - - .performance_levels = { - { - .name = "normal", - .value = 0, - }, - { - .name = "silent", - .value = 1, - }, - { - .name = "overclock", - .value = 2, - }, - { }, - }, - .min_brightness = 0, - .max_brightness = 8, - }, - { }, -}; - -static const struct sabi_config *sabi_config; - -static void __iomem *sabi; -static void __iomem *sabi_iface; -static void __iomem *f0000_segment; -static struct backlight_device *backlight_device; -static struct mutex sabi_mutex; -static struct platform_device *sdev; -static struct rfkill *rfk; - -static int force; -module_param(force, bool, 0); -MODULE_PARM_DESC(force, - "Disable the DMI check and forces the driver to be loaded"); - -static int debug; -module_param(debug, bool, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(debug, "Debug enabled or not"); - -static int sabi_get_command(u8 command, struct sabi_retval *sretval) -{ - int retval = 0; - u16 port = readw(sabi + sabi_config->header_offsets.port); - u8 complete, iface_data; - - mutex_lock(&sabi_mutex); - - /* enable memory to be able to write to it */ - outb(readb(sabi + sabi_config->header_offsets.en_mem), port); - - /* write out the command */ - writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN); - writew(command, sabi_iface + SABI_IFACE_SUB); - writeb(0, sabi_iface + SABI_IFACE_COMPLETE); - outb(readb(sabi + 
sabi_config->header_offsets.iface_func), port); - - /* write protect memory to make it safe */ - outb(readb(sabi + sabi_config->header_offsets.re_mem), port); - - /* see if the command actually succeeded */ - complete = readb(sabi_iface + SABI_IFACE_COMPLETE); - iface_data = readb(sabi_iface + SABI_IFACE_DATA); - if (complete != 0xaa || iface_data == 0xff) { - pr_warn("SABI get command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n", - command, complete, iface_data); - retval = -EINVAL; - goto exit; - } - /* - * Save off the data into a structure so the caller use it. - * Right now we only want the first 4 bytes, - * There are commands that need more, but not for the ones we - * currently care about. - */ - sretval->retval[0] = readb(sabi_iface + SABI_IFACE_DATA); - sretval->retval[1] = readb(sabi_iface + SABI_IFACE_DATA + 1); - sretval->retval[2] = readb(sabi_iface + SABI_IFACE_DATA + 2); - sretval->retval[3] = readb(sabi_iface + SABI_IFACE_DATA + 3); - -exit: - mutex_unlock(&sabi_mutex); - return retval; - -} - -static int sabi_set_command(u8 command, u8 data) -{ - int retval = 0; - u16 port = readw(sabi + sabi_config->header_offsets.port); - u8 complete, iface_data; - - mutex_lock(&sabi_mutex); - - /* enable memory to be able to write to it */ - outb(readb(sabi + sabi_config->header_offsets.en_mem), port); - - /* write out the command */ - writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN); - writew(command, sabi_iface + SABI_IFACE_SUB); - writeb(0, sabi_iface + SABI_IFACE_COMPLETE); - writeb(data, sabi_iface + SABI_IFACE_DATA); - outb(readb(sabi + sabi_config->header_offsets.iface_func), port); - - /* write protect memory to make it safe */ - outb(readb(sabi + sabi_config->header_offsets.re_mem), port); - - /* see if the command actually succeeded */ - complete = readb(sabi_iface + SABI_IFACE_COMPLETE); - iface_data = readb(sabi_iface + SABI_IFACE_DATA); - if (complete != 0xaa || iface_data == 0xff) { - pr_warn("SABI set command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n", - command, complete, iface_data); - retval = -EINVAL; - } - - mutex_unlock(&sabi_mutex); - return retval; -} - -static void test_backlight(void) -{ - struct sabi_retval sretval; - - sabi_get_command(sabi_config->commands.get_backlight, &sretval); - printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); - - sabi_set_command(sabi_config->commands.set_backlight, 0); - printk(KERN_DEBUG "backlight should be off\n"); - - sabi_get_command(sabi_config->commands.get_backlight, &sretval); - printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); - - msleep(1000); - - sabi_set_command(sabi_config->commands.set_backlight, 1); - printk(KERN_DEBUG "backlight should be on\n"); - - sabi_get_command(sabi_config->commands.get_backlight, &sretval); - printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); -} - -static void test_wireless(void) -{ - struct sabi_retval sretval; - - sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); - printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); - - sabi_set_command(sabi_config->commands.set_wireless_button, 0); - printk(KERN_DEBUG "wireless led should be off\n"); - - sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); - printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); - - msleep(1000); - - sabi_set_command(sabi_config->commands.set_wireless_button, 1); - printk(KERN_DEBUG "wireless led should be on\n"); - - sabi_get_command(sabi_config->commands.get_wireless_button, 
&sretval); - printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); -} - -static u8 read_brightness(void) -{ - struct sabi_retval sretval; - int user_brightness = 0; - int retval; - - retval = sabi_get_command(sabi_config->commands.get_brightness, - &sretval); - if (!retval) { - user_brightness = sretval.retval[0]; - if (user_brightness != 0) - user_brightness -= sabi_config->min_brightness; - } - return user_brightness; -} - -static void set_brightness(u8 user_brightness) -{ - u8 user_level = user_brightness - sabi_config->min_brightness; - - sabi_set_command(sabi_config->commands.set_brightness, user_level); -} - -static int get_brightness(struct backlight_device *bd) -{ - return (int)read_brightness(); -} - -static int update_status(struct backlight_device *bd) -{ - set_brightness(bd->props.brightness); - - if (bd->props.power == FB_BLANK_UNBLANK) - sabi_set_command(sabi_config->commands.set_backlight, 1); - else - sabi_set_command(sabi_config->commands.set_backlight, 0); - return 0; -} - -static const struct backlight_ops backlight_ops = { - .get_brightness = get_brightness, - .update_status = update_status, -}; - -static int rfkill_set(void *data, bool blocked) -{ - /* Do something with blocked...*/ - /* - * blocked == false is on - * blocked == true is off - */ - if (blocked) - sabi_set_command(sabi_config->commands.set_wireless_button, 0); - else - sabi_set_command(sabi_config->commands.set_wireless_button, 1); - - return 0; -} - -static struct rfkill_ops rfkill_ops = { - .set_block = rfkill_set, -}; - -static int init_wireless(struct platform_device *sdev) -{ - int retval; - - rfk = rfkill_alloc("samsung-wifi", &sdev->dev, RFKILL_TYPE_WLAN, - &rfkill_ops, NULL); - if (!rfk) - return -ENOMEM; - - retval = rfkill_register(rfk); - if (retval) { - rfkill_destroy(rfk); - return -ENODEV; - } - - return 0; -} - -static void destroy_wireless(void) -{ - rfkill_unregister(rfk); - rfkill_destroy(rfk); -} - -static ssize_t get_performance_level(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct sabi_retval sretval; - int retval; - int i; - - /* Read the state */ - retval = sabi_get_command(sabi_config->commands.get_performance_level, - &sretval); - if (retval) - return retval; - - /* The logic is backwards, yeah, lots of fun... 
*/ - for (i = 0; sabi_config->performance_levels[i].name; ++i) { - if (sretval.retval[0] == sabi_config->performance_levels[i].value) - return sprintf(buf, "%s\n", sabi_config->performance_levels[i].name); - } - return sprintf(buf, "%s\n", "unknown"); -} - -static ssize_t set_performance_level(struct device *dev, - struct device_attribute *attr, const char *buf, - size_t count) -{ - if (count >= 1) { - int i; - for (i = 0; sabi_config->performance_levels[i].name; ++i) { - const struct sabi_performance_level *level = - &sabi_config->performance_levels[i]; - if (!strncasecmp(level->name, buf, strlen(level->name))) { - sabi_set_command(sabi_config->commands.set_performance_level, - level->value); - break; - } - } - if (!sabi_config->performance_levels[i].name) - return -EINVAL; - } - return count; -} -static DEVICE_ATTR(performance_level, S_IWUSR | S_IRUGO, - get_performance_level, set_performance_level); - - -static int __init dmi_check_cb(const struct dmi_system_id *id) -{ - pr_info("found laptop model '%s'\n", - id->ident); - return 0; -} - -static struct dmi_system_id __initdata samsung_dmi_table[] = { - { - .ident = "N128", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "N128"), - DMI_MATCH(DMI_BOARD_NAME, "N128"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "N130", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "N130"), - DMI_MATCH(DMI_BOARD_NAME, "N130"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "X125", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "X125"), - DMI_MATCH(DMI_BOARD_NAME, "X125"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "X120/X170", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "X120/X170"), - DMI_MATCH(DMI_BOARD_NAME, "X120/X170"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "NC10", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "NC10"), - DMI_MATCH(DMI_BOARD_NAME, "NC10"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "NP-Q45", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"), - DMI_MATCH(DMI_BOARD_NAME, "SQ45S70S"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "X360", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "X360"), - DMI_MATCH(DMI_BOARD_NAME, "X360"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "R410 Plus", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "R410P"), - DMI_MATCH(DMI_BOARD_NAME, "R460"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "R518", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "R518"), - DMI_MATCH(DMI_BOARD_NAME, "R518"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "R519/R719", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "R519/R719"), - DMI_MATCH(DMI_BOARD_NAME, "R519/R719"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "N150/N210/N220/N230", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"), - DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"), - }, 
- .callback = dmi_check_cb, - }, - { - .ident = "N150P/N210P/N220P", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "N150P/N210P/N220P"), - DMI_MATCH(DMI_BOARD_NAME, "N150P/N210P/N220P"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "R530/R730", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "R530/R730"), - DMI_MATCH(DMI_BOARD_NAME, "R530/R730"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "NF110/NF210/NF310", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"), - DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "N145P/N250P/N260P", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"), - DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "R70/R71", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "R70/R71"), - DMI_MATCH(DMI_BOARD_NAME, "R70/R71"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "P460", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "P460"), - DMI_MATCH(DMI_BOARD_NAME, "P460"), - }, - .callback = dmi_check_cb, - }, - { }, -}; -MODULE_DEVICE_TABLE(dmi, samsung_dmi_table); - -static int find_signature(void __iomem *memcheck, const char *testStr) -{ - int i = 0; - int loca; - - for (loca = 0; loca < 0xffff; loca++) { - char temp = readb(memcheck + loca); - - if (temp == testStr[i]) { - if (i == strlen(testStr)-1) - break; - ++i; - } else { - i = 0; - } - } - return loca; -} - -static int __init samsung_init(void) -{ - struct backlight_properties props; - struct sabi_retval sretval; - unsigned int ifaceP; - int i; - int loca; - int retval; - - mutex_init(&sabi_mutex); - - if (!force && !dmi_check_system(samsung_dmi_table)) - return -ENODEV; - - f0000_segment = ioremap_nocache(0xf0000, 0xffff); - if (!f0000_segment) { - pr_err("Can't map the segment at 0xf0000\n"); - return -EINVAL; - } - - /* Try to find one of the signatures in memory to find the header */ - for (i = 0; sabi_configs[i].test_string != 0; ++i) { - sabi_config = &sabi_configs[i]; - loca = find_signature(f0000_segment, sabi_config->test_string); - if (loca != 0xffff) - break; - } - - if (loca == 0xffff) { - pr_err("This computer does not support SABI\n"); - goto error_no_signature; - } - - /* point to the SMI port Number */ - loca += 1; - sabi = (f0000_segment + loca); - - if (debug) { - printk(KERN_DEBUG "This computer supports SABI==%x\n", - loca + 0xf0000 - 6); - printk(KERN_DEBUG "SABI header:\n"); - printk(KERN_DEBUG " SMI Port Number = 0x%04x\n", - readw(sabi + sabi_config->header_offsets.port)); - printk(KERN_DEBUG " SMI Interface Function = 0x%02x\n", - readb(sabi + sabi_config->header_offsets.iface_func)); - printk(KERN_DEBUG " SMI enable memory buffer = 0x%02x\n", - readb(sabi + sabi_config->header_offsets.en_mem)); - printk(KERN_DEBUG " SMI restore memory buffer = 0x%02x\n", - readb(sabi + sabi_config->header_offsets.re_mem)); - printk(KERN_DEBUG " SABI data offset = 0x%04x\n", - readw(sabi + sabi_config->header_offsets.data_offset)); - printk(KERN_DEBUG " SABI data segment = 0x%04x\n", - readw(sabi + sabi_config->header_offsets.data_segment)); - } - - /* Get a pointer to the SABI Interface */ - 
ifaceP = (readw(sabi + sabi_config->header_offsets.data_segment) & 0x0ffff) << 4; - ifaceP += readw(sabi + sabi_config->header_offsets.data_offset) & 0x0ffff; - sabi_iface = ioremap_nocache(ifaceP, 16); - if (!sabi_iface) { - pr_err("Can't remap %x\n", ifaceP); - goto exit; - } - if (debug) { - printk(KERN_DEBUG "ifaceP = 0x%08x\n", ifaceP); - printk(KERN_DEBUG "sabi_iface = %p\n", sabi_iface); - - test_backlight(); - test_wireless(); - - retval = sabi_get_command(sabi_config->commands.get_brightness, - &sretval); - printk(KERN_DEBUG "brightness = 0x%02x\n", sretval.retval[0]); - } - - /* Turn on "Linux" mode in the BIOS */ - if (sabi_config->commands.set_linux != 0xff) { - retval = sabi_set_command(sabi_config->commands.set_linux, - 0x81); - if (retval) { - pr_warn("Linux mode was not set!\n"); - goto error_no_platform; - } - } - - /* knock up a platform device to hang stuff off of */ - sdev = platform_device_register_simple("samsung", -1, NULL, 0); - if (IS_ERR(sdev)) - goto error_no_platform; - - /* create a backlight device to talk to this one */ - memset(&props, 0, sizeof(struct backlight_properties)); - props.type = BACKLIGHT_PLATFORM; - props.max_brightness = sabi_config->max_brightness; - backlight_device = backlight_device_register("samsung", &sdev->dev, - NULL, &backlight_ops, - &props); - if (IS_ERR(backlight_device)) - goto error_no_backlight; - - backlight_device->props.brightness = read_brightness(); - backlight_device->props.power = FB_BLANK_UNBLANK; - backlight_update_status(backlight_device); - - retval = init_wireless(sdev); - if (retval) - goto error_no_rfk; - - retval = device_create_file(&sdev->dev, &dev_attr_performance_level); - if (retval) - goto error_file_create; - -exit: - return 0; - -error_file_create: - destroy_wireless(); - -error_no_rfk: - backlight_device_unregister(backlight_device); - -error_no_backlight: - platform_device_unregister(sdev); - -error_no_platform: - iounmap(sabi_iface); - -error_no_signature: - iounmap(f0000_segment); - return -EINVAL; -} - -static void __exit samsung_exit(void) -{ - /* Turn off "Linux" mode in the BIOS */ - if (sabi_config->commands.set_linux != 0xff) - sabi_set_command(sabi_config->commands.set_linux, 0x80); - - device_remove_file(&sdev->dev, &dev_attr_performance_level); - backlight_device_unregister(backlight_device); - destroy_wireless(); - iounmap(sabi_iface); - iounmap(f0000_segment); - platform_device_unregister(sdev); -} - -module_init(samsung_init); -module_exit(samsung_exit); - -MODULE_AUTHOR("Greg Kroah-Hartman "); -MODULE_DESCRIPTION("Samsung Backlight driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 41b6e51188e4..006489d82dc3 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -66,6 +66,7 @@ config USB_ARCH_HAS_EHCI default y if ARCH_VT8500 default y if PLAT_SPEAR default y if ARCH_MSM + default y if MICROBLAZE default PCI # ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface. 
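The samsung-laptop code removed above locates the SABI header by scanning the BIOS area at 0xf0000 for a signature string and then derives the interface pointer from the header's data_segment/data_offset pair with real-mode segment arithmetic, linear = (segment << 4) + offset, before ioremapping 16 bytes. A minimal standalone sketch of that address calculation follows; the sample segment and offset values are made up for illustration.

	#include <stdio.h>

	/* Fold a real-mode segment:offset pair into a linear physical address,
	 * the same (seg << 4) + off arithmetic the removed driver applied to
	 * the SABI data_segment/data_offset header fields. */
	static unsigned int segoff_to_phys(unsigned int seg, unsigned int off)
	{
		return ((seg & 0xffff) << 4) + (off & 0xffff);
	}

	int main(void)
	{
		/* illustrative values only; the real ones come from the header */
		unsigned int seg = 0xf000, off = 0x1234;

		printf("linear address = 0x%05x\n", segoff_to_phys(seg, off));
		/* prints: linear address = 0xf1234 */
		return 0;
	}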
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c index a3d2e2399655..96fdfb815f89 100644 --- a/drivers/usb/core/devices.c +++ b/drivers/usb/core/devices.c @@ -221,7 +221,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end, break; case USB_ENDPOINT_XFER_INT: type = "Int."; - if (speed == USB_SPEED_HIGH) + if (speed == USB_SPEED_HIGH || speed == USB_SPEED_SUPER) interval = 1 << (desc->bInterval - 1); else interval = desc->bInterval; @@ -229,7 +229,8 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end, default: /* "can't happen" */ return start; } - interval *= (speed == USB_SPEED_HIGH) ? 125 : 1000; + interval *= (speed == USB_SPEED_HIGH || + speed == USB_SPEED_SUPER) ? 125 : 1000; if (interval % 1000) unit = 'u'; else { @@ -542,8 +543,9 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes, if (level == 0) { int max; - /* high speed reserves 80%, full/low reserves 90% */ - if (usbdev->speed == USB_SPEED_HIGH) + /* super/high speed reserves 80%, full/low reserves 90% */ + if (usbdev->speed == USB_SPEED_HIGH || + usbdev->speed == USB_SPEED_SUPER) max = 800; else max = FRAME_TIME_MAX_USECS_ALLOC; diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 8eed05d23838..77a7faec8d78 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1908,7 +1908,7 @@ void usb_free_streams(struct usb_interface *interface, /* Streams only apply to bulk endpoints. */ for (i = 0; i < num_eps; i++) - if (!usb_endpoint_xfer_bulk(&eps[i]->desc)) + if (!eps[i] || !usb_endpoint_xfer_bulk(&eps[i]->desc)) return; hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 8fb754916c67..93720bdc9efd 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -2285,7 +2285,17 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) } /* see 7.1.7.6 */ - status = set_port_feature(hub->hdev, port1, USB_PORT_FEAT_SUSPEND); + /* Clear PORT_POWER if it's a USB3.0 device connected to USB 3.0 + * external hub. + * FIXME: this is a temporary workaround to make the system able + * to suspend/resume. 
+ */ + if ((hub->hdev->parent != NULL) && hub_is_superspeed(hub->hdev)) + status = clear_port_feature(hub->hdev, port1, + USB_PORT_FEAT_POWER); + else + status = set_port_feature(hub->hdev, port1, + USB_PORT_FEAT_SUSPEND); if (status) { dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n", port1, status); diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c index 9abecfddb27d..0111f8a9cf7f 100644 --- a/drivers/usb/gadget/f_audio.c +++ b/drivers/usb/gadget/f_audio.c @@ -706,6 +706,7 @@ f_audio_unbind(struct usb_configuration *c, struct usb_function *f) struct f_audio *audio = func_to_audio(f); usb_free_descriptors(f->descriptors); + usb_free_descriptors(f->hs_descriptors); kfree(audio); } diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c index 95dd4662d6a8..b3c304290150 100644 --- a/drivers/usb/gadget/f_eem.c +++ b/drivers/usb/gadget/f_eem.c @@ -314,6 +314,9 @@ eem_unbind(struct usb_configuration *c, struct usb_function *f) static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req) { + struct sk_buff *skb = (struct sk_buff *)req->context; + + dev_kfree_skb_any(skb); } /* @@ -428,10 +431,11 @@ static int eem_unwrap(struct gether *port, skb_trim(skb2, len); put_unaligned_le16(BIT(15) | BIT(11) | len, skb_push(skb2, 2)); - skb_copy_bits(skb, 0, req->buf, skb->len); - req->length = skb->len; + skb_copy_bits(skb2, 0, req->buf, skb2->len); + req->length = skb2->len; req->complete = eem_cmd_complete; req->zero = 1; + req->context = skb2; if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC)) DBG(cdev, "echo response queue fail\n"); break; diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c index aee7e3c53c38..36613b37c504 100644 --- a/drivers/usb/gadget/fsl_qe_udc.c +++ b/drivers/usb/gadget/fsl_qe_udc.c @@ -1148,6 +1148,12 @@ static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame) static int txcomplete(struct qe_ep *ep, unsigned char restart) { if (ep->tx_req != NULL) { + struct qe_req *req = ep->tx_req; + unsigned zlp = 0, last_len = 0; + + last_len = min_t(unsigned, req->req.length - ep->sent, + ep->ep.maxpacket); + if (!restart) { int asent = ep->last; ep->sent += asent; @@ -1156,9 +1162,18 @@ static int txcomplete(struct qe_ep *ep, unsigned char restart) ep->last = 0; } + /* zlp needed when req->re.zero is set */ + if (req->req.zero) { + if (last_len == 0 || + (req->req.length % ep->ep.maxpacket) != 0) + zlp = 0; + else + zlp = 1; + } else + zlp = 0; + /* a request already were transmitted completely */ - if ((ep->tx_req->req.length - ep->sent) <= 0) { - ep->tx_req->req.actual = (unsigned int)ep->sent; + if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) { done(ep, ep->tx_req, 0); ep->tx_req = NULL; ep->last = 0; @@ -1191,6 +1206,7 @@ static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame) buf = (u8 *)ep->tx_req->req.buf + ep->sent; if (buf && size) { ep->last = size; + ep->tx_req->req.actual += size; frame_set_data(frame, buf); frame_set_length(frame, size); frame_set_status(frame, FRAME_OK); diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c index 3ed73f49cf18..a01383f71f38 100644 --- a/drivers/usb/gadget/inode.c +++ b/drivers/usb/gadget/inode.c @@ -386,8 +386,10 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) /* halt any endpoint by doing a "wrong direction" i/o call */ if (usb_endpoint_dir_in(&data->desc)) { - if (usb_endpoint_xfer_isoc(&data->desc)) + if (usb_endpoint_xfer_isoc(&data->desc)) { + mutex_unlock(&data->lock); return -EINVAL; + } DBG 
(data->dev, "%s halt\n", data->name); spin_lock_irq (&data->dev->lock); if (likely (data->ep != NULL)) diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c index 3e4b35e50c24..68dbcc3e4cc2 100644 --- a/drivers/usb/gadget/pch_udc.c +++ b/drivers/usb/gadget/pch_udc.c @@ -1608,7 +1608,7 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq, return -EINVAL; if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN)) return -ESHUTDOWN; - spin_lock_irqsave(&ep->dev->lock, iflags); + spin_lock_irqsave(&dev->lock, iflags); /* map the buffer for dma */ if (usbreq->length && ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) { @@ -1625,8 +1625,10 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq, DMA_FROM_DEVICE); } else { req->buf = kzalloc(usbreq->length, GFP_ATOMIC); - if (!req->buf) - return -ENOMEM; + if (!req->buf) { + retval = -ENOMEM; + goto probe_end; + } if (ep->in) { memcpy(req->buf, usbreq->buf, usbreq->length); req->dma = dma_map_single(&dev->pdev->dev, diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c index 015118535f77..6dcc1f68fa60 100644 --- a/drivers/usb/gadget/r8a66597-udc.c +++ b/drivers/usb/gadget/r8a66597-udc.c @@ -1083,7 +1083,9 @@ static void irq_device_state(struct r8a66597 *r8a66597) if (dvsq == DS_DFLT) { /* bus reset */ + spin_unlock(&r8a66597->lock); r8a66597->driver->disconnect(&r8a66597->gadget); + spin_lock(&r8a66597->lock); r8a66597_update_usb_speed(r8a66597); } if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG) diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 98ded66e8d3f..42abd0f603bf 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -1247,24 +1247,27 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh) static void scan_async (struct ehci_hcd *ehci) { + bool stopped; struct ehci_qh *qh; enum ehci_timer_action action = TIMER_IO_WATCHDOG; ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index); timer_action_done (ehci, TIMER_ASYNC_SHRINK); rescan: + stopped = !HC_IS_RUNNING(ehci_to_hcd(ehci)->state); qh = ehci->async->qh_next.qh; if (likely (qh != NULL)) { do { /* clean any finished work for this qh */ - if (!list_empty (&qh->qtd_list) - && qh->stamp != ehci->stamp) { + if (!list_empty(&qh->qtd_list) && (stopped || + qh->stamp != ehci->stamp)) { int temp; /* unlinks could happen here; completion * reporting drops the lock. rescan using * the latest schedule, but don't rescan - * qhs we already finished (no looping). + * qhs we already finished (no looping) + * unless the controller is stopped. 
*/ qh = qh_get (qh); qh->stamp = ehci->stamp; @@ -1285,9 +1288,9 @@ rescan: */ if (list_empty(&qh->qtd_list) && qh->qh_state == QH_STATE_LINKED) { - if (!ehci->reclaim - && ((ehci->stamp - qh->stamp) & 0x1fff) - >= (EHCI_SHRINK_FRAMES * 8)) + if (!ehci->reclaim && (stopped || + ((ehci->stamp - qh->stamp) & 0x1fff) + >= EHCI_SHRINK_FRAMES * 8)) start_unlink_async(ehci, qh); else action = TIMER_ASYNC_SHRINK; diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c index f50e84ac570a..795345ad45e6 100644 --- a/drivers/usb/host/isp1760-hcd.c +++ b/drivers/usb/host/isp1760-hcd.c @@ -295,7 +295,7 @@ static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd) } dev_err(hcd->self.controller, - "%s: Can not allocate %lu bytes of memory\n" + "%s: Cannot allocate %zu bytes of memory\n" "Current memory map:\n", __func__, qtd->length); for (i = 0; i < BLOCKS; i++) { diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c index 17a6043c1fa0..958d985f2951 100644 --- a/drivers/usb/host/ohci-au1xxx.c +++ b/drivers/usb/host/ohci-au1xxx.c @@ -33,7 +33,7 @@ #ifdef __LITTLE_ENDIAN #define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C) -#elif __BIG_ENDIAN +#elif defined(__BIG_ENDIAN) #define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C | \ USBH_ENABLE_BE) #else diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 1d586d4f7b56..9b166d70ae91 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -84,65 +84,92 @@ int usb_amd_find_chipset_info(void) { u8 rev = 0; unsigned long flags; + struct amd_chipset_info info; + int ret; spin_lock_irqsave(&amd_lock, flags); - amd_chipset.probe_count++; /* probe only once */ - if (amd_chipset.probe_count > 1) { + if (amd_chipset.probe_count > 0) { + amd_chipset.probe_count++; spin_unlock_irqrestore(&amd_lock, flags); return amd_chipset.probe_result; } + memset(&info, 0, sizeof(info)); + spin_unlock_irqrestore(&amd_lock, flags); - amd_chipset.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL); - if (amd_chipset.smbus_dev) { - rev = amd_chipset.smbus_dev->revision; + info.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL); + if (info.smbus_dev) { + rev = info.smbus_dev->revision; if (rev >= 0x40) - amd_chipset.sb_type = 1; + info.sb_type = 1; else if (rev >= 0x30 && rev <= 0x3b) - amd_chipset.sb_type = 3; + info.sb_type = 3; } else { - amd_chipset.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, - 0x780b, NULL); - if (!amd_chipset.smbus_dev) { - spin_unlock_irqrestore(&amd_lock, flags); - return 0; + info.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, + 0x780b, NULL); + if (!info.smbus_dev) { + ret = 0; + goto commit; } - rev = amd_chipset.smbus_dev->revision; + + rev = info.smbus_dev->revision; if (rev >= 0x11 && rev <= 0x18) - amd_chipset.sb_type = 2; + info.sb_type = 2; } - if (amd_chipset.sb_type == 0) { - if (amd_chipset.smbus_dev) { - pci_dev_put(amd_chipset.smbus_dev); - amd_chipset.smbus_dev = NULL; + if (info.sb_type == 0) { + if (info.smbus_dev) { + pci_dev_put(info.smbus_dev); + info.smbus_dev = NULL; } - spin_unlock_irqrestore(&amd_lock, flags); - return 0; + ret = 0; + goto commit; } - amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL); - if (amd_chipset.nb_dev) { - amd_chipset.nb_type = 1; + info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL); + if (info.nb_dev) { + info.nb_type = 1; } else { - amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, - 0x1510, NULL); - if 
(amd_chipset.nb_dev) { - amd_chipset.nb_type = 2; - } else { - amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, - 0x9600, NULL); - if (amd_chipset.nb_dev) - amd_chipset.nb_type = 3; + info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL); + if (info.nb_dev) { + info.nb_type = 2; + } else { + info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, + 0x9600, NULL); + if (info.nb_dev) + info.nb_type = 3; } } - amd_chipset.probe_result = 1; + ret = info.probe_result = 1; printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n"); - spin_unlock_irqrestore(&amd_lock, flags); - return amd_chipset.probe_result; +commit: + + spin_lock_irqsave(&amd_lock, flags); + if (amd_chipset.probe_count > 0) { + /* race - someone else was faster - drop devices */ + + /* Mark that we where here */ + amd_chipset.probe_count++; + ret = amd_chipset.probe_result; + + spin_unlock_irqrestore(&amd_lock, flags); + + if (info.nb_dev) + pci_dev_put(info.nb_dev); + if (info.smbus_dev) + pci_dev_put(info.smbus_dev); + + } else { + /* no race - commit the result */ + info.probe_count++; + amd_chipset = info; + spin_unlock_irqrestore(&amd_lock, flags); + } + + return ret; } EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info); @@ -284,6 +311,7 @@ EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable); void usb_amd_dev_put(void) { + struct pci_dev *nb, *smbus; unsigned long flags; spin_lock_irqsave(&amd_lock, flags); @@ -294,20 +322,23 @@ void usb_amd_dev_put(void) return; } - if (amd_chipset.nb_dev) { - pci_dev_put(amd_chipset.nb_dev); - amd_chipset.nb_dev = NULL; - } - if (amd_chipset.smbus_dev) { - pci_dev_put(amd_chipset.smbus_dev); - amd_chipset.smbus_dev = NULL; - } + /* save them to pci_dev_put outside of spinlock */ + nb = amd_chipset.nb_dev; + smbus = amd_chipset.smbus_dev; + + amd_chipset.nb_dev = NULL; + amd_chipset.smbus_dev = NULL; amd_chipset.nb_type = 0; amd_chipset.sb_type = 0; amd_chipset.isoc_reqs = 0; amd_chipset.probe_result = 0; spin_unlock_irqrestore(&amd_lock, flags); + + if (nb) + pci_dev_put(nb); + if (smbus) + pci_dev_put(smbus); } EXPORT_SYMBOL_GPL(usb_amd_dev_put); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index a003e79aacdc..627f3438028c 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -846,7 +846,7 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci, * Skip ports that don't have known speeds, or have duplicate * Extended Capabilities port speed entries. */ - if (port_speed == 0 || port_speed == -1) + if (port_speed == 0 || port_speed == DUPLICATE_ENTRY) continue; /* @@ -974,6 +974,47 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud return 0; } +/* + * Convert interval expressed as 2^(bInterval - 1) == interval into + * straight exponent value 2^n == interval. + * + */ +static unsigned int xhci_parse_exponent_interval(struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + unsigned int interval; + + interval = clamp_val(ep->desc.bInterval, 1, 16) - 1; + if (interval != ep->desc.bInterval - 1) + dev_warn(&udev->dev, + "ep %#x - rounding interval to %d microframes\n", + ep->desc.bEndpointAddress, + 1 << interval); + + return interval; +} + +/* + * Convert bInterval expressed in frames (in 1-255 range) to exponent of + * microframes, rounded down to nearest power of 2. 
+ */ +static unsigned int xhci_parse_frame_interval(struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + unsigned int interval; + + interval = fls(8 * ep->desc.bInterval) - 1; + interval = clamp_val(interval, 3, 10); + if ((1 << interval) != 8 * ep->desc.bInterval) + dev_warn(&udev->dev, + "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n", + ep->desc.bEndpointAddress, + 1 << interval, + 8 * ep->desc.bInterval); + + return interval; +} + /* Return the polling or NAK interval. * * The polling interval is expressed in "microframes". If xHCI's Interval field @@ -982,7 +1023,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval * is set to 0. */ -static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, +static unsigned int xhci_get_endpoint_interval(struct usb_device *udev, struct usb_host_endpoint *ep) { unsigned int interval = 0; @@ -991,45 +1032,38 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, case USB_SPEED_HIGH: /* Max NAK rate */ if (usb_endpoint_xfer_control(&ep->desc) || - usb_endpoint_xfer_bulk(&ep->desc)) + usb_endpoint_xfer_bulk(&ep->desc)) { interval = ep->desc.bInterval; + break; + } /* Fall through - SS and HS isoc/int have same decoding */ + case USB_SPEED_SUPER: if (usb_endpoint_xfer_int(&ep->desc) || - usb_endpoint_xfer_isoc(&ep->desc)) { - if (ep->desc.bInterval == 0) - interval = 0; - else - interval = ep->desc.bInterval - 1; - if (interval > 15) - interval = 15; - if (interval != ep->desc.bInterval + 1) - dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n", - ep->desc.bEndpointAddress, 1 << interval); + usb_endpoint_xfer_isoc(&ep->desc)) { + interval = xhci_parse_exponent_interval(udev, ep); } break; - /* Convert bInterval (in 1-255 frames) to microframes and round down to - * nearest power of 2. - */ + case USB_SPEED_FULL: + if (usb_endpoint_xfer_int(&ep->desc)) { + interval = xhci_parse_exponent_interval(udev, ep); + break; + } + /* + * Fall through for isochronous endpoint interval decoding + * since it uses the same rules as low speed interrupt + * endpoints. + */ + case USB_SPEED_LOW: if (usb_endpoint_xfer_int(&ep->desc) || - usb_endpoint_xfer_isoc(&ep->desc)) { - interval = fls(8*ep->desc.bInterval) - 1; - if (interval > 10) - interval = 10; - if (interval < 3) - interval = 3; - if ((1 << interval) != 8*ep->desc.bInterval) - dev_warn(&udev->dev, - "ep %#x - rounding interval" - " to %d microframes, " - "ep desc says %d microframes\n", - ep->desc.bEndpointAddress, - 1 << interval, - 8*ep->desc.bInterval); + usb_endpoint_xfer_isoc(&ep->desc)) { + + interval = xhci_parse_frame_interval(udev, ep); } break; + default: BUG(); } @@ -1041,7 +1075,7 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, * transaction opportunities per microframe", but that goes in the Max Burst * endpoint context field. 
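The earlier devices.c change and the xhci_parse_exponent_interval()/xhci_parse_frame_interval() helpers above both decode the same descriptor field: high-speed and SuperSpeed interrupt/isochronous endpoints encode bInterval as an exponent, giving a period of 2^(bInterval-1) microframes of 125 us each, while full-speed frame-based values are converted to a microframe exponent rounded down to a power of two and clamped to the 3..10 range. A standalone sketch of the two conversions, relying only on the arithmetic shown above (the helper names are illustrative, not kernel API):

	#include <stdio.h>

	/* 2^(bInterval - 1) microframes; bInterval clamped to 1..16 because
	 * the xHCI Interval field holds a 0..15 exponent. */
	static unsigned int exponent_interval(unsigned int bInterval)
	{
		if (bInterval < 1)
			bInterval = 1;
		else if (bInterval > 16)
			bInterval = 16;
		return bInterval - 1;
	}

	/* bInterval in frames (1..255) to a microframe exponent, rounded down
	 * to the nearest power of two and clamped to 3..10, i.e. fls(8*b) - 1. */
	static unsigned int frame_interval(unsigned int bInterval)
	{
		unsigned int exp = 0, uframes = 8 * bInterval;

		while (uframes >>= 1)
			exp++;			/* floor(log2(8 * bInterval)) */
		if (exp < 3)
			exp = 3;
		else if (exp > 10)
			exp = 10;
		return exp;
	}

	int main(void)
	{
		/* HS interrupt ep, bInterval = 4: 2^3 = 8 microframes = 1 ms */
		printf("exponent form: %u microframes\n",
		       1u << exponent_interval(4));
		/* FS interrupt ep, bInterval = 10 frames = 80 microframes,
		 * rounded down to 2^6 = 64 microframes = 8 ms */
		printf("frame form:    %u microframes\n",
		       1u << frame_interval(10));
		return 0;
	}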
*/ -static inline u32 xhci_get_endpoint_mult(struct usb_device *udev, +static u32 xhci_get_endpoint_mult(struct usb_device *udev, struct usb_host_endpoint *ep) { if (udev->speed != USB_SPEED_SUPER || @@ -1050,7 +1084,7 @@ static inline u32 xhci_get_endpoint_mult(struct usb_device *udev, return ep->ss_ep_comp.bmAttributes; } -static inline u32 xhci_get_endpoint_type(struct usb_device *udev, +static u32 xhci_get_endpoint_type(struct usb_device *udev, struct usb_host_endpoint *ep) { int in; @@ -1084,7 +1118,7 @@ static inline u32 xhci_get_endpoint_type(struct usb_device *udev, * Basically, this is the maxpacket size, multiplied by the burst size * and mult size. */ -static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci, +static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci, struct usb_device *udev, struct usb_host_endpoint *ep) { @@ -1727,12 +1761,12 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, * found a similar duplicate. */ if (xhci->port_array[i] != major_revision && - xhci->port_array[i] != (u8) -1) { + xhci->port_array[i] != DUPLICATE_ENTRY) { if (xhci->port_array[i] == 0x03) xhci->num_usb3_ports--; else xhci->num_usb2_ports--; - xhci->port_array[i] = (u8) -1; + xhci->port_array[i] = DUPLICATE_ENTRY; } /* FIXME: Should we disable the port? */ continue; @@ -1831,7 +1865,7 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) for (i = 0; i < num_ports; i++) { if (xhci->port_array[i] == 0x03 || xhci->port_array[i] == 0 || - xhci->port_array[i] == -1) + xhci->port_array[i] == DUPLICATE_ENTRY) continue; xhci->usb2_ports[port_index] = diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index ceea9f33491c..a10494c2f3c7 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -114,6 +114,10 @@ static int xhci_pci_setup(struct usb_hcd *hcd) if (pdev->vendor == PCI_VENDOR_ID_NEC) xhci->quirks |= XHCI_NEC_HOST; + /* AMD PLL quirk */ + if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) + xhci->quirks |= XHCI_AMD_PLL_FIX; + /* Make sure the HC is halted. */ retval = xhci_halt(xhci); if (retval) diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index cfc1ad92473f..7437386a9a50 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -93,7 +93,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, /* Does this link TRB point to the first segment in a ring, * or was the previous TRB the last TRB on the last segment in the ERST? */ -static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring, +static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring, struct xhci_segment *seg, union xhci_trb *trb) { if (ring == xhci->event_ring) @@ -107,7 +107,7 @@ static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring * segment? I.e. would the updated event TRB pointer step off the end of the * event seg? 
*/ -static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, +static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, struct xhci_segment *seg, union xhci_trb *trb) { if (ring == xhci->event_ring) @@ -116,7 +116,7 @@ static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK); } -static inline int enqueue_is_link_trb(struct xhci_ring *ring) +static int enqueue_is_link_trb(struct xhci_ring *ring) { struct xhci_link_trb *link = &ring->enqueue->link; return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)); @@ -592,7 +592,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, ep->ep_state |= SET_DEQ_PENDING; } -static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, +static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, struct xhci_virt_ep *ep) { ep->ep_state &= ~EP_HALT_PENDING; @@ -619,6 +619,13 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, /* Only giveback urb when this is the last td in urb */ if (urb_priv->td_cnt == urb_priv->length) { + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { + xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; + if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { + if (xhci->quirks & XHCI_AMD_PLL_FIX) + usb_amd_quirk_pll_enable(); + } + } usb_hcd_unlink_urb_from_ep(hcd, urb); xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb); @@ -1209,7 +1216,7 @@ static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd, * Skip ports that don't have known speeds, or have duplicate * Extended Capabilities port speed entries. */ - if (port_speed == 0 || port_speed == -1) + if (port_speed == 0 || port_speed == DUPLICATE_ENTRY) continue; /* @@ -1235,6 +1242,7 @@ static void handle_port_status(struct xhci_hcd *xhci, u8 major_revision; struct xhci_bus_state *bus_state; u32 __iomem **port_array; + bool bogus_port_status = false; /* Port status change events always have a successful completion code */ if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) { @@ -1247,6 +1255,7 @@ static void handle_port_status(struct xhci_hcd *xhci, max_ports = HCS_MAX_PORTS(xhci->hcs_params1); if ((port_id <= 0) || (port_id > max_ports)) { xhci_warn(xhci, "Invalid port id %d\n", port_id); + bogus_port_status = true; goto cleanup; } @@ -1258,12 +1267,14 @@ static void handle_port_status(struct xhci_hcd *xhci, xhci_warn(xhci, "Event for port %u not in " "Extended Capabilities, ignoring.\n", port_id); + bogus_port_status = true; goto cleanup; } - if (major_revision == (u8) -1) { + if (major_revision == DUPLICATE_ENTRY) { xhci_warn(xhci, "Event for port %u duplicated in" "Extended Capabilities, ignoring.\n", port_id); + bogus_port_status = true; goto cleanup; } @@ -1335,6 +1346,13 @@ cleanup: /* Update event ring dequeue pointer before dropping the lock */ inc_deq(xhci, xhci->event_ring, true); + /* Don't make the USB core poll the roothub if we got a bad port status + * change event. Besides, at that point we can't tell which roothub + * (USB 2.0 or USB 3.0) to kick. 
+ */ + if (bogus_port_status) + return; + spin_unlock(&xhci->lock); /* Pass this up to the core */ usb_hcd_poll_rh_status(hcd); @@ -1554,8 +1572,17 @@ td_cleanup: urb_priv->td_cnt++; /* Giveback the urb when all the tds are completed */ - if (urb_priv->td_cnt == urb_priv->length) + if (urb_priv->td_cnt == urb_priv->length) { ret = 1; + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { + xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; + if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs + == 0) { + if (xhci->quirks & XHCI_AMD_PLL_FIX) + usb_amd_quirk_pll_enable(); + } + } + } } return ret; @@ -1675,71 +1702,52 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, struct urb_priv *urb_priv; int idx; int len = 0; - int skip_td = 0; union xhci_trb *cur_trb; struct xhci_segment *cur_seg; + struct usb_iso_packet_descriptor *frame; u32 trb_comp_code; + bool skip_td = false; ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); trb_comp_code = GET_COMP_CODE(event->transfer_len); urb_priv = td->urb->hcpriv; idx = urb_priv->td_cnt; + frame = &td->urb->iso_frame_desc[idx]; - if (ep->skip) { - /* The transfer is partly done */ - *status = -EXDEV; - td->urb->iso_frame_desc[idx].status = -EXDEV; - } else { - /* handle completion code */ - switch (trb_comp_code) { - case COMP_SUCCESS: - td->urb->iso_frame_desc[idx].status = 0; - xhci_dbg(xhci, "Successful isoc transfer!\n"); - break; - case COMP_SHORT_TX: - if (td->urb->transfer_flags & URB_SHORT_NOT_OK) - td->urb->iso_frame_desc[idx].status = - -EREMOTEIO; - else - td->urb->iso_frame_desc[idx].status = 0; - break; - case COMP_BW_OVER: - td->urb->iso_frame_desc[idx].status = -ECOMM; - skip_td = 1; - break; - case COMP_BUFF_OVER: - case COMP_BABBLE: - td->urb->iso_frame_desc[idx].status = -EOVERFLOW; - skip_td = 1; - break; - case COMP_STALL: - td->urb->iso_frame_desc[idx].status = -EPROTO; - skip_td = 1; - break; - case COMP_STOP: - case COMP_STOP_INVAL: - break; - default: - td->urb->iso_frame_desc[idx].status = -1; - break; - } + /* handle completion code */ + switch (trb_comp_code) { + case COMP_SUCCESS: + frame->status = 0; + xhci_dbg(xhci, "Successful isoc transfer!\n"); + break; + case COMP_SHORT_TX: + frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ? 
+ -EREMOTEIO : 0; + break; + case COMP_BW_OVER: + frame->status = -ECOMM; + skip_td = true; + break; + case COMP_BUFF_OVER: + case COMP_BABBLE: + frame->status = -EOVERFLOW; + skip_td = true; + break; + case COMP_STALL: + frame->status = -EPROTO; + skip_td = true; + break; + case COMP_STOP: + case COMP_STOP_INVAL: + break; + default: + frame->status = -1; + break; } - /* calc actual length */ - if (ep->skip) { - td->urb->iso_frame_desc[idx].actual_length = 0; - /* Update ring dequeue pointer */ - while (ep_ring->dequeue != td->last_trb) - inc_deq(xhci, ep_ring, false); - inc_deq(xhci, ep_ring, false); - return finish_td(xhci, td, event_trb, event, ep, status, true); - } - - if (trb_comp_code == COMP_SUCCESS || skip_td == 1) { - td->urb->iso_frame_desc[idx].actual_length = - td->urb->iso_frame_desc[idx].length; - td->urb->actual_length += - td->urb->iso_frame_desc[idx].length; + if (trb_comp_code == COMP_SUCCESS || skip_td) { + frame->actual_length = frame->length; + td->urb->actual_length += frame->length; } else { for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg; cur_trb != event_trb; @@ -1755,7 +1763,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, TRB_LEN(event->transfer_len); if (trb_comp_code != COMP_STOP_INVAL) { - td->urb->iso_frame_desc[idx].actual_length = len; + frame->actual_length = len; td->urb->actual_length += len; } } @@ -1766,6 +1774,35 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, return finish_td(xhci, td, event_trb, event, ep, status, false); } +static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, + struct xhci_transfer_event *event, + struct xhci_virt_ep *ep, int *status) +{ + struct xhci_ring *ep_ring; + struct urb_priv *urb_priv; + struct usb_iso_packet_descriptor *frame; + int idx; + + ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); + urb_priv = td->urb->hcpriv; + idx = urb_priv->td_cnt; + frame = &td->urb->iso_frame_desc[idx]; + + /* The transfer is partly done */ + *status = -EXDEV; + frame->status = -EXDEV; + + /* calc actual length */ + frame->actual_length = 0; + + /* Update ring dequeue pointer */ + while (ep_ring->dequeue != td->last_trb) + inc_deq(xhci, ep_ring, false); + inc_deq(xhci, ep_ring, false); + + return finish_td(xhci, td, NULL, event, ep, status, true); +} + /* * Process bulk and interrupt tds, update urb status and actual_length. */ @@ -2024,36 +2061,42 @@ static int handle_tx_event(struct xhci_hcd *xhci, } td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); + /* Is this a TRB in the currently executing TD? */ event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, td->last_trb, event_dma); - if (event_seg && ep->skip) { + if (!event_seg) { + if (!ep->skip || + !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { + /* HC is busted, give up! */ + xhci_err(xhci, + "ERROR Transfer event TRB DMA ptr not " + "part of current TD\n"); + return -ESHUTDOWN; + } + + ret = skip_isoc_td(xhci, td, event, ep, &status); + goto cleanup; + } + + if (ep->skip) { xhci_dbg(xhci, "Found td. Clear skip flag.\n"); ep->skip = false; } - if (!event_seg && - (!ep->skip || !usb_endpoint_xfer_isoc(&td->urb->ep->desc))) { - /* HC is busted, give up! */ - xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not " - "part of current TD\n"); - return -ESHUTDOWN; - } - if (event_seg) { - event_trb = &event_seg->trbs[(event_dma - - event_seg->dma) / sizeof(*event_trb)]; - /* - * No-op TRB should not trigger interrupts. 
- * If event_trb is a no-op TRB, it means the - * corresponding TD has been cancelled. Just ignore - * the TD. - */ - if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK) - == TRB_TYPE(TRB_TR_NOOP)) { - xhci_dbg(xhci, "event_trb is a no-op TRB. " - "Skip it\n"); - goto cleanup; - } + event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / + sizeof(*event_trb)]; + /* + * No-op TRB should not trigger interrupts. + * If event_trb is a no-op TRB, it means the + * corresponding TD has been cancelled. Just ignore + * the TD. + */ + if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK) + == TRB_TYPE(TRB_TR_NOOP)) { + xhci_dbg(xhci, + "event_trb is a no-op TRB. Skip it\n"); + goto cleanup; } /* Now update the urb's actual_length and give back to @@ -3126,6 +3169,12 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, } } + if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { + if (xhci->quirks & XHCI_AMD_PLL_FIX) + usb_amd_quirk_pll_disable(); + } + xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++; + giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, start_cycle, start_trb); return 0; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 196e0181b2ed..81b976e45880 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -550,6 +550,9 @@ void xhci_stop(struct usb_hcd *hcd) del_timer_sync(&xhci->event_ring_timer); #endif + if (xhci->quirks & XHCI_AMD_PLL_FIX) + usb_amd_dev_put(); + xhci_dbg(xhci, "// Disabling event ring interrupts\n"); temp = xhci_readl(xhci, &xhci->op_regs->status); xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status); @@ -771,7 +774,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) /* If restore operation fails, re-initialize the HC during resume */ if ((temp & STS_SRE) || hibernated) { - usb_root_hub_lost_power(hcd->self.root_hub); + /* Let the USB core know _both_ roothubs lost power. */ + usb_root_hub_lost_power(xhci->main_hcd->self.root_hub); + usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub); xhci_dbg(xhci, "Stop HCD\n"); xhci_halt(xhci); @@ -2386,10 +2391,18 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) /* Everything but endpoint 0 is disabled, so free or cache the rings. */ last_freed_endpoint = 1; for (i = 1; i < 31; ++i) { - if (!virt_dev->eps[i].ring) - continue; - xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); - last_freed_endpoint = i; + struct xhci_virt_ep *ep = &virt_dev->eps[i]; + + if (ep->ep_state & EP_HAS_STREAMS) { + xhci_free_stream_info(xhci, ep->stream_info); + ep->stream_info = NULL; + ep->ep_state &= ~EP_HAS_STREAMS; + } + + if (ep->ring) { + xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); + last_freed_endpoint = i; + } } xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 07e263063e37..ba1be6b7cc6d 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -30,6 +30,7 @@ /* Code sharing between pci-quirks and xhci hcd */ #include "xhci-ext-caps.h" +#include "pci-quirks.h" /* xHCI PCI Configuration Registers */ #define XHCI_SBRN_OFFSET (0x60) @@ -232,7 +233,7 @@ struct xhci_op_regs { * notification type that matches a bit set in this bit field. */ #define DEV_NOTE_MASK (0xffff) -#define ENABLE_DEV_NOTE(x) (1 << x) +#define ENABLE_DEV_NOTE(x) (1 << (x)) /* Most of the device notification types should only be used for debug. 
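The isochronous paths reworked above gate the AMD PLL workaround on a running count of in-flight isochronous URBs kept in self.bandwidth_isoc_reqs: the fix is switched off when the first isochronous URB is queued and switched back on when the last one is given back. A standalone sketch of that counting pattern; the quirk_pll_*() names stand in for the real pci-quirks helpers and locking is left out for brevity.

	#include <stdio.h>

	/* Stand-ins for usb_amd_quirk_pll_disable()/_enable(); illustrative only. */
	static void quirk_pll_disable(void) { printf("PLL fix disabled\n"); }
	static void quirk_pll_enable(void)  { printf("PLL fix re-enabled\n"); }

	static unsigned int isoc_in_flight;	/* mirrors bandwidth_isoc_reqs */

	static void isoc_urb_queued(void)
	{
		if (isoc_in_flight == 0)	/* first isoc URB: turn the fix off */
			quirk_pll_disable();
		isoc_in_flight++;
	}

	static void isoc_urb_given_back(void)
	{
		if (--isoc_in_flight == 0)	/* last isoc URB: turn it back on */
			quirk_pll_enable();
	}

	int main(void)
	{
		isoc_urb_queued();
		isoc_urb_queued();
		isoc_urb_given_back();
		isoc_urb_given_back();	/* disable printed once, enable once */
		return 0;
	}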
* SW does need to pay attention to function wake notifications. */ @@ -348,6 +349,9 @@ struct xhci_op_regs { /* Initiate a warm port reset - complete when PORT_WRC is '1' */ #define PORT_WR (1 << 31) +/* We mark duplicate entries with -1 */ +#define DUPLICATE_ENTRY ((u8)(-1)) + /* Port Power Management Status and Control - port_power_base bitmasks */ /* Inactivity timer value for transitions into U1, in microseconds. * Timeout can be up to 127us. 0xFF means an infinite timeout. @@ -601,11 +605,11 @@ struct xhci_ep_ctx { #define EP_STATE_STOPPED 3 #define EP_STATE_ERROR 4 /* Mult - Max number of burtst within an interval, in EP companion desc. */ -#define EP_MULT(p) ((p & 0x3) << 8) +#define EP_MULT(p) (((p) & 0x3) << 8) /* bits 10:14 are Max Primary Streams */ /* bit 15 is Linear Stream Array */ /* Interval - period between requests to an endpoint - 125u increments. */ -#define EP_INTERVAL(p) ((p & 0xff) << 16) +#define EP_INTERVAL(p) (((p) & 0xff) << 16) #define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff)) #define EP_MAXPSTREAMS_MASK (0x1f << 10) #define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) @@ -1276,6 +1280,7 @@ struct xhci_hcd { #define XHCI_LINK_TRB_QUIRK (1 << 0) #define XHCI_RESET_EP_QUIRK (1 << 1) #define XHCI_NEC_HOST (1 << 2) +#define XHCI_AMD_PLL_FIX (1 << 3) /* There are two roothubs to keep track of bus suspend info for */ struct xhci_bus_state bus_state[2]; /* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */ diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index 4cbb7e4b368d..74073b363c30 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig @@ -14,7 +14,7 @@ config USB_MUSB_HDRC select TWL4030_USB if MACH_OMAP_3430SDP select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA select USB_OTG_UTILS - tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' + bool 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' help Say Y here if your system has a dual role high speed USB controller based on the Mentor Graphics silicon IP. Then @@ -30,8 +30,8 @@ config USB_MUSB_HDRC If you do not know what this is, please say N. - To compile this driver as a module, choose M here; the - module will be called "musb-hdrc". +# To compile this driver as a module, choose M here; the +# module will be called "musb-hdrc". choice prompt "Platform Glue Layer" diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c index 52312e8af213..8e2a1ff8a35a 100644 --- a/drivers/usb/musb/blackfin.c +++ b/drivers/usb/musb/blackfin.c @@ -21,6 +21,7 @@ #include #include "musb_core.h" +#include "musbhsdma.h" #include "blackfin.h" struct bfin_glue { @@ -332,6 +333,27 @@ static int bfin_musb_set_mode(struct musb *musb, u8 musb_mode) return -EIO; } +static int bfin_musb_adjust_channel_params(struct dma_channel *channel, + u16 packet_sz, u8 *mode, + dma_addr_t *dma_addr, u32 *len) +{ + struct musb_dma_channel *musb_channel = channel->private_data; + + /* + * Anomaly 05000450 might cause data corruption when using DMA + * MODE 1 transmits with short packet. So to work around this, + * we truncate all MODE 1 transfers down to a multiple of the + * max packet size, and then do the last short packet transfer + * (if there is any) using MODE 0. 
+ */ + if (ANOMALY_05000450) { + if (musb_channel->transmit && *mode == 1) + *len = *len - (*len % packet_sz); + } + + return 0; +} + static void bfin_musb_reg_init(struct musb *musb) { if (ANOMALY_05000346) { @@ -430,6 +452,8 @@ static const struct musb_platform_ops bfin_ops = { .vbus_status = bfin_musb_vbus_status, .set_vbus = bfin_musb_set_vbus, + + .adjust_channel_params = bfin_musb_adjust_channel_params, }; static u64 bfin_dmamask = DMA_BIT_MASK(32); diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c index de55a3c3259a..ab434fbd8c35 100644 --- a/drivers/usb/musb/cppi_dma.c +++ b/drivers/usb/musb/cppi_dma.c @@ -597,12 +597,12 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx) length = min(n_bds * maxpacket, length); } - DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n", + DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u\n", tx->index, maxpacket, rndis ? "rndis" : "transparent", n_bds, - addr, length); + (unsigned long long)addr, length); cppi_rndis_update(tx, 0, musb->ctrl_base, rndis); @@ -820,7 +820,7 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket) length = min(n_bds * maxpacket, length); DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) " - "dma 0x%x len %u %u/%u\n", + "dma 0x%llx len %u %u/%u\n", rx->index, maxpacket, onepacket ? (is_rndis ? "rndis" : "onepacket") @@ -829,7 +829,8 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket) musb_readl(tibase, DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) & 0xffff, - addr, length, rx->channel.actual_len, rx->buf_len); + (unsigned long long)addr, length, + rx->channel.actual_len, rx->buf_len); /* only queue one segment at a time, since the hardware prevents * correct queue shutdown after unexpected short packets @@ -1039,9 +1040,9 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch) if (!completed && (bd->hw_options & CPPI_OWN_SET)) break; - DBG(5, "C/RXBD %08x: nxt %08x buf %08x " + DBG(5, "C/RXBD %llx: nxt %08x buf %08x " "off.len %08x opt.len %08x (%d)\n", - bd->dma, bd->hw_next, bd->hw_bufp, + (unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp, bd->hw_off_len, bd->hw_options, rx->channel.actual_len); @@ -1111,11 +1112,12 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch) musb_ep_select(cppi->mregs, rx->index + 1); csr = musb_readw(regs, MUSB_RXCSR); if (csr & MUSB_RXCSR_DMAENAB) { - DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n", + DBG(4, "list%d %p/%p, last %llx%s, csr %04x\n", rx->index, rx->head, rx->tail, rx->last_processed - ? rx->last_processed->dma + ? (unsigned long long) + rx->last_processed->dma : 0, completed ? ", completed" : "", csr); @@ -1167,8 +1169,11 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id) tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG); rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG); - if (!tx && !rx) + if (!tx && !rx) { + if (cppi->irq) + spin_unlock_irqrestore(&musb->lock, flags); return IRQ_NONE; + } DBG(4, "CPPI IRQ Tx%x Rx%x\n", tx, rx); @@ -1199,7 +1204,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id) */ if (NULL == bd) { DBG(1, "null BD\n"); - tx_ram->tx_complete = 0; + musb_writel(&tx_ram->tx_complete, 0, 0); continue; } @@ -1452,7 +1457,7 @@ static int cppi_channel_abort(struct dma_channel *channel) * compare mode by writing 1 to the tx_complete register. 
*/ cppi_reset_tx(tx_ram, 1); - cppi_ch->head = 0; + cppi_ch->head = NULL; musb_writel(&tx_ram->tx_complete, 0, 1); cppi_dump_tx(5, cppi_ch, " (done teardown)"); diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 630ae7f3cd4c..f10ff00ca09e 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -1030,6 +1030,7 @@ static void musb_shutdown(struct platform_device *pdev) struct musb *musb = dev_to_musb(&pdev->dev); unsigned long flags; + pm_runtime_get_sync(musb->controller); spin_lock_irqsave(&musb->lock, flags); musb_platform_disable(musb); musb_generic_disable(musb); @@ -1040,6 +1041,7 @@ static void musb_shutdown(struct platform_device *pdev) musb_writeb(musb->mregs, MUSB_DEVCTL, 0); musb_platform_exit(musb); + pm_runtime_put(musb->controller); /* FIXME power down */ } diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index 4bd9e2145ee4..0e053b587960 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h @@ -261,6 +261,7 @@ enum musb_g_ep0_state { * @try_ilde: tries to idle the IP * @vbus_status: returns vbus status if possible * @set_vbus: forces vbus status + * @channel_program: pre check for standard dma channel_program func */ struct musb_platform_ops { int (*init)(struct musb *musb); @@ -274,6 +275,10 @@ struct musb_platform_ops { int (*vbus_status)(struct musb *musb); void (*set_vbus)(struct musb *musb, int on); + + int (*adjust_channel_params)(struct dma_channel *channel, + u16 packet_sz, u8 *mode, + dma_addr_t *dma_addr, u32 *len); }; /* diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 98519c5d8b5c..6dfbf9ffd7a6 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -535,7 +535,7 @@ void musb_g_tx(struct musb *musb, u8 epnum) is_dma = 1; csr |= MUSB_TXCSR_P_WZC_BITS; csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | - MUSB_TXCSR_TXPKTRDY); + MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); musb_writew(epio, MUSB_TXCSR, csr); /* Ensure writebuffer is empty. */ csr = musb_readw(epio, MUSB_TXCSR); @@ -1296,7 +1296,7 @@ static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request) } /* if the hardware doesn't have the request, easy ... */ - if (musb_ep->req_list.next != &request->list || musb_ep->busy) + if (musb_ep->req_list.next != &req->list || musb_ep->busy) musb_g_giveback(musb_ep, request, -ECONNRESET); /* ... else abort the dma transfer ... */ diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c index 0144a2d481fd..d281792db05c 100644 --- a/drivers/usb/musb/musbhsdma.c +++ b/drivers/usb/musb/musbhsdma.c @@ -169,6 +169,14 @@ static int dma_channel_program(struct dma_channel *channel, BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || channel->status == MUSB_DMA_STATUS_BUSY); + /* Let targets check/tweak the arguments */ + if (musb->ops->adjust_channel_params) { + int ret = musb->ops->adjust_channel_params(channel, + packet_sz, &mode, &dma_addr, &len); + if (ret) + return ret; + } + /* * The DMA engine in RTL1.8 and above cannot handle * DMA addresses that are not aligned to a 4 byte boundary. 
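The adjust_channel_params() hook added to struct musb_platform_ops above lets platform glue such as the Blackfin code pre-check and tweak the DMA parameters before the generic HSDMA code programs the channel; for Anomaly 05000450 that means trimming a MODE 1 TX transfer down to a whole number of max-size packets so the trailing short packet goes out separately via MODE 0. A standalone sketch of that length arithmetic, with example numbers only:

	#include <stdio.h>

	/* Keep only whole max-packet-size chunks in the MODE 1 DMA transfer,
	 * mirroring the *len = *len - (*len % packet_sz) adjustment above. */
	static unsigned int mode1_len(unsigned int len, unsigned int packet_sz)
	{
		return len - (len % packet_sz);
	}

	int main(void)
	{
		unsigned int len = 1500, packet_sz = 512;
		unsigned int dma_len = mode1_len(len, packet_sz);

		/* 1500 bytes with 512-byte packets: 1024 bytes go out via MODE 1,
		 * leaving a 476-byte short packet to be sent afterwards. */
		printf("MODE 1 length: %u, remainder: %u\n", dma_len, len - dma_len);
		return 0;
	}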
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index 25cb8b0003b1..57a27fa954b4 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -259,9 +259,10 @@ static int musb_otg_notifications(struct notifier_block *nb, case USB_EVENT_VBUS: DBG(4, "VBUS Connect\n"); +#ifdef CONFIG_USB_GADGET_MUSB_HDRC if (musb->gadget_driver) pm_runtime_get_sync(musb->controller); - +#endif otg_init(musb->xceiv); break; diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c index d6384e4aeef9..f7e04bf34a13 100644 --- a/drivers/usb/musb/ux500.c +++ b/drivers/usb/musb/ux500.c @@ -93,6 +93,8 @@ static int __init ux500_probe(struct platform_device *pdev) } musb->dev.parent = &pdev->dev; + musb->dev.dma_mask = pdev->dev.dma_mask; + musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask; glue->dev = &pdev->dev; glue->musb = musb; diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index a973c7a29d6e..4de6ef0ae52a 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -151,6 +151,8 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = { * /sys/bus/usb/ftdi_sio/new_id, then send patch/report! */ static struct usb_device_id id_table_combined [] = { + { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, @@ -525,6 +527,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) }, { USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) }, { USB_DEVICE(OCT_VID, OCT_US101_PID) }, + { USB_DEVICE(OCT_VID, OCT_DK201_PID) }, { USB_DEVICE(FTDI_VID, FTDI_HE_TIRA1_PID), .driver_info = (kernel_ulong_t)&ftdi_HE_TIRA1_quirk }, { USB_DEVICE(FTDI_VID, FTDI_USB_UIRT_PID), @@ -787,6 +790,8 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) }, + { USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) }, + { USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) }, { USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) }, { USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) }, { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index c543e55bafba..efffc23723bd 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -300,6 +300,8 @@ * Hameg HO820 and HO870 interface (using VID 0x0403) */ #define HAMEG_HO820_PID 0xed74 +#define HAMEG_HO730_PID 0xed73 +#define HAMEG_HO720_PID 0xed72 #define HAMEG_HO870_PID 0xed71 /* @@ -572,6 +574,7 @@ /* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */ /* Also rebadged as Dick Smith Electronics (Aus) XH6451 */ /* Also rebadged as SIIG Inc. 
model US2308 hardware version 1 */ +#define OCT_DK201_PID 0x0103 /* OCT DK201 USB docking station */ #define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */ /* @@ -1141,3 +1144,12 @@ #define QIHARDWARE_VID 0x20B7 #define MILKYMISTONE_JTAGSERIAL_PID 0x0713 +/* + * CTI GmbH RS485 Converter http://www.cti-lean.com/ + */ +/* USB-485-Mini*/ +#define FTDI_CTI_MINI_PID 0xF608 +/* USB-Nano-485*/ +#define FTDI_CTI_NANO_PID 0xF60B + + diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 75c7f456eed5..d77ff0435896 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -407,6 +407,10 @@ static void option_instat_callback(struct urb *urb); /* ONDA MT825UP HSDPA 14.2 modem */ #define ONDA_MT825UP 0x000b +/* Samsung products */ +#define SAMSUNG_VENDOR_ID 0x04e8 +#define SAMSUNG_PRODUCT_GT_B3730 0x6889 + /* some devices interfaces need special handling due to a number of reasons */ enum option_blacklist_reason { OPTION_BLACKLIST_NONE = 0, @@ -968,6 +972,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */ + { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730/GT-B3710 LTE USB modem.*/ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 8858201eb1d3..54a9dab1f33b 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -111,7 +111,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) ifnum = intf->desc.bInterfaceNumber; dbg("This Interface = %d", ifnum); - data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), + data = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL); if (!data) return -ENOMEM; @@ -134,8 +134,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) { dbg("QDL port found"); - if (serial->interface->num_altsetting == 1) - return 0; + if (serial->interface->num_altsetting == 1) { + retval = 0; /* Success */ + break; + } retval = usb_set_interface(serial->dev, ifnum, 1); if (retval < 0) { @@ -145,7 +147,6 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) retval = -ENODEV; kfree(data); } - return retval; } break; @@ -166,6 +167,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) "Could not set interface, error %d\n", retval); retval = -ENODEV; + kfree(data); } } else if (ifnum == 2) { dbg("Modem port found"); @@ -177,7 +179,6 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) retval = -ENODEV; kfree(data); } - return retval; } else if (ifnum==3) { /* * NMEA (serial line 9600 8N1) @@ -191,6 +192,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) "Could not set interface, error %d\n", retval); retval = -ENODEV; + kfree(data); } } break; @@ -199,12 +201,27 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) dev_err(&serial->dev->dev, "unknown number of interfaces: %d\n", nintf); kfree(data); - return -ENODEV; + retval = -ENODEV; } + /* Set serial->private if not returning -ENODEV */ + if (retval != -ENODEV) + usb_set_serial_data(serial, data); 
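The qcserial changes above stop returning from the middle of qcprobe() so that every failing branch frees the interface-private data and usb_set_serial_data() is only called once the probe has actually succeeded. A small generic sketch of that shape; probe(), configure() and struct device are hypothetical stand-ins, not the usb-serial API:

#include <stdio.h>
#include <stdlib.h>

struct device { void *private; };

static int configure(struct device *dev, int want_fail)
{
	(void)dev;
	return want_fail ? -1 : 0;
}

static int probe(struct device *dev, int want_fail)
{
	int retval;
	char *data = calloc(1, 64);

	if (!data)
		return -1;

	retval = configure(dev, want_fail);
	if (retval < 0) {
		free(data);		/* every failing branch frees... */
		data = NULL;
	}

	/* ...and the pointer is published only when probe succeeded. */
	if (retval == 0)
		dev->private = data;

	return retval;
}

int main(void)
{
	struct device dev = { 0 };

	printf("probe: %d, private %s\n", probe(&dev, 0),
	       dev.private ? "set" : "unset");
	free(dev.private);
	return 0;
}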
return retval; } +static void qc_release(struct usb_serial *serial) +{ + struct usb_wwan_intf_private *priv = usb_get_serial_data(serial); + + dbg("%s", __func__); + + /* Call usb_wwan release & free the private data allocated in qcprobe */ + usb_wwan_release(serial); + usb_set_serial_data(serial, NULL); + kfree(priv); +} + static struct usb_serial_driver qcdevice = { .driver = { .owner = THIS_MODULE, @@ -222,7 +239,7 @@ static struct usb_serial_driver qcdevice = { .chars_in_buffer = usb_wwan_chars_in_buffer, .attach = usb_wwan_startup, .disconnect = usb_wwan_disconnect, - .release = usb_wwan_release, + .release = qc_release, #ifdef CONFIG_PM .suspend = usb_wwan_suspend, .resume = usb_wwan_resume, diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c index a2e5b5100ab4..0f4e8c942f9e 100644 --- a/drivers/video/pxafb.c +++ b/drivers/video/pxafb.c @@ -1648,7 +1648,9 @@ pxafb_freq_transition(struct notifier_block *nb, unsigned long val, void *data) switch (val) { case CPUFREQ_PRECHANGE: - if (!fbi->overlay[0].usage && !fbi->overlay[1].usage) +#ifdef CONFIG_FB_PXA_OVERLAY + if (!(fbi->overlay[0].usage || fbi->overlay[1].usage)) +#endif set_ctrlr_state(fbi, C_DISABLE_CLKCHANGE); break; diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 42d6c930cc87..33167b43ac7e 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -912,8 +912,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn, unsigned long irqflags, const char *devname, void *dev_id) { - unsigned int irq; - int retval; + int irq, retval; irq = bind_evtchn_to_irq(evtchn); if (irq < 0) @@ -955,8 +954,7 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { - unsigned int irq; - int retval; + int irq, retval; irq = bind_virq_to_irq(virq, cpu); if (irq < 0) diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index 95143dd6904d..1ac94125bf93 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -61,7 +61,7 @@ static void xen_post_suspend(int cancelled) xen_mm_unpin_all(); } -#ifdef CONFIG_HIBERNATION +#ifdef CONFIG_HIBERNATE_CALLBACKS static int xen_suspend(void *data) { struct suspend_info *si = data; @@ -173,7 +173,7 @@ out: #endif shutting_down = SHUTDOWN_INVALID; } -#endif /* CONFIG_HIBERNATION */ +#endif /* CONFIG_HIBERNATE_CALLBACKS */ struct shutdown_handler { const char *command; @@ -202,7 +202,7 @@ static void shutdown_handler(struct xenbus_watch *watch, { "poweroff", do_poweroff }, { "halt", do_poweroff }, { "reboot", do_reboot }, -#ifdef CONFIG_HIBERNATION +#ifdef CONFIG_HIBERNATE_CALLBACKS { "suspend", do_suspend }, #endif {NULL, NULL}, diff --git a/fs/9p/fid.c b/fs/9p/fid.c index 0ee594569dcc..85b67ffa2a43 100644 --- a/fs/9p/fid.c +++ b/fs/9p/fid.c @@ -286,11 +286,9 @@ static struct p9_fid *v9fs_fid_clone_with_uid(struct dentry *dentry, uid_t uid) struct p9_fid *v9fs_writeback_fid(struct dentry *dentry) { - int err, flags; + int err; struct p9_fid *fid; - struct v9fs_session_info *v9ses; - v9ses = v9fs_dentry2v9ses(dentry); fid = v9fs_fid_clone_with_uid(dentry, 0); if (IS_ERR(fid)) goto error_out; @@ -299,17 +297,8 @@ struct p9_fid *v9fs_writeback_fid(struct dentry *dentry) * dirty pages. We always request for the open fid in read-write * mode so that a partial page write which result in page * read can work. - * - * we don't have a tsyncfs operation for older version - * of protocol. So make sure the write back fid is - * opened in O_SYNC mode. 
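The xen/events.c hunks change irq from unsigned int to int because bind_evtchn_to_irq() and bind_virq_to_irq() can return a negative errno, and storing that in an unsigned variable silently turns the `irq < 0` check into dead code. A stand-alone illustration of that class of bug:

#include <stdio.h>

static int bind(int fail)
{
	return fail ? -19 /* -ENODEV */ : 42;
}

int main(void)
{
	unsigned int uirq = bind(1);	/* wraps to a huge positive value  */
	int irq = bind(1);		/* keeps the negative errno        */

	printf("unsigned error check fires: %s\n", (uirq < 0) ? "yes" : "no");
	printf("signed error check fires:   %s\n", (irq < 0) ? "yes" : "no");
	return 0;
}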
*/ - if (!v9fs_proto_dotl(v9ses)) - flags = O_RDWR | O_SYNC; - else - flags = O_RDWR; - - err = p9_client_open(fid, flags); + err = p9_client_open(fid, O_RDWR); if (err < 0) { p9_client_clunk(fid); fid = ERR_PTR(err); diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h index 9665c2b840e6..e5ebedfc5ed8 100644 --- a/fs/9p/v9fs.h +++ b/fs/9p/v9fs.h @@ -116,7 +116,6 @@ struct v9fs_session_info { struct list_head slist; /* list of sessions registered with v9fs */ struct backing_dev_info bdi; struct rw_semaphore rename_sem; - struct p9_fid *root_fid; /* Used for file system sync */ }; /* cache_validity flags */ diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c index b6a3b9f7fe4d..e022890c6f40 100644 --- a/fs/9p/vfs_dentry.c +++ b/fs/9p/vfs_dentry.c @@ -126,7 +126,9 @@ static int v9fs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd) retval = v9fs_refresh_inode_dotl(fid, inode); else retval = v9fs_refresh_inode(fid, inode); - if (retval <= 0) + if (retval == -ENOENT) + return 0; + if (retval < 0) return retval; } out_valid: diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index ffbb113d5f33..82a7c38ddad0 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -811,7 +811,7 @@ v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd) fid = v9fs_fid_lookup(dentry); if (IS_ERR(fid)) { __putname(link); - link = ERR_PTR(PTR_ERR(fid)); + link = ERR_CAST(fid); goto ndset; } retval = p9_client_readlink(fid, &target); diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index f3eed3383e4f..feef6cdc1fd2 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c @@ -154,6 +154,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags, retval = PTR_ERR(inode); goto release_sb; } + root = d_alloc_root(inode); if (!root) { iput(inode); @@ -185,21 +186,10 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags, p9stat_free(st); kfree(st); } - v9fs_fid_add(root, fid); retval = v9fs_get_acl(inode, fid); if (retval) goto release_sb; - /* - * Add the root fid to session info. This is used - * for file system sync. We want a cloned fid here - * so that we can do a sync_filesystem after a - * shrink_dcache_for_umount - */ - v9ses->root_fid = v9fs_fid_clone(root); - if (IS_ERR(v9ses->root_fid)) { - retval = PTR_ERR(v9ses->root_fid); - goto release_sb; - } + v9fs_fid_add(root, fid); P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n"); return dget(sb->s_root); @@ -210,11 +200,15 @@ close_session: v9fs_session_close(v9ses); kfree(v9ses); return ERR_PTR(retval); + release_sb: /* - * we will do the session_close and root dentry - * release in the below call. + * we will do the session_close and root dentry release + * in the below call. But we need to clunk fid, because we haven't + * attached the fid to dentry so it won't get clunked + * automatically. 
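The follow_link change swaps ERR_PTR(PTR_ERR(fid)) for ERR_CAST(fid), which re-labels an error pointer as a different pointer type without bouncing it through an integer. A rough user-space model of those helpers, for illustration only; the real definitions live in include/linux/err.h:

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}
/* ERR_CAST: same bits, different pointer type; no integer round trip. */
static inline void *ERR_CAST(const void *ptr) { return (void *)ptr; }

struct fid { int n; };
struct link { int n; };

int main(void)
{
	struct fid *fid = ERR_PTR(-2 /* -ENOENT */);

	if (IS_ERR(fid)) {
		struct link *l = ERR_CAST(fid);
		printf("propagated error %ld\n", PTR_ERR(l));
	}
	return 0;
}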
*/ + p9_client_clunk(fid); deactivate_locked_super(sb); return ERR_PTR(retval); } @@ -232,7 +226,7 @@ static void v9fs_kill_super(struct super_block *s) P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s); kill_anon_super(s); - p9_client_clunk(v9ses->root_fid); + v9fs_session_cancel(v9ses); v9fs_session_close(v9ses); kfree(v9ses); @@ -285,14 +279,6 @@ done: return res; } -static int v9fs_sync_fs(struct super_block *sb, int wait) -{ - struct v9fs_session_info *v9ses = sb->s_fs_info; - - P9_DPRINTK(P9_DEBUG_VFS, "v9fs_sync_fs: super_block %p\n", sb); - return p9_client_sync_fs(v9ses->root_fid); -} - static int v9fs_drop_inode(struct inode *inode) { struct v9fs_session_info *v9ses; @@ -307,6 +293,51 @@ static int v9fs_drop_inode(struct inode *inode) return 1; } +static int v9fs_write_inode(struct inode *inode, + struct writeback_control *wbc) +{ + int ret; + struct p9_wstat wstat; + struct v9fs_inode *v9inode; + /* + * send an fsync request to server irrespective of + * wbc->sync_mode. + */ + P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode); + v9inode = V9FS_I(inode); + if (!v9inode->writeback_fid) + return 0; + v9fs_blank_wstat(&wstat); + + ret = p9_client_wstat(v9inode->writeback_fid, &wstat); + if (ret < 0) { + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); + return ret; + } + return 0; +} + +static int v9fs_write_inode_dotl(struct inode *inode, + struct writeback_control *wbc) +{ + int ret; + struct v9fs_inode *v9inode; + /* + * send an fsync request to server irrespective of + * wbc->sync_mode. + */ + P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode); + v9inode = V9FS_I(inode); + if (!v9inode->writeback_fid) + return 0; + ret = p9_client_fsync(v9inode->writeback_fid, 0); + if (ret < 0) { + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); + return ret; + } + return 0; +} + static const struct super_operations v9fs_super_ops = { .alloc_inode = v9fs_alloc_inode, .destroy_inode = v9fs_destroy_inode, @@ -314,17 +345,18 @@ static const struct super_operations v9fs_super_ops = { .evict_inode = v9fs_evict_inode, .show_options = generic_show_options, .umount_begin = v9fs_umount_begin, + .write_inode = v9fs_write_inode, }; static const struct super_operations v9fs_super_ops_dotl = { .alloc_inode = v9fs_alloc_inode, .destroy_inode = v9fs_destroy_inode, - .sync_fs = v9fs_sync_fs, .statfs = v9fs_statfs, .drop_inode = v9fs_drop_inode, .evict_inode = v9fs_evict_inode, .show_options = generic_show_options, .umount_begin = v9fs_umount_begin, + .write_inode = v9fs_write_inode_dotl, }; struct file_system_type v9fs_fs_type = { diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index f34078d702d3..303983fabfd6 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -941,9 +941,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) current->mm->start_stack = bprm->p; #ifdef arch_randomize_brk - if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) + if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) { current->mm->brk = current->mm->start_brk = arch_randomize_brk(current->mm); +#ifdef CONFIG_COMPAT_BRK + current->brk_randomized = 1; +#endif + } #endif if (current->personality & MMAP_PAGE_ZERO) { diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index de34bfad9ec3..5d505aaa72fb 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -178,16 +178,17 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, if (value) { acl = posix_acl_from_xattr(value, size); - if (acl == NULL) { - value = NULL; - size = 0; + if (acl) { + ret = posix_acl_valid(acl); + if 
(ret) + goto out; } else if (IS_ERR(acl)) { return PTR_ERR(acl); } } ret = btrfs_set_acl(NULL, dentry->d_inode, acl, type); - +out: posix_acl_release(acl); return ret; diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 3458b5725540..2e61fe1b6b8c 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -740,8 +740,10 @@ struct btrfs_space_info { */ unsigned long reservation_progress; - int full; /* indicates that we cannot allocate any more + int full:1; /* indicates that we cannot allocate any more chunks for this space */ + int chunk_alloc:1; /* set if we are allocating a chunk */ + int force_alloc; /* set if we need to force a chunk alloc for this space */ @@ -2576,6 +2578,11 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, struct inode *inode, u64 start, u64 end); int btrfs_release_file(struct inode *inode, struct file *file); +void btrfs_drop_pages(struct page **pages, size_t num_pages); +int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode, + struct page **pages, size_t num_pages, + loff_t pos, size_t write_bytes, + struct extent_state **cached); /* tree-defrag.c */ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 8f1d44ba332f..68c84c8c24bd 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3057,7 +3057,7 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root) btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents); - t->use_count = 0; + atomic_set(&t->use_count, 0); list_del_init(&t->list); memset(t, 0, sizeof(*t)); kmem_cache_free(btrfs_transaction_cachep, t); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index f619c3cb13b7..31f33ba56fe8 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -33,6 +33,25 @@ #include "locking.h" #include "free-space-cache.h" +/* control flags for do_chunk_alloc's force field + * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk + * if we really need one. + * + * CHUNK_ALLOC_FORCE means it must try to allocate one + * + * CHUNK_ALLOC_LIMITED means to only try and allocate one + * if we have very few chunks already allocated. 
This is + * used as part of the clustering code to help make sure + * we have a good pool of storage to cluster in, without + * filling the FS with empty chunks + * + */ +enum { + CHUNK_ALLOC_NO_FORCE = 0, + CHUNK_ALLOC_FORCE = 1, + CHUNK_ALLOC_LIMITED = 2, +}; + static int update_block_group(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 num_bytes, int alloc); @@ -3019,7 +3038,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, found->bytes_readonly = 0; found->bytes_may_use = 0; found->full = 0; - found->force_alloc = 0; + found->force_alloc = CHUNK_ALLOC_NO_FORCE; + found->chunk_alloc = 0; *space_info = found; list_add_rcu(&found->list, &info->space_info); atomic_set(&found->caching_threads, 0); @@ -3150,7 +3170,7 @@ again: if (!data_sinfo->full && alloc_chunk) { u64 alloc_target; - data_sinfo->force_alloc = 1; + data_sinfo->force_alloc = CHUNK_ALLOC_FORCE; spin_unlock(&data_sinfo->lock); alloc: alloc_target = btrfs_get_alloc_profile(root, 1); @@ -3160,7 +3180,8 @@ alloc: ret = do_chunk_alloc(trans, root->fs_info->extent_root, bytes + 2 * 1024 * 1024, - alloc_target, 0); + alloc_target, + CHUNK_ALLOC_NO_FORCE); btrfs_end_transaction(trans, root); if (ret < 0) { if (ret != -ENOSPC) @@ -3239,31 +3260,56 @@ static void force_metadata_allocation(struct btrfs_fs_info *info) rcu_read_lock(); list_for_each_entry_rcu(found, head, list) { if (found->flags & BTRFS_BLOCK_GROUP_METADATA) - found->force_alloc = 1; + found->force_alloc = CHUNK_ALLOC_FORCE; } rcu_read_unlock(); } static int should_alloc_chunk(struct btrfs_root *root, - struct btrfs_space_info *sinfo, u64 alloc_bytes) + struct btrfs_space_info *sinfo, u64 alloc_bytes, + int force) { u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly; + u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved; u64 thresh; - if (sinfo->bytes_used + sinfo->bytes_reserved + - alloc_bytes + 256 * 1024 * 1024 < num_bytes) + if (force == CHUNK_ALLOC_FORCE) + return 1; + + /* + * in limited mode, we want to have some free space up to + * about 1% of the FS size. + */ + if (force == CHUNK_ALLOC_LIMITED) { + thresh = btrfs_super_total_bytes(&root->fs_info->super_copy); + thresh = max_t(u64, 64 * 1024 * 1024, + div_factor_fine(thresh, 1)); + + if (num_bytes - num_allocated < thresh) + return 1; + } + + /* + * we have two similar checks here, one based on percentage + * and once based on a hard number of 256MB. The idea + * is that if we have a good amount of free + * room, don't allocate a chunk. 
A good mount is + * less than 80% utilized of the chunks we have allocated, + * or more than 256MB free + */ + if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes) return 0; - if (sinfo->bytes_used + sinfo->bytes_reserved + - alloc_bytes < div_factor(num_bytes, 8)) + if (num_allocated + alloc_bytes < div_factor(num_bytes, 8)) return 0; thresh = btrfs_super_total_bytes(&root->fs_info->super_copy); + + /* 256MB or 5% of the FS */ thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5)); if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3)) return 0; - return 1; } @@ -3273,10 +3319,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, { struct btrfs_space_info *space_info; struct btrfs_fs_info *fs_info = extent_root->fs_info; + int wait_for_alloc = 0; int ret = 0; - mutex_lock(&fs_info->chunk_mutex); - flags = btrfs_reduce_alloc_profile(extent_root, flags); space_info = __find_space_info(extent_root->fs_info, flags); @@ -3287,21 +3332,40 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, } BUG_ON(!space_info); +again: spin_lock(&space_info->lock); if (space_info->force_alloc) - force = 1; + force = space_info->force_alloc; if (space_info->full) { spin_unlock(&space_info->lock); - goto out; + return 0; } - if (!force && !should_alloc_chunk(extent_root, space_info, - alloc_bytes)) { + if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) { spin_unlock(&space_info->lock); - goto out; + return 0; + } else if (space_info->chunk_alloc) { + wait_for_alloc = 1; + } else { + space_info->chunk_alloc = 1; } + spin_unlock(&space_info->lock); + mutex_lock(&fs_info->chunk_mutex); + + /* + * The chunk_mutex is held throughout the entirety of a chunk + * allocation, so once we've acquired the chunk_mutex we know that the + * other guy is done and we need to recheck and see if we should + * allocate. + */ + if (wait_for_alloc) { + mutex_unlock(&fs_info->chunk_mutex); + wait_for_alloc = 0; + goto again; + } + /* * If we have mixed data/metadata chunks we want to make sure we keep * allocating mixed chunks instead of individual chunks. 
@@ -3327,9 +3391,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, space_info->full = 1; else ret = 1; - space_info->force_alloc = 0; + + space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; + space_info->chunk_alloc = 0; spin_unlock(&space_info->lock); -out: mutex_unlock(&extent_root->fs_info->chunk_mutex); return ret; } @@ -5303,11 +5368,13 @@ loop: if (allowed_chunk_alloc) { ret = do_chunk_alloc(trans, root, num_bytes + - 2 * 1024 * 1024, data, 1); + 2 * 1024 * 1024, data, + CHUNK_ALLOC_LIMITED); allowed_chunk_alloc = 0; done_chunk_alloc = 1; - } else if (!done_chunk_alloc) { - space_info->force_alloc = 1; + } else if (!done_chunk_alloc && + space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) { + space_info->force_alloc = CHUNK_ALLOC_LIMITED; } if (loop < LOOP_NO_EMPTY_SIZE) { @@ -5393,7 +5460,8 @@ again: */ if (empty_size || root->ref_cows) ret = do_chunk_alloc(trans, root->fs_info->extent_root, - num_bytes + 2 * 1024 * 1024, data, 0); + num_bytes + 2 * 1024 * 1024, data, + CHUNK_ALLOC_NO_FORCE); WARN_ON(num_bytes < root->sectorsize); ret = find_free_extent(trans, root, num_bytes, empty_size, @@ -5405,7 +5473,7 @@ again: num_bytes = num_bytes & ~(root->sectorsize - 1); num_bytes = max(num_bytes, min_alloc_size); do_chunk_alloc(trans, root->fs_info->extent_root, - num_bytes, data, 1); + num_bytes, data, CHUNK_ALLOC_FORCE); goto again; } if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) { @@ -8109,13 +8177,15 @@ int btrfs_set_block_group_ro(struct btrfs_root *root, alloc_flags = update_block_group_flags(root, cache->flags); if (alloc_flags != cache->flags) - do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); + do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, + CHUNK_ALLOC_FORCE); ret = set_block_group_ro(cache); if (!ret) goto out; alloc_flags = get_alloc_profile(root, cache->space_info->flags); - ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); + ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, + CHUNK_ALLOC_FORCE); if (ret < 0) goto out; ret = set_block_group_ro(cache); @@ -8128,7 +8198,8 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 type) { u64 alloc_flags = get_alloc_profile(root, type); - return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); + return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, + CHUNK_ALLOC_FORCE); } /* diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 20ddb28602a8..315138605088 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -690,6 +690,15 @@ static void cache_state(struct extent_state *state, } } +static void uncache_state(struct extent_state **cached_ptr) +{ + if (cached_ptr && (*cached_ptr)) { + struct extent_state *state = *cached_ptr; + *cached_ptr = NULL; + free_extent_state(state); + } +} + /* * set some bits on a range in the tree. This may require allocations or * sleeping, so the gfp mask is used to indicate what is allowed. 
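uncache_state() above takes the address of the caller's cached-state pointer so one helper can both drop the reference and clear the variable, leaving nothing stale behind. The same idiom in a tiny generic form:

#include <stdio.h>
#include <stdlib.h>

/* Free what *p points to and clear the caller's pointer, so a stale
 * reference cannot be used or freed a second time. */
static void release(char **p)
{
	if (p && *p) {
		free(*p);
		*p = NULL;
	}
}

int main(void)
{
	char *buf = malloc(32);

	release(&buf);
	release(&buf);		/* harmless: already NULL */
	printf("buf is %s\n", buf ? "set" : "NULL");
	return 0;
}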
@@ -940,10 +949,10 @@ static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end, } int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, - gfp_t mask) + struct extent_state **cached_state, gfp_t mask) { - return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL, - NULL, mask); + return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, + NULL, cached_state, mask); } static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, @@ -1012,8 +1021,7 @@ int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, mask); } -int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, - gfp_t mask) +int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) { return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, mask); @@ -1735,6 +1743,9 @@ static void end_bio_extent_readpage(struct bio *bio, int err) do { struct page *page = bvec->bv_page; + struct extent_state *cached = NULL; + struct extent_state *state; + tree = &BTRFS_I(page->mapping->host)->io_tree; start = ((u64)page->index << PAGE_CACHE_SHIFT) + @@ -1749,9 +1760,20 @@ static void end_bio_extent_readpage(struct bio *bio, int err) if (++bvec <= bvec_end) prefetchw(&bvec->bv_page->flags); + spin_lock(&tree->lock); + state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED); + if (state && state->start == start) { + /* + * take a reference on the state, unlock will drop + * the ref + */ + cache_state(state, &cached); + } + spin_unlock(&tree->lock); + if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { ret = tree->ops->readpage_end_io_hook(page, start, end, - NULL); + state); if (ret) uptodate = 0; } @@ -1764,15 +1786,16 @@ static void end_bio_extent_readpage(struct bio *bio, int err) test_bit(BIO_UPTODATE, &bio->bi_flags); if (err) uptodate = 0; + uncache_state(&cached); continue; } } if (uptodate) { - set_extent_uptodate(tree, start, end, + set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC); } - unlock_extent(tree, start, end, GFP_ATOMIC); + unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); if (whole_page) { if (uptodate) { @@ -1811,6 +1834,7 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err) do { struct page *page = bvec->bv_page; + struct extent_state *cached = NULL; tree = &BTRFS_I(page->mapping->host)->io_tree; start = ((u64)page->index << PAGE_CACHE_SHIFT) + @@ -1821,13 +1845,14 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err) prefetchw(&bvec->bv_page->flags); if (uptodate) { - set_extent_uptodate(tree, start, end, GFP_ATOMIC); + set_extent_uptodate(tree, start, end, &cached, + GFP_ATOMIC); } else { ClearPageUptodate(page); SetPageError(page); } - unlock_extent(tree, start, end, GFP_ATOMIC); + unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); } while (bvec >= bio->bi_io_vec); @@ -2016,14 +2041,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree, while (cur <= end) { if (cur >= last_byte) { char *userpage; + struct extent_state *cached = NULL; + iosize = PAGE_CACHE_SIZE - page_offset; userpage = kmap_atomic(page, KM_USER0); memset(userpage + page_offset, 0, iosize); flush_dcache_page(page); kunmap_atomic(userpage, KM_USER0); set_extent_uptodate(tree, cur, cur + iosize - 1, - GFP_NOFS); - unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); + &cached, GFP_NOFS); + unlock_extent_cached(tree, cur, cur + iosize - 1, + &cached, GFP_NOFS); break; } em = get_extent(inode, page, page_offset, cur, @@ -2063,14 +2091,17 @@ static int 
__extent_read_full_page(struct extent_io_tree *tree, /* we've found a hole, just zero and go on */ if (block_start == EXTENT_MAP_HOLE) { char *userpage; + struct extent_state *cached = NULL; + userpage = kmap_atomic(page, KM_USER0); memset(userpage + page_offset, 0, iosize); flush_dcache_page(page); kunmap_atomic(userpage, KM_USER0); set_extent_uptodate(tree, cur, cur + iosize - 1, - GFP_NOFS); - unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); + &cached, GFP_NOFS); + unlock_extent_cached(tree, cur, cur + iosize - 1, + &cached, GFP_NOFS); cur = cur + iosize; page_offset += iosize; continue; @@ -2789,9 +2820,12 @@ int extent_prepare_write(struct extent_io_tree *tree, iocount++; block_start = block_start + iosize; } else { - set_extent_uptodate(tree, block_start, cur_end, + struct extent_state *cached = NULL; + + set_extent_uptodate(tree, block_start, cur_end, &cached, GFP_NOFS); - unlock_extent(tree, block_start, cur_end, GFP_NOFS); + unlock_extent_cached(tree, block_start, cur_end, + &cached, GFP_NOFS); block_start = cur_end + 1; } page_offset = block_start & (PAGE_CACHE_SIZE - 1); @@ -3457,7 +3491,7 @@ int set_extent_buffer_uptodate(struct extent_io_tree *tree, num_pages = num_extent_pages(eb->start, eb->len); set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, - GFP_NOFS); + NULL, GFP_NOFS); for (i = 0; i < num_pages; i++) { page = extent_buffer_page(eb, i); if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) || @@ -3885,6 +3919,12 @@ static void move_pages(struct page *dst_page, struct page *src_page, kunmap_atomic(dst_kaddr, KM_USER0); } +static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len) +{ + unsigned long distance = (src > dst) ? src - dst : dst - src; + return distance < len; +} + static void copy_pages(struct page *dst_page, struct page *src_page, unsigned long dst_off, unsigned long src_off, unsigned long len) @@ -3892,10 +3932,12 @@ static void copy_pages(struct page *dst_page, struct page *src_page, char *dst_kaddr = kmap_atomic(dst_page, KM_USER0); char *src_kaddr; - if (dst_page != src_page) + if (dst_page != src_page) { src_kaddr = kmap_atomic(src_page, KM_USER1); - else + } else { src_kaddr = dst_kaddr; + BUG_ON(areas_overlap(src_off, dst_off, len)); + } memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len); kunmap_atomic(dst_kaddr, KM_USER0); @@ -3970,7 +4012,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, "len %lu len %lu\n", dst_offset, len, dst->len); BUG_ON(1); } - if (dst_offset < src_offset) { + if (!areas_overlap(src_offset, dst_offset, len)) { memcpy_extent_buffer(dst, dst_offset, src_offset, len); return; } diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index f62c5442835d..af2d7179c372 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -208,7 +208,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits, int exclusive_bits, u64 *failed_start, struct extent_state **cached_state, gfp_t mask); int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, - gfp_t mask); + struct extent_state **cached_state, gfp_t mask); int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index e621ea54a3fd..75899a01dded 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -104,7 +104,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages, /* * unlocks pages after 
btrfs_file_write is done with them */ -static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages) +void btrfs_drop_pages(struct page **pages, size_t num_pages) { size_t i; for (i = 0; i < num_pages; i++) { @@ -127,16 +127,13 @@ static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages) * this also makes the decision about creating an inline extent vs * doing real data extents, marking pages dirty and delalloc as required. */ -static noinline int dirty_and_release_pages(struct btrfs_root *root, - struct file *file, - struct page **pages, - size_t num_pages, - loff_t pos, - size_t write_bytes) +int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode, + struct page **pages, size_t num_pages, + loff_t pos, size_t write_bytes, + struct extent_state **cached) { int err = 0; int i; - struct inode *inode = fdentry(file)->d_inode; u64 num_bytes; u64 start_pos; u64 end_of_last_block; @@ -149,7 +146,7 @@ static noinline int dirty_and_release_pages(struct btrfs_root *root, end_of_last_block = start_pos + num_bytes - 1; err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block, - NULL); + cached); if (err) return err; @@ -992,9 +989,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, } if (copied > 0) { - ret = dirty_and_release_pages(root, file, pages, - dirty_pages, pos, - copied); + ret = btrfs_dirty_pages(root, inode, pages, + dirty_pages, pos, copied, + NULL); if (ret) { btrfs_delalloc_release_space(inode, dirty_pages << PAGE_CACHE_SHIFT); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index f561c953205b..11d2e9cea09e 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -508,6 +508,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode; struct rb_node *node; struct list_head *pos, *n; + struct page **pages; struct page *page; struct extent_state *cached_state = NULL; struct btrfs_free_cluster *cluster = NULL; @@ -517,13 +518,13 @@ int btrfs_write_out_cache(struct btrfs_root *root, u64 start, end, len; u64 bytes = 0; u32 *crc, *checksums; - pgoff_t index = 0, last_index = 0; unsigned long first_page_offset; - int num_checksums; + int index = 0, num_pages = 0; int entries = 0; int bitmaps = 0; int ret = 0; bool next_page = false; + bool out_of_space = false; root = root->fs_info->tree_root; @@ -551,24 +552,31 @@ int btrfs_write_out_cache(struct btrfs_root *root, return 0; } - last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; + num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT; filemap_write_and_wait(inode->i_mapping); btrfs_wait_ordered_range(inode, inode->i_size & ~(root->sectorsize - 1), (u64)-1); /* We need a checksum per page. */ - num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE; - crc = checksums = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS); + crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS); if (!crc) { iput(inode); return 0; } + pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS); + if (!pages) { + kfree(crc); + iput(inode); + return 0; + } + /* Since the first page has all of our checksums and our generation we * need to calculate the offset into the page that we can start writing * our entries. 
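The cache writer now sizes its checksum array from num_pages, computed with a round-up division, where the old i_size / PAGE_CACHE_SIZE under-counted whenever the inode size was not page aligned. The idiom in isolation:

#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned long pages_truncated(unsigned long bytes)
{
	return bytes / PAGE_SIZE;			/* loses the tail  */
}

static unsigned long pages_round_up(unsigned long bytes)
{
	return (bytes + PAGE_SIZE - 1) / PAGE_SIZE;	/* covers the tail */
}

int main(void)
{
	unsigned long size = 3 * PAGE_SIZE + 100;	/* not page aligned */

	printf("truncated: %lu, rounded up: %lu\n",
	       pages_truncated(size), pages_round_up(size));	/* 3 vs 4 */
	return 0;
}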
*/ - first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64); + first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64); /* Get the cluster for this block_group if it exists */ if (!list_empty(&block_group->cluster_list)) @@ -590,20 +598,18 @@ int btrfs_write_out_cache(struct btrfs_root *root, * after find_get_page at this point. Just putting this here so people * know and don't freak out. */ - while (index <= last_index) { + while (index < num_pages) { page = grab_cache_page(inode->i_mapping, index); if (!page) { - pgoff_t i = 0; + int i; - while (i < index) { - page = find_get_page(inode->i_mapping, i); - unlock_page(page); - page_cache_release(page); - page_cache_release(page); - i++; + for (i = 0; i < num_pages; i++) { + unlock_page(pages[i]); + page_cache_release(pages[i]); } goto out_free; } + pages[index] = page; index++; } @@ -631,7 +637,12 @@ int btrfs_write_out_cache(struct btrfs_root *root, offset = start_offset; } - page = find_get_page(inode->i_mapping, index); + if (index >= num_pages) { + out_of_space = true; + break; + } + + page = pages[index]; addr = kmap(page); entry = addr + start_offset; @@ -708,23 +719,6 @@ int btrfs_write_out_cache(struct btrfs_root *root, bytes += PAGE_CACHE_SIZE; - ClearPageChecked(page); - set_page_extent_mapped(page); - SetPageUptodate(page); - set_page_dirty(page); - - /* - * We need to release our reference we got for grab_cache_page, - * except for the first page which will hold our checksums, we - * do that below. - */ - if (index != 0) { - unlock_page(page); - page_cache_release(page); - } - - page_cache_release(page); - index++; } while (node || next_page); @@ -734,7 +728,11 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct btrfs_free_space *entry = list_entry(pos, struct btrfs_free_space, list); - page = find_get_page(inode->i_mapping, index); + if (index >= num_pages) { + out_of_space = true; + break; + } + page = pages[index]; addr = kmap(page); memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE); @@ -745,64 +743,58 @@ int btrfs_write_out_cache(struct btrfs_root *root, crc++; bytes += PAGE_CACHE_SIZE; - ClearPageChecked(page); - set_page_extent_mapped(page); - SetPageUptodate(page); - set_page_dirty(page); - unlock_page(page); - page_cache_release(page); - page_cache_release(page); list_del_init(&entry->list); index++; } + if (out_of_space) { + btrfs_drop_pages(pages, num_pages); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, + i_size_read(inode) - 1, &cached_state, + GFP_NOFS); + ret = 0; + goto out_free; + } + /* Zero out the rest of the pages just to make sure */ - while (index <= last_index) { + while (index < num_pages) { void *addr; - page = find_get_page(inode->i_mapping, index); - + page = pages[index]; addr = kmap(page); memset(addr, 0, PAGE_CACHE_SIZE); kunmap(page); - ClearPageChecked(page); - set_page_extent_mapped(page); - SetPageUptodate(page); - set_page_dirty(page); - unlock_page(page); - page_cache_release(page); - page_cache_release(page); bytes += PAGE_CACHE_SIZE; index++; } - btrfs_set_extent_delalloc(inode, 0, bytes - 1, &cached_state); - /* Write the checksums and trans id to the first page */ { void *addr; u64 *gen; - page = find_get_page(inode->i_mapping, 0); + page = pages[0]; addr = kmap(page); - memcpy(addr, checksums, sizeof(u32) * num_checksums); - gen = addr + (sizeof(u32) * num_checksums); + memcpy(addr, checksums, sizeof(u32) * num_pages); + gen = addr + (sizeof(u32) * num_pages); *gen = trans->transid; kunmap(page); - ClearPageChecked(page); - set_page_extent_mapped(page); - 
SetPageUptodate(page); - set_page_dirty(page); - unlock_page(page); - page_cache_release(page); - page_cache_release(page); } - BTRFS_I(inode)->generation = trans->transid; + ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0, + bytes, &cached_state); + btrfs_drop_pages(pages, num_pages); unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, &cached_state, GFP_NOFS); + if (ret) { + ret = 0; + goto out_free; + } + + BTRFS_I(inode)->generation = trans->transid; + filemap_write_and_wait(inode->i_mapping); key.objectid = BTRFS_FREE_SPACE_OBJECTID; @@ -853,6 +845,7 @@ out_free: BTRFS_I(inode)->generation = 0; } kfree(checksums); + kfree(pages); btrfs_update_inode(trans, root, inode); iput(inode); return ret; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5cc64ab9c485..fcd66b6a8086 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1770,9 +1770,12 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) add_pending_csums(trans, inode, ordered_extent->file_offset, &ordered_extent->list); - btrfs_ordered_update_i_size(inode, 0, ordered_extent); - ret = btrfs_update_inode(trans, root, inode); - BUG_ON(ret); + ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); + if (!ret) { + ret = btrfs_update_inode(trans, root, inode); + BUG_ON(ret); + } + ret = 0; out: if (nolock) { if (trans) @@ -2590,6 +2593,13 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, struct btrfs_inode_item *item, struct inode *inode) { + if (!leaf->map_token) + map_private_extent_buffer(leaf, (unsigned long)item, + sizeof(struct btrfs_inode_item), + &leaf->map_token, &leaf->kaddr, + &leaf->map_start, &leaf->map_len, + KM_USER1); + btrfs_set_inode_uid(leaf, item, inode->i_uid); btrfs_set_inode_gid(leaf, item, inode->i_gid); btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size); @@ -2618,6 +2628,11 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, btrfs_set_inode_rdev(leaf, item, inode->i_rdev); btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group); + + if (leaf->map_token) { + unmap_extent_buffer(leaf, leaf->map_token, KM_USER1); + leaf->map_token = NULL; + } } /* @@ -4207,10 +4222,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, struct btrfs_key found_key; struct btrfs_path *path; int ret; - u32 nritems; struct extent_buffer *leaf; int slot; - int advance; unsigned char d_type; int over = 0; u32 di_cur; @@ -4253,27 +4266,19 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto err; - advance = 0; while (1) { leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); slot = path->slots[0]; - if (advance || slot >= nritems) { - if (slot >= nritems - 1) { - ret = btrfs_next_leaf(root, path); - if (ret) - break; - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); - slot = path->slots[0]; - } else { - slot++; - path->slots[0]++; - } + if (slot >= btrfs_header_nritems(leaf)) { + ret = btrfs_next_leaf(root, path); + if (ret < 0) + goto err; + else if (ret > 0) + break; + continue; } - advance = 1; item = btrfs_item_nr(leaf, slot); btrfs_item_key_to_cpu(leaf, &found_key, slot); @@ -4282,7 +4287,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, if (btrfs_key_type(&found_key) != key_type) break; if (found_key.offset < filp->f_pos) - continue; + goto next; filp->f_pos = found_key.offset; @@ -4335,6 +4340,8 @@ skip: di_cur += 
di_len; di = (struct btrfs_dir_item *)((char *)di + di_len); } +next: + path->slots[0]++; } /* Reached end of directory/root. Bump pos past the last item. */ @@ -4527,14 +4534,17 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, BUG_ON(!path); inode = new_inode(root->fs_info->sb); - if (!inode) + if (!inode) { + btrfs_free_path(path); return ERR_PTR(-ENOMEM); + } if (dir) { trace_btrfs_inode_request(dir); ret = btrfs_set_inode_index(dir, index); if (ret) { + btrfs_free_path(path); iput(inode); return ERR_PTR(ret); } @@ -4834,9 +4844,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, if (inode->i_nlink == ~0U) return -EMLINK; - btrfs_inc_nlink(inode); - inode->i_ctime = CURRENT_TIME; - err = btrfs_set_inode_index(dir, &index); if (err) goto fail; @@ -4852,6 +4859,9 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, goto fail; } + btrfs_inc_nlink(inode); + inode->i_ctime = CURRENT_TIME; + btrfs_set_trans_block_group(trans, dir); ihold(inode); @@ -5221,7 +5231,7 @@ again: btrfs_mark_buffer_dirty(leaf); } set_extent_uptodate(io_tree, em->start, - extent_map_end(em) - 1, GFP_NOFS); + extent_map_end(em) - 1, NULL, GFP_NOFS); goto insert; } else { printk(KERN_ERR "btrfs unknown found_type %d\n", found_type); @@ -5428,17 +5438,30 @@ out: } static struct extent_map *btrfs_new_extent_direct(struct inode *inode, + struct extent_map *em, u64 start, u64 len) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; - struct extent_map *em; struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct btrfs_key ins; u64 alloc_hint; int ret; + bool insert = false; - btrfs_drop_extent_cache(inode, start, start + len - 1, 0); + /* + * Ok if the extent map we looked up is a hole and is for the exact + * range we want, there is no reason to allocate a new one, however if + * it is not right then we need to free this one and drop the cache for + * our range. + */ + if (em->block_start != EXTENT_MAP_HOLE || em->start != start || + em->len != len) { + free_extent_map(em); + em = NULL; + insert = true; + btrfs_drop_extent_cache(inode, start, start + len - 1, 0); + } trans = btrfs_join_transaction(root, 0); if (IS_ERR(trans)) @@ -5454,10 +5477,12 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, goto out; } - em = alloc_extent_map(GFP_NOFS); if (!em) { - em = ERR_PTR(-ENOMEM); - goto out; + em = alloc_extent_map(GFP_NOFS); + if (!em) { + em = ERR_PTR(-ENOMEM); + goto out; + } } em->start = start; @@ -5467,9 +5492,15 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, em->block_start = ins.objectid; em->block_len = ins.offset; em->bdev = root->fs_info->fs_devices->latest_bdev; + + /* + * We need to do this because if we're using the original em we searched + * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that. 
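The btrfs_new_inode() hunks plug a leak by freeing the path on both early error returns. When error exits multiply, plain C usually funnels them through a single cleanup label instead; a hedged, generic sketch (make_object() and struct obj are invented for the example):

#include <stdio.h>
#include <stdlib.h>

struct obj { int id; };

static struct obj *make_object(int fail)
{
	struct obj *o = NULL;
	char *path = malloc(64);	/* scratch buffer, never handed out */

	if (!path)
		return NULL;

	o = malloc(sizeof(*o));
	if (!o)
		goto out;

	if (fail) {			/* a later setup step failed */
		free(o);
		o = NULL;
		goto out;
	}
	o->id = 1;
out:
	free(path);			/* released on success and on every error */
	return o;
}

int main(void)
{
	struct obj *o = make_object(0);

	printf("%s\n", o ? "created" : "failed");
	free(o);
	return 0;
}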
+ */ + em->flags = 0; set_bit(EXTENT_FLAG_PINNED, &em->flags); - while (1) { + while (insert) { write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em); write_unlock(&em_tree->lock); @@ -5687,8 +5718,7 @@ must_cow: * it above */ len = bh_result->b_size; - free_extent_map(em); - em = btrfs_new_extent_direct(inode, start, len); + em = btrfs_new_extent_direct(inode, em, start, len); if (IS_ERR(em)) return PTR_ERR(em); len = min(len, em->len - (start - em->start)); @@ -5851,8 +5881,10 @@ again: } add_pending_csums(trans, inode, ordered->file_offset, &ordered->list); - btrfs_ordered_update_i_size(inode, 0, ordered); - btrfs_update_inode(trans, root, inode); + ret = btrfs_ordered_update_i_size(inode, 0, ordered); + if (!ret) + btrfs_update_inode(trans, root, inode); + ret = 0; out_unlock: unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset, ordered->file_offset + ordered->len - 1, @@ -5938,7 +5970,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev, static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, int rw, u64 file_offset, int skip_sum, - u32 *csums) + u32 *csums, int async_submit) { int write = rw & REQ_WRITE; struct btrfs_root *root = BTRFS_I(inode)->root; @@ -5949,13 +5981,24 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, if (ret) goto err; - if (write && !skip_sum) { + if (skip_sum) + goto map; + + if (write && async_submit) { ret = btrfs_wq_submit_bio(root->fs_info, inode, rw, bio, 0, 0, file_offset, __btrfs_submit_bio_start_direct_io, __btrfs_submit_bio_done); goto err; + } else if (write) { + /* + * If we aren't doing async submit, calculate the csum of the + * bio now. + */ + ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1); + if (ret) + goto err; } else if (!skip_sum) { ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset, csums); @@ -5963,7 +6006,8 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, goto err; } - ret = btrfs_map_bio(root, rw, bio, 0, 1); +map: + ret = btrfs_map_bio(root, rw, bio, 0, async_submit); err: bio_put(bio); return ret; @@ -5985,15 +6029,9 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, int nr_pages = 0; u32 *csums = dip->csums; int ret = 0; + int async_submit = 0; int write = rw & REQ_WRITE; - bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); - if (!bio) - return -ENOMEM; - bio->bi_private = dip; - bio->bi_end_io = btrfs_end_dio_bio; - atomic_inc(&dip->pending_bios); - map_length = orig_bio->bi_size; ret = btrfs_map_block(map_tree, READ, start_sector << 9, &map_length, NULL, 0); @@ -6002,6 +6040,19 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, return -EIO; } + if (map_length >= orig_bio->bi_size) { + bio = orig_bio; + goto submit; + } + + async_submit = 1; + bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); + if (!bio) + return -ENOMEM; + bio->bi_private = dip; + bio->bi_end_io = btrfs_end_dio_bio; + atomic_inc(&dip->pending_bios); + while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) { if (unlikely(map_length < submit_len + bvec->bv_len || bio_add_page(bio, bvec->bv_page, bvec->bv_len, @@ -6015,7 +6066,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, atomic_inc(&dip->pending_bios); ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, - csums); + csums, async_submit); if (ret) { bio_put(bio); atomic_dec(&dip->pending_bios); @@ -6052,8 +6103,9 @@ 
static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, } } +submit: ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, - csums); + csums, async_submit); if (!ret) return 0; @@ -6148,6 +6200,7 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io unsigned long nr_segs) { int seg; + int i; size_t size; unsigned long addr; unsigned blocksize_mask = root->sectorsize - 1; @@ -6162,8 +6215,22 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io addr = (unsigned long)iov[seg].iov_base; size = iov[seg].iov_len; end += size; - if ((addr & blocksize_mask) || (size & blocksize_mask)) + if ((addr & blocksize_mask) || (size & blocksize_mask)) goto out; + + /* If this is a write we don't need to check anymore */ + if (rw & WRITE) + continue; + + /* + * Check to make sure we don't have duplicate iov_base's in this + * iovec, if so return EINVAL, otherwise we'll get csum errors + * when reading back. + */ + for (i = seg + 1; i < nr_segs; i++) { + if (iov[seg].iov_base == iov[i].iov_base) + goto out; + } } retval = 0; out: diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index cfc264fefdb0..ffb48d6c5433 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2287,7 +2287,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) struct btrfs_ioctl_space_info space; struct btrfs_ioctl_space_info *dest; struct btrfs_ioctl_space_info *dest_orig; - struct btrfs_ioctl_space_info *user_dest; + struct btrfs_ioctl_space_info __user *user_dest; struct btrfs_space_info *info; u64 types[] = {BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_SYSTEM, diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 58e7de9cc90c..0ac712efcdf2 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -159,7 +159,7 @@ enum { Opt_compress_type, Opt_compress_force, Opt_compress_force_type, Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, - Opt_enospc_debug, Opt_err, + Opt_enospc_debug, Opt_subvolrootid, Opt_err, }; static match_table_t tokens = { @@ -189,6 +189,7 @@ static match_table_t tokens = { {Opt_clear_cache, "clear_cache"}, {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, {Opt_enospc_debug, "enospc_debug"}, + {Opt_subvolrootid, "subvolrootid=%d"}, {Opt_err, NULL}, }; @@ -232,6 +233,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) break; case Opt_subvol: case Opt_subvolid: + case Opt_subvolrootid: case Opt_device: /* * These are parsed by btrfs_parse_early_options @@ -388,7 +390,7 @@ out: */ static int btrfs_parse_early_options(const char *options, fmode_t flags, void *holder, char **subvol_name, u64 *subvol_objectid, - struct btrfs_fs_devices **fs_devices) + u64 *subvol_rootid, struct btrfs_fs_devices **fs_devices) { substring_t args[MAX_OPT_ARGS]; char *opts, *orig, *p; @@ -429,6 +431,18 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, *subvol_objectid = intarg; } break; + case Opt_subvolrootid: + intarg = 0; + error = match_int(&args[0], &intarg); + if (!error) { + /* we want the original fs_tree */ + if (!intarg) + *subvol_rootid = + BTRFS_FS_TREE_OBJECTID; + else + *subvol_rootid = intarg; + } + break; case Opt_device: error = btrfs_scan_one_device(match_strdup(&args[0]), flags, holder, fs_devices); @@ -736,6 +750,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, fmode_t mode = FMODE_READ; char *subvol_name = NULL; u64 subvol_objectid = 0; + u64 subvol_rootid = 0; 
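check_direct_IO() rejects any segment whose base address or length is not sector aligned by masking with blocksize - 1, which works only because the block size is a power of two, and it now also scans read iovecs for duplicate buffers. The alignment test on its own:

#include <stdio.h>
#include <stdint.h>

static int is_aligned(uint64_t value, uint64_t blocksize)
{
	/* Valid only for power-of-two block sizes:
	 * (blocksize - 1) is a mask of the low-order bits. */
	return (value & (blocksize - 1)) == 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       is_aligned(8192, 4096),	/* 1 */
	       is_aligned(8704, 512),	/* 1 */
	       is_aligned(8200, 4096));	/* 0 */
	return 0;
}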
int error = 0; if (!(flags & MS_RDONLY)) @@ -743,7 +758,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, error = btrfs_parse_early_options(data, mode, fs_type, &subvol_name, &subvol_objectid, - &fs_devices); + &subvol_rootid, &fs_devices); if (error) return ERR_PTR(error); @@ -807,15 +822,17 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, s->s_flags |= MS_ACTIVE; } - root = get_default_root(s, subvol_objectid); - if (IS_ERR(root)) { - error = PTR_ERR(root); - deactivate_locked_super(s); - goto error_free_subvol_name; - } /* if they gave us a subvolume name bind mount into that */ if (strcmp(subvol_name, ".")) { struct dentry *new_root; + + root = get_default_root(s, subvol_rootid); + if (IS_ERR(root)) { + error = PTR_ERR(root); + deactivate_locked_super(s); + goto error_free_subvol_name; + } + mutex_lock(&root->d_inode->i_mutex); new_root = lookup_one_len(subvol_name, root, strlen(subvol_name)); @@ -836,6 +853,13 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, } dput(root); root = new_root; + } else { + root = get_default_root(s, subvol_objectid); + if (IS_ERR(root)) { + error = PTR_ERR(root); + deactivate_locked_super(s); + goto error_free_subvol_name; + } } kfree(subvol_name); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 5b158da7e0bb..c571734d5e5a 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -32,10 +32,8 @@ static noinline void put_transaction(struct btrfs_transaction *transaction) { - WARN_ON(transaction->use_count == 0); - transaction->use_count--; - if (transaction->use_count == 0) { - list_del_init(&transaction->list); + WARN_ON(atomic_read(&transaction->use_count) == 0); + if (atomic_dec_and_test(&transaction->use_count)) { memset(transaction, 0, sizeof(*transaction)); kmem_cache_free(btrfs_transaction_cachep, transaction); } @@ -60,14 +58,14 @@ static noinline int join_transaction(struct btrfs_root *root) if (!cur_trans) return -ENOMEM; root->fs_info->generation++; - cur_trans->num_writers = 1; + atomic_set(&cur_trans->num_writers, 1); cur_trans->num_joined = 0; cur_trans->transid = root->fs_info->generation; init_waitqueue_head(&cur_trans->writer_wait); init_waitqueue_head(&cur_trans->commit_wait); cur_trans->in_commit = 0; cur_trans->blocked = 0; - cur_trans->use_count = 1; + atomic_set(&cur_trans->use_count, 1); cur_trans->commit_done = 0; cur_trans->start_time = get_seconds(); @@ -88,7 +86,7 @@ static noinline int join_transaction(struct btrfs_root *root) root->fs_info->running_transaction = cur_trans; spin_unlock(&root->fs_info->new_trans_lock); } else { - cur_trans->num_writers++; + atomic_inc(&cur_trans->num_writers); cur_trans->num_joined++; } @@ -145,7 +143,7 @@ static void wait_current_trans(struct btrfs_root *root) cur_trans = root->fs_info->running_transaction; if (cur_trans && cur_trans->blocked) { DEFINE_WAIT(wait); - cur_trans->use_count++; + atomic_inc(&cur_trans->use_count); while (1) { prepare_to_wait(&root->fs_info->transaction_wait, &wait, TASK_UNINTERRUPTIBLE); @@ -181,6 +179,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root, { struct btrfs_trans_handle *h; struct btrfs_transaction *cur_trans; + int retries = 0; int ret; if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) @@ -204,7 +203,7 @@ again: } cur_trans = root->fs_info->running_transaction; - cur_trans->use_count++; + atomic_inc(&cur_trans->use_count); if (type != TRANS_JOIN_NOLOCK) mutex_unlock(&root->fs_info->trans_mutex); 
@@ -224,10 +223,18 @@ again: if (num_items > 0) { ret = btrfs_trans_reserve_metadata(h, root, num_items); - if (ret == -EAGAIN) { + if (ret == -EAGAIN && !retries) { + retries++; btrfs_commit_transaction(h, root); goto again; + } else if (ret == -EAGAIN) { + /* + * We have already retried and got EAGAIN, so really we + * don't have space, so set ret to -ENOSPC. + */ + ret = -ENOSPC; } + if (ret < 0) { btrfs_end_transaction(h, root); return ERR_PTR(ret); @@ -327,7 +334,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid) goto out_unlock; /* nothing committing|committed */ } - cur_trans->use_count++; + atomic_inc(&cur_trans->use_count); mutex_unlock(&root->fs_info->trans_mutex); wait_for_commit(root, cur_trans); @@ -457,18 +464,14 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, wake_up_process(info->transaction_kthread); } - if (lock) - mutex_lock(&info->trans_mutex); WARN_ON(cur_trans != info->running_transaction); - WARN_ON(cur_trans->num_writers < 1); - cur_trans->num_writers--; + WARN_ON(atomic_read(&cur_trans->num_writers) < 1); + atomic_dec(&cur_trans->num_writers); smp_mb(); if (waitqueue_active(&cur_trans->writer_wait)) wake_up(&cur_trans->writer_wait); put_transaction(cur_trans); - if (lock) - mutex_unlock(&info->trans_mutex); if (current->journal_info == trans) current->journal_info = NULL; @@ -1178,7 +1181,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, /* take transaction reference */ mutex_lock(&root->fs_info->trans_mutex); cur_trans = trans->transaction; - cur_trans->use_count++; + atomic_inc(&cur_trans->use_count); mutex_unlock(&root->fs_info->trans_mutex); btrfs_end_transaction(trans, root); @@ -1237,7 +1240,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, mutex_lock(&root->fs_info->trans_mutex); if (cur_trans->in_commit) { - cur_trans->use_count++; + atomic_inc(&cur_trans->use_count); mutex_unlock(&root->fs_info->trans_mutex); btrfs_end_transaction(trans, root); @@ -1259,7 +1262,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, prev_trans = list_entry(cur_trans->list.prev, struct btrfs_transaction, list); if (!prev_trans->commit_done) { - prev_trans->use_count++; + atomic_inc(&prev_trans->use_count); mutex_unlock(&root->fs_info->trans_mutex); wait_for_commit(root, prev_trans); @@ -1300,14 +1303,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, TASK_UNINTERRUPTIBLE); smp_mb(); - if (cur_trans->num_writers > 1) + if (atomic_read(&cur_trans->num_writers) > 1) schedule_timeout(MAX_SCHEDULE_TIMEOUT); else if (should_grow) schedule_timeout(1); mutex_lock(&root->fs_info->trans_mutex); finish_wait(&cur_trans->writer_wait, &wait); - } while (cur_trans->num_writers > 1 || + } while (atomic_read(&cur_trans->num_writers) > 1 || (should_grow && cur_trans->num_joined != joined)); ret = create_pending_snapshots(trans, root->fs_info); @@ -1394,6 +1397,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, wake_up(&cur_trans->commit_wait); + list_del_init(&cur_trans->list); put_transaction(cur_trans); put_transaction(cur_trans); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 229a594cacd5..e441acc6c584 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -27,11 +27,11 @@ struct btrfs_transaction { * total writers in this transaction, it must be zero before the * transaction can end */ - unsigned long num_writers; + atomic_t num_writers; unsigned long num_joined; int in_commit; - int use_count; + atomic_t use_count; int 
commit_done; int blocked; struct list_head list; diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index a5303b871b13..cfd660550ded 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -180,11 +180,10 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_dir_item *di; - int ret = 0, slot, advance; + int ret = 0, slot; size_t total_size = 0, size_left = size; unsigned long name_ptr; size_t name_len; - u32 nritems; /* * ok we want all objects associated with this id. @@ -204,34 +203,24 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto err; - advance = 0; + while (1) { leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); slot = path->slots[0]; /* this is where we start walking through the path */ - if (advance || slot >= nritems) { + if (slot >= btrfs_header_nritems(leaf)) { /* * if we've reached the last slot in this leaf we need * to go to the next leaf and reset everything */ - if (slot >= nritems-1) { - ret = btrfs_next_leaf(root, path); - if (ret) - break; - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); - slot = path->slots[0]; - } else { - /* - * just walking through the slots on this leaf - */ - slot++; - path->slots[0]++; - } + ret = btrfs_next_leaf(root, path); + if (ret < 0) + goto err; + else if (ret > 0) + break; + continue; } - advance = 1; btrfs_item_key_to_cpu(leaf, &found_key, slot); @@ -250,7 +239,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) /* we are just looking for how big our buffer needs to be */ if (!size) - continue; + goto next; if (!buffer || (name_len + 1) > size_left) { ret = -ERANGE; @@ -263,6 +252,8 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) size_left -= name_len + 1; buffer += name_len + 1; +next: + path->slots[0]++; } ret = total_size; diff --git a/fs/cifs/README b/fs/cifs/README index fe1683590828..74ab165fc646 100644 --- a/fs/cifs/README +++ b/fs/cifs/README @@ -685,22 +685,6 @@ LinuxExtensionsEnabled If set to one then the client will attempt to support and want to map the uid and gid fields to values supplied at mount (rather than the actual values, then set this to zero. (default 1) -Experimental When set to 1 used to enable certain experimental - features (currently enables multipage writes - when signing is enabled, the multipage write - performance enhancement was disabled when - signing turned on in case buffer was modified - just before it was sent, also this flag will - be used to use the new experimental directory change - notification code). When set to 2 enables - an additional experimental feature, "raw ntlmssp" - session establishment support (which allows - specifying "sec=ntlmssp" on mount). The Linux cifs - module will use ntlmv2 authentication encapsulated - in "raw ntlmssp" (not using SPNEGO) when - "sec=ntlmssp" is specified on mount. - This support also requires building cifs with - the CONFIG_CIFS_EXPERIMENTAL configuration flag. 
These experimental features and tracing can be enabled by changing flags in /proc/fs/cifs (after the cifs module has been installed or built into the diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c index e654dfd092c3..53d57a3fe427 100644 --- a/fs/cifs/cache.c +++ b/fs/cifs/cache.c @@ -50,7 +50,7 @@ void cifs_fscache_unregister(void) */ struct cifs_server_key { uint16_t family; /* address family */ - uint16_t port; /* IP port */ + __be16 port; /* IP port */ union { struct in_addr ipv4_addr; struct in6_addr ipv6_addr; diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 65829d32128c..30d01bc90855 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -423,7 +423,6 @@ static const struct file_operations cifs_lookup_cache_proc_fops; static const struct file_operations traceSMB_proc_fops; static const struct file_operations cifs_multiuser_mount_proc_fops; static const struct file_operations cifs_security_flags_proc_fops; -static const struct file_operations cifs_experimental_proc_fops; static const struct file_operations cifs_linux_ext_proc_fops; void @@ -441,8 +440,6 @@ cifs_proc_init(void) proc_create("cifsFYI", 0, proc_fs_cifs, &cifsFYI_proc_fops); proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops); proc_create("OplockEnabled", 0, proc_fs_cifs, &cifs_oplock_proc_fops); - proc_create("Experimental", 0, proc_fs_cifs, - &cifs_experimental_proc_fops); proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs, &cifs_linux_ext_proc_fops); proc_create("MultiuserMount", 0, proc_fs_cifs, @@ -469,7 +466,6 @@ cifs_proc_clean(void) remove_proc_entry("OplockEnabled", proc_fs_cifs); remove_proc_entry("SecurityFlags", proc_fs_cifs); remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs); - remove_proc_entry("Experimental", proc_fs_cifs); remove_proc_entry("LookupCacheEnabled", proc_fs_cifs); remove_proc_entry("fs/cifs", NULL); } @@ -550,45 +546,6 @@ static const struct file_operations cifs_oplock_proc_fops = { .write = cifs_oplock_proc_write, }; -static int cifs_experimental_proc_show(struct seq_file *m, void *v) -{ - seq_printf(m, "%d\n", experimEnabled); - return 0; -} - -static int cifs_experimental_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, cifs_experimental_proc_show, NULL); -} - -static ssize_t cifs_experimental_proc_write(struct file *file, - const char __user *buffer, size_t count, loff_t *ppos) -{ - char c; - int rc; - - rc = get_user(c, buffer); - if (rc) - return rc; - if (c == '0' || c == 'n' || c == 'N') - experimEnabled = 0; - else if (c == '1' || c == 'y' || c == 'Y') - experimEnabled = 1; - else if (c == '2') - experimEnabled = 2; - - return count; -} - -static const struct file_operations cifs_experimental_proc_fops = { - .owner = THIS_MODULE, - .open = cifs_experimental_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = cifs_experimental_proc_write, -}; - static int cifs_linux_ext_proc_show(struct seq_file *m, void *v) { seq_printf(m, "%d\n", linuxExtEnabled); diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c index 4dfba8283165..33d221394aca 100644 --- a/fs/cifs/cifs_spnego.c +++ b/fs/cifs/cifs_spnego.c @@ -113,7 +113,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo) MAX_MECH_STR_LEN + UID_KEY_LEN + (sizeof(uid_t) * 2) + CREDUID_KEY_LEN + (sizeof(uid_t) * 2) + - USER_KEY_LEN + strlen(sesInfo->userName) + + USER_KEY_LEN + strlen(sesInfo->user_name) + PID_KEY_LEN + (sizeof(pid_t) * 2) + 1; spnego_key = ERR_PTR(-ENOMEM); @@ -153,7 +153,7 @@ cifs_get_spnego_key(struct 
cifsSesInfo *sesInfo) sprintf(dp, ";creduid=0x%x", sesInfo->cred_uid); dp = description + strlen(description); - sprintf(dp, ";user=%s", sesInfo->userName); + sprintf(dp, ";user=%s", sesInfo->user_name); dp = description + strlen(description); sprintf(dp, ";pid=0x%x", current->pid); diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index fc0fd4fde306..23d43cde4306 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c @@ -90,7 +90,7 @@ cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp, case UNI_COLON: *target = ':'; break; - case UNI_ASTERIK: + case UNI_ASTERISK: *target = '*'; break; case UNI_QUESTION: @@ -264,40 +264,40 @@ cifs_strndup_from_ucs(const char *src, const int maxlen, const bool is_unicode, * names are little endian 16 bit Unicode on the wire */ int -cifsConvertToUCS(__le16 *target, const char *source, int maxlen, +cifsConvertToUCS(__le16 *target, const char *source, int srclen, const struct nls_table *cp, int mapChars) { int i, j, charlen; - int len_remaining = maxlen; char src_char; - __u16 temp; + __le16 dst_char; + wchar_t tmp; if (!mapChars) return cifs_strtoUCS(target, source, PATH_MAX, cp); - for (i = 0, j = 0; i < maxlen; j++) { + for (i = 0, j = 0; i < srclen; j++) { src_char = source[i]; switch (src_char) { case 0: - put_unaligned_le16(0, &target[j]); + put_unaligned(0, &target[j]); goto ctoUCS_out; case ':': - temp = UNI_COLON; + dst_char = cpu_to_le16(UNI_COLON); break; case '*': - temp = UNI_ASTERIK; + dst_char = cpu_to_le16(UNI_ASTERISK); break; case '?': - temp = UNI_QUESTION; + dst_char = cpu_to_le16(UNI_QUESTION); break; case '<': - temp = UNI_LESSTHAN; + dst_char = cpu_to_le16(UNI_LESSTHAN); break; case '>': - temp = UNI_GRTRTHAN; + dst_char = cpu_to_le16(UNI_GRTRTHAN); break; case '|': - temp = UNI_PIPE; + dst_char = cpu_to_le16(UNI_PIPE); break; /* * FIXME: We can not handle remapping backslash (UNI_SLASH) @@ -305,17 +305,17 @@ cifsConvertToUCS(__le16 *target, const char *source, int maxlen, * as they use backslash as separator. */ default: - charlen = cp->char2uni(source+i, len_remaining, - &temp); + charlen = cp->char2uni(source + i, srclen - i, &tmp); + dst_char = cpu_to_le16(tmp); + /* * if no match, use question mark, which at least in * some cases serves as wild card */ if (charlen < 1) { - temp = 0x003f; + dst_char = cpu_to_le16(0x003f); charlen = 1; } - len_remaining -= charlen; /* * character may take more than one byte in the source * string, but will take exactly two bytes in the @@ -324,9 +324,8 @@ cifsConvertToUCS(__le16 *target, const char *source, int maxlen, i += charlen; continue; } - put_unaligned_le16(temp, &target[j]); + put_unaligned(dst_char, &target[j]); i++; /* move to next char in source string */ - len_remaining--; } ctoUCS_out: diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h index 7fe6b52df507..644dd882a560 100644 --- a/fs/cifs/cifs_unicode.h +++ b/fs/cifs/cifs_unicode.h @@ -44,7 +44,7 @@ * reserved symbols (along with \ and /), otherwise illegal to store * in filenames in NTFS */ -#define UNI_ASTERIK (__u16) ('*' + 0xF000) +#define UNI_ASTERISK (__u16) ('*' + 0xF000) #define UNI_QUESTION (__u16) ('?' 
+ 0xF000) #define UNI_COLON (__u16) (':' + 0xF000) #define UNI_GRTRTHAN (__u16) ('>' + 0xF000) diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index a51585f9852b..d1a016be73ba 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -30,12 +30,13 @@ #include #include -/* Calculate and return the CIFS signature based on the mac key and SMB PDU */ -/* the 16 byte signature must be allocated by the caller */ -/* Note we only use the 1st eight bytes */ -/* Note that the smb header signature field on input contains the - sequence number before this function is called */ - +/* + * Calculate and return the CIFS signature based on the mac key and SMB PDU. + * The 16 byte signature must be allocated by the caller. Note we only use the + * 1st eight bytes and that the smb header signature field on input contains + * the sequence number before this function is called. Also, this function + * should be called with the server->srv_mutex held. + */ static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server, char *signature) { @@ -209,8 +210,10 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu, cpu_to_le32(expected_sequence_number); cifs_pdu->Signature.Sequence.Reserved = 0; + mutex_lock(&server->srv_mutex); rc = cifs_calculate_signature(cifs_pdu, server, what_we_think_sig_should_be); + mutex_unlock(&server->srv_mutex); if (rc) return rc; @@ -469,15 +472,15 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses, char *ntlmv2_hash, return rc; } - /* convert ses->userName to unicode and uppercase */ - len = strlen(ses->userName); + /* convert ses->user_name to unicode and uppercase */ + len = strlen(ses->user_name); user = kmalloc(2 + (len * 2), GFP_KERNEL); if (user == NULL) { cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n"); rc = -ENOMEM; goto calc_exit_2; } - len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp); + len = cifs_strtoUCS((__le16 *)user, ses->user_name, len, nls_cp); UniStrupr(user); crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash, diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index f2970136d17d..5c412b33cd7c 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -53,7 +53,6 @@ int cifsFYI = 0; int cifsERROR = 1; int traceSMB = 0; unsigned int oplockEnabled = 1; -unsigned int experimEnabled = 0; unsigned int linuxExtEnabled = 1; unsigned int lookupCacheEnabled = 1; unsigned int multiuser_mount = 0; @@ -127,6 +126,7 @@ cifs_read_super(struct super_block *sb, void *data, kfree(cifs_sb); return rc; } + cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages; #ifdef CONFIG_CIFS_DFS_UPCALL /* copy mount params to sb for use in submounts */ @@ -409,8 +409,8 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m) if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) seq_printf(s, ",multiuser"); - else if (tcon->ses->userName) - seq_printf(s, ",username=%s", tcon->ses->userName); + else if (tcon->ses->user_name) + seq_printf(s, ",username=%s", tcon->ses->user_name); if (tcon->ses->domainName) seq_printf(s, ",domain=%s", tcon->ses->domainName); diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 17afb0fbcaed..a5d1106fcbde 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -37,10 +37,9 @@ #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1) #define MAX_SERVER_SIZE 15 -#define MAX_SHARE_SIZE 64 /* used to be 20, this should still be enough */ -#define MAX_USERNAME_SIZE 32 /* 32 is to allow for 15 char names + null - termination then *2 for unicode versions */ -#define 
MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */ +#define MAX_SHARE_SIZE 80 +#define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */ +#define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */ #define CIFS_MIN_RCV_POOL 4 @@ -92,7 +91,8 @@ enum statusEnum { CifsNew = 0, CifsGood, CifsExiting, - CifsNeedReconnect + CifsNeedReconnect, + CifsNeedNegotiate }; enum securityEnum { @@ -274,7 +274,7 @@ struct cifsSesInfo { int capabilities; char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for TCP names - will ipv6 and sctp addresses fit? */ - char userName[MAX_USERNAME_SIZE + 1]; + char *user_name; char *domainName; char *password; struct session_key auth_key; @@ -817,7 +817,6 @@ GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions have the uid/password or Kerberos credential or equivalent for current user */ GLOBAL_EXTERN unsigned int oplockEnabled; -GLOBAL_EXTERN unsigned int experimEnabled; GLOBAL_EXTERN unsigned int lookupCacheEnabled; GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent with more secure ntlmssp2 challenge/resp */ diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 2644a5d6cc67..df959bae6728 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -142,9 +142,9 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command) */ while (server->tcpStatus == CifsNeedReconnect) { wait_event_interruptible_timeout(server->response_q, - (server->tcpStatus == CifsGood), 10 * HZ); + (server->tcpStatus != CifsNeedReconnect), 10 * HZ); - /* is TCP session is reestablished now ?*/ + /* are we still trying to reconnect? */ if (server->tcpStatus != CifsNeedReconnect) break; @@ -729,7 +729,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server) return rc; /* set up echo request */ - smb->hdr.Tid = cpu_to_le16(0xffff); + smb->hdr.Tid = 0xffff; smb->hdr.WordCount = 1; put_unaligned_le16(1, &smb->EchoCount); put_bcc_le(1, &smb->hdr); @@ -1884,10 +1884,10 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, __constant_cpu_to_le16(CIFS_WRLCK)) pLockData->fl_type = F_WRLCK; - pLockData->fl_start = parm_data->start; - pLockData->fl_end = parm_data->start + - parm_data->length - 1; - pLockData->fl_pid = parm_data->pid; + pLockData->fl_start = le64_to_cpu(parm_data->start); + pLockData->fl_end = pLockData->fl_start + + le64_to_cpu(parm_data->length) - 1; + pLockData->fl_pid = le32_to_cpu(parm_data->pid); } } diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 6e2b2addfc78..db9d55b507d0 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -199,8 +199,7 @@ cifs_reconnect(struct TCP_Server_Info *server) } spin_unlock(&GlobalMid_Lock); - while ((server->tcpStatus != CifsExiting) && - (server->tcpStatus != CifsGood)) { + while (server->tcpStatus == CifsNeedReconnect) { try_to_freeze(); /* we should try only the port we connected to before */ @@ -212,7 +211,7 @@ cifs_reconnect(struct TCP_Server_Info *server) atomic_inc(&tcpSesReconnectCount); spin_lock(&GlobalMid_Lock); if (server->tcpStatus != CifsExiting) - server->tcpStatus = CifsGood; + server->tcpStatus = CifsNeedNegotiate; spin_unlock(&GlobalMid_Lock); } } @@ -248,24 +247,24 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize) total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount); data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount); - remaining = total_data_size - data_in_this_rsp; - - if (remaining == 0) + if (total_data_size == data_in_this_rsp) return 0; - else if 
(remaining < 0) { + else if (total_data_size < data_in_this_rsp) { cFYI(1, "total data %d smaller than data in frame %d", total_data_size, data_in_this_rsp); return -EINVAL; - } else { - cFYI(1, "missing %d bytes from transact2, check next response", - remaining); - if (total_data_size > maxBufSize) { - cERROR(1, "TotalDataSize %d is over maximum buffer %d", - total_data_size, maxBufSize); - return -EINVAL; - } - return remaining; } + + remaining = total_data_size - data_in_this_rsp; + + cFYI(1, "missing %d bytes from transact2, check next response", + remaining); + if (total_data_size > maxBufSize) { + cERROR(1, "TotalDataSize %d is over maximum buffer %d", + total_data_size, maxBufSize); + return -EINVAL; + } + return remaining; } static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) @@ -421,7 +420,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) pdu_length = 4; /* enough to get RFC1001 header */ incomplete_rcv: - if (echo_retries > 0 && + if (echo_retries > 0 && server->tcpStatus == CifsGood && time_after(jiffies, server->lstrp + (echo_retries * SMB_ECHO_INTERVAL))) { cERROR(1, "Server %s has not responded in %d seconds. " @@ -881,7 +880,8 @@ cifs_parse_mount_options(char *options, const char *devname, /* null user, ie anonymous, authentication */ vol->nullauth = 1; } - if (strnlen(value, 200) < 200) { + if (strnlen(value, MAX_USERNAME_SIZE) < + MAX_USERNAME_SIZE) { vol->username = value; } else { printk(KERN_WARNING "CIFS: username too long\n"); @@ -1472,7 +1472,7 @@ srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs) static bool match_port(struct TCP_Server_Info *server, struct sockaddr *addr) { - unsigned short int port, *sport; + __be16 port, *sport; switch (addr->sa_family) { case AF_INET: @@ -1765,6 +1765,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) module_put(THIS_MODULE); goto out_err_crypto_release; } + tcp_ses->tcpStatus = CifsNeedNegotiate; /* thread spawned, put it on the list */ spin_lock(&cifs_tcp_ses_lock); @@ -1808,7 +1809,9 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol) break; default: /* anything else takes username/password */ - if (strncmp(ses->userName, vol->username, + if (ses->user_name == NULL) + continue; + if (strncmp(ses->user_name, vol->username, MAX_USERNAME_SIZE)) continue; if (strlen(vol->username) != 0 && @@ -1851,6 +1854,8 @@ cifs_put_smb_ses(struct cifsSesInfo *ses) cifs_put_tcp_session(server); } +static bool warned_on_ntlm; /* globals init to false automatically */ + static struct cifsSesInfo * cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) { @@ -1906,9 +1911,11 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) else sprintf(ses->serverName, "%pI4", &addr->sin_addr); - if (volume_info->username) - strncpy(ses->userName, volume_info->username, - MAX_USERNAME_SIZE); + if (volume_info->username) { + ses->user_name = kstrdup(volume_info->username, GFP_KERNEL); + if (!ses->user_name) + goto get_ses_fail; + } /* volume_info->password freed at unmount */ if (volume_info->password) { @@ -1923,6 +1930,15 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) } ses->cred_uid = volume_info->cred_uid; ses->linux_uid = volume_info->linux_uid; + + /* ntlmv2 is much stronger than ntlm security, and has been broadly + supported for many years, time to update default security mechanism */ + if ((volume_info->secFlg == 0) && warned_on_ntlm == false) { + warned_on_ntlm = true; + cERROR(1, "default 
security mechanism requested. The default " + "security mechanism will be upgraded from ntlm to " + "ntlmv2 in kernel release 2.6.41"); + } ses->overrideSecFlg = volume_info->secFlg; mutex_lock(&ses->session_mutex); @@ -2276,7 +2292,7 @@ static int generic_ip_connect(struct TCP_Server_Info *server) { int rc = 0; - unsigned short int sport; + __be16 sport; int slen, sfamily; struct socket *socket = server->ssocket; struct sockaddr *saddr; @@ -2361,7 +2377,7 @@ generic_ip_connect(struct TCP_Server_Info *server) static int ip_connect(struct TCP_Server_Info *server) { - unsigned short int *sport; + __be16 *sport; struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; @@ -2826,7 +2842,7 @@ try_mount_again: remote_path_check: /* check if a whole path (including prepath) is not remote */ - if (!rc && cifs_sb->prepathlen && tcon) { + if (!rc && tcon) { /* build_path_to_root works only when we have a valid tcon */ full_path = cifs_build_path_to_root(cifs_sb, tcon); if (full_path == NULL) { diff --git a/fs/cifs/file.c b/fs/cifs/file.c index c27d236738fc..faf59529e847 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -575,8 +575,10 @@ reopen_error_exit: int cifs_close(struct inode *inode, struct file *file) { - cifsFileInfo_put(file->private_data); - file->private_data = NULL; + if (file->private_data != NULL) { + cifsFileInfo_put(file->private_data); + file->private_data = NULL; + } /* return code from the ->release op is always ignored */ return 0; @@ -970,6 +972,9 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file, total_written += bytes_written) { rc = -EAGAIN; while (rc == -EAGAIN) { + struct kvec iov[2]; + unsigned int len; + if (open_file->invalidHandle) { /* we could deadlock if we called filemap_fdatawait from here so tell @@ -979,31 +984,14 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file, if (rc != 0) break; } - if (experimEnabled || (pTcon->ses->server && - ((pTcon->ses->server->secMode & - (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) - == 0))) { - struct kvec iov[2]; - unsigned int len; - len = min((size_t)cifs_sb->wsize, - write_size - total_written); - /* iov[0] is reserved for smb header */ - iov[1].iov_base = (char *)write_data + - total_written; - iov[1].iov_len = len; - rc = CIFSSMBWrite2(xid, pTcon, - open_file->netfid, len, - *poffset, &bytes_written, - iov, 1, 0); - } else - rc = CIFSSMBWrite(xid, pTcon, - open_file->netfid, - min_t(const int, cifs_sb->wsize, - write_size - total_written), - *poffset, &bytes_written, - write_data + total_written, - NULL, 0); + len = min((size_t)cifs_sb->wsize, + write_size - total_written); + /* iov[0] is reserved for smb header */ + iov[1].iov_base = (char *)write_data + total_written; + iov[1].iov_len = len; + rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid, len, + *poffset, &bytes_written, iov, 1, 0); } if (rc || (bytes_written == 0)) { if (total_written) @@ -1240,12 +1228,6 @@ static int cifs_writepages(struct address_space *mapping, } tcon = tlink_tcon(open_file->tlink); - if (!experimEnabled && tcon->ses->server->secMode & - (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { - cifsFileInfo_put(open_file); - kfree(iov); - return generic_writepages(mapping, wbc); - } cifsFileInfo_put(open_file); xid = GetXid(); @@ -1980,6 +1962,24 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size, return total_read; } +/* + * If the page is mmap'ed into a process' page tables, then we need to make + * sure that it doesn't 
change while being written back. + */ +static int +cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct page *page = vmf->page; + + lock_page(page); + return VM_FAULT_LOCKED; +} + +static struct vm_operations_struct cifs_file_vm_ops = { + .fault = filemap_fault, + .page_mkwrite = cifs_page_mkwrite, +}; + int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma) { int rc, xid; @@ -1991,6 +1991,8 @@ int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma) cifs_invalidate_mapping(inode); rc = generic_file_mmap(file, vma); + if (rc == 0) + vma->vm_ops = &cifs_file_vm_ops; FreeXid(xid); return rc; } @@ -2007,6 +2009,8 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma) return rc; } rc = generic_file_mmap(file, vma); + if (rc == 0) + vma->vm_ops = &cifs_file_vm_ops; FreeXid(xid); return rc; } diff --git a/fs/cifs/link.c b/fs/cifs/link.c index e8804d373404..ce417a9764a3 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -239,7 +239,7 @@ CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon, if (rc != 0) return rc; - if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) { + if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { CIFSSMBClose(xid, tcon, netfid); /* it's not a symlink */ return -EINVAL; @@ -316,7 +316,7 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr, if (rc != 0) goto out; - if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) { + if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { CIFSSMBClose(xid, pTcon, netfid); /* it's not a symlink */ goto out; diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 2a930a752a78..0c684ae4c071 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -100,6 +100,7 @@ sesInfoFree(struct cifsSesInfo *buf_to_free) memset(buf_to_free->password, 0, strlen(buf_to_free->password)); kfree(buf_to_free->password); } + kfree(buf_to_free->user_name); kfree(buf_to_free->domainName); kfree(buf_to_free); } @@ -520,7 +521,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) (struct smb_com_transaction_change_notify_rsp *)buf; struct file_notify_information *pnotify; __u32 data_offset = 0; - if (pSMBr->ByteCount > sizeof(struct file_notify_information)) { + if (get_bcc_le(buf) > sizeof(struct file_notify_information)) { data_offset = le32_to_cpu(pSMBr->DataOffset); pnotify = (struct file_notify_information *) diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 16765703131b..f6728eb6f4b9 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -219,12 +219,12 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, bcc_ptr++; } */ /* copy user */ - if (ses->userName == NULL) { + if (ses->user_name == NULL) { /* null user mount */ *bcc_ptr = 0; *(bcc_ptr+1) = 0; } else { - bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName, + bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->user_name, MAX_USERNAME_SIZE, nls_cp); } bcc_ptr += 2 * bytes_ret; @@ -244,12 +244,11 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, /* copy user */ /* BB what about null user mounts - check that we do this BB */ /* copy user */ - if (ses->userName == NULL) { - /* BB what about null user mounts - check that we do this BB */ - } else { - strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE); - } - bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE); + if (ses->user_name != NULL) + strncpy(bcc_ptr, ses->user_name, MAX_USERNAME_SIZE); + /* else null user mount */ + + bcc_ptr += strnlen(ses->user_name, 
MAX_USERNAME_SIZE); *bcc_ptr = 0; bcc_ptr++; /* account for null termination */ @@ -405,8 +404,8 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, /* BB spec says that if AvId field of MsvAvTimestamp is populated then we must set the MIC field of the AUTHENTICATE_MESSAGE */ ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags); - tioffset = cpu_to_le16(pblob->TargetInfoArray.BufferOffset); - tilen = cpu_to_le16(pblob->TargetInfoArray.Length); + tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset); + tilen = le16_to_cpu(pblob->TargetInfoArray.Length); if (tilen) { ses->auth_key.response = kmalloc(tilen, GFP_KERNEL); if (!ses->auth_key.response) { @@ -523,14 +522,14 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer, tmp += len; } - if (ses->userName == NULL) { + if (ses->user_name == NULL) { sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); sec_blob->UserName.Length = 0; sec_blob->UserName.MaximumLength = 0; tmp += 2; } else { int len; - len = cifs_strtoUCS((__le16 *)tmp, ses->userName, + len = cifs_strtoUCS((__le16 *)tmp, ses->user_name, MAX_USERNAME_SIZE, nls_cp); len *= 2; /* unicode is 2 bytes each */ sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); diff --git a/fs/dcache.c b/fs/dcache.c index ad25c4cec7d5..129a35730994 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -2131,7 +2131,7 @@ EXPORT_SYMBOL(d_rehash); */ void dentry_update_name_case(struct dentry *dentry, struct qstr *name) { - BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); + BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex)); BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */ spin_lock(&dentry->d_lock); diff --git a/fs/fhandle.c b/fs/fhandle.c index bf93ad2bee07..6b088641f5bf 100644 --- a/fs/fhandle.c +++ b/fs/fhandle.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include "internal.h" diff --git a/fs/filesystems.c b/fs/filesystems.c index 751d6b255a12..0845f84f2a5f 100644 --- a/fs/filesystems.c +++ b/fs/filesystems.c @@ -110,14 +110,13 @@ int unregister_filesystem(struct file_system_type * fs) *tmp = fs->next; fs->next = NULL; write_unlock(&file_systems_lock); + synchronize_rcu(); return 0; } tmp = &(*tmp)->next; } write_unlock(&file_systems_lock); - synchronize_rcu(); - return -EINVAL; } diff --git a/fs/namei.c b/fs/namei.c index e6cd6113872c..54fc993e3027 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -697,6 +697,7 @@ static __always_inline void set_root_rcu(struct nameidata *nd) do { seq = read_seqcount_begin(&fs->seq); nd->root = fs->root; + nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq); } while (read_seqcount_retry(&fs->seq, seq)); } } diff --git a/fs/namespace.c b/fs/namespace.c index 7dba2ed03429..d99bcf59e4c2 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1030,18 +1030,6 @@ const struct seq_operations mounts_op = { .show = show_vfsmnt }; -static int uuid_is_nil(u8 *uuid) -{ - int i; - u8 *cp = (u8 *)uuid; - - for (i = 0; i < 16; i++) { - if (*cp++) - return 0; - } - return 1; -} - static int show_mountinfo(struct seq_file *m, void *v) { struct proc_mounts *p = m->private; @@ -1085,10 +1073,6 @@ static int show_mountinfo(struct seq_file *m, void *v) if (IS_MNT_UNBINDABLE(mnt)) seq_puts(m, " unbindable"); - if (!uuid_is_nil(mnt->mnt_sb->s_uuid)) - /* print the uuid */ - seq_printf(m, " uuid:%pU", mnt->mnt_sb->s_uuid); - /* Filesystem specific data */ seq_puts(m, " - "); show_type(m, sb); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index af0c6279a4a7..e4cbc11a74ab 100644 --- 
a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -542,11 +542,15 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, u if (!nfs_need_commit(nfsi)) return 0; + spin_lock(&inode->i_lock); ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT); if (ret > 0) nfsi->ncommit -= ret; + spin_unlock(&inode->i_lock); + if (nfs_need_commit(NFS_I(inode))) __mark_inode_dirty(inode, I_DIRTY_DATASYNC); + return ret; } #else @@ -1483,9 +1487,7 @@ int nfs_commit_inode(struct inode *inode, int how) res = nfs_commit_set_lock(NFS_I(inode), may_wait); if (res <= 0) goto out_mark_dirty; - spin_lock(&inode->i_lock); res = nfs_scan_commit(inode, &head, 0, 0); - spin_unlock(&inode->i_lock); if (res) { int error; diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c index b10e3540d5b7..ce4f62440425 100644 --- a/fs/partitions/ldm.c +++ b/fs/partitions/ldm.c @@ -1299,6 +1299,11 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags) BUG_ON (!data || !frags); + if (size < 2 * VBLK_SIZE_HEAD) { + ldm_error("Value of size is to small."); + return false; + } + group = get_unaligned_be32(data + 0x08); rec = get_unaligned_be16(data + 0x0C); num = get_unaligned_be16(data + 0x0E); @@ -1306,6 +1311,10 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags) ldm_error ("A VBLK claims to have %d parts.", num); return false; } + if (rec >= num) { + ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num); + return false; + } list_for_each (item, frags) { f = list_entry (item, struct frag, list); @@ -1334,10 +1343,9 @@ found: f->map |= (1 << rec); - if (num > 0) { - data += VBLK_SIZE_HEAD; - size -= VBLK_SIZE_HEAD; - } + data += VBLK_SIZE_HEAD; + size -= VBLK_SIZE_HEAD; + memcpy (f->data+rec*(size-VBLK_SIZE_HEAD)+VBLK_SIZE_HEAD, data, size); return true; diff --git a/fs/proc/base.c b/fs/proc/base.c index dd6628d3ba42..dfa532730e55 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -3124,11 +3124,16 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi /* for the /proc/ directory itself, after non-process stuff has been done */ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir) { - unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY; - struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode); + unsigned int nr; + struct task_struct *reaper; struct tgid_iter iter; struct pid_namespace *ns; + if (filp->f_pos >= PID_MAX_LIMIT + TGID_OFFSET) + goto out_no_task; + nr = filp->f_pos - FIRST_PROCESS_ENTRY; + + reaper = get_proc_task(filp->f_path.dentry->d_inode); if (!reaper) goto out_no_task; diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index 9eead2c796b7..fbb0b478a346 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c @@ -112,6 +112,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) SetPageDirty(page); unlock_page(page); + put_page(page); } return 0; diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index 919f0de29d8f..e6493cac193d 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -23,6 +23,12 @@ #ifndef __UBIFS_DEBUG_H__ #define __UBIFS_DEBUG_H__ +/* Checking helper functions */ +typedef int (*dbg_leaf_callback)(struct ubifs_info *c, + struct ubifs_zbranch *zbr, void *priv); +typedef int (*dbg_znode_callback)(struct ubifs_info *c, + struct ubifs_znode *znode, void *priv); + #ifdef CONFIG_UBIFS_FS_DEBUG /** @@ -270,11 +276,6 @@ void dbg_dump_tnc(struct ubifs_info *c); void dbg_dump_index(struct ubifs_info *c); void 
dbg_dump_lpt_lebs(const struct ubifs_info *c); -/* Checking helper functions */ -typedef int (*dbg_leaf_callback)(struct ubifs_info *c, - struct ubifs_zbranch *zbr, void *priv); -typedef int (*dbg_znode_callback)(struct ubifs_info *c, - struct ubifs_znode *znode, void *priv); int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb, dbg_znode_callback znode_cb, void *priv); @@ -295,7 +296,6 @@ int dbg_check_idx_size(struct ubifs_info *c, long long idx_size); int dbg_check_filesystem(struct ubifs_info *c); void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat, int add_pos); -int dbg_check_lprops(struct ubifs_info *c); int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode, int row, int col); int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode, @@ -401,58 +401,94 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c); #define DBGKEY(key) ((char *)(key)) #define DBGKEY1(key) ((char *)(key)) -#define ubifs_debugging_init(c) 0 -#define ubifs_debugging_exit(c) ({}) +static inline int ubifs_debugging_init(struct ubifs_info *c) { return 0; } +static inline void ubifs_debugging_exit(struct ubifs_info *c) { return; } +static inline const char *dbg_ntype(int type) { return ""; } +static inline const char *dbg_cstate(int cmt_state) { return ""; } +static inline const char *dbg_jhead(int jhead) { return ""; } +static inline const char * +dbg_get_key_dump(const struct ubifs_info *c, + const union ubifs_key *key) { return ""; } +static inline void dbg_dump_inode(const struct ubifs_info *c, + const struct inode *inode) { return; } +static inline void dbg_dump_node(const struct ubifs_info *c, + const void *node) { return; } +static inline void dbg_dump_lpt_node(const struct ubifs_info *c, + void *node, int lnum, + int offs) { return; } +static inline void +dbg_dump_budget_req(const struct ubifs_budget_req *req) { return; } +static inline void +dbg_dump_lstats(const struct ubifs_lp_stats *lst) { return; } +static inline void dbg_dump_budg(struct ubifs_info *c) { return; } +static inline void dbg_dump_lprop(const struct ubifs_info *c, + const struct ubifs_lprops *lp) { return; } +static inline void dbg_dump_lprops(struct ubifs_info *c) { return; } +static inline void dbg_dump_lpt_info(struct ubifs_info *c) { return; } +static inline void dbg_dump_leb(const struct ubifs_info *c, + int lnum) { return; } +static inline void +dbg_dump_znode(const struct ubifs_info *c, + const struct ubifs_znode *znode) { return; } +static inline void dbg_dump_heap(struct ubifs_info *c, + struct ubifs_lpt_heap *heap, + int cat) { return; } +static inline void dbg_dump_pnode(struct ubifs_info *c, + struct ubifs_pnode *pnode, + struct ubifs_nnode *parent, + int iip) { return; } +static inline void dbg_dump_tnc(struct ubifs_info *c) { return; } +static inline void dbg_dump_index(struct ubifs_info *c) { return; } +static inline void dbg_dump_lpt_lebs(const struct ubifs_info *c) { return; } -#define dbg_ntype(type) "" -#define dbg_cstate(cmt_state) "" -#define dbg_jhead(jhead) "" -#define dbg_get_key_dump(c, key) ({}) -#define dbg_dump_inode(c, inode) ({}) -#define dbg_dump_node(c, node) ({}) -#define dbg_dump_lpt_node(c, node, lnum, offs) ({}) -#define dbg_dump_budget_req(req) ({}) -#define dbg_dump_lstats(lst) ({}) -#define dbg_dump_budg(c) ({}) -#define dbg_dump_lprop(c, lp) ({}) -#define dbg_dump_lprops(c) ({}) -#define dbg_dump_lpt_info(c) ({}) -#define dbg_dump_leb(c, lnum) ({}) -#define dbg_dump_znode(c, znode) ({}) -#define dbg_dump_heap(c, heap, cat) ({}) 
-#define dbg_dump_pnode(c, pnode, parent, iip) ({}) -#define dbg_dump_tnc(c) ({}) -#define dbg_dump_index(c) ({}) -#define dbg_dump_lpt_lebs(c) ({}) +static inline int dbg_walk_index(struct ubifs_info *c, + dbg_leaf_callback leaf_cb, + dbg_znode_callback znode_cb, + void *priv) { return 0; } +static inline void dbg_save_space_info(struct ubifs_info *c) { return; } +static inline int dbg_check_space_info(struct ubifs_info *c) { return 0; } +static inline int dbg_check_lprops(struct ubifs_info *c) { return 0; } +static inline int +dbg_old_index_check_init(struct ubifs_info *c, + struct ubifs_zbranch *zroot) { return 0; } +static inline int +dbg_check_old_index(struct ubifs_info *c, + struct ubifs_zbranch *zroot) { return 0; } +static inline int dbg_check_cats(struct ubifs_info *c) { return 0; } +static inline int dbg_check_ltab(struct ubifs_info *c) { return 0; } +static inline int dbg_chk_lpt_free_spc(struct ubifs_info *c) { return 0; } +static inline int dbg_chk_lpt_sz(struct ubifs_info *c, + int action, int len) { return 0; } +static inline int dbg_check_synced_i_size(struct inode *inode) { return 0; } +static inline int dbg_check_dir_size(struct ubifs_info *c, + const struct inode *dir) { return 0; } +static inline int dbg_check_tnc(struct ubifs_info *c, int extra) { return 0; } +static inline int dbg_check_idx_size(struct ubifs_info *c, + long long idx_size) { return 0; } +static inline int dbg_check_filesystem(struct ubifs_info *c) { return 0; } +static inline void dbg_check_heap(struct ubifs_info *c, + struct ubifs_lpt_heap *heap, + int cat, int add_pos) { return; } +static inline int dbg_check_lpt_nodes(struct ubifs_info *c, + struct ubifs_cnode *cnode, int row, int col) { return 0; } +static inline int dbg_check_inode_size(struct ubifs_info *c, + const struct inode *inode, + loff_t size) { return 0; } +static inline int +dbg_check_data_nodes_order(struct ubifs_info *c, + struct list_head *head) { return 0; } +static inline int +dbg_check_nondata_nodes_order(struct ubifs_info *c, + struct list_head *head) { return 0; } -#define dbg_walk_index(c, leaf_cb, znode_cb, priv) 0 -#define dbg_old_index_check_init(c, zroot) 0 -#define dbg_save_space_info(c) ({}) -#define dbg_check_space_info(c) 0 -#define dbg_check_old_index(c, zroot) 0 -#define dbg_check_cats(c) 0 -#define dbg_check_ltab(c) 0 -#define dbg_chk_lpt_free_spc(c) 0 -#define dbg_chk_lpt_sz(c, action, len) 0 -#define dbg_check_synced_i_size(inode) 0 -#define dbg_check_dir_size(c, dir) 0 -#define dbg_check_tnc(c, x) 0 -#define dbg_check_idx_size(c, idx_size) 0 -#define dbg_check_filesystem(c) 0 -#define dbg_check_heap(c, heap, cat, add_pos) ({}) -#define dbg_check_lprops(c) 0 -#define dbg_check_lpt_nodes(c, cnode, row, col) 0 -#define dbg_check_inode_size(c, inode, size) 0 -#define dbg_check_data_nodes_order(c, head) 0 -#define dbg_check_nondata_nodes_order(c, head) 0 -#define dbg_force_in_the_gaps_enabled 0 -#define dbg_force_in_the_gaps() 0 -#define dbg_failure_mode 0 +static inline int dbg_force_in_the_gaps(void) { return 0; } +#define dbg_force_in_the_gaps_enabled 0 +#define dbg_failure_mode 0 -#define dbg_debugfs_init() 0 -#define dbg_debugfs_exit() -#define dbg_debugfs_init_fs(c) 0 -#define dbg_debugfs_exit_fs(c) 0 +static inline int dbg_debugfs_init(void) { return 0; } +static inline void dbg_debugfs_exit(void) { return; } +static inline int dbg_debugfs_init_fs(struct ubifs_info *c) { return 0; } +static inline int dbg_debugfs_exit_fs(struct ubifs_info *c) { return 0; } #endif /* !CONFIG_UBIFS_FS_DEBUG */ #endif /* 
!__UBIFS_DEBUG_H__ */ diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 28be1e6a65e8..b286db79c686 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -1312,6 +1312,9 @@ int ubifs_fsync(struct file *file, int datasync) dbg_gen("syncing inode %lu", inode->i_ino); + if (inode->i_sb->s_flags & MS_RDONLY) + return 0; + /* * VFS has already synchronized dirty pages for this inode. Synchronize * the inode unless this is a 'datasync()' call. diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 32176cc8e715..cbbfd98ad4a3 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -697,7 +697,7 @@ extern void blk_start_queue(struct request_queue *q); extern void blk_stop_queue(struct request_queue *q); extern void blk_sync_queue(struct request_queue *q); extern void __blk_stop_queue(struct request_queue *q); -extern void __blk_run_queue(struct request_queue *q, bool force_kblockd); +extern void __blk_run_queue(struct request_queue *q); extern void blk_run_queue(struct request_queue *); extern int blk_rq_map_user(struct request_queue *, struct request *, struct rq_map_data *, void __user *, unsigned long, @@ -857,26 +857,39 @@ extern void blk_put_queue(struct request_queue *); struct blk_plug { unsigned long magic; struct list_head list; + struct list_head cb_list; unsigned int should_sort; }; +struct blk_plug_cb { + struct list_head list; + void (*callback)(struct blk_plug_cb *); +}; extern void blk_start_plug(struct blk_plug *); extern void blk_finish_plug(struct blk_plug *); -extern void __blk_flush_plug(struct task_struct *, struct blk_plug *); +extern void blk_flush_plug_list(struct blk_plug *, bool); static inline void blk_flush_plug(struct task_struct *tsk) { struct blk_plug *plug = tsk->plug; - if (unlikely(plug)) - __blk_flush_plug(tsk, plug); + if (plug) + blk_flush_plug_list(plug, false); +} + +static inline void blk_schedule_flush_plug(struct task_struct *tsk) +{ + struct blk_plug *plug = tsk->plug; + + if (plug) + blk_flush_plug_list(plug, true); } static inline bool blk_needs_flush_plug(struct task_struct *tsk) { struct blk_plug *plug = tsk->plug; - return plug && !list_empty(&plug->list); + return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list)); } /* @@ -1314,6 +1327,11 @@ static inline void blk_flush_plug(struct task_struct *task) { } +static inline void blk_schedule_flush_plug(struct task_struct *task) +{ +} + + static inline bool blk_needs_flush_plug(struct task_struct *tsk) { return false; diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index e2768834f397..32a4423710f5 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -197,7 +197,6 @@ struct dm_target { struct dm_target_callbacks { struct list_head list; int (*congested_fn) (struct dm_target_callbacks *, int); - void (*unplug_fn)(struct dm_target_callbacks *); }; int dm_register_target(struct target_type *t); diff --git a/include/linux/input.h b/include/linux/input.h index f3a7794a18c4..771d6d85667d 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -167,6 +167,7 @@ struct input_keymap_entry { #define SYN_REPORT 0 #define SYN_CONFIG 1 #define SYN_MT_REPORT 2 +#define SYN_DROPPED 3 /* * Keys and buttons @@ -553,8 +554,8 @@ struct input_keymap_entry { #define KEY_DVD 0x185 /* Media Select DVD */ #define KEY_AUX 0x186 #define KEY_MP3 0x187 -#define KEY_AUDIO 0x188 -#define KEY_VIDEO 0x189 +#define KEY_AUDIO 0x188 /* AL Audio Browser */ +#define KEY_VIDEO 0x189 /* AL Movie Browser */ #define KEY_DIRECTORY 0x18a 
#define KEY_LIST 0x18b #define KEY_MEMO 0x18c /* Media Select Messages */ @@ -603,8 +604,9 @@ struct input_keymap_entry { #define KEY_FRAMEFORWARD 0x1b5 #define KEY_CONTEXT_MENU 0x1b6 /* GenDesc - system context menu */ #define KEY_MEDIA_REPEAT 0x1b7 /* Consumer - transport control */ -#define KEY_10CHANNELSUP 0x1b8 /* 10 channels up (10+) */ -#define KEY_10CHANNELSDOWN 0x1b9 /* 10 channels down (10-) */ +#define KEY_10CHANNELSUP 0x1b8 /* 10 channels up (10+) */ +#define KEY_10CHANNELSDOWN 0x1b9 /* 10 channels down (10-) */ +#define KEY_IMAGES 0x1ba /* AL Image Browser */ #define KEY_DEL_EOL 0x1c0 #define KEY_DEL_EOS 0x1c1 diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h index b3ac06a4435d..318bb82325a6 100644 --- a/include/linux/input/mt.h +++ b/include/linux/input/mt.h @@ -48,6 +48,12 @@ static inline void input_mt_slot(struct input_dev *dev, int slot) input_event(dev, EV_ABS, ABS_MT_SLOT, slot); } +static inline bool input_is_mt_axis(int axis) +{ + return axis == ABS_MT_SLOT || + (axis >= ABS_MT_FIRST && axis <= ABS_MT_LAST); +} + void input_mt_report_slot_state(struct input_dev *dev, unsigned int tool_type, bool active); diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 5a5ce7055839..5e9840f50980 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -216,7 +216,7 @@ static inline void mem_cgroup_del_lru_list(struct page *page, int lru) return ; } -static inline inline void mem_cgroup_rotate_reclaimable_page(struct page *page) +static inline void mem_cgroup_rotate_reclaimable_page(struct page *page) { return ; } diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index ad1b19aa6508..aef23309a742 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -86,16 +86,25 @@ extern int mfd_clone_cell(const char *cell, const char **clones, */ static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev) { - return pdev->dev.platform_data; + return pdev->mfd_cell; } /* * Given a platform device that's been created by mfd_add_devices(), fetch * the .mfd_data entry from the mfd_cell that created it. + * Otherwise just return the platform_data pointer. + * This maintains compatibility with platform drivers whose devices aren't + * created by the mfd layer, and expect platform_data to contain what would've + * otherwise been in mfd_data. 
*/ static inline void *mfd_get_data(struct platform_device *pdev) { - return mfd_get_cell(pdev)->mfd_data; + const struct mfd_cell *cell = mfd_get_cell(pdev); + + if (cell) + return cell->mfd_data; + else + return pdev->dev.platform_data; } extern int mfd_add_devices(struct device *parent, int id, diff --git a/include/linux/pid.h b/include/linux/pid.h index 31afb7ecbe1f..cdced84261d7 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -117,7 +117,7 @@ extern struct pid *find_vpid(int nr); */ extern struct pid *find_get_pid(int nr); extern struct pid *find_ge_pid(int nr, struct pid_namespace *); -int next_pidmap(struct pid_namespace *pid_ns, int last); +int next_pidmap(struct pid_namespace *pid_ns, unsigned int last); extern struct pid *alloc_pid(struct pid_namespace *ns); extern void free_pid(struct pid *pid); diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index d96db9825708..744942c95fec 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -14,6 +14,8 @@ #include #include +struct mfd_cell; + struct platform_device { const char * name; int id; @@ -23,6 +25,9 @@ struct platform_device { const struct platform_device_id *id_entry; + /* MFD cell pointer */ + struct mfd_cell *mfd_cell; + /* arch specific additions */ struct pdev_archdata archdata; }; diff --git a/include/linux/rio.h b/include/linux/rio.h index 4e37a7cfa726..4d50611112ba 100644 --- a/include/linux/rio.h +++ b/include/linux/rio.h @@ -396,7 +396,7 @@ union rio_pw_msg { }; /* Architecture and hardware-specific functions */ -extern void rio_register_mport(struct rio_mport *); +extern int rio_register_mport(struct rio_mport *); extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int); extern void rio_close_inb_mbox(struct rio_mport *, int); extern int rio_open_outb_mbox(struct rio_mport *, void *, int, int); diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h index 7410d3365e2a..0cee0152aca9 100644 --- a/include/linux/rio_ids.h +++ b/include/linux/rio_ids.h @@ -35,6 +35,7 @@ #define RIO_DID_IDTCPS6Q 0x035f #define RIO_DID_IDTCPS10Q 0x035e #define RIO_DID_IDTCPS1848 0x0374 +#define RIO_DID_IDTCPS1432 0x0375 #define RIO_DID_IDTCPS1616 0x0379 #define RIO_DID_IDTVPS1616 0x0377 #define RIO_DID_IDTSPS1616 0x0378 diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 2ca7e8a78060..877ece45426f 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -228,6 +228,8 @@ extern int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alrm); extern int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alrm); +extern int rtc_initialize_alarm(struct rtc_device *rtc, + struct rtc_wkalrm *alrm); extern void rtc_update_irq(struct rtc_device *rtc, unsigned long num, unsigned long events); diff --git a/include/linux/sched.h b/include/linux/sched.h index 4ec2c027e92c..18d63cea2848 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1254,6 +1254,9 @@ struct task_struct { #endif struct mm_struct *mm, *active_mm; +#ifdef CONFIG_COMPAT_BRK + unsigned brk_randomized:1; +#endif #if defined(SPLIT_RSS_COUNTING) struct task_rss_stat rss_stat; #endif diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 5a89e3612875..083ffea7ba18 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -249,6 +249,8 @@ extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); extern int hibernate(void); extern bool system_entering_hibernation(void); #else /* CONFIG_HIBERNATION */ +static inline void 
register_nosave_region(unsigned long b, unsigned long e) {} +static inline void register_nosave_region_late(unsigned long b, unsigned long e) {} static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } static inline void swsusp_set_page_free(struct page *p) {} static inline void swsusp_unset_page_free(struct page *p) {} @@ -297,14 +299,7 @@ static inline bool pm_wakeup_pending(void) { return false; } extern struct mutex pm_mutex; -#ifndef CONFIG_HIBERNATION -static inline void register_nosave_region(unsigned long b, unsigned long e) -{ -} -static inline void register_nosave_region_late(unsigned long b, unsigned long e) -{ -} - +#ifndef CONFIG_HIBERNATE_CALLBACKS static inline void lock_system_sleep(void) {} static inline void unlock_system_sleep(void) {} diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 461c0119664f..2b3831b58aa4 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -58,6 +58,13 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, UNEVICTABLE_PGCLEARED, /* on COW, page truncate */ UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */ UNEVICTABLE_MLOCKFREED, +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + THP_FAULT_ALLOC, + THP_FAULT_FALLBACK, + THP_COLLAPSE_ALLOC, + THP_COLLAPSE_ALLOC_FAILED, + THP_SPLIT, +#endif NR_VM_EVENT_ITEMS }; diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index cdf2e8ac4309..d2df55b0c213 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -139,8 +139,6 @@ do { \ */ enum p9_msg_t { - P9_TSYNCFS = 0, - P9_RSYNCFS, P9_TLERROR = 6, P9_RLERROR, P9_TSTATFS = 8, diff --git a/include/net/9p/client.h b/include/net/9p/client.h index 85c1413f054d..051a99f79769 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -218,8 +218,8 @@ void p9_client_disconnect(struct p9_client *clnt); void p9_client_begin_disconnect(struct p9_client *clnt); struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, char *uname, u32 n_uname, char *aname); -struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, - int clone); +struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname, + char **wnames, int clone); int p9_client_open(struct p9_fid *fid, int mode); int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode, char *extension); @@ -230,7 +230,6 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode, gid_t gid, struct p9_qid *qid); int p9_client_clunk(struct p9_fid *fid); int p9_client_fsync(struct p9_fid *fid, int datasync); -int p9_client_sync_fs(struct p9_fid *fid); int p9_client_remove(struct p9_fid *fid); int p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset, u32 count); diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 78f18adb49c8..bf366547da25 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -401,9 +401,9 @@ TRACE_EVENT(block_plug, DECLARE_EVENT_CLASS(block_unplug, - TP_PROTO(struct request_queue *q), + TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit), - TP_ARGS(q), + TP_ARGS(q, depth, explicit), TP_STRUCT__entry( __field( int, nr_rq ) @@ -411,7 +411,7 @@ DECLARE_EVENT_CLASS(block_unplug, ), TP_fast_assign( - __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE]; + __entry->nr_rq = depth; memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), @@ -419,31 +419,19 @@ DECLARE_EVENT_CLASS(block_unplug, ); /** - * block_unplug_timer - timed release of operations requests in queue to device 
driver - * @q: request queue to unplug - * - * Unplug the request queue @q because a timer expired and allow block - * operation requests to be sent to the device driver. - */ -DEFINE_EVENT(block_unplug, block_unplug_timer, - - TP_PROTO(struct request_queue *q), - - TP_ARGS(q) -); - -/** - * block_unplug_io - release of operations requests in request queue + * block_unplug - release of operations requests in request queue * @q: request queue to unplug + * @depth: number of requests just added to the queue + * @explicit: whether this was an explicit unplug, or one from schedule() * * Unplug request queue @q because device driver is scheduled to work * on elements in the request queue. */ -DEFINE_EVENT(block_unplug, block_unplug_io, +DEFINE_EVENT(block_unplug, block_unplug, - TP_PROTO(struct request_queue *q), + TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit), - TP_ARGS(q) + TP_ARGS(q, depth, explicit) ); /** diff --git a/kernel/futex.c b/kernel/futex.c index dfb924ffe65b..fe28dc282eae 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1886,7 +1886,7 @@ retry: restart->futex.val = val; restart->futex.time = abs_time->tv64; restart->futex.bitset = bitset; - restart->futex.flags = flags; + restart->futex.flags = flags | FLAGS_HAS_TIMEOUT; ret = -ERESTART_RESTARTBLOCK; diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 27960f114efd..8e81a9860a0d 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -364,6 +364,7 @@ void perf_cgroup_switch(struct task_struct *task, int mode) } if (mode & PERF_CGROUP_SWIN) { + WARN_ON_ONCE(cpuctx->cgrp); /* set cgrp before ctxsw in to * allow event_filter_match() to not * have to pass task around @@ -2423,6 +2424,14 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx) if (!ctx || !ctx->nr_events) goto out; + /* + * We must ctxsw out cgroup events to avoid conflict + * when invoking perf_task_event_sched_in() later on + * in this function. Otherwise we end up trying to + * ctxswin cgroup events which are already scheduled + * in. + */ + perf_cgroup_sched_out(current); task_ctx_sched_out(ctx, EVENT_ALL); raw_spin_lock(&ctx->lock); @@ -2447,6 +2456,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx) raw_spin_unlock(&ctx->lock); + /* + * Also calls ctxswin for cgroup events, if any: + */ perf_event_context_sched_in(ctx, ctx->task); out: local_irq_restore(flags); diff --git a/kernel/pid.c b/kernel/pid.c index 02f221274265..57a8346a270e 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -217,11 +217,14 @@ static int alloc_pidmap(struct pid_namespace *pid_ns) return -1; } -int next_pidmap(struct pid_namespace *pid_ns, int last) +int next_pidmap(struct pid_namespace *pid_ns, unsigned int last) { int offset; struct pidmap *map, *end; + if (last >= PID_MAX_LIMIT) + return -1; + offset = (last + 1) & BITS_PER_PAGE_MASK; map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE]; end = &pid_ns->pidmap[PIDMAP_ENTRIES]; diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 4603f08dc47b..6de9a8fc3417 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -18,9 +18,13 @@ config SUSPEND_FREEZER Turning OFF this setting is NOT recommended! If in doubt, say Y. 
+config HIBERNATE_CALLBACKS + bool + config HIBERNATION bool "Hibernation (aka 'suspend to disk')" depends on SWAP && ARCH_HIBERNATION_POSSIBLE + select HIBERNATE_CALLBACKS select LZO_COMPRESS select LZO_DECOMPRESS ---help--- @@ -85,7 +89,7 @@ config PM_STD_PARTITION config PM_SLEEP def_bool y - depends on SUSPEND || HIBERNATION || XEN_SAVE_RESTORE + depends on SUSPEND || HIBERNATE_CALLBACKS config PM_SLEEP_SMP def_bool y diff --git a/kernel/sched.c b/kernel/sched.c index 48013633d792..312f8b95c2d4 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4111,20 +4111,20 @@ need_resched: try_to_wake_up_local(to_wakeup); } deactivate_task(rq, prev, DEQUEUE_SLEEP); + + /* + * If we are going to sleep and we have plugged IO queued, make + * sure to submit it to avoid deadlocks. + */ + if (blk_needs_flush_plug(prev)) { + raw_spin_unlock(&rq->lock); + blk_schedule_flush_plug(prev); + raw_spin_lock(&rq->lock); + } } switch_count = &prev->nvcsw; } - /* - * If we are going to sleep and we have plugged IO queued, make - * sure to submit it to avoid deadlocks. - */ - if (prev->state != TASK_RUNNING && blk_needs_flush_plug(prev)) { - raw_spin_unlock(&rq->lock); - blk_flush_plug(prev); - raw_spin_lock(&rq->lock); - } - pre_schedule(rq, prev); if (unlikely(!rq->nr_running)) diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 7f00772e57c9..6fa833ab2cb8 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -2104,21 +2104,20 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, enum cpu_idle_type idle, int *all_pinned, int *this_best_prio, struct cfs_rq *busiest_cfs_rq) { - int loops = 0, pulled = 0, pinned = 0; + int loops = 0, pulled = 0; long rem_load_move = max_load_move; struct task_struct *p, *n; if (max_load_move == 0) goto out; - pinned = 1; - list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) { if (loops++ > sysctl_sched_nr_migrate) break; if ((p->se.load.weight >> 1) > rem_load_move || - !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) + !can_migrate_task(p, busiest, this_cpu, sd, idle, + all_pinned)) continue; pull_task(busiest, p, this_rq, this_cpu); @@ -2153,9 +2152,6 @@ out: */ schedstat_add(sd, lb_gained[idle], pulled); - if (all_pinned) - *all_pinned = pinned; - return max_load_move - rem_load_move; } @@ -3127,6 +3123,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, if (!sds.busiest || sds.busiest_nr_running == 0) goto out_balanced; + sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr; + /* * If the busiest group is imbalanced the below checks don't * work because they assumes all things are equal, which typically @@ -3151,7 +3149,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, * Don't pull any tasks if this group is already above the domain * average load. */ - sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr; if (sds.this_load >= sds.avg_load) goto out_balanced; @@ -3340,6 +3337,7 @@ redo: * still unbalanced. ld_moved simply stays zero, so it is * correctly treated as an imbalance. 
*/ + all_pinned = 1; local_irq_save(flags); double_rq_lock(this_rq, busiest); ld_moved = move_tasks(this_rq, this_cpu, busiest, diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 7aa40f8e182d..6957aa298dfa 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -850,29 +850,21 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q) __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL); } -static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q) +static void blk_add_trace_unplug(void *ignore, struct request_queue *q, + unsigned int depth, bool explicit) { struct blk_trace *bt = q->blk_trace; if (bt) { - unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE]; - __be64 rpdu = cpu_to_be64(pdu); + __be64 rpdu = cpu_to_be64(depth); + u32 what; - __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0, - sizeof(rpdu), &rpdu); - } -} + if (explicit) + what = BLK_TA_UNPLUG_IO; + else + what = BLK_TA_UNPLUG_TIMER; -static void blk_add_trace_unplug_timer(void *ignore, struct request_queue *q) -{ - struct blk_trace *bt = q->blk_trace; - - if (bt) { - unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE]; - __be64 rpdu = cpu_to_be64(pdu); - - __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0, - sizeof(rpdu), &rpdu); + __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu); } } @@ -1015,9 +1007,7 @@ static void blk_register_tracepoints(void) WARN_ON(ret); ret = register_trace_block_plug(blk_add_trace_plug, NULL); WARN_ON(ret); - ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL); - WARN_ON(ret); - ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL); + ret = register_trace_block_unplug(blk_add_trace_unplug, NULL); WARN_ON(ret); ret = register_trace_block_split(blk_add_trace_split, NULL); WARN_ON(ret); @@ -1032,8 +1022,7 @@ static void blk_unregister_tracepoints(void) unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); unregister_trace_block_split(blk_add_trace_split, NULL); - unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL); - unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL); + unregister_trace_block_unplug(blk_add_trace_unplug, NULL); unregister_trace_block_plug(blk_add_trace_plug, NULL); unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL); unregister_trace_block_getrq(blk_add_trace_getrq, NULL); diff --git a/lib/kstrtox.c b/lib/kstrtox.c index 05672e819f8c..a235f3cc471c 100644 --- a/lib/kstrtox.c +++ b/lib/kstrtox.c @@ -49,12 +49,9 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res) val = *s - '0'; else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f') val = _tolower(*s) - 'a' + 10; - else if (*s == '\n') { - if (*(s + 1) == '\0') - break; - else - return -EINVAL; - } else + else if (*s == '\n' && *(s + 1) == '\0') + break; + else return -EINVAL; if (val >= base) diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c index 325c2f9ecebd..d55769d63cb8 100644 --- a/lib/test-kstrtox.c +++ b/lib/test-kstrtox.c @@ -315,12 +315,12 @@ static void __init test_kstrtou64_ok(void) {"65537", 10, 65537}, {"2147483646", 10, 2147483646}, {"2147483647", 10, 2147483647}, - {"2147483648", 10, 2147483648}, - {"2147483649", 10, 2147483649}, - {"4294967294", 10, 4294967294}, - {"4294967295", 10, 4294967295}, - {"4294967296", 10, 4294967296}, - {"4294967297", 10, 4294967297}, + {"2147483648", 10, 2147483648ULL}, + {"2147483649", 10, 2147483649ULL}, + {"4294967294", 10, 
4294967294ULL}, + {"4294967295", 10, 4294967295ULL}, + {"4294967296", 10, 4294967296ULL}, + {"4294967297", 10, 4294967297ULL}, {"9223372036854775806", 10, 9223372036854775806ULL}, {"9223372036854775807", 10, 9223372036854775807ULL}, {"9223372036854775808", 10, 9223372036854775808ULL}, @@ -369,12 +369,12 @@ static void __init test_kstrtos64_ok(void) {"65537", 10, 65537}, {"2147483646", 10, 2147483646}, {"2147483647", 10, 2147483647}, - {"2147483648", 10, 2147483648}, - {"2147483649", 10, 2147483649}, - {"4294967294", 10, 4294967294}, - {"4294967295", 10, 4294967295}, - {"4294967296", 10, 4294967296}, - {"4294967297", 10, 4294967297}, + {"2147483648", 10, 2147483648LL}, + {"2147483649", 10, 2147483649LL}, + {"4294967294", 10, 4294967294LL}, + {"4294967295", 10, 4294967295LL}, + {"4294967296", 10, 4294967296LL}, + {"4294967297", 10, 4294967297LL}, {"9223372036854775806", 10, 9223372036854775806LL}, {"9223372036854775807", 10, 9223372036854775807LL}, }; @@ -418,10 +418,10 @@ static void __init test_kstrtou32_ok(void) {"65537", 10, 65537}, {"2147483646", 10, 2147483646}, {"2147483647", 10, 2147483647}, - {"2147483648", 10, 2147483648}, - {"2147483649", 10, 2147483649}, - {"4294967294", 10, 4294967294}, - {"4294967295", 10, 4294967295}, + {"2147483648", 10, 2147483648U}, + {"2147483649", 10, 2147483649U}, + {"4294967294", 10, 4294967294U}, + {"4294967295", 10, 4294967295U}, }; TEST_OK(kstrtou32, u32, "%u", test_u32_ok); } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 0a619e0e2e0b..470dcda10add 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -244,25 +244,29 @@ static ssize_t single_flag_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf, enum transparent_hugepage_flag flag) { - if (test_bit(flag, &transparent_hugepage_flags)) - return sprintf(buf, "[yes] no\n"); - else - return sprintf(buf, "yes [no]\n"); + return sprintf(buf, "%d\n", + !!test_bit(flag, &transparent_hugepage_flags)); } + static ssize_t single_flag_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count, enum transparent_hugepage_flag flag) { - if (!memcmp("yes", buf, - min(sizeof("yes")-1, count))) { - set_bit(flag, &transparent_hugepage_flags); - } else if (!memcmp("no", buf, - min(sizeof("no")-1, count))) { - clear_bit(flag, &transparent_hugepage_flags); - } else + unsigned long value; + int ret; + + ret = kstrtoul(buf, 10, &value); + if (ret < 0) + return ret; + if (value > 1) return -EINVAL; + if (value) + set_bit(flag, &transparent_hugepage_flags); + else + clear_bit(flag, &transparent_hugepage_flags); + return count; } @@ -680,8 +684,11 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, return VM_FAULT_OOM; page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), vma, haddr, numa_node_id(), 0); - if (unlikely(!page)) + if (unlikely(!page)) { + count_vm_event(THP_FAULT_FALLBACK); goto out; + } + count_vm_event(THP_FAULT_ALLOC); if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { put_page(page); goto out; @@ -909,11 +916,13 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, new_page = NULL; if (unlikely(!new_page)) { + count_vm_event(THP_FAULT_FALLBACK); ret = do_huge_pmd_wp_page_fallback(mm, vma, address, pmd, orig_pmd, page, haddr); put_page(page); goto out; } + count_vm_event(THP_FAULT_ALLOC); if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { put_page(new_page); @@ -1390,6 +1399,7 @@ int split_huge_page(struct page *page) BUG_ON(!PageSwapBacked(page)); 
__split_huge_page(page, anon_vma); + count_vm_event(THP_SPLIT); BUG_ON(PageCompound(page)); out_unlock: @@ -1784,9 +1794,11 @@ static void collapse_huge_page(struct mm_struct *mm, node, __GFP_OTHER_NODE); if (unlikely(!new_page)) { up_read(&mm->mmap_sem); + count_vm_event(THP_COLLAPSE_ALLOC_FAILED); *hpage = ERR_PTR(-ENOMEM); return; } + count_vm_event(THP_COLLAPSE_ALLOC); if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { up_read(&mm->mmap_sem); put_page(new_page); @@ -2151,8 +2163,11 @@ static void khugepaged_do_scan(struct page **hpage) #ifndef CONFIG_NUMA if (!*hpage) { *hpage = alloc_hugepage(khugepaged_defrag()); - if (unlikely(!*hpage)) + if (unlikely(!*hpage)) { + count_vm_event(THP_COLLAPSE_ALLOC_FAILED); break; + } + count_vm_event(THP_COLLAPSE_ALLOC); } #else if (IS_ERR(*hpage)) @@ -2192,8 +2207,11 @@ static struct page *khugepaged_alloc_hugepage(void) do { hpage = alloc_hugepage(khugepaged_defrag()); - if (!hpage) + if (!hpage) { + count_vm_event(THP_COLLAPSE_ALLOC_FAILED); khugepaged_alloc_sleep(); + } else + count_vm_event(THP_COLLAPSE_ALLOC); } while (unlikely(!hpage) && likely(khugepaged_enabled())); return hpage; @@ -2210,8 +2228,11 @@ static void khugepaged_loop(void) while (likely(khugepaged_enabled())) { #ifndef CONFIG_NUMA hpage = khugepaged_alloc_hugepage(); - if (unlikely(!hpage)) + if (unlikely(!hpage)) { + count_vm_event(THP_COLLAPSE_ALLOC_FAILED); break; + } + count_vm_event(THP_COLLAPSE_ALLOC); #else if (IS_ERR(hpage)) { khugepaged_alloc_sleep(); diff --git a/mm/memory.c b/mm/memory.c index 9da8cab1b1b0..ce22a250926f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1410,6 +1410,13 @@ no_page_table: return page; } +static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) +{ + return (vma->vm_flags & VM_GROWSDOWN) && + (vma->vm_start == addr) && + !vma_stack_continue(vma->vm_prev, addr); +} + /** * __get_user_pages() - pin user pages in memory * @tsk: task_struct of target task @@ -1488,7 +1495,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, vma = find_extend_vma(mm, start); if (!vma && in_gate_area(mm, start)) { unsigned long pg = start & PAGE_MASK; - struct vm_area_struct *gate_vma = get_gate_vma(mm); pgd_t *pgd; pud_t *pud; pmd_t *pmd; @@ -1513,10 +1519,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, pte_unmap(pte); return i ? : -EFAULT; } + vma = get_gate_vma(mm); if (pages) { struct page *page; - page = vm_normal_page(gate_vma, start, *pte); + page = vm_normal_page(vma, start, *pte); if (!page) { if (!(gup_flags & FOLL_DUMP) && is_zero_pfn(pte_pfn(*pte))) @@ -1530,12 +1537,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, get_page(page); } pte_unmap(pte); - if (vmas) - vmas[i] = gate_vma; - i++; - start += PAGE_SIZE; - nr_pages--; - continue; + goto next_page; } if (!vma || @@ -1549,6 +1551,13 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, continue; } + /* + * If we don't actually want the page itself, + * and it's the stack guard page, just skip it. 
+ */ + if (!pages && stack_guard_page(vma, start)) + goto next_page; + do { struct page *page; unsigned int foll_flags = gup_flags; @@ -1631,6 +1640,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, flush_anon_page(vma, page, start); flush_dcache_page(page); } +next_page: if (vmas) vmas[i] = vma; i++; @@ -3678,7 +3688,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, */ #ifdef CONFIG_HAVE_IOREMAP_PROT vma = find_vma(mm, addr); - if (!vma) + if (!vma || vma->vm_start > addr) break; if (vma->vm_ops && vma->vm_ops->access) ret = vma->vm_ops->access(vma, addr, buf, diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index a2acaf820fe5..9ca1d604f7cd 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -375,7 +375,7 @@ void online_page(struct page *page) #endif #ifdef CONFIG_FLATMEM - max_mapnr = max(page_to_pfn(page), max_mapnr); + max_mapnr = max(pfn, max_mapnr); #endif ClearPageReserved(page); diff --git a/mm/mlock.c b/mm/mlock.c index 2689a08c79af..6b55e3efe0df 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -135,13 +135,6 @@ void munlock_vma_page(struct page *page) } } -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) -{ - return (vma->vm_flags & VM_GROWSDOWN) && - (vma->vm_start == addr) && - !vma_stack_continue(vma->vm_prev, addr); -} - /** * __mlock_vma_pages_range() - mlock a range of pages in the vma. * @vma: target vma @@ -188,12 +181,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma, if (vma->vm_flags & VM_LOCKED) gup_flags |= FOLL_MLOCK; - /* We don't try to access the guard page of a stack vma */ - if (stack_guard_page(vma, start)) { - addr += PAGE_SIZE; - nr_pages--; - } - return __get_user_pages(current, mm, addr, nr_pages, gup_flags, NULL, NULL, nonblocking); } diff --git a/mm/mmap.c b/mm/mmap.c index 2ec8eb5a9cdd..e27e0cf0de03 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -259,7 +259,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) * randomize_va_space to 2, which will still cause mm->start_brk * to be arbitrarily shifted */ - if (mm->start_brk > PAGE_ALIGN(mm->end_data)) + if (current->brk_randomized) min_brk = mm->start_brk; else min_brk = mm->end_data; @@ -1814,11 +1814,14 @@ static int expand_downwards(struct vm_area_struct *vma, size = vma->vm_end - address; grow = (vma->vm_start - address) >> PAGE_SHIFT; - error = acct_stack_growth(vma, size, grow); - if (!error) { - vma->vm_start = address; - vma->vm_pgoff -= grow; - perf_event_mmap(vma); + error = -ENOMEM; + if (grow <= vma->vm_pgoff) { + error = acct_stack_growth(vma, size, grow); + if (!error) { + vma->vm_start = address; + vma->vm_pgoff -= grow; + perf_event_mmap(vma); + } } } vma_unlock_anon_vma(vma); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 6a819d1b2c7d..83fb72c108b7 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -83,24 +83,6 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk, } #endif /* CONFIG_NUMA */ -/* - * If this is a system OOM (not a memcg OOM) and the task selected to be - * killed is not already running at high (RT) priorities, speed up the - * recovery by boosting the dying task to the lowest FIFO priority. - * That helps with the recovery and avoids interfering with RT tasks. 
- */ -static void boost_dying_task_prio(struct task_struct *p, - struct mem_cgroup *mem) -{ - struct sched_param param = { .sched_priority = 1 }; - - if (mem) - return; - - if (!rt_task(p)) - sched_setscheduler_nocheck(p, SCHED_FIFO, ¶m); -} - /* * The process p may have detached its own ->mm while exiting or through * use_mm(), but one or more of its subthreads may still have a valid @@ -452,13 +434,6 @@ static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem) set_tsk_thread_flag(p, TIF_MEMDIE); force_sig(SIGKILL, p); - /* - * We give our sacrificial lamb high priority and access to - * all the memory it needs. That way it should be able to - * exit() and clear out its resources quickly... - */ - boost_dying_task_prio(p, mem); - return 0; } #undef K @@ -482,7 +457,6 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, */ if (p->flags & PF_EXITING) { set_tsk_thread_flag(p, TIF_MEMDIE); - boost_dying_task_prio(p, mem); return 0; } @@ -556,7 +530,6 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask) */ if (fatal_signal_pending(current)) { set_thread_flag(TIF_MEMDIE); - boost_dying_task_prio(current, NULL); return; } @@ -712,7 +685,6 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, */ if (fatal_signal_pending(current)) { set_thread_flag(TIF_MEMDIE); - boost_dying_task_prio(current, NULL); return; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2747f5e5abc1..9f8a97b9a350 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3176,7 +3176,7 @@ static __init_refok int __build_all_zonelists(void *data) * Called with zonelists_mutex held always * unless system_state == SYSTEM_BOOTING. */ -void build_all_zonelists(void *data) +void __ref build_all_zonelists(void *data) { set_zonelist_order(); diff --git a/mm/shmem.c b/mm/shmem.c index 58da7c150ba6..8fa27e4e582a 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -421,7 +421,8 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long * a waste to allocate index if we cannot allocate data. */ if (sbinfo->max_blocks) { - if (percpu_counter_compare(&sbinfo->used_blocks, (sbinfo->max_blocks - 1)) > 0) + if (percpu_counter_compare(&sbinfo->used_blocks, + sbinfo->max_blocks - 1) >= 0) return ERR_PTR(-ENOSPC); percpu_counter_inc(&sbinfo->used_blocks); spin_lock(&inode->i_lock); @@ -1397,7 +1398,8 @@ repeat: shmem_swp_unmap(entry); sbinfo = SHMEM_SB(inode->i_sb); if (sbinfo->max_blocks) { - if ((percpu_counter_compare(&sbinfo->used_blocks, sbinfo->max_blocks) > 0) || + if (percpu_counter_compare(&sbinfo->used_blocks, + sbinfo->max_blocks) >= 0 || shmem_acct_block(info->flags)) { spin_unlock(&info->lock); error = -ENOSPC; diff --git a/mm/vmscan.c b/mm/vmscan.c index c7f5a6d4b75b..f6b435c80079 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -1988,17 +1989,12 @@ static bool zone_reclaimable(struct zone *zone) return zone->pages_scanned < zone_reclaimable_pages(zone) * 6; } -/* - * As hibernation is going on, kswapd is freezed so that it can't mark - * the zone into all_unreclaimable. It can't handle OOM during hibernation. - * So let's check zone's unreclaimable in direct reclaim as well as kswapd. - */ +/* All zones in zonelist are unreclaimable? 
*/ static bool all_unreclaimable(struct zonelist *zonelist, struct scan_control *sc) { struct zoneref *z; struct zone *zone; - bool all_unreclaimable = true; for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(sc->gfp_mask), sc->nodemask) { @@ -2006,13 +2002,11 @@ static bool all_unreclaimable(struct zonelist *zonelist, continue; if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) continue; - if (zone_reclaimable(zone)) { - all_unreclaimable = false; - break; - } + if (!zone->all_unreclaimable) + return false; } - return all_unreclaimable; + return true; } /* @@ -2108,6 +2102,14 @@ out: if (sc->nr_reclaimed) return sc->nr_reclaimed; + /* + * As hibernation is going on, kswapd is freezed so that it can't mark + * the zone into all_unreclaimable. Thus bypassing all_unreclaimable + * check. + */ + if (oom_killer_disabled) + return 0; + /* top priority shrink_zones still had more to do? don't OOM, then */ if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc)) return 1; diff --git a/mm/vmstat.c b/mm/vmstat.c index 772b39b87d95..897ea9e88238 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -321,9 +321,12 @@ static inline void mod_state(struct zone *zone, /* * The fetching of the stat_threshold is racy. We may apply * a counter threshold to the wrong the cpu if we get - * rescheduled while executing here. However, the following - * will apply the threshold again and therefore bring the - * counter under the threshold. + * rescheduled while executing here. However, the next + * counter update will apply the threshold again and + * therefore bring the counter under the threshold again. + * + * Most of the time the thresholds are the same anyways + * for all cpus in a zone. */ t = this_cpu_read(pcp->stat_threshold); @@ -945,7 +948,16 @@ static const char * const vmstat_text[] = { "unevictable_pgs_cleared", "unevictable_pgs_stranded", "unevictable_pgs_mlockfreed", + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + "thp_fault_alloc", + "thp_fault_fallback", + "thp_collapse_alloc", + "thp_collapse_alloc_failed", + "thp_split", #endif + +#endif /* CONFIG_VM_EVENTS_COUNTERS */ }; static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, diff --git a/net/9p/client.c b/net/9p/client.c index 48b8e084e710..77367745be9b 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -929,15 +929,15 @@ error: } EXPORT_SYMBOL(p9_client_attach); -struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, - int clone) +struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname, + char **wnames, int clone) { int err; struct p9_client *clnt; struct p9_fid *fid; struct p9_qid *wqids; struct p9_req_t *req; - int16_t nwqids, count; + uint16_t nwqids, count; err = 0; wqids = NULL; @@ -955,7 +955,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, fid = oldfid; - P9_DPRINTK(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %d wname[0] %s\n", + P9_DPRINTK(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %ud wname[0] %s\n", oldfid->fid, fid->fid, nwname, wnames ? 
wnames[0] : NULL); req = p9_client_rpc(clnt, P9_TWALK, "ddT", oldfid->fid, fid->fid, @@ -1220,27 +1220,6 @@ error: } EXPORT_SYMBOL(p9_client_fsync); -int p9_client_sync_fs(struct p9_fid *fid) -{ - int err = 0; - struct p9_req_t *req; - struct p9_client *clnt; - - P9_DPRINTK(P9_DEBUG_9P, ">>> TSYNC_FS fid %d\n", fid->fid); - - clnt = fid->clnt; - req = p9_client_rpc(clnt, P9_TSYNCFS, "d", fid->fid); - if (IS_ERR(req)) { - err = PTR_ERR(req); - goto error; - } - P9_DPRINTK(P9_DEBUG_9P, "<<< RSYNCFS fid %d\n", fid->fid); - p9_free_req(clnt, req); -error: - return err; -} -EXPORT_SYMBOL(p9_client_sync_fs); - int p9_client_clunk(struct p9_fid *fid) { int err; diff --git a/net/9p/protocol.c b/net/9p/protocol.c index 8a4084fa8b5a..b58a501cf3d1 100644 --- a/net/9p/protocol.c +++ b/net/9p/protocol.c @@ -265,7 +265,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt, } break; case 'T':{ - int16_t *nwname = va_arg(ap, int16_t *); + uint16_t *nwname = va_arg(ap, uint16_t *); char ***wnames = va_arg(ap, char ***); errcode = p9pdu_readf(pdu, proto_version, @@ -468,7 +468,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt, case 'E':{ int32_t cnt = va_arg(ap, int32_t); const char *k = va_arg(ap, const void *); - const char *u = va_arg(ap, const void *); + const char __user *u = va_arg(ap, + const void __user *); errcode = p9pdu_writef(pdu, proto_version, "d", cnt); if (!errcode && pdu_write_urw(pdu, k, u, cnt)) @@ -495,7 +496,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt, } break; case 'T':{ - int16_t nwname = va_arg(ap, int); + uint16_t nwname = va_arg(ap, int); const char **wnames = va_arg(ap, const char **); errcode = p9pdu_writef(pdu, proto_version, "w", diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c index d47880e971dd..e883172f9aa2 100644 --- a/net/9p/trans_common.c +++ b/net/9p/trans_common.c @@ -66,7 +66,7 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len, uint32_t pdata_mapped_pages; struct trans_rpage_info *rpinfo; - *pdata_off = (size_t)req->tc->pubuf & (PAGE_SIZE-1); + *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1); if (*pdata_off) first_page_bytes = min(((size_t)PAGE_SIZE - *pdata_off), diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index e8f046b07182..244e70742183 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -326,8 +326,11 @@ req_retry_pinned: outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM, pdata_off, rpinfo->rp_data, pdata_len); } else { - char *pbuf = req->tc->pubuf ? req->tc->pubuf : - req->tc->pkbuf; + char *pbuf; + if (req->tc->pubuf) + pbuf = (__force char *) req->tc->pubuf; + else + pbuf = req->tc->pkbuf; outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf, req->tc->pbuf_size); } @@ -352,8 +355,12 @@ req_retry_pinned: in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM, pdata_off, rpinfo->rp_data, pdata_len); } else { - char *pbuf = req->tc->pubuf ? 
req->tc->pubuf : - req->tc->pkbuf; + char *pbuf; + if (req->tc->pubuf) + pbuf = (__force char *) req->tc->pubuf; + else + pbuf = req->tc->pkbuf; + in = pack_sg_list(chan->sg, out+inp, VIRTQUEUE_NUM, pbuf, req->tc->pbuf_size); } diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 50af02737a3d..5a80f41c0cba 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -579,9 +579,15 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc, list_for_each_entry_safe(req, nreq, &osd->o_linger_requests, r_linger_osd) { - __unregister_linger_request(osdc, req); + /* + * reregister request prior to unregistering linger so + * that r_osd is preserved. + */ + BUG_ON(!list_empty(&req->r_req_lru_item)); __register_request(osdc, req); - list_move(&req->r_req_lru_item, &osdc->req_unsent); + list_add(&req->r_req_lru_item, &osdc->req_unsent); + list_add(&req->r_osd_item, &req->r_osd->o_requests); + __unregister_linger_request(osdc, req); dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid, osd->o_osd); } @@ -798,7 +804,7 @@ static void __register_request(struct ceph_osd_client *osdc, req->r_request->hdr.tid = cpu_to_le64(req->r_tid); INIT_LIST_HEAD(&req->r_req_lru_item); - dout("register_request %p tid %lld\n", req, req->r_tid); + dout("__register_request %p tid %lld\n", req, req->r_tid); __insert_request(osdc, req); ceph_osdc_get_request(req); osdc->num_requests++; diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c index 9fea75535221..96bee5c46008 100644 --- a/tools/perf/util/cgroup.c +++ b/tools/perf/util/cgroup.c @@ -13,7 +13,7 @@ cgroupfs_find_mountpoint(char *buf, size_t maxlen) { FILE *fp; char mountpoint[MAX_PATH+1], tokens[MAX_PATH+1], type[MAX_PATH+1]; - char *token, *saved_ptr; + char *token, *saved_ptr = NULL; int found = 0; fp = fopen("/proc/mounts", "r");
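Illustrative aside, not part of the patch: the kernel/pid.c hunk above makes next_pidmap() take an unsigned 'last' and return -1 when it is at or beyond PID_MAX_LIMIT, so a huge offset handed in from userspace can no longer be turned into an out-of-range pidmap index. Below is a minimal userspace sketch of the same guard pattern; every name in it (MAX_ID, PAGE_BITS, next_id, and so on) is hypothetical and only demonstrates validating an externally supplied index before deriving array positions from it.

/* Sketch: check an externally supplied "last seen" index before
 * using it to derive array slots, mirroring the next_pidmap() fix.
 * All names are hypothetical; this is not kernel code. */
#include <stdio.h>

#define PAGE_BITS   4096                 /* bits tracked per map page  */
#define MAX_ID      32768                /* analogous to PID_MAX_LIMIT */
#define MAP_PAGES   (MAX_ID / PAGE_BITS)

static unsigned char bitmap[MAP_PAGES][PAGE_BITS / 8];

/* Return the next set id after 'last', or -1 if none or out of range. */
static int next_id(unsigned int last)
{
        unsigned int id;

        if (last >= MAX_ID)              /* reject huge values up front */
                return -1;

        for (id = last + 1; id < MAX_ID; id++) {
                unsigned int page = id / PAGE_BITS;
                unsigned int bit  = id % PAGE_BITS;

                if (bitmap[page][bit / 8] & (1u << (bit % 8)))
                        return (int)id;
        }
        return -1;
}

int main(void)
{
        bitmap[0][1] = 0x01;                  /* mark id 8 as used       */
        printf("%d\n", next_id(0));           /* finds 8                 */
        printf("%d\n", next_id(0x7fffffff));  /* out of range, returns -1 */
        return 0;
}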
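A second aside on the lib/kstrtox.c hunk: the rewritten loop accepts exactly one trailing newline ("123\n" parses, "12\n3" is rejected). The userspace sketch below re-creates that acceptance rule for base 10 only; it approximates the control flow rather than copying the kernel routine, and the overflow check is this sketch's own.

/* Sketch of the "allow one trailing newline, nothing after it" rule
 * used by the kernel's kstrto* parsers; userspace, base 10 only. */
#include <errno.h>
#include <stdio.h>

static int parse_u64(const char *s, unsigned long long *res)
{
        unsigned long long val = 0;

        if (*s == '\0')
                return -EINVAL;

        for (; *s; s++) {
                if (*s >= '0' && *s <= '9') {
                        unsigned int digit = (unsigned int)(*s - '0');

                        if (val > (~0ULL - digit) / 10)
                                return -ERANGE;       /* would overflow   */
                        val = val * 10 + digit;
                } else if (*s == '\n' && *(s + 1) == '\0') {
                        break;                        /* trailing '\n' ok */
                } else {
                        return -EINVAL;               /* anything else: no */
                }
        }

        *res = val;
        return 0;
}

int main(void)
{
        unsigned long long v;

        printf("%d\n", parse_u64("123\n", &v));      /* 0, v == 123      */
        printf("%d\n", parse_u64("12\n3", &v));      /* -EINVAL          */
        printf("%d\n", parse_u64("4294967296", &v)); /* 0, above 32 bits */
        return 0;
}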
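The THP_* events added to vm_event_item and vmstat_text surface in /proc/vmstat as thp_fault_alloc, thp_fault_fallback, thp_collapse_alloc, thp_collapse_alloc_failed and thp_split. A small reader for those counters might look like the sketch below; on kernels built without CONFIG_TRANSPARENT_HUGEPAGE it simply prints nothing.

/* Sketch: dump the transparent-hugepage event counters exported via
 * /proc/vmstat by this series. Each line there is "<name> <value>". */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char name[64];
        unsigned long long value;
        FILE *fp = fopen("/proc/vmstat", "r");

        if (!fp) {
                perror("/proc/vmstat");
                return 1;
        }

        while (fscanf(fp, "%63s %llu", name, &value) == 2) {
                if (strncmp(name, "thp_", 4) == 0)
                        printf("%-28s %llu\n", name, value);
        }

        fclose(fp);
        return 0;
}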
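Finally, the tools/perf/util/cgroup.c hunk initializes saved_ptr before the strtok_r() pass over the /proc/mounts options field, presumably to quiet a compiler warning rather than to change behaviour. A standalone sketch of the same discovery step, using the standard mntent API instead of hand-rolled tokenizing, is given below; it assumes a cgroup v1 style mount whose options name the perf_event controller.

/* Sketch: locate the cgroup v1 mount carrying the perf_event
 * controller by scanning the mount table, similar in spirit to
 * cgroupfs_find_mountpoint() in tools/perf. Userspace only. */
#include <mntent.h>
#include <stdio.h>
#include <string.h>

static int find_perf_cgroup_mount(char *buf, size_t len)
{
        struct mntent *mnt;
        FILE *fp;
        int found = 0;

        fp = setmntent("/proc/mounts", "r");
        if (!fp)
                return -1;

        while ((mnt = getmntent(fp)) != NULL) {
                if (strcmp(mnt->mnt_type, "cgroup") != 0)
                        continue;
                if (hasmntopt(mnt, "perf_event")) {
                        snprintf(buf, len, "%s", mnt->mnt_dir);
                        found = 1;
                        break;
                }
        }
        endmntent(fp);
        return found ? 0 : -1;
}

int main(void)
{
        char path[4096];

        if (find_perf_cgroup_mount(path, sizeof(path)) == 0)
                printf("perf_event cgroup mounted at %s\n", path);
        else
                printf("no perf_event cgroup v1 mount found\n");
        return 0;
}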