Merge tag 'v3.5-rc6' into next/dt
New pull requests are based on Linux 3.5-rc6
commit 14d597b74a
@@ -1,26 +1,5 @@
What:		/sys/block/rssd*/registers
Date:		March 2012
KernelVersion:	3.3
Contact:	Asai Thambi S P <asamymuthupa@micron.com>
Description:	This is a read-only file. Dumps below driver information and
		hardware registers.
		    - S ACTive
		    - Command Issue
		    - Completed
		    - PORT IRQ STAT
		    - HOST IRQ STAT
		    - Allocated
		    - Commands in Q

What:		/sys/block/rssd*/status
Date:		April 2012
KernelVersion:	3.4
Contact:	Asai Thambi S P <asamymuthupa@micron.com>
Description:	This is a read-only file. Indicates the status of the device.

What:		/sys/block/rssd*/flags
Date:		May 2012
KernelVersion:	3.5
Contact:	Asai Thambi S P <asamymuthupa@micron.com>
Description:	This is a read-only file. Dumps the flags in port and driver
		data structure

@@ -7,39 +7,39 @@ This target is read-only.

Construction Parameters
=======================
    <version> <dev> <hash_dev> <hash_start>
    <version> <dev> <hash_dev>
    <data_block_size> <hash_block_size>
    <num_data_blocks> <hash_start_block>
    <algorithm> <digest> <salt>

<version>
    This is the version number of the on-disk format.
    This is the type of the on-disk hash format.

    0 is the original format used in the Chromium OS.
      The salt is appended when hashing, digests are stored continuously and
      the rest of the block is padded with zeros.

    1 is the current format that should be used for new devices.
      The salt is prepended when hashing and each digest is
      padded with zeros to the power of two.
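
    As a concrete illustration of the format 1 padding rule, here is a
    minimal sketch (the helper name is hypothetical, not taken from the
    kernel sources) computing the zero-padded slot a digest occupies:

	/* Hypothetical helper: in format 1 each digest occupies a slot
	 * padded with zeros up to the next power of two. */
	static unsigned int digest_slot_size(unsigned int digest_size)
	{
		unsigned int slot = 1;

		while (slot < digest_size)
			slot <<= 1;
		return slot;	/* e.g. sha256: 32 -> 32; sha1: 20 -> 32 */
	}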

<dev>
    This is the device containing the data the integrity of which needs to be
    This is the device containing data, the integrity of which needs to be
    checked. It may be specified as a path, like /dev/sdaX, or a device number,
    <major>:<minor>.

<hash_dev>
    This is the device that that supplies the hash tree data. It may be
    This is the device that supplies the hash tree data. It may be
    specified similarly to the device path and may be the same device. If the
    same device is used, the hash_start should be outside of the dm-verity
    configured device size.
    same device is used, the hash_start should be outside the configured
    dm-verity device.

<data_block_size>
    The block size on a data device. Each block corresponds to one digest on
    the hash device.
    The block size on a data device in bytes.
    Each block corresponds to one digest on the hash device.

<hash_block_size>
    The size of a hash block.
    The size of a hash block in bytes.

<num_data_blocks>
    The number of data blocks on the data device. Additional blocks are

@@ -65,7 +65,7 @@ Construction Parameters

Theory of operation
===================

dm-verity is meant to be setup as part of a verified boot path. This
dm-verity is meant to be set up as part of a verified boot path. This
may be anything ranging from a boot using tboot or trustedgrub to just
booting from a known-good device (like a USB drive or CD).

@@ -73,20 +73,20 @@ When a dm-verity device is configured, it is expected that the caller
has been authenticated in some way (cryptographic signatures, etc).
After instantiation, all hashes will be verified on-demand during
disk access. If they cannot be verified up to the root node of the
tree, the root hash, then the I/O will fail. This should identify
tree, the root hash, then the I/O will fail. This should detect
tampering with any data on the device and the hash data.

Cryptographic hashes are used to assert the integrity of the device on a
per-block basis. This allows for a lightweight hash computation on first read
into the page cache. Block hashes are stored linearly-aligned to the nearest
block the size of a page.
per-block basis. This allows for a lightweight hash computation on first read
into the page cache. Block hashes are stored linearly, aligned to the nearest
block size.

Hash Tree
---------

Each node in the tree is a cryptographic hash. If it is a leaf node, the hash
is of some block data on disk. If it is an intermediary node, then the hash is
of a number of child nodes.
of some data block on disk is calculated. If it is an intermediary node,
the hash of a number of child nodes is calculated.
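
To make the tree geometry concrete (matching the sha256, num_blocks =
32768, block_size = 4096 example this document uses), here is a minimal
sketch; the helper name is hypothetical and not part of the kernel
sources:

	/* Hypothetical sketch: levels of hash blocks above the data,
	 * where 'fanout' digest slots fit in one hash block
	 * (4096 / 32 = 128 for sha256 with 4096-byte hash blocks). */
	static unsigned int verity_tree_levels(unsigned long num_blocks,
					       unsigned long fanout)
	{
		unsigned int levels = 0;

		while (num_blocks > 1) {
			num_blocks = (num_blocks + fanout - 1) / fanout;
			levels++;
		}
		return levels;	/* 32768 blocks, fanout 128 -> 3 levels */
	}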

Each entry in the tree is a collection of neighboring nodes that fit in one
block. The number is determined based on block_size and the size of the

@@ -110,63 +110,23 @@ alg = sha256, num_blocks = 32768, block_size = 4096

On-disk format
==============

Below is the recommended on-disk format. The verity kernel code does not
read the on-disk header. It only reads the hash blocks which directly
follow the header. It is expected that a user-space tool will verify the
integrity of the verity_header and then call dmsetup with the correct
parameters. Alternatively, the header can be omitted and the dmsetup
parameters can be passed via the kernel command-line in a rooted chain
of trust where the command-line is verified.
The verity kernel code does not read the verity metadata on-disk header.
It only reads the hash blocks which directly follow the header.
It is expected that a user-space tool will verify the integrity of the
verity header.

The on-disk format is especially useful in cases where the hash blocks
are on a separate partition. The magic number allows easy identification
of the partition contents. Alternatively, the hash blocks can be stored
in the same partition as the data to be verified. In such a configuration
the filesystem on the partition would be sized a little smaller than
the full-partition, leaving room for the hash blocks.

struct superblock {
	uint8_t signature[8]
		"verity\0\0";

	uint8_t version;
		1 - current format

	uint8_t data_block_bits;
		log2(data block size)

	uint8_t hash_block_bits;
		log2(hash block size)

	uint8_t pad1[1];
		zero padding

	uint16_t salt_size;
		big-endian salt size

	uint8_t pad2[2];
		zero padding

	uint32_t data_blocks_hi;
		big-endian high 32 bits of the 64-bit number of data blocks

	uint32_t data_blocks_lo;
		big-endian low 32 bits of the 64-bit number of data blocks

	uint8_t algorithm[16];
		cryptographic algorithm

	uint8_t salt[384];
		salt (the salt size is specified above)

	uint8_t pad3[88];
		zero padding to 512-byte boundary
}
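
For example, the 64-bit block count is split across two big-endian 32-bit
fields; a minimal user-space sketch (a hypothetical helper matching the
struct above, not part of the kernel) reassembles it like this:

	#include <arpa/inet.h>	/* ntohl() */
	#include <stdint.h>

	/* Hypothetical: reassemble the 64-bit data-block count from the
	 * big-endian data_blocks_hi/data_blocks_lo superblock fields. */
	static uint64_t verity_data_blocks(uint32_t data_blocks_hi,
					   uint32_t data_blocks_lo)
	{
		return ((uint64_t)ntohl(data_blocks_hi) << 32) |
			ntohl(data_blocks_lo);
	}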

Alternatively, the header can be omitted and the dmsetup parameters can
be passed via the kernel command-line in a rooted chain of trust where
the command-line is verified.

Directly following the header (and with sector number padded to the next hash
block boundary) are the hash blocks which are stored a depth at a time
(starting from the root), sorted in order of increasing index.

The full specification of kernel parameters and on-disk metadata format
is available at the cryptsetup project's wiki page
http://code.google.com/p/cryptsetup/wiki/DMVerity

Status
======
V (for Valid) is returned if every check performed so far was valid.

@@ -174,21 +134,22 @@ If any check failed, C (for Corruption) is returned.

Example
=======

Setup a device:
  dmsetup create vroot --table \
    "0 2097152 "\
    "verity 1 /dev/sda1 /dev/sda2 4096 4096 2097152 1 "\
Set up a device:
  # dmsetup create vroot --readonly --table \
    "0 2097152 verity 1 /dev/sda1 /dev/sda2 4096 4096 262144 1 sha256 "\
    "4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076 "\
    "1234000000000000000000000000000000000000000000000000000000000000"

A command line tool veritysetup is available to compute or verify
the hash tree or activate the kernel driver. This is available from
the LVM2 upstream repository and may be supplied as a package called
device-mapper-verity-tools:
    git://sources.redhat.com/git/lvm2
    http://sourceware.org/git/?p=lvm2.git
    http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/verity?cvsroot=lvm2
the hash tree or activate the kernel device. This is available from
the cryptsetup upstream repository http://code.google.com/p/cryptsetup/
(as a libcryptsetup extension).

  veritysetup -a vroot /dev/sda1 /dev/sda2 \
    4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
Create hash on the device:
  # veritysetup format /dev/sda1 /dev/sda2
  ...
  Root hash: 4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076

Activate the device:
  # veritysetup create vroot /dev/sda1 /dev/sda2 \
    4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076

@@ -2,6 +2,7 @@

Required properties:

  - compatible : "fsl,mma8450".
  - reg: the I2C address of MMA8450

Example:

@@ -46,8 +46,8 @@ Examples:

ecspi@70010000 { /* ECSPI1 */
	fsl,spi-num-chipselects = <2>;
	cs-gpios = <&gpio3 24 0>, /* GPIO4_24 */
		   <&gpio3 25 0>; /* GPIO4_25 */
	cs-gpios = <&gpio4 24 0>, /* GPIO4_24 */
		   <&gpio4 25 0>; /* GPIO4_25 */
	status = "okay";

	pmic: mc13892@0 {

@@ -29,6 +29,6 @@ esdhc@70008000 {
	compatible = "fsl,imx51-esdhc";
	reg = <0x70008000 0x4000>;
	interrupts = <2>;
	cd-gpios = <&gpio0 6 0>; /* GPIO1_6 */
	wp-gpios = <&gpio0 5 0>; /* GPIO1_5 */
	cd-gpios = <&gpio1 6 0>; /* GPIO1_6 */
	wp-gpios = <&gpio1 5 0>; /* GPIO1_5 */
};

@@ -19,6 +19,6 @@ ethernet@83fec000 {
	reg = <0x83fec000 0x4000>;
	interrupts = <87>;
	phy-mode = "mii";
	phy-reset-gpios = <&gpio1 14 0>; /* GPIO2_14 */
	phy-reset-gpios = <&gpio2 14 0>; /* GPIO2_14 */
	local-mac-address = [00 04 9F 01 1B B9];
};

@@ -17,6 +17,6 @@ ecspi@70010000 {
	reg = <0x70010000 0x4000>;
	interrupts = <36>;
	fsl,spi-num-chipselects = <2>;
	cs-gpios = <&gpio3 24 0>, /* GPIO4_24 */
		   <&gpio3 25 0>; /* GPIO4_25 */
	cs-gpios = <&gpio3 24 0>, /* GPIO3_24 */
		   <&gpio3 25 0>; /* GPIO3_25 */
};

@@ -3,6 +3,7 @@ Device tree binding vendor prefix registry.  Keep list in alphabetical order.
This isn't an exhaustive list, but you should add new prefixes to it before
using them to avoid name-space collisions.

ad	Avionic Design GmbH
adi	Analog Devices, Inc.
amcc	Applied Micro Circuits Corporation (APM, formally AMCC)
apm	Applied Micro Circuits Corporation (APM)

Documentation/prctl/no_new_privs.txt | 57 (new file)
@@ -0,0 +1,57 @@
The execve system call can grant a newly-started program privileges that
its parent did not have.  The most obvious examples are setuid/setgid
programs and file capabilities.  To prevent the parent program from
gaining these privileges as well, the kernel and user code must be
careful to prevent the parent from doing anything that could subvert the
child.  For example:

 - The dynamic loader handles LD_* environment variables differently if
   a program is setuid.

 - chroot is disallowed to unprivileged processes, since it would allow
   /etc/passwd to be replaced from the point of view of a process that
   inherited chroot.

 - The exec code has special handling for ptrace.

These are all ad-hoc fixes.  The no_new_privs bit (since Linux 3.5) is a
new, generic mechanism to make it safe for a process to modify its
execution environment in a manner that persists across execve.  Any task
can set no_new_privs.  Once the bit is set, it is inherited across fork,
clone, and execve and cannot be unset.  With no_new_privs set, execve
promises not to grant the privilege to do anything that could not have
been done without the execve call.  For example, the setuid and setgid
bits will no longer change the uid or gid; file capabilities will not
add to the permitted set, and LSMs will not relax constraints after
execve.

To set no_new_privs, use prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0).
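
A minimal runnable sketch of that call (assuming glibc's prctl(2)
wrapper; the fallback #define mirrors the value merged in Linux 3.5):

	#include <stdio.h>
	#include <sys/prctl.h>
	#include <unistd.h>

	#ifndef PR_SET_NO_NEW_PRIVS
	#define PR_SET_NO_NEW_PRIVS 38	/* value merged in Linux 3.5 */
	#endif

	int main(void)
	{
		if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
			perror("prctl(PR_SET_NO_NEW_PRIVS)");
			return 1;
		}
		/* From here on, execve() will not grant setuid/setgid or
		 * file-capability privileges to anything this task runs. */
		execlp("id", "id", (char *)NULL);
		perror("execlp");
		return 1;
	}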

Be careful, though: LSMs might also not tighten constraints on exec
in no_new_privs mode.  (This means that setting up a general-purpose
service launcher to set no_new_privs before execing daemons may
interfere with LSM-based sandboxing.)

Note that no_new_privs does not prevent privilege changes that do not
involve execve.  An appropriately privileged task can still call
setuid(2) and receive SCM_RIGHTS datagrams.

There are two main use cases for no_new_privs so far:

 - Filters installed for the seccomp mode 2 sandbox persist across
   execve and can change the behavior of newly-executed programs.
   Unprivileged users are therefore only allowed to install such filters
   if no_new_privs is set (see the sketch below).

 - By itself, no_new_privs can be used to reduce the attack surface
   available to an unprivileged user.  If everything running with a
   given uid has no_new_privs set, then that uid will be unable to
   escalate its privileges by directly attacking setuid, setgid, and
   fcap-using binaries; it will need to compromise something without the
   no_new_privs bit set first.

In the future, other potentially dangerous kernel features could become
available to unprivileged tasks if no_new_privs is set.  In principle,
several options to unshare(2) and clone(2) would be safe when
no_new_privs is set, and no_new_privs + chroot is considerably less
dangerous than chroot by itself.
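
A minimal illustrative sketch of the seccomp pairing mentioned above
(the helper name is hypothetical; assumes the 3.5-era <linux/seccomp.h>
and <linux/filter.h> headers):

	#include <linux/filter.h>
	#include <linux/seccomp.h>
	#include <sys/prctl.h>

	/* An unprivileged task must set no_new_privs before the kernel
	 * lets it install a seccomp mode 2 (filter) program. */
	static int install_allow_all_filter(void)
	{
		struct sock_filter insns[] = {
			BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
		};
		struct sock_fprog prog = {
			.len = sizeof(insns) / sizeof(insns[0]),
			.filter = insns,
		};

		if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
			return -1;
		return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	}
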
@@ -1930,6 +1930,23 @@ The "pte_enc" field provides a value that can OR'ed into the hash
PTE's RPN field (ie, it needs to be shifted left by 12 to OR it
into the hash PTE second double word).

4.75 KVM_IRQFD

Capability: KVM_CAP_IRQFD
Architectures: x86
Type: vm ioctl
Parameters: struct kvm_irqfd (in)
Returns: 0 on success, -1 on error

Allows setting an eventfd to directly trigger a guest interrupt.
kvm_irqfd.fd specifies the file descriptor to use as the eventfd and
kvm_irqfd.gsi specifies the irqchip pin toggled by this event. When
an event is triggered on the eventfd, an interrupt is injected into
the guest using the specified gsi pin. The irqfd is removed using
the KVM_IRQFD_FLAG_DEASSIGN flag, specifying both kvm_irqfd.fd
and kvm_irqfd.gsi.
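
A minimal user-space sketch of the assign path (the helper is
hypothetical; it assumes a VM file descriptor obtained via
KVM_CREATE_VM and an in-kernel irqchip):

	#include <linux/kvm.h>
	#include <sys/eventfd.h>
	#include <sys/ioctl.h>

	/* Wire an eventfd to a guest GSI; writing 1 to the returned fd
	 * then injects the interrupt on that pin. */
	static int assign_irqfd(int vm_fd, unsigned int gsi)
	{
		struct kvm_irqfd irqfd = { 0 };
		int efd = eventfd(0, 0);

		if (efd < 0)
			return -1;
		irqfd.fd = efd;
		irqfd.gsi = gsi;
		if (ioctl(vm_fd, KVM_IRQFD, &irqfd) < 0)
			return -1;
		return efd;
	}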

5. The kvm_run structure
------------------------

@@ -4654,8 +4654,8 @@ L: netfilter@vger.kernel.org
L:	coreteam@netfilter.org
W:	http://www.netfilter.org/
W:	http://www.iptables.org/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git
T:	git git://1984.lsi.us.es/nf
T:	git git://1984.lsi.us.es/nf-next
S:	Supported
F:	include/linux/netfilter*
F:	include/linux/netfilter/

Makefile | 2
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 5
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Saber-toothed Squirrel

# *DOCUMENTATION*

@@ -243,7 +243,7 @@ typedef struct {

#define ATOMIC64_INIT(i)	{ (i) }

static inline u64 atomic64_read(atomic64_t *v)
static inline u64 atomic64_read(const atomic64_t *v)
{
	u64 result;

@@ -60,13 +60,13 @@
#ifndef __ASSEMBLY__

#ifdef CONFIG_CPU_USE_DOMAINS
#define set_domain(x)					\
	do {						\
	__asm__ __volatile__(				\
	"mcr	p15, 0, %0, c3, c0	@ set domain"	\
	  : : "r" (x));					\
	isb();						\
	} while (0)
static inline void set_domain(unsigned val)
{
	asm volatile(
	"mcr	p15, 0, %0, c3, c0	@ set domain"
	  : : "r" (val));
	isb();
}

#define modify_domain(dom,type)					\
	do {							\
@@ -78,8 +78,8 @@
} while (0)

#else
#define set_domain(x)		do { } while (0)
#define modify_domain(dom,type)	do { } while (0)
static inline void set_domain(unsigned val) { }
static inline void modify_domain(unsigned dom, unsigned type) { }
#endif

/*

@@ -148,7 +148,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
#define TIF_SYSCALL_TRACE	8
#define TIF_SYSCALL_AUDIT	9
#define TIF_SYSCALL_RESTARTSYS	10
#define TIF_POLLING_NRFLAG	16
#define TIF_USING_IWMMXT	17
#define TIF_MEMDIE		18	/* is terminating due to OOM killer */
@@ -164,11 +163,9 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
#define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
#define _TIF_SYSCALL_RESTARTSYS	(1 << TIF_SYSCALL_RESTARTSYS)

/* Checks for any syscall work in entry-common.S */
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
			   _TIF_SYSCALL_RESTARTSYS)
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)

/*
 * Change these and you break ASM code in entry-common.S

@@ -187,8 +187,8 @@ void kprobe_arm_test_cases(void)
	TEST_BF_R ("mov	pc, r",0,2f,"")
	TEST_BF_RR("mov	pc, r",0,2f,", asl r",1,0,"")
	TEST_BB(   "sub	pc, pc, #1b-2b+8")
#if __LINUX_ARM_ARCH__ >= 6
	TEST_BB(   "sub	pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before ARMv6 */
#if __LINUX_ARM_ARCH__ == 6 && !defined(CONFIG_CPU_V7)
	TEST_BB(   "sub	pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before and after ARMv6 */
#endif
	TEST_BB_R( "sub	pc, pc, r",14, 1f-2f+8,"")
	TEST_BB_R( "rsb	pc, r",14,1f-2f+8,", pc")

@@ -503,7 +503,7 @@ __hw_perf_event_init(struct perf_event *event)
	    event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EPERM;
		return -EOPNOTSUPP;
	}

	/*

@@ -25,7 +25,6 @@
#include <linux/regset.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/unistd.h>

#include <asm/pgtable.h>
#include <asm/traps.h>
@@ -918,8 +917,6 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
		audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
				    regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);

	if (why == 0 && test_and_clear_thread_flag(TIF_SYSCALL_RESTARTSYS))
		scno = __NR_restart_syscall - __NR_SYSCALL_BASE;
	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return scno;

@@ -27,6 +27,7 @@
 */
#define SWI_SYS_SIGRETURN	(0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN	(0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RESTART		(0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE)

/*
 * With EABI, the syscall number has to be loaded into r7.
@@ -46,6 +47,18 @@ const unsigned long sigreturn_codes[7] = {
	MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
};

/*
 * Either we support OABI only, or we have EABI with the OABI
 * compat layer enabled.  In the latter case we don't know if
 * user space is EABI or not, and if not we must not clobber r7.
 * Always using the OABI syscall solves that issue and works for
 * all those cases.
 */
const unsigned long syscall_restart_code[2] = {
	SWI_SYS_RESTART,	/* swi	__NR_restart_syscall */
	0xe49df004,		/* ldr	pc, [sp], #4 */
};

/*
 * atomically swap in the new signal mask, and wait for a signal.
 */

@@ -592,10 +605,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->ARM_r0 = regs->ARM_ORIG_r0;
			regs->ARM_pc = restart_addr;
			break;
		case -ERESTART_RESTARTBLOCK:
			regs->ARM_r0 = -EINTR;
			break;
		}
	}

@@ -611,14 +626,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->ARM_pc == restart_addr) {
			if (retval == -ERESTARTNOHAND ||
			    retval == -ERESTART_RESTARTBLOCK
			if (retval == -ERESTARTNOHAND
			    || (retval == -ERESTARTSYS
				&& !(ka.sa.sa_flags & SA_RESTART))) {
				regs->ARM_r0 = -EINTR;
				regs->ARM_pc = continue_addr;
			}
			clear_thread_flag(TIF_SYSCALL_RESTARTSYS);
		}

		handle_signal(signr, &ka, &info, regs);
@@ -632,8 +645,29 @@ static void do_signal(struct pt_regs *regs, int syscall)
		 * ignore the restart.
		 */
		if (retval == -ERESTART_RESTARTBLOCK
		    && regs->ARM_pc == restart_addr)
			set_thread_flag(TIF_SYSCALL_RESTARTSYS);
		    && regs->ARM_pc == continue_addr) {
			if (thumb_mode(regs)) {
				regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
				regs->ARM_pc -= 2;
			} else {
#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
				regs->ARM_r7 = __NR_restart_syscall;
				regs->ARM_pc -= 4;
#else
				u32 __user *usp;

				regs->ARM_sp -= 4;
				usp = (u32 __user *)regs->ARM_sp;

				if (put_user(regs->ARM_pc, usp) == 0) {
					regs->ARM_pc = KERN_RESTART_CODE;
				} else {
					regs->ARM_sp += 4;
					force_sigsegv(0, current);
				}
#endif
			}
		}
	}

	restore_saved_sigmask();

@@ -8,5 +8,7 @@
 * published by the Free Software Foundation.
 */
#define KERN_SIGRETURN_CODE	(CONFIG_VECTORS_BASE + 0x00000500)
#define KERN_RESTART_CODE	(KERN_SIGRETURN_CODE + sizeof(sigreturn_codes))

extern const unsigned long sigreturn_codes[7];
extern const unsigned long syscall_restart_code[2];

@@ -820,6 +820,8 @@ void __init early_trap_init(void *vectors_base)
	 */
	memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
	       sigreturn_codes, sizeof(sigreturn_codes));
	memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
	       syscall_restart_code, sizeof(syscall_restart_code));

	flush_icache_range(vectors, vectors + PAGE_SIZE);
	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);

@@ -183,7 +183,9 @@ SECTIONS
	}
#endif

#ifdef CONFIG_SMP
	PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_XIP_KERNEL
	__data_loc = ALIGN(4);		/* location in binary */

@@ -50,5 +50,6 @@
#define POWER_MANAGEMENT	(BRIDGE_VIRT_BASE | 0x011c)

#define TIMER_VIRT_BASE		(BRIDGE_VIRT_BASE | 0x0300)
#define TIMER_PHYS_BASE		(BRIDGE_PHYS_BASE | 0x0300)

#endif

@@ -78,6 +78,7 @@

/* North-South Bridge */
#define BRIDGE_VIRT_BASE	(DOVE_SB_REGS_VIRT_BASE | 0x20000)
#define BRIDGE_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE | 0x20000)

/* Cryptographic Engine */
#define DOVE_CRYPT_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE | 0x30000)

@@ -201,7 +201,6 @@ int __init mx35_clocks_init()
			pr_err("i.MX35 clk %d: register failed with %ld\n",
				i, PTR_ERR(clk[i]));


	clk_register_clkdev(clk[pata_gate], NULL, "pata_imx");
	clk_register_clkdev(clk[can1_gate], NULL, "flexcan.0");
	clk_register_clkdev(clk[can2_gate], NULL, "flexcan.1");
@@ -264,6 +263,14 @@ int __init mx35_clocks_init()
	clk_prepare_enable(clk[iim_gate]);
	clk_prepare_enable(clk[emi_gate]);

	/*
	 * SCC is needed to boot via mmc after a watchdog reset. The clock code
	 * before conversion to common clk also enabled UART1 (which isn't
	 * handled here and not needed for mmc) and IIM (which is enabled
	 * unconditionally above).
	 */
	clk_prepare_enable(clk[scc_gate]);

	imx_print_silicon_rev("i.MX35", mx35_revision());

#ifdef CONFIG_MXC_USE_EPIT

@@ -38,7 +38,7 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <asm/system.h>
#include <asm/system_info.h>
#include <mach/common.h>
#include <mach/iomux-mx27.h>

@@ -1,29 +0,0 @@
#ifndef __ASM_MACH_GPIO_PXA_H
#define __ASM_MACH_GPIO_PXA_H

#include <mach/addr-map.h>
#include <mach/cputype.h>
#include <mach/irqs.h>

#define GPIO_REGS_VIRT	(APB_VIRT_BASE + 0x19000)

#define BANK_OFF(n)	(((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
#define GPIO_REG(x)	(*(volatile u32 *)(GPIO_REGS_VIRT + (x)))

#define gpio_to_bank(gpio)	((gpio) >> 5)

/* NOTE: these macros are defined here to make optimization of
 * gpio_{get,set}_value() to work when 'gpio' is a constant.
 * Usage of these macros otherwise is no longer recommended,
 * use generic GPIO API whenever possible.
 */
#define GPIO_bit(gpio)	(1 << ((gpio) & 0x1f))

#define GPLR(x)		GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x00)
#define GPDR(x)		GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x0c)
#define GPSR(x)		GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x18)
#define GPCR(x)		GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x24)

#include <plat/gpio-pxa.h>

#endif /* __ASM_MACH_GPIO_PXA_H */

@@ -31,5 +31,6 @@
#define IRQ_MASK_HIGH_OFF	0x0014

#define TIMER_VIRT_BASE		(BRIDGE_VIRT_BASE | 0x0300)
#define TIMER_PHYS_BASE		(BRIDGE_PHYS_BASE | 0x0300)

#endif

@@ -42,6 +42,7 @@
#define MV78XX0_CORE0_REGS_PHYS_BASE	0xf1020000
#define MV78XX0_CORE1_REGS_PHYS_BASE	0xf1024000
#define MV78XX0_CORE_REGS_VIRT_BASE	0xfe400000
#define MV78XX0_CORE_REGS_PHYS_BASE	0xfe400000
#define MV78XX0_CORE_REGS_SIZE		SZ_16K

#define MV78XX0_PCIE_IO_PHYS_BASE(i)	(0xf0800000 + ((i) << 20))
@@ -59,6 +60,7 @@
 * Core-specific peripheral registers.
 */
#define BRIDGE_VIRT_BASE	(MV78XX0_CORE_REGS_VIRT_BASE)
#define BRIDGE_PHYS_BASE	(MV78XX0_CORE_REGS_PHYS_BASE)

/*
 * Register Map

@@ -205,6 +205,16 @@ static int apx4devkit_phy_fixup(struct phy_device *phy)
	return 0;
}

static void __init apx4devkit_fec_phy_clk_enable(void)
{
	struct clk *clk;

	/* Enable fec phy clock */
	clk = clk_get_sys("enet_out", NULL);
	if (!IS_ERR(clk))
		clk_prepare_enable(clk);
}

static void __init apx4devkit_init(void)
{
	mx28_soc_init();
@@ -225,6 +235,7 @@ static void __init apx4devkit_init(void)
	phy_register_fixup_for_uid(PHY_ID_KS8051, MICREL_PHY_ID_MASK,
			apx4devkit_phy_fixup);

	apx4devkit_fec_phy_clk_enable();
	mx28_add_fec(0, &mx28_fec_pdata);

	mx28_add_mxs_mmc(0, &apx4devkit_mmc_pdata);

@@ -494,8 +494,8 @@ static void __init overo_init(void)

	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
	omap_hsmmc_init(mmc);
	overo_i2c_init();
	omap_hsmmc_init(mmc);
	omap_display_init(&overo_dss_data);
	omap_serial_init();
	omap_sdrc_init(mt46h32m32lf6_sdrc_params,

@@ -1928,7 +1928,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp1_sdma_reqs[] = {

static struct omap_hwmod_opt_clk mcbsp1_opt_clks[] = {
	{ .role = "pad_fck", .clk = "pad_clks_ck" },
	{ .role = "prcm_clk", .clk = "mcbsp1_sync_mux_ck" },
	{ .role = "prcm_fck", .clk = "mcbsp1_sync_mux_ck" },
};

static struct omap_hwmod omap44xx_mcbsp1_hwmod = {
@@ -1963,7 +1963,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp2_sdma_reqs[] = {

static struct omap_hwmod_opt_clk mcbsp2_opt_clks[] = {
	{ .role = "pad_fck", .clk = "pad_clks_ck" },
	{ .role = "prcm_clk", .clk = "mcbsp2_sync_mux_ck" },
	{ .role = "prcm_fck", .clk = "mcbsp2_sync_mux_ck" },
};

static struct omap_hwmod omap44xx_mcbsp2_hwmod = {
@@ -1998,7 +1998,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp3_sdma_reqs[] = {

static struct omap_hwmod_opt_clk mcbsp3_opt_clks[] = {
	{ .role = "pad_fck", .clk = "pad_clks_ck" },
	{ .role = "prcm_clk", .clk = "mcbsp3_sync_mux_ck" },
	{ .role = "prcm_fck", .clk = "mcbsp3_sync_mux_ck" },
};

static struct omap_hwmod omap44xx_mcbsp3_hwmod = {
@@ -2033,7 +2033,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp4_sdma_reqs[] = {

static struct omap_hwmod_opt_clk mcbsp4_opt_clks[] = {
	{ .role = "pad_fck", .clk = "pad_clks_ck" },
	{ .role = "prcm_clk", .clk = "mcbsp4_sync_mux_ck" },
	{ .role = "prcm_fck", .clk = "mcbsp4_sync_mux_ck" },
};

static struct omap_hwmod omap44xx_mcbsp4_hwmod = {
@@ -3864,7 +3864,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_2 = {
};

/* usb_host_fs -> l3_main_2 */
static struct omap_hwmod_ocp_if omap44xx_usb_host_fs__l3_main_2 = {
static struct omap_hwmod_ocp_if __maybe_unused omap44xx_usb_host_fs__l3_main_2 = {
	.master		= &omap44xx_usb_host_fs_hwmod,
	.slave		= &omap44xx_l3_main_2_hwmod,
	.clk		= "l3_div_ck",
@@ -3922,7 +3922,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_3 = {
};

/* aess -> l4_abe */
static struct omap_hwmod_ocp_if omap44xx_aess__l4_abe = {
static struct omap_hwmod_ocp_if __maybe_unused omap44xx_aess__l4_abe = {
	.master		= &omap44xx_aess_hwmod,
	.slave		= &omap44xx_l4_abe_hwmod,
	.clk		= "ocp_abe_iclk",
@@ -4013,7 +4013,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_addrs[] = {
};

/* l4_abe -> aess */
static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess = {
static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess = {
	.master		= &omap44xx_l4_abe_hwmod,
	.slave		= &omap44xx_aess_hwmod,
	.clk		= "ocp_abe_iclk",
@@ -4031,7 +4031,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_dma_addrs[] = {
};

/* l4_abe -> aess (dma) */
static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess_dma = {
static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess_dma = {
	.master		= &omap44xx_l4_abe_hwmod,
	.slave		= &omap44xx_aess_hwmod,
	.clk		= "ocp_abe_iclk",
@@ -5857,7 +5857,7 @@ static struct omap_hwmod_addr_space omap44xx_usb_host_fs_addrs[] = {
};

/* l4_cfg -> usb_host_fs */
static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_host_fs = {
static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_cfg__usb_host_fs = {
	.master		= &omap44xx_l4_cfg_hwmod,
	.slave		= &omap44xx_usb_host_fs_hwmod,
	.clk		= "l4_div_ck",
@@ -6014,13 +6014,13 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
	&omap44xx_iva__l3_main_2,
	&omap44xx_l3_main_1__l3_main_2,
	&omap44xx_l4_cfg__l3_main_2,
	&omap44xx_usb_host_fs__l3_main_2,
	/* &omap44xx_usb_host_fs__l3_main_2, */
	&omap44xx_usb_host_hs__l3_main_2,
	&omap44xx_usb_otg_hs__l3_main_2,
	&omap44xx_l3_main_1__l3_main_3,
	&omap44xx_l3_main_2__l3_main_3,
	&omap44xx_l4_cfg__l3_main_3,
	&omap44xx_aess__l4_abe,
	/* &omap44xx_aess__l4_abe, */
	&omap44xx_dsp__l4_abe,
	&omap44xx_l3_main_1__l4_abe,
	&omap44xx_mpu__l4_abe,
@@ -6029,8 +6029,8 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
	&omap44xx_l4_cfg__l4_wkup,
	&omap44xx_mpu__mpu_private,
	&omap44xx_l4_cfg__ocp_wp_noc,
	&omap44xx_l4_abe__aess,
	&omap44xx_l4_abe__aess_dma,
	/* &omap44xx_l4_abe__aess, */
	/* &omap44xx_l4_abe__aess_dma, */
	&omap44xx_l3_main_2__c2c,
	&omap44xx_l4_wkup__counter_32k,
	&omap44xx_l4_cfg__ctrl_module_core,
@@ -6136,7 +6136,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
	&omap44xx_l4_per__uart2,
	&omap44xx_l4_per__uart3,
	&omap44xx_l4_per__uart4,
	&omap44xx_l4_cfg__usb_host_fs,
	/* &omap44xx_l4_cfg__usb_host_fs, */
	&omap44xx_l4_cfg__usb_host_hs,
	&omap44xx_l4_cfg__usb_otg_hs,
	&omap44xx_l4_cfg__usb_tll_hs,

@@ -32,6 +32,7 @@
#include "twl-common.h"
#include "pm.h"
#include "voltage.h"
#include "mux.h"

static struct i2c_board_info __initdata pmic_i2c_board_info = {
	.addr		= 0x48,
@@ -77,6 +78,7 @@ void __init omap4_pmic_init(const char *pmic_type,
		    struct twl6040_platform_data *twl6040_data, int twl6040_irq)
{
	/* PMIC part*/
	omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
	strncpy(omap4_i2c1_board_info[0].type, pmic_type,
		sizeof(omap4_i2c1_board_info[0].type));
	omap4_i2c1_board_info[0].irq = OMAP44XX_IRQ_SYS_1N;

@@ -127,7 +127,11 @@ static unsigned long hx4700_pin_config[] __initdata = {
	GPIO19_SSP2_SCLK,
	GPIO86_SSP2_RXD,
	GPIO87_SSP2_TXD,
	GPIO88_GPIO,
	GPIO88_GPIO | MFP_LPM_DRIVE_HIGH,	/* TSC2046_CS */

	/* BQ24022 Regulator */
	GPIO72_GPIO | MFP_LPM_KEEP_OUTPUT,	/* BQ24022_nCHARGE_EN */
	GPIO96_GPIO | MFP_LPM_KEEP_OUTPUT,	/* BQ24022_ISET2 */

	/* HX4700 specific input GPIOs */
	GPIO12_GPIO | WAKEUP_ON_EDGE_RISE,	/* ASIC3_IRQ */
@@ -135,6 +139,10 @@ static unsigned long hx4700_pin_config[] __initdata = {
	GPIO14_GPIO,	/* nWLAN_IRQ */

	/* HX4700 specific output GPIOs */
	GPIO61_GPIO | MFP_LPM_DRIVE_HIGH,	/* W3220_nRESET */
	GPIO71_GPIO | MFP_LPM_DRIVE_HIGH,	/* ASIC3_nRESET */
	GPIO81_GPIO | MFP_LPM_DRIVE_HIGH,	/* CPU_GP_nRESET */
	GPIO116_GPIO | MFP_LPM_DRIVE_HIGH,	/* CPU_HW_nRESET */
	GPIO102_GPIO | MFP_LPM_DRIVE_LOW,	/* SYNAPTICS_POWER_ON */

	GPIO10_GPIO,	/* GSM_IRQ */
@@ -872,14 +880,19 @@ static struct gpio global_gpios[] = {
	{ GPIO110_HX4700_LCD_LVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_LVDD" },
	{ GPIO111_HX4700_LCD_AVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_AVDD" },
	{ GPIO32_HX4700_RS232_ON, GPIOF_OUT_INIT_HIGH, "RS232_ON" },
	{ GPIO61_HX4700_W3220_nRESET, GPIOF_OUT_INIT_HIGH, "W3220_nRESET" },
	{ GPIO71_HX4700_ASIC3_nRESET, GPIOF_OUT_INIT_HIGH, "ASIC3_nRESET" },
	{ GPIO81_HX4700_CPU_GP_nRESET, GPIOF_OUT_INIT_HIGH, "CPU_GP_nRESET" },
	{ GPIO82_HX4700_EUART_RESET, GPIOF_OUT_INIT_HIGH, "EUART_RESET" },
	{ GPIO116_HX4700_CPU_HW_nRESET, GPIOF_OUT_INIT_HIGH, "CPU_HW_nRESET" },
};

static void __init hx4700_init(void)
{
	int ret;

	PCFR = PCFR_GPR_EN | PCFR_OPDE;

	pxa2xx_mfp_config(ARRAY_AND_SIZE(hx4700_pin_config));
	gpio_set_wake(GPIO12_HX4700_ASIC3_IRQ, 1);
	ret = gpio_request_array(ARRAY_AND_SIZE(global_gpios));

@@ -339,7 +339,6 @@ void __init pci_versatile_preinit(void)
static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	int irq;
	int devslot = PCI_SLOT(dev->devfn);

	/* slot,  pin,  irq
	 *  24     1     27

@@ -64,7 +64,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
#ifdef CONFIG_ZONE_DMA
extern phys_addr_t arm_dma_limit;
#else
#define arm_dma_limit ((u32)~0)
#define arm_dma_limit ((phys_addr_t)~0)
#endif

extern phys_addr_t arm_lowmem_limit;

@@ -791,6 +791,79 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
	}
}

#ifndef CONFIG_ARM_LPAE

/*
 * The Linux PMD is made of two consecutive section entries covering 2MB
 * (see definition in include/asm/pgtable-2level.h).  However a call to
 * create_mapping() may optimize static mappings by using individual
 * 1MB section mappings.  This leaves the actual PMD potentially half
 * initialized if the top or bottom section entry isn't used, leaving it
 * open to problems if a subsequent ioremap() or vmalloc() tries to use
 * the virtual space left free by that unused section entry.
 *
 * Let's avoid the issue by inserting dummy vm entries covering the unused
 * PMD halves once the static mappings are in place.
 */

static void __init pmd_empty_section_gap(unsigned long addr)
{
	struct vm_struct *vm;

	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
	vm->addr = (void *)addr;
	vm->size = SECTION_SIZE;
	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
	vm->caller = pmd_empty_section_gap;
	vm_area_add_early(vm);
}

static void __init fill_pmd_gaps(void)
{
	struct vm_struct *vm;
	unsigned long addr, next = 0;
	pmd_t *pmd;

	/* we're still single threaded hence no lock needed here */
	for (vm = vmlist; vm; vm = vm->next) {
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		addr = (unsigned long)vm->addr;
		if (addr < next)
			continue;

		/*
		 * Check if this vm starts on an odd section boundary.
		 * If so and the first section entry for this PMD is free
		 * then we block the corresponding virtual address.
		 */
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr);
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr & PMD_MASK);
		}

		/*
		 * Then check if this vm ends on an odd section boundary.
		 * If so and the second section entry for this PMD is empty
		 * then we block the corresponding virtual address.
		 */
		addr += vm->size;
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr) + 1;
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr);
		}

		/* no need to look at any vm entry until we hit the next PMD */
		next = (addr + PMD_SIZE - 1) & PMD_MASK;
	}
}

#else
#define fill_pmd_gaps() do { } while (0)
#endif

static void * __initdata vmalloc_min =
	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);

@@ -1072,6 +1145,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
	 */
	if (mdesc->map_io)
		mdesc->map_io();
	fill_pmd_gaps();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a

@@ -810,7 +810,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lwz	r4,VCPU_PTID(r9)
	li	r0,1
	sldi	r0,r0,r4
	sld	r0,r0,r4
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
	beq	43f
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */

@@ -971,7 +971,7 @@ static int cpu_cmd(void)
	/* print cpus waiting or in xmon */
	printf("cpus stopped:");
	count = 0;
	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
			if (count == 0)
				printf(" %x", cpu);

@@ -3934,6 +3934,9 @@ static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
{
	struct kvm_mmu_page *page;

	if (list_empty(&kvm->arch.active_mmu_pages))
		return;

	page = container_of(kvm->arch.active_mmu_pages.prev,
			    struct kvm_mmu_page, link);
	kvm_mmu_prepare_zap_page(kvm, page, invalid_list);

@@ -125,12 +125,8 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		/* invoke per-policy init */
		if (blkcg_policy_enabled(blkg->q, pol))
			pol->pd_init_fn(blkg);
	}
@@ -245,10 +241,9 @@ EXPORT_SYMBOL_GPL(blkg_lookup_create);

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkcg *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */

@@ -361,9 +361,10 @@ EXPORT_SYMBOL(blk_put_queue);
 */
void blk_drain_queue(struct request_queue *q, bool drain_all)
{
	int i;

	while (true) {
		bool drain = false;
		int i;

		spin_lock_irq(q->queue_lock);

@@ -408,6 +409,18 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
			break;
		msleep(10);
	}

	/*
	 * With queue marked dead, any woken up waiter will fail the
	 * allocation path, so the wakeup chaining is lost and we're
	 * left with hung waiters. We need to wake up those waiters.
	 */
	if (q->request_fn) {
		spin_lock_irq(q->queue_lock);
		for (i = 0; i < ARRAY_SIZE(q->rq.wait); i++)
			wake_up_all(&q->rq.wait[i]);
		spin_unlock_irq(q->queue_lock);
	}
}

/**
@@ -467,7 +480,6 @@ void blk_cleanup_queue(struct request_queue *q)
	/* mark @q DEAD, no new request or merges will be allowed afterwards */
	mutex_lock(&q->sysfs_lock);
	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);

	spin_lock_irq(lock);

	/*
@@ -485,10 +497,6 @@ void blk_cleanup_queue(struct request_queue *q)
	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	queue_flag_set(QUEUE_FLAG_DEAD, q);

	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;

	spin_unlock_irq(lock);
	mutex_unlock(&q->sysfs_lock);

@@ -499,6 +507,11 @@ void blk_cleanup_queue(struct request_queue *q)
	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
	blk_sync_queue(q);

	spin_lock_irq(lock);
	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;
	spin_unlock_irq(lock);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}

@@ -197,44 +197,3 @@ void blk_add_timer(struct request *req)
	mod_timer(&q->timeout, expiry);
}

/**
 * blk_abort_queue -- Abort all request on given queue
 * @queue:	pointer to queue
 *
 */
void blk_abort_queue(struct request_queue *q)
{
	unsigned long flags;
	struct request *rq, *tmp;
	LIST_HEAD(list);

	/*
	 * Not a request based block device, nothing to abort
	 */
	if (!q->request_fn)
		return;

	spin_lock_irqsave(q->queue_lock, flags);

	elv_abort_queue(q);

	/*
	 * Splice entries to local list, to avoid deadlocking if entries
	 * get readded to the timeout list by error handling
	 */
	list_splice_init(&q->timeout_list, &list);

	list_for_each_entry_safe(rq, tmp, &list, timeout_list)
		blk_abort_request(rq);

	/*
	 * Occasionally, blk_abort_request() will return without
	 * deleting the element from the list. Make sure we add those back
	 * instead of leaving them on the local stack list.
	 */
	list_splice(&list, &q->timeout_list);

	spin_unlock_irqrestore(q->queue_lock, flags);

}
EXPORT_SYMBOL_GPL(blk_abort_queue);

@@ -17,8 +17,6 @@
#include "blk.h"
#include "blk-cgroup.h"

static struct blkcg_policy blkcg_policy_cfq __maybe_unused;

/*
 * tunables
 */
@@ -418,11 +416,6 @@ static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
	return pd ? container_of(pd, struct cfq_group, pd) : NULL;
}

static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
{
	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
}

static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
{
	return pd_to_blkg(&cfqg->pd);
@@ -572,6 +565,13 @@ static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }

#ifdef CONFIG_CFQ_GROUP_IOSCHED

static struct blkcg_policy blkcg_policy_cfq;

static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
{
	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
}

static inline void cfqg_get(struct cfq_group *cfqg)
{
	return blkg_get(cfqg_to_blkg(cfqg));
@@ -3951,10 +3951,11 @@ static void cfq_exit_queue(struct elevator_queue *e)

	cfq_shutdown_timer_wq(cfqd);

#ifndef CONFIG_CFQ_GROUP_IOSCHED
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_deactivate_policy(q, &blkcg_policy_cfq);
#else
	kfree(cfqd->root_group);
#endif
	blkcg_deactivate_policy(q, &blkcg_policy_cfq);
	kfree(cfqd);
}

@@ -4194,14 +4195,15 @@ static int __init cfq_init(void)
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	if (!cfq_group_idle)
		cfq_group_idle = 1;
#else
	cfq_group_idle = 0;
#endif

	ret = blkcg_policy_register(&blkcg_policy_cfq);
	if (ret)
		return ret;
#else
	cfq_group_idle = 0;
#endif

	ret = -ENOMEM;
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		goto err_pol_unreg;
@@ -4215,13 +4217,17 @@ static int __init cfq_init(void)
err_free_pool:
	kmem_cache_destroy(cfq_pool);
err_pol_unreg:
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_cfq);
#endif
	return ret;
}

static void __exit cfq_exit(void)
{
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_cfq);
#endif
	elv_unregister(&iosched_cfq);
	kmem_cache_destroy(cfq_pool);
}

@@ -721,11 +721,14 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
		break;
	}

	if (capable(CAP_SYS_RAWIO))
		return 0;

	/* In particular, rule out all resets and host-specific ioctls.  */
	printk_ratelimited(KERN_WARNING
			   "%s: sending ioctl %x to a partition!\n", current->comm, cmd);

	return capable(CAP_SYS_RAWIO) ? 0 : -ENOIOCTLCMD;
	return -ENOIOCTLCMD;
}
EXPORT_SYMBOL(scsi_verify_blk_ioctl);

@@ -1475,10 +1475,17 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
		first_word = 0;
		spin_lock_irq(&b->bm_lock);
	}

	/* last page (respectively only page, for first page == last page) */
	last_word = MLPP(el >> LN2_BPL);
	bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);

	/* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
	 * ==> e = 32767, el = 32768, last_page = 2,
	 * and now last_word = 0.
	 * We do not want to touch last_page in this case,
	 * as we did not allocate it, it is not present in bitmap->bm_pages.
	 */
	if (last_word)
		bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);

	/* possibly trailing bits.
	 * example: (e & 63) == 63, el will be e+1.

@@ -472,12 +472,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		req->rq_state |= RQ_LOCAL_COMPLETED;
		req->rq_state &= ~RQ_LOCAL_PENDING;

		D_ASSERT(!(req->rq_state & RQ_NET_MASK));
		if (req->rq_state & RQ_LOCAL_ABORTED) {
			_req_may_be_done(req, m);
			break;
		}

		__drbd_chk_io_error(mdev, false);

	goto_queue_for_net_read:

		D_ASSERT(!(req->rq_state & RQ_NET_MASK));

		/* no point in retrying if there is no good remote data,
		 * or we have no connection. */
		if (mdev->state.pdsk != D_UP_TO_DATE) {
@@ -765,6 +770,40 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s
	return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
}

static void maybe_pull_ahead(struct drbd_conf *mdev)
{
	int congested = 0;

	/* If I don't even have good local storage, we can not reasonably try
	 * to pull ahead of the peer. We also need the local reference to make
	 * sure mdev->act_log is there.
	 * Note: caller has to make sure that net_conf is there.
	 */
	if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
		return;

	if (mdev->net_conf->cong_fill &&
	    atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
		dev_info(DEV, "Congestion-fill threshold reached\n");
		congested = 1;
	}

	if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
		dev_info(DEV, "Congestion-extents threshold reached\n");
		congested = 1;
	}

	if (congested) {
		queue_barrier(mdev); /* last barrier, after mirrored writes */

		if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
			_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
		else  /*mdev->net_conf->on_congestion == OC_DISCONNECT */
			_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
	}
	put_ldev(mdev);
}

static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
{
	const int rw = bio_rw(bio);
@@ -972,29 +1011,8 @@ allocate_barrier:
		_req_mod(req, queue_for_send_oos);

	if (remote &&
	    mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
		int congested = 0;

		if (mdev->net_conf->cong_fill &&
		    atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
			dev_info(DEV, "Congestion-fill threshold reached\n");
			congested = 1;
		}

		if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
			dev_info(DEV, "Congestion-extents threshold reached\n");
			congested = 1;
		}

		if (congested) {
			queue_barrier(mdev); /* last barrier, after mirrored writes */

			if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
				_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
			else  /*mdev->net_conf->on_congestion == OC_DISCONNECT */
				_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
		}
	}
	    mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
		maybe_pull_ahead(mdev);

	spin_unlock_irq(&mdev->req_lock);
	kfree(b); /* if someone else has beaten us to it... */

@@ -671,6 +671,7 @@ static void __reschedule_timeout(int drive, const char *message)

	if (drive == current_reqD)
		drive = current_drive;
	__cancel_delayed_work(&fd_timeout);

	if (drive < 0 || drive >= N_DRIVE) {
		delay = 20UL * HZ;

@@ -37,6 +37,7 @@
#include <linux/kthread.h>
#include <../drivers/ata/ahci.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include "mtip32xx.h"

#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)

@@ -85,6 +86,7 @@ static int instance;
* allocated in mtip_init().
*/
static int mtip_major;
static struct dentry *dfs_parent;

static DEFINE_SPINLOCK(rssd_index_lock);
static DEFINE_IDA(rssd_index_ida);

@@ -2546,7 +2548,7 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
}

/*
* Sysfs register/status dump.
* Sysfs status dump.
*
* @dev  Pointer to the device structure, passed by the kernel.
* @attr Pointer to the device_attribute structure passed by the kernel.

@@ -2555,71 +2557,6 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
* return value
*	The size, in bytes, of the data copied into buf.
*/
static ssize_t mtip_hw_show_registers(struct device *dev,
struct device_attribute *attr,
char *buf)
{
u32 group_allocated;
struct driver_data *dd = dev_to_disk(dev)->private_data;
int size = 0;
int n;

size += sprintf(&buf[size], "Hardware\n--------\n");
size += sprintf(&buf[size], "S ACTive : [ 0x");

for (n = dd->slot_groups-1; n >= 0; n--)
size += sprintf(&buf[size], "%08X ",
readl(dd->port->s_active[n]));

size += sprintf(&buf[size], "]\n");
size += sprintf(&buf[size], "Command Issue : [ 0x");

for (n = dd->slot_groups-1; n >= 0; n--)
size += sprintf(&buf[size], "%08X ",
readl(dd->port->cmd_issue[n]));

size += sprintf(&buf[size], "]\n");
size += sprintf(&buf[size], "Completed : [ 0x");

for (n = dd->slot_groups-1; n >= 0; n--)
size += sprintf(&buf[size], "%08X ",
readl(dd->port->completed[n]));

size += sprintf(&buf[size], "]\n");
size += sprintf(&buf[size], "PORT IRQ STAT : [ 0x%08X ]\n",
readl(dd->port->mmio + PORT_IRQ_STAT));
size += sprintf(&buf[size], "HOST IRQ STAT : [ 0x%08X ]\n",
readl(dd->mmio + HOST_IRQ_STAT));
size += sprintf(&buf[size], "\n");

size += sprintf(&buf[size], "Local\n-----\n");
size += sprintf(&buf[size], "Allocated : [ 0x");

for (n = dd->slot_groups-1; n >= 0; n--) {
if (sizeof(long) > sizeof(u32))
group_allocated =
dd->port->allocated[n/2] >> (32*(n&1));
else
group_allocated = dd->port->allocated[n];
size += sprintf(&buf[size], "%08X ", group_allocated);
}
size += sprintf(&buf[size], "]\n");

size += sprintf(&buf[size], "Commands in Q: [ 0x");

for (n = dd->slot_groups-1; n >= 0; n--) {
if (sizeof(long) > sizeof(u32))
group_allocated =
dd->port->cmds_to_issue[n/2] >> (32*(n&1));
else
group_allocated = dd->port->cmds_to_issue[n];
size += sprintf(&buf[size], "%08X ", group_allocated);
}
size += sprintf(&buf[size], "]\n");

return size;
}

static ssize_t mtip_hw_show_status(struct device *dev,
struct device_attribute *attr,
char *buf)

@@ -2637,24 +2574,121 @@ static ssize_t mtip_hw_show_status(struct device *dev,
return size;
}

static ssize_t mtip_hw_show_flags(struct device *dev,
struct device_attribute *attr,
char *buf)
static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);

static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
size_t len, loff_t *offset)
{
struct driver_data *dd = dev_to_disk(dev)->private_data;
int size = 0;
struct driver_data *dd = (struct driver_data *)f->private_data;
char buf[MTIP_DFS_MAX_BUF_SIZE];
u32 group_allocated;
int size = *offset;
int n;

size += sprintf(&buf[size], "Flag in port struct : [ %08lX ]\n",
dd->port->flags);
size += sprintf(&buf[size], "Flag in dd struct : [ %08lX ]\n",
dd->dd_flag);
if (!len || size)
return 0;

return size;
if (size < 0)
return -EINVAL;

size += sprintf(&buf[size], "H/ S ACTive : [ 0x");

for (n = dd->slot_groups-1; n >= 0; n--)
size += sprintf(&buf[size], "%08X ",
readl(dd->port->s_active[n]));

size += sprintf(&buf[size], "]\n");
size += sprintf(&buf[size], "H/ Command Issue : [ 0x");

for (n = dd->slot_groups-1; n >= 0; n--)
size += sprintf(&buf[size], "%08X ",
readl(dd->port->cmd_issue[n]));

size += sprintf(&buf[size], "]\n");
size += sprintf(&buf[size], "H/ Completed : [ 0x");

for (n = dd->slot_groups-1; n >= 0; n--)
size += sprintf(&buf[size], "%08X ",
readl(dd->port->completed[n]));

size += sprintf(&buf[size], "]\n");
size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n",
readl(dd->port->mmio + PORT_IRQ_STAT));
size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n",
readl(dd->mmio + HOST_IRQ_STAT));
size += sprintf(&buf[size], "\n");

size += sprintf(&buf[size], "L/ Allocated : [ 0x");

for (n = dd->slot_groups-1; n >= 0; n--) {
if (sizeof(long) > sizeof(u32))
group_allocated =
dd->port->allocated[n/2] >> (32*(n&1));
else
group_allocated = dd->port->allocated[n];
size += sprintf(&buf[size], "%08X ", group_allocated);
}
size += sprintf(&buf[size], "]\n");

size += sprintf(&buf[size], "L/ Commands in Q : [ 0x");

for (n = dd->slot_groups-1; n >= 0; n--) {
if (sizeof(long) > sizeof(u32))
group_allocated =
dd->port->cmds_to_issue[n/2] >> (32*(n&1));
else
group_allocated = dd->port->cmds_to_issue[n];
size += sprintf(&buf[size], "%08X ", group_allocated);
}
size += sprintf(&buf[size], "]\n");

*offset = size <= len ? size : len;
size = copy_to_user(ubuf, buf, *offset);
if (size)
return -EFAULT;

return *offset;
}

static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
static DEVICE_ATTR(flags, S_IRUGO, mtip_hw_show_flags, NULL);
static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
size_t len, loff_t *offset)
{
struct driver_data *dd = (struct driver_data *)f->private_data;
char buf[MTIP_DFS_MAX_BUF_SIZE];
int size = *offset;

if (!len || size)
return 0;

if (size < 0)
return -EINVAL;

size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
dd->port->flags);
size += sprintf(&buf[size], "Flag-dd   : [ %08lX ]\n",
dd->dd_flag);

*offset = size <= len ? size : len;
size = copy_to_user(ubuf, buf, *offset);
if (size)
return -EFAULT;

return *offset;
}

static const struct file_operations mtip_regs_fops = {
.owner  = THIS_MODULE,
.open   = simple_open,
.read   = mtip_hw_read_registers,
.llseek = no_llseek,
};

static const struct file_operations mtip_flags_fops = {
.owner  = THIS_MODULE,
.open   = simple_open,
.read   = mtip_hw_read_flags,
.llseek = no_llseek,
};

/*
* Create the sysfs related attributes.

@@ -2671,15 +2705,9 @@ static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
if (!kobj || !dd)
return -EINVAL;

if (sysfs_create_file(kobj, &dev_attr_registers.attr))
dev_warn(&dd->pdev->dev,
"Error creating 'registers' sysfs entry\n");
if (sysfs_create_file(kobj, &dev_attr_status.attr))
dev_warn(&dd->pdev->dev,
"Error creating 'status' sysfs entry\n");
if (sysfs_create_file(kobj, &dev_attr_flags.attr))
dev_warn(&dd->pdev->dev,
"Error creating 'flags' sysfs entry\n");
return 0;
}

@@ -2698,13 +2726,39 @@ static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
if (!kobj || !dd)
return -EINVAL;

sysfs_remove_file(kobj, &dev_attr_registers.attr);
sysfs_remove_file(kobj, &dev_attr_status.attr);
sysfs_remove_file(kobj, &dev_attr_flags.attr);

return 0;
}

static int mtip_hw_debugfs_init(struct driver_data *dd)
{
if (!dfs_parent)
return -1;

dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
if (IS_ERR_OR_NULL(dd->dfs_node)) {
dev_warn(&dd->pdev->dev,
"Error creating node %s under debugfs\n",
dd->disk->disk_name);
dd->dfs_node = NULL;
return -1;
}

debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd,
&mtip_flags_fops);
debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd,
&mtip_regs_fops);

return 0;
}

static void mtip_hw_debugfs_exit(struct driver_data *dd)
{
debugfs_remove_recursive(dd->dfs_node);
}

/*
* Perform any init/resume time hardware setup
*

@@ -3730,6 +3784,7 @@ skip_create_disk:
mtip_hw_sysfs_init(dd, kobj);
kobject_put(kobj);
}
mtip_hw_debugfs_init(dd);

if (dd->mtip_svc_handler) {
set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);

@@ -3755,6 +3810,8 @@ start_service_thread:
return rv;

kthread_run_error:
mtip_hw_debugfs_exit(dd);

/* Delete our gendisk. This also removes the device from /dev */
del_gendisk(dd->disk);

@@ -3805,6 +3862,7 @@ static int mtip_block_remove(struct driver_data *dd)
kobject_put(kobj);
}
}
mtip_hw_debugfs_exit(dd);

/*
* Delete our gendisk structure. This also removes the device

@@ -4152,10 +4210,20 @@ static int __init mtip_init(void)
}
mtip_major = error;

if (!dfs_parent) {
dfs_parent = debugfs_create_dir("rssd", NULL);
if (IS_ERR_OR_NULL(dfs_parent)) {
printk(KERN_WARNING "Error creating debugfs parent\n");
dfs_parent = NULL;
}
}

/* Register our PCI operations. */
error = pci_register_driver(&mtip_pci_driver);
if (error)
if (error) {
debugfs_remove(dfs_parent);
unregister_blkdev(mtip_major, MTIP_DRV_NAME);
}

return error;
}

@@ -4172,6 +4240,8 @@ static int __init mtip_init(void)
*/
static void __exit mtip_exit(void)
{
debugfs_remove_recursive(dfs_parent);

/* Release the allocated major block device number. */
unregister_blkdev(mtip_major, MTIP_DRV_NAME);

@@ -26,7 +26,6 @@
#include <linux/ata.h>
#include <linux/interrupt.h>
#include <linux/genhd.h>
#include <linux/version.h>

/* Offset of Subsystem Device ID in pci configuration space */
#define PCI_SUBSYSTEM_DEVICEID 0x2E

@@ -111,6 +110,8 @@
#define dbg_printk(format, arg...)
#endif

#define MTIP_DFS_MAX_BUF_SIZE 1024

#define __force_bit2int (unsigned int __force)

enum {

@@ -447,6 +448,8 @@ struct driver_data {
unsigned long dd_flag; /* NOTE: use atomic bit operations on this */

struct task_struct *mtip_svc_handler; /* task_struct of svc thd */

struct dentry *dfs_node;
};

#endif

@@ -513,6 +513,44 @@ static void process_page(unsigned long data)
}
}

struct mm_plug_cb {
struct blk_plug_cb cb;
struct cardinfo *card;
};

static void mm_unplug(struct blk_plug_cb *cb)
{
struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb);

spin_lock_irq(&mmcb->card->lock);
activate(mmcb->card);
spin_unlock_irq(&mmcb->card->lock);
kfree(mmcb);
}

static int mm_check_plugged(struct cardinfo *card)
{
struct blk_plug *plug = current->plug;
struct mm_plug_cb *mmcb;

if (!plug)
return 0;

list_for_each_entry(mmcb, &plug->cb_list, cb.list) {
if (mmcb->cb.callback == mm_unplug && mmcb->card == card)
return 1;
}
/* Not currently on the callback list */
mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC);
if (!mmcb)
return 0;

mmcb->card = card;
mmcb->cb.callback = mm_unplug;
list_add(&mmcb->cb.list, &plug->cb_list);
return 1;
}

static void mm_make_request(struct request_queue *q, struct bio *bio)
{
struct cardinfo *card = q->queuedata;

@@ -523,6 +561,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
*card->biotail = bio;
bio->bi_next = NULL;
card->biotail = &bio->bi_next;
if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
activate(card);
spin_unlock_irq(&card->lock);

return;

@@ -257,6 +257,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
break;
case BLKIF_OP_DISCARD:
dst->u.discard.flag = src->u.discard.flag;
dst->u.discard.id = src->u.discard.id;
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;

@@ -287,6 +288,7 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
break;
case BLKIF_OP_DISCARD:
dst->u.discard.flag = src->u.discard.flag;
dst->u.discard.id = src->u.discard.id;
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;

@@ -141,14 +141,36 @@ static int get_id_from_freelist(struct blkfront_info *info)
return free;
}

static void add_id_to_freelist(struct blkfront_info *info,
static int add_id_to_freelist(struct blkfront_info *info,
unsigned long id)
{
if (info->shadow[id].req.u.rw.id != id)
return -EINVAL;
if (info->shadow[id].request == NULL)
return -EINVAL;
info->shadow[id].req.u.rw.id = info->shadow_free;
info->shadow[id].request = NULL;
info->shadow_free = id;
return 0;
}

static const char *op_name(int op)
{
static const char *const names[] = {
[BLKIF_OP_READ] = "read",
[BLKIF_OP_WRITE] = "write",
[BLKIF_OP_WRITE_BARRIER] = "barrier",
[BLKIF_OP_FLUSH_DISKCACHE] = "flush",
[BLKIF_OP_DISCARD] = "discard" };

if (op < 0 || op >= ARRAY_SIZE(names))
return "unknown";

if (!names[op])
return "reserved";

return names[op];
}
static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
unsigned int end = minor + nr;

@@ -746,20 +768,36 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)

bret = RING_GET_RESPONSE(&info->ring, i);
id = bret->id;
/*
* The backend has messed up and given us an id that we would
* never have given to it (we stamp it up to BLK_RING_SIZE -
* look in get_id_from_freelist).
*/
if (id >= BLK_RING_SIZE) {
WARN(1, "%s: response to %s has incorrect id (%ld)\n",
info->gd->disk_name, op_name(bret->operation), id);
/* We can't safely get the 'struct request' as
* the id is busted. */
continue;
}
req = info->shadow[id].request;

if (bret->operation != BLKIF_OP_DISCARD)
blkif_completion(&info->shadow[id]);

add_id_to_freelist(info, id);
if (add_id_to_freelist(info, id)) {
WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
info->gd->disk_name, op_name(bret->operation), id);
continue;
}

error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
switch (bret->operation) {
case BLKIF_OP_DISCARD:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
struct request_queue *rq = info->rq;
printk(KERN_WARNING "blkfront: %s: discard op failed\n",
info->gd->disk_name);
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
error = -EOPNOTSUPP;
info->feature_discard = 0;
info->feature_secdiscard = 0;

@@ -771,18 +809,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
case BLKIF_OP_FLUSH_DISKCACHE:
case BLKIF_OP_WRITE_BARRIER:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
printk(KERN_WARNING "blkfront: %s: write %s op failed\n",
info->flush_op == BLKIF_OP_WRITE_BARRIER ?
"barrier" : "flush disk cache",
info->gd->disk_name);
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
error = -EOPNOTSUPP;
}
if (unlikely(bret->status == BLKIF_RSP_ERROR &&
info->shadow[id].req.u.rw.nr_segments == 0)) {
printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
info->flush_op == BLKIF_OP_WRITE_BARRIER ?
"barrier" : "flush disk cache",
info->gd->disk_name);
printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
error = -EOPNOTSUPP;
}
if (unlikely(error)) {

@@ -1067,26 +1067,24 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)

old_parent = clk->parent;

/* find index of new parent clock using cached parent ptrs */
if (clk->parents)
for (i = 0; i < clk->num_parents; i++)
if (clk->parents[i] == parent)
break;
else
if (!clk->parents)
clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
GFP_KERNEL);

/*
* find index of new parent clock using string name comparison
* also try to cache the parent to avoid future calls to __clk_lookup
* find index of new parent clock using cached parent ptrs,
* or if not yet cached, use string name comparison and cache
* them now to avoid future calls to __clk_lookup.
*/
if (i == clk->num_parents)
for (i = 0; i < clk->num_parents; i++)
if (!strcmp(clk->parent_names[i], parent->name)) {
if (clk->parents)
clk->parents[i] = __clk_lookup(parent->name);
break;
}
for (i = 0; i < clk->num_parents; i++) {
if (clk->parents && clk->parents[i] == parent)
break;
else if (!strcmp(clk->parent_names[i], parent->name)) {
if (clk->parents)
clk->parents[i] = __clk_lookup(parent->name);
break;
}
}

if (i == clk->num_parents) {
pr_debug("%s: clock %s is not a possible parent of clock %s\n",

@@ -1039,6 +1039,24 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
return true;
}

static bool valid_inferred_mode(const struct drm_connector *connector,
const struct drm_display_mode *mode)
{
struct drm_display_mode *m;
bool ok = false;

list_for_each_entry(m, &connector->probed_modes, head) {
if (mode->hdisplay == m->hdisplay &&
mode->vdisplay == m->vdisplay &&
drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
return false; /* duplicated */
if (mode->hdisplay <= m->hdisplay &&
mode->vdisplay <= m->vdisplay)
ok = true;
}
return ok;
}

static int
drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct detailed_timing *timing)

@@ -1048,7 +1066,8 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct drm_device *dev = connector->dev;

for (i = 0; i < drm_num_dmt_modes; i++) {
if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
valid_inferred_mode(connector, drm_dmt_modes + i)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);

@@ -1088,7 +1107,8 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
return modes;

fixup_mode_1366x768(newmode);
if (!mode_in_range(newmode, edid, timing)) {
if (!mode_in_range(newmode, edid, timing) ||
!valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
continue;
}

@@ -1116,7 +1136,8 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
return modes;

fixup_mode_1366x768(newmode);
if (!mode_in_range(newmode, edid, timing)) {
if (!mode_in_range(newmode, edid, timing) ||
!valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
continue;
}

@@ -1401,6 +1401,27 @@ i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
}
}

static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
struct apertures_struct *ap;
struct pci_dev *pdev = dev_priv->dev->pdev;
bool primary;

ap = alloc_apertures(1);
if (!ap)
return;

ap->ranges[0].base = dev_priv->dev->agp->base;
ap->ranges[0].size =
dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
primary =
pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

kfree(ap);
}

/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device

@@ -1446,6 +1467,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}

dev_priv->mm.gtt = intel_gtt_get();
if (!dev_priv->mm.gtt) {
DRM_ERROR("Failed to initialize GTT\n");
ret = -ENODEV;
goto put_bridge;
}

i915_kick_out_firmware_fb(dev_priv);

pci_set_master(dev->pdev);

/* overlay on gen2 is broken and can't address above 1G */

@@ -1471,13 +1501,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto put_bridge;
}

dev_priv->mm.gtt = intel_gtt_get();
if (!dev_priv->mm.gtt) {
DRM_ERROR("Failed to initialize GTT\n");
ret = -ENODEV;
goto out_rmmap;
}

aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

dev_priv->mm.gtt_mapping =

@@ -289,8 +289,9 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
rdev->vm_manager.enabled = false;

/* mark first vm as always in use, it's the system one */
/* allocate enough for 2 full VM pts */
r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
rdev->vm_manager.max_pfn * 8,
rdev->vm_manager.max_pfn * 8 * 2,
RADEON_GEM_DOMAIN_VRAM);
if (r) {
dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",

@@ -633,7 +634,15 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->list);
INIT_LIST_HEAD(&vm->va);
vm->last_pfn = 0;
/* SI requires equal sized PTs for all VMs, so always set
* last_pfn to max_pfn. cayman allows variable sized
* pts so we can grow them as needed. Once we switch
* to two level pts we can unify this again.
*/
if (rdev->family >= CHIP_TAHITI)
vm->last_pfn = rdev->vm_manager.max_pfn;
else
vm->last_pfn = 0;
/* map the ib pool buffer at 0 in virtual address space, set
* read only
*/

@@ -292,6 +292,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_busy *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;

@@ -317,13 +318,14 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
break;
}
drm_gem_object_unreference_unlocked(gobj);
r = radeon_gem_handle_lockup(robj->rdev, r);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_wait_idle *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;

@@ -336,10 +338,10 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, NULL, false);
/* callback hw specific functions if any */
if (robj->rdev->asic->ioctl_wait_idle)
robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
if (rdev->asic->ioctl_wait_idle)
robj->rdev->asic->ioctl_wait_idle(rdev, robj);
drm_gem_object_unreference_unlocked(gobj);
r = radeon_gem_handle_lockup(robj->rdev, r);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}

@@ -2365,12 +2365,12 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
WREG32(0x15DC, 0);

/* empty context1-15 */
/* FIXME start with 1G, once using 2 level pt switch to full
/* FIXME start with 4G, once using 2 level pt switch to full
* vm size space
*/
/* set vm size, must be a multiple of 4 */
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, (1 << 30) / RADEON_GPU_PAGE_SIZE);
WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
for (i = 1; i < 16; i++) {
if (i < 8)
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),

@@ -282,7 +282,8 @@ static int __devinit as5011_probe(struct i2c_client *client,

error = request_threaded_irq(as5011->button_irq,
NULL, as5011_button_interrupt,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"as5011_button", as5011);
if (error < 0) {
dev_err(&client->dev,

@@ -296,7 +297,7 @@ static int __devinit as5011_probe(struct i2c_client *client,

error = request_threaded_irq(as5011->axis_irq, NULL,
as5011_axis_interrupt,
plat_data->axis_irqflags,
plat_data->axis_irqflags | IRQF_ONESHOT,
"as5011_joystick", as5011);
if (error) {
dev_err(&client->dev,

@@ -178,7 +178,8 @@ static int __devinit mcs_touchkey_probe(struct i2c_client *client,
}

error = request_threaded_irq(client->irq, NULL, mcs_touchkey_interrupt,
IRQF_TRIGGER_FALLING, client->dev.driver->name, data);
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
client->dev.driver->name, data);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
goto err_free_mem;

@@ -248,7 +248,7 @@ static int __devinit mpr_touchkey_probe(struct i2c_client *client,

error = request_threaded_irq(client->irq, NULL,
mpr_touchkey_interrupt,
IRQF_TRIGGER_FALLING,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
client->dev.driver->name, mpr121);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");

@@ -201,7 +201,8 @@ static int __devinit qt1070_probe(struct i2c_client *client,
msleep(QT1070_RESET_TIME);

err = request_threaded_irq(client->irq, NULL, qt1070_interrupt,
IRQF_TRIGGER_NONE, client->dev.driver->name, data);
IRQF_TRIGGER_NONE | IRQF_ONESHOT,
client->dev.driver->name, data);
if (err) {
dev_err(&client->dev, "fail to request irq\n");
goto err_free_mem;

@@ -278,7 +278,8 @@ static int __devinit tca6416_keypad_probe(struct i2c_client *client,

error = request_threaded_irq(chip->irqnum, NULL,
tca6416_keys_isr,
IRQF_TRIGGER_FALLING,
IRQF_TRIGGER_FALLING |
IRQF_ONESHOT,
"tca6416-keypad", chip);
if (error) {
dev_dbg(&client->dev,

@@ -360,7 +360,7 @@ static int __devinit tca8418_keypad_probe(struct i2c_client *client,
client->irq = gpio_to_irq(client->irq);

error = request_threaded_irq(client->irq, NULL, tca8418_irq_handler,
IRQF_TRIGGER_FALLING,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
client->name, keypad_data);
if (error) {
dev_dbg(&client->dev,

@@ -227,15 +227,15 @@ static int __devinit keypad_probe(struct platform_device *pdev)
goto error_clk;
}

error = request_threaded_irq(kp->irq_press, NULL, keypad_irq, 0,
dev_name(dev), kp);
error = request_threaded_irq(kp->irq_press, NULL, keypad_irq,
IRQF_ONESHOT, dev_name(dev), kp);
if (error < 0) {
dev_err(kp->dev, "Could not allocate keypad press key irq\n");
goto error_irq_press;
}

error = request_threaded_irq(kp->irq_release, NULL, keypad_irq, 0,
dev_name(dev), kp);
error = request_threaded_irq(kp->irq_release, NULL, keypad_irq,
IRQF_ONESHOT, dev_name(dev), kp);
if (error < 0) {
dev_err(kp->dev, "Could not allocate keypad release key irq\n");
goto error_irq_release;

@@ -972,6 +972,7 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
struct ad714x_platform_data *plat_data = dev->platform_data;
struct ad714x_chip *ad714x;
void *drv_mem;
unsigned long irqflags;

struct ad714x_button_drv *bt_drv;
struct ad714x_slider_drv *sd_drv;

@@ -1162,10 +1163,11 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
alloc_idx++;
}

irqflags = plat_data->irqflags ?: IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT;

error = request_threaded_irq(ad714x->irq, NULL, ad714x_interrupt_thread,
plat_data->irqflags ?
plat_data->irqflags : IRQF_TRIGGER_FALLING,
"ad714x_captouch", ad714x);
irqflags, "ad714x_captouch", ad714x);
if (error) {
dev_err(dev, "can't allocate irq %d\n", ad714x->irq);
goto err_unreg_dev;

@@ -213,7 +213,8 @@ static int __devinit dm355evm_keys_probe(struct platform_device *pdev)
/* REVISIT: flush the event queue? */

status = request_threaded_irq(keys->irq, NULL, dm355evm_keys_irq,
IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), keys);
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
dev_name(&pdev->dev), keys);
if (status < 0)
goto fail2;

@@ -216,7 +216,7 @@ static void wacom_retrieve_report_data(struct usb_interface *intf,

rep_data[0] = 12;
result = wacom_get_report(intf, WAC_HID_FEATURE_REPORT,
rep_data[0], &rep_data, 2,
rep_data[0], rep_data, 2,
WAC_MSG_RETRIES);

if (result >= 0 && rep_data[1] > 2)

@@ -401,7 +401,9 @@ static int wacom_parse_hid(struct usb_interface *intf,
break;

case HID_USAGE_CONTACTMAX:
wacom_retrieve_report_data(intf, features);
/* leave touch_max as is if predefined */
if (!features->touch_max)
wacom_retrieve_report_data(intf, features);
i++;
break;
}

@@ -597,7 +597,7 @@ struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
AD7879_TMR(ts->pen_down_acc_interval);

err = request_threaded_irq(ts->irq, NULL, ad7879_irq,
IRQF_TRIGGER_FALLING,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
dev_name(dev), ts);
if (err) {
dev_err(dev, "irq %d busy?\n", ts->irq);

@@ -1149,7 +1149,8 @@ static int __devinit mxt_probe(struct i2c_client *client,
goto err_free_object;

error = request_threaded_irq(client->irq, NULL, mxt_interrupt,
pdata->irqflags, client->dev.driver->name, data);
pdata->irqflags | IRQF_ONESHOT,
client->dev.driver->name, data);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
goto err_free_object;

@@ -509,7 +509,8 @@ static int __devinit bu21013_probe(struct i2c_client *client,
input_set_drvdata(in_dev, bu21013_data);

error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
IRQF_TRIGGER_FALLING | IRQF_SHARED,
IRQF_TRIGGER_FALLING | IRQF_SHARED |
IRQF_ONESHOT,
DRIVER_TP, bu21013_data);
if (error) {
dev_err(&client->dev, "request irq %d failed\n", pdata->irq);

@@ -251,7 +251,8 @@ static int __devinit cy8ctmg110_probe(struct i2c_client *client,
}

err = request_threaded_irq(client->irq, NULL, cy8ctmg110_irq_thread,
IRQF_TRIGGER_RISING, "touch_reset_key", ts);
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"touch_reset_key", ts);
if (err < 0) {
dev_err(&client->dev,
"irq %d busy? error %d\n", client->irq, err);

@@ -620,7 +620,7 @@ static int __devinit mrstouch_probe(struct platform_device *pdev)
MRST_PRESSURE_MIN, MRST_PRESSURE_MAX, 0, 0);

err = request_threaded_irq(tsdev->irq, NULL, mrstouch_pendet_irq,
0, "mrstouch", tsdev);
IRQF_ONESHOT, "mrstouch", tsdev);
if (err) {
dev_err(tsdev->dev, "unable to allocate irq\n");
goto err_free_mem;

@@ -165,7 +165,7 @@ static int __devinit pixcir_i2c_ts_probe(struct i2c_client *client,
input_set_drvdata(input, tsdata);

error = request_threaded_irq(client->irq, NULL, pixcir_ts_isr,
IRQF_TRIGGER_FALLING,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
client->name, tsdata);
if (error) {
dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");

@@ -297,7 +297,7 @@ static int __devinit tsc_probe(struct platform_device *pdev)
goto error_clk;
}

error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, 0,
error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, IRQF_ONESHOT,
dev_name(dev), ts);
if (error < 0) {
dev_err(ts->dev, "Could not allocate ts irq\n");

@@ -650,7 +650,8 @@ static int __devinit tsc2005_probe(struct spi_device *spi)
tsc2005_stop_scan(ts);

error = request_threaded_irq(spi->irq, NULL, tsc2005_irq_thread,
IRQF_TRIGGER_RISING, "tsc2005", ts);
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"tsc2005", ts);
if (error) {
dev_err(&spi->dev, "Failed to request irq, err: %d\n", error);
goto err_free_mem;

@@ -21,6 +21,8 @@
#include <linux/reboot.h>
#include "leds.h"

static int panic_heartbeats;

struct heartbeat_trig_data {
unsigned int phase;
unsigned int period;

@@ -34,6 +36,11 @@ static void led_heartbeat_function(unsigned long data)
unsigned long brightness = LED_OFF;
unsigned long delay = 0;

if (unlikely(panic_heartbeats)) {
led_set_brightness(led_cdev, LED_OFF);
return;
}

/* acts like an actual heart beat -- ie thump-thump-pause... */
switch (heartbeat_data->phase) {
case 0:

@@ -111,12 +118,19 @@ static int heartbeat_reboot_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}

static int heartbeat_panic_notifier(struct notifier_block *nb,
unsigned long code, void *unused)
{
panic_heartbeats = 1;
return NOTIFY_DONE;
}

static struct notifier_block heartbeat_reboot_nb = {
.notifier_call = heartbeat_reboot_notifier,
};

static struct notifier_block heartbeat_panic_nb = {
.notifier_call = heartbeat_reboot_notifier,
.notifier_call = heartbeat_panic_notifier,
};

static int __init heartbeat_trig_init(void)

@@ -2292,6 +2292,13 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
if (r)
return r;

r = dm_pool_commit_metadata(pool->pmd);
if (r) {
DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
__func__, r);
return r;
}

r = dm_pool_reserve_metadata_snap(pool->pmd);
if (r)
DMWARN("reserve_metadata_snap message failed.");

@@ -5784,8 +5784,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
super_types[mddev->major_version].
validate_super(mddev, rdev);
if ((info->state & (1<<MD_DISK_SYNC)) &&
(!test_bit(In_sync, &rdev->flags) ||
rdev->raid_disk != info->raid_disk)) {
rdev->raid_disk != info->raid_disk) {
/* This was a hot-add request, but events doesn't
* match, so reject it.
*/

@@ -6751,7 +6750,7 @@ struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev
thread->tsk = kthread_run(md_thread, thread,
"%s_%s",
mdname(thread->mddev),
name ?: mddev->pers->name);
name);
if (IS_ERR(thread->tsk)) {
kfree(thread);
return NULL;

@@ -7298,6 +7297,7 @@ void md_do_sync(struct mddev *mddev)
int skipped = 0;
struct md_rdev *rdev;
char *desc;
struct blk_plug plug;

/* just in case thread restarts... */
if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))

@@ -7447,6 +7447,7 @@ void md_do_sync(struct mddev *mddev)
}
mddev->curr_resync_completed = j;

blk_start_plug(&plug);
while (j < max_sectors) {
sector_t sectors;

@@ -7552,6 +7553,7 @@ void md_do_sync(struct mddev *mddev)
* this also signals 'finished resyncing' to md_stop
*/
out:
blk_finish_plug(&plug);
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

/* tell personality that we are finished */

@@ -474,7 +474,8 @@ static int multipath_run (struct mddev *mddev)
}

{
mddev->thread = md_register_thread(multipathd, mddev, NULL);
mddev->thread = md_register_thread(multipathd, mddev,
"multipath");
if (!mddev->thread) {
printk(KERN_ERR "multipath: couldn't allocate thread"
" for %s\n", mdname(mddev));

@@ -8,6 +8,7 @@

#include <linux/device-mapper.h>
#include <linux/export.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_DM_DEBUG_SPACE_MAPS

@@ -89,13 +90,23 @@ static int ca_create(struct count_array *ca, struct dm_space_map *sm)

ca->nr = nr_blocks;
ca->nr_free = nr_blocks;
ca->counts = kzalloc(sizeof(*ca->counts) * nr_blocks, GFP_KERNEL);
if (!ca->counts)
return -ENOMEM;

if (!nr_blocks)
ca->counts = NULL;
else {
ca->counts = vzalloc(sizeof(*ca->counts) * nr_blocks);
if (!ca->counts)
return -ENOMEM;
}

return 0;
}

static void ca_destroy(struct count_array *ca)
{
vfree(ca->counts);
}

static int ca_load(struct count_array *ca, struct dm_space_map *sm)
{
int r;

@@ -126,12 +137,14 @@ static int ca_load(struct count_array *ca, struct dm_space_map *sm)
static int ca_extend(struct count_array *ca, dm_block_t extra_blocks)
{
dm_block_t nr_blocks = ca->nr + extra_blocks;
uint32_t *counts = kzalloc(sizeof(*counts) * nr_blocks, GFP_KERNEL);
uint32_t *counts = vzalloc(sizeof(*counts) * nr_blocks);
if (!counts)
return -ENOMEM;

memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
kfree(ca->counts);
if (ca->counts) {
memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
ca_destroy(ca);
}
ca->nr = nr_blocks;
ca->nr_free += extra_blocks;
ca->counts = counts;

@@ -151,11 +164,6 @@ static int ca_commit(struct count_array *old, struct count_array *new)
return 0;
}

static void ca_destroy(struct count_array *ca)
{
kfree(ca->counts);
}

/*----------------------------------------------------------------*/

struct sm_checker {

@@ -343,25 +351,25 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
int r;
struct sm_checker *smc;

if (!sm)
return NULL;
if (IS_ERR_OR_NULL(sm))
return ERR_PTR(-EINVAL);

smc = kmalloc(sizeof(*smc), GFP_KERNEL);
if (!smc)
return NULL;
return ERR_PTR(-ENOMEM);

memcpy(&smc->sm, &ops_, sizeof(smc->sm));
r = ca_create(&smc->old_counts, sm);
if (r) {
kfree(smc);
return NULL;
return ERR_PTR(r);
}

r = ca_create(&smc->counts, sm);
if (r) {
ca_destroy(&smc->old_counts);
kfree(smc);
return NULL;
return ERR_PTR(r);
}

smc->real_sm = sm;

@@ -371,7 +379,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
ca_destroy(&smc->counts);
ca_destroy(&smc->old_counts);
kfree(smc);
return NULL;
return ERR_PTR(r);
}

r = ca_commit(&smc->old_counts, &smc->counts);

@@ -379,7 +387,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
ca_destroy(&smc->counts);
ca_destroy(&smc->old_counts);
kfree(smc);
return NULL;
return ERR_PTR(r);
}

return &smc->sm;

@@ -391,25 +399,25 @@ struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
int r;
struct sm_checker *smc;

if (!sm)
return NULL;
if (IS_ERR_OR_NULL(sm))
return ERR_PTR(-EINVAL);

smc = kmalloc(sizeof(*smc), GFP_KERNEL);
if (!smc)
return NULL;
return ERR_PTR(-ENOMEM);

memcpy(&smc->sm, &ops_, sizeof(smc->sm));
r = ca_create(&smc->old_counts, sm);
if (r) {
kfree(smc);
return NULL;
return ERR_PTR(r);
}

r = ca_create(&smc->counts, sm);
if (r) {
ca_destroy(&smc->old_counts);
kfree(smc);
return NULL;
return ERR_PTR(r);
}

smc->real_sm = sm;

@@ -290,7 +290,16 @@ struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
dm_block_t nr_blocks)
{
struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks);
return dm_sm_checker_create_fresh(sm);
struct dm_space_map *smc;

if (IS_ERR_OR_NULL(sm))
return sm;

smc = dm_sm_checker_create_fresh(sm);
if (IS_ERR(smc))
dm_sm_destroy(sm);

return smc;
}
EXPORT_SYMBOL_GPL(dm_sm_disk_create);

|
@ -138,6 +138,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
|
||||
|
||||
void dm_tm_destroy(struct dm_transaction_manager *tm)
|
||||
{
|
||||
if (!tm->is_clone)
|
||||
wipe_shadow_table(tm);
|
||||
|
||||
kfree(tm);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dm_tm_destroy);
|
||||
@ -344,8 +347,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
|
||||
}
|
||||
|
||||
*sm = dm_sm_checker_create(inner);
|
||||
if (!*sm)
|
||||
if (IS_ERR(*sm)) {
|
||||
r = PTR_ERR(*sm);
|
||||
goto bad2;
|
||||
}
|
||||
|
||||
} else {
|
||||
r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
|
||||
@ -364,8 +369,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
|
||||
}
|
||||
|
||||
*sm = dm_sm_checker_create(inner);
|
||||
if (!*sm)
|
||||
if (IS_ERR(*sm)) {
|
||||
r = PTR_ERR(*sm);
|
||||
goto bad2;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@@ -517,8 +517,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
int bad_sectors;

int disk = start_disk + i;
if (disk >= conf->raid_disks)
disk -= conf->raid_disks;
if (disk >= conf->raid_disks * 2)
disk -= conf->raid_disks * 2;

rdev = rcu_dereference(conf->mirrors[disk].rdev);
if (r1_bio->bios[disk] == IO_BLOCKED

@@ -883,7 +883,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
struct md_rdev *blocked_rdev;
int plugged;
int first_clone;
int sectors_handled;
int max_sectors;

@@ -1034,7 +1033,6 @@ read_again:
* the bad blocks. Each set of writes gets its own r1bio
* with a set of bios attached.
*/
plugged = mddev_check_plugged(mddev);

disks = conf->raid_disks * 2;
retry_write:

@@ -1191,6 +1189,8 @@ read_again:
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
if (!mddev_check_plugged(mddev))
md_wakeup_thread(mddev->thread);
}
/* Mustn't call r1_bio_write_done before this next test,
* as it could result in the bio being freed.

@@ -1213,9 +1213,6 @@ read_again:

/* In case raid1d snuck in to freeze_array */
wake_up(&conf->wait_barrier);

if (do_sync || !bitmap || !plugged)
md_wakeup_thread(mddev->thread);
}

static void status(struct seq_file *seq, struct mddev *mddev)

@@ -2621,7 +2618,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
goto abort;
}
err = -ENOMEM;
conf->thread = md_register_thread(raid1d, mddev, NULL);
conf->thread = md_register_thread(raid1d, mddev, "raid1");
if (!conf->thread) {
printk(KERN_ERR
"md/raid1:%s: couldn't allocate thread\n",

@@ -1039,7 +1039,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
unsigned long flags;
struct md_rdev *blocked_rdev;
int plugged;
int sectors_handled;
int max_sectors;
int sectors;

@@ -1239,7 +1238,6 @@ read_again:
* of r10_bios is recorded in bio->bi_phys_segments just as with
* the read case.
*/
plugged = mddev_check_plugged(mddev);

r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
raid10_find_phys(conf, r10_bio);

@@ -1396,6 +1394,8 @@ retry_write:
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
if (!mddev_check_plugged(mddev))
md_wakeup_thread(mddev->thread);

if (!r10_bio->devs[i].repl_bio)
continue;

@@ -1423,6 +1423,8 @@ retry_write:
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
if (!mddev_check_plugged(mddev))
md_wakeup_thread(mddev->thread);
}

/* Don't remove the bias on 'remaining' (one_write_done) until

@@ -1448,9 +1450,6 @@ retry_write:

/* In case raid10d snuck in to freeze_array */
wake_up(&conf->wait_barrier);

if (do_sync || !mddev->bitmap || !plugged)
md_wakeup_thread(mddev->thread);
}

static void status(struct seq_file *seq, struct mddev *mddev)

@@ -2310,7 +2309,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
if (r10_sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect,
s<<9, conf->tmppage, WRITE)
s, conf->tmppage, WRITE)
== 0) {
/* Well, this device is dead */
printk(KERN_NOTICE

@@ -2349,7 +2348,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
switch (r10_sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect,
s<<9, conf->tmppage,
s, conf->tmppage,
READ)) {
case 0:
/* Well, this device is dead */

@@ -2512,7 +2511,7 @@ read_more:
slot = r10_bio->read_slot;
printk_ratelimited(
KERN_ERR
"md/raid10:%s: %s: redirecting"
"md/raid10:%s: %s: redirecting "
"sector %llu to another mirror\n",
mdname(mddev),
bdevname(rdev->bdev, b),

@@ -2661,7 +2660,8 @@ static void raid10d(struct mddev *mddev)
blk_start_plug(&plug);
for (;;) {

flush_pending_writes(conf);
if (atomic_read(&mddev->plug_cnt) == 0)
flush_pending_writes(conf);

spin_lock_irqsave(&conf->device_lock, flags);
if (list_empty(head)) {

@@ -2890,6 +2890,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
/* want to reconstruct this device */
rb2 = r10_bio;
sect = raid10_find_virt(conf, sector_nr, i);
if (sect >= mddev->resync_max_sectors) {
/* last stripe is not complete - don't
* try to recover this sector.
*/
continue;
}
/* Unless we are doing a full sync, or a replacement
* we only need to recover the block if it is set in
* the bitmap

@@ -3421,7 +3427,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
spin_lock_init(&conf->resync_lock);
init_waitqueue_head(&conf->wait_barrier);

conf->thread = md_register_thread(raid10d, mddev, NULL);
conf->thread = md_register_thread(raid10d, mddev, "raid10");
if (!conf->thread)
goto out;

|
@ -196,12 +196,14 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
|
||||
BUG_ON(!list_empty(&sh->lru));
|
||||
BUG_ON(atomic_read(&conf->active_stripes)==0);
|
||||
if (test_bit(STRIPE_HANDLE, &sh->state)) {
|
||||
if (test_bit(STRIPE_DELAYED, &sh->state))
|
||||
if (test_bit(STRIPE_DELAYED, &sh->state) &&
|
||||
!test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
|
||||
list_add_tail(&sh->lru, &conf->delayed_list);
|
||||
else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
|
||||
sh->bm_seq - conf->seq_write > 0)
|
||||
list_add_tail(&sh->lru, &conf->bitmap_list);
|
||||
else {
|
||||
clear_bit(STRIPE_DELAYED, &sh->state);
|
||||
clear_bit(STRIPE_BIT_DELAY, &sh->state);
|
||||
list_add_tail(&sh->lru, &conf->handle_list);
|
||||
}
|
||||
@ -606,6 +608,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
|
||||
* a chance*/
|
||||
md_check_recovery(conf->mddev);
|
||||
}
|
||||
/*
|
||||
* Because md_wait_for_blocked_rdev
|
||||
* will dec nr_pending, we must
|
||||
* increment it first.
|
||||
*/
|
||||
atomic_inc(&rdev->nr_pending);
|
||||
md_wait_for_blocked_rdev(rdev, conf->mddev);
|
||||
} else {
|
||||
/* Acknowledged bad block - skip the write */
|
||||
@ -1737,6 +1745,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
|
||||
} else {
|
||||
const char *bdn = bdevname(rdev->bdev, b);
|
||||
int retry = 0;
|
||||
int set_bad = 0;
|
||||
|
||||
clear_bit(R5_UPTODATE, &sh->dev[i].flags);
|
||||
atomic_inc(&rdev->read_errors);
|
||||
@ -1748,7 +1757,8 @@ static void raid5_end_read_request(struct bio * bi, int error)
|
||||
mdname(conf->mddev),
|
||||
(unsigned long long)s,
|
||||
bdn);
|
||||
else if (conf->mddev->degraded >= conf->max_degraded)
|
||||
else if (conf->mddev->degraded >= conf->max_degraded) {
|
||||
set_bad = 1;
|
||||
printk_ratelimited(
|
||||
KERN_WARNING
|
||||
"md/raid:%s: read error not correctable "
|
||||
@ -1756,8 +1766,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
|
||||
mdname(conf->mddev),
|
||||
(unsigned long long)s,
|
||||
bdn);
|
||||
else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
|
||||
} else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
|
||||
/* Oh, no!!! */
|
||||
set_bad = 1;
|
||||
printk_ratelimited(
|
||||
KERN_WARNING
|
||||
"md/raid:%s: read error NOT corrected!! "
|
||||
@ -1765,7 +1776,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
|
||||
mdname(conf->mddev),
|
||||
(unsigned long long)s,
|
||||
bdn);
|
||||
else if (atomic_read(&rdev->read_errors)
|
||||
} else if (atomic_read(&rdev->read_errors)
|
||||
> conf->max_nr_stripes)
|
||||
printk(KERN_WARNING
|
||||
"md/raid:%s: Too many read errors, failing device %s.\n",
|
||||
@ -1777,7 +1788,11 @@ static void raid5_end_read_request(struct bio * bi, int error)
|
||||
else {
|
||||
clear_bit(R5_ReadError, &sh->dev[i].flags);
|
||||
clear_bit(R5_ReWrite, &sh->dev[i].flags);
|
||||
md_error(conf->mddev, rdev);
|
||||
if (!(set_bad
|
||||
&& test_bit(In_sync, &rdev->flags)
|
||||
&& rdev_set_badblocks(
|
||||
rdev, sh->sector, STRIPE_SECTORS, 0)))
|
||||
md_error(conf->mddev, rdev);
|
||||
}
|
||||
}
|
||||
rdev_dec_pending(rdev, conf->mddev);
|
||||
@ -3582,8 +3597,18 @@ static void handle_stripe(struct stripe_head *sh)
|
||||
|
||||
finish:
|
||||
/* wait for this device to become unblocked */
|
||||
if (conf->mddev->external && unlikely(s.blocked_rdev))
|
||||
md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
|
||||
if (unlikely(s.blocked_rdev)) {
|
||||
if (conf->mddev->external)
|
||||
md_wait_for_blocked_rdev(s.blocked_rdev,
|
||||
conf->mddev);
|
||||
else
|
||||
/* Internal metadata will immediately
|
||||
* be written by raid5d, so we don't
|
||||
* need to wait here.
|
||||
*/
|
||||
rdev_dec_pending(s.blocked_rdev,
|
||||
conf->mddev);
|
||||
}
|
||||
|
||||
if (s.handle_bad_blocks)
|
||||
for (i = disks; i--; ) {
|
||||
@ -3881,8 +3906,6 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
|
||||
raid_bio->bi_next = (void*)rdev;
|
||||
align_bi->bi_bdev = rdev->bdev;
|
||||
align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
|
||||
/* No reshape active, so we can trust rdev->data_offset */
|
||||
align_bi->bi_sector += rdev->data_offset;
|
||||
|
||||
if (!bio_fits_rdev(align_bi) ||
|
||||
is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
|
||||
@ -3893,6 +3916,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* No reshape active, so we can trust rdev->data_offset */
|
||||
align_bi->bi_sector += rdev->data_offset;
|
||||
|
||||
spin_lock_irq(&conf->device_lock);
|
||||
wait_event_lock_irq(conf->wait_for_stripe,
|
||||
conf->quiesce == 0,
|
||||
@ -3971,7 +3997,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
|
||||
struct stripe_head *sh;
|
||||
const int rw = bio_data_dir(bi);
|
||||
int remaining;
|
||||
int plugged;
|
||||
|
||||
if (unlikely(bi->bi_rw & REQ_FLUSH)) {
|
||||
md_flush_request(mddev, bi);
|
||||
@ -3990,7 +4015,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
|
||||
bi->bi_next = NULL;
|
||||
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
|
||||
|
||||
plugged = mddev_check_plugged(mddev);
|
||||
for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
|
||||
DEFINE_WAIT(w);
|
||||
int previous;
|
||||
@ -4092,6 +4116,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
|
||||
if ((bi->bi_rw & REQ_SYNC) &&
|
||||
!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
|
||||
atomic_inc(&conf->preread_active_stripes);
|
||||
mddev_check_plugged(mddev);
|
||||
release_stripe(sh);
|
||||
} else {
|
||||
/* cannot get stripe for read-ahead, just give-up */
|
||||
@ -4099,10 +4124,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
|
||||
finish_wait(&conf->wait_for_overlap, &w);
|
||||
break;
|
||||
}
|
||||
|
||||
}
|
||||
if (!plugged)
|
||||
md_wakeup_thread(mddev->thread);
|
||||
|
||||
spin_lock_irq(&conf->device_lock);
|
||||
remaining = raid5_dec_bi_phys_segments(bi);
|
||||
@ -4823,6 +4845,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
|
||||
int raid_disk, memory, max_disks;
|
||||
struct md_rdev *rdev;
|
||||
struct disk_info *disk;
|
||||
char pers_name[6];
|
||||
|
||||
if (mddev->new_level != 5
|
||||
&& mddev->new_level != 4
|
||||
@ -4946,7 +4969,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
|
||||
printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
|
||||
mdname(mddev), memory);
|
||||
|
||||
conf->thread = md_register_thread(raid5d, mddev, NULL);
|
||||
sprintf(pers_name, "raid%d", mddev->new_level);
|
||||
conf->thread = md_register_thread(raid5d, mddev, pers_name);
|
||||
if (!conf->thread) {
|
||||
printk(KERN_ERR
|
||||
"md/raid:%s: couldn't allocate thread.\n",
|
||||
@ -5465,10 +5489,9 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
|
||||
if (rdev->saved_raid_disk >= 0 &&
|
||||
rdev->saved_raid_disk >= first &&
|
||||
conf->disks[rdev->saved_raid_disk].rdev == NULL)
|
||||
disk = rdev->saved_raid_disk;
|
||||
else
|
||||
disk = first;
|
||||
for ( ; disk <= last ; disk++) {
|
||||
first = rdev->saved_raid_disk;
|
||||
|
||||
for (disk = first; disk <= last; disk++) {
|
||||
p = conf->disks + disk;
|
||||
if (p->rdev == NULL) {
|
||||
clear_bit(In_sync, &rdev->flags);
|
||||
@ -5477,8 +5500,11 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
|
||||
if (rdev->saved_raid_disk != disk)
|
||||
conf->fullsync = 1;
|
||||
rcu_assign_pointer(p->rdev, rdev);
|
||||
break;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
for (disk = first; disk <= last; disk++) {
|
||||
p = conf->disks + disk;
|
||||
if (test_bit(WantReplacement, &p->rdev->flags) &&
|
||||
p->replacement == NULL) {
|
||||
clear_bit(In_sync, &rdev->flags);
|
||||
@ -5490,6 +5516,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
|
||||
break;
|
||||
}
|
||||
}
|
||||
out:
|
||||
print_raid5_conf(conf);
|
||||
return err;
|
||||
}
|
||||
|
@@ -102,7 +102,7 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
static int cafe_device_ready(struct mtd_info *mtd)
{
struct cafe_priv *cafe = mtd->priv;
int result = !!(cafe_readl(cafe, NAND_STATUS) | 0x40000000);
int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000);
uint32_t irqs = cafe_readl(cafe, NAND_IRQ);

cafe_writel(cafe, irqs, NAND_IRQ);

@@ -3501,6 +3501,13 @@ int nand_scan_tail(struct mtd_info *mtd)
 	/* propagate ecc info to mtd_info */
 	mtd->ecclayout = chip->ecc.layout;
 	mtd->ecc_strength = chip->ecc.strength;
+	/*
+	 * Initialize bitflip_threshold to its default prior scan_bbt() call.
+	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
+	 * properly set.
+	 */
+	if (!mtd->bitflip_threshold)
+		mtd->bitflip_threshold = mtd->ecc_strength;
 
 	/* Check, if we should skip the bad block table scan */
 	if (chip->options & NAND_SKIP_BBTSCAN)
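The nand_scan_tail() hunk installs a default before the bad-block-table scan can trigger reads: if the driver left bitflip_threshold at zero, it falls back to the ECC strength. A sketch of that default-if-unset rule, using simplified stand-in fields rather than the real mtd structs:

    #include <stdio.h>

    /* Simplified stand-ins for the mtd fields involved. */
    struct mtd_sim {
    	unsigned int ecc_strength;	/* max correctable bitflips per ECC step */
    	unsigned int bitflip_threshold;	/* flips at which -EUCLEAN is reported */
    };

    int main(void)
    {
    	struct mtd_sim mtd = { .ecc_strength = 4, .bitflip_threshold = 0 };

    	/* Same default rule as the hunk above: if the driver did not set
    	 * a threshold, fall back to the ECC strength before any read. */
    	if (!mtd.bitflip_threshold)
    		mtd.bitflip_threshold = mtd.ecc_strength;

    	printf("threshold = %u\n", mtd.bitflip_threshold); /* 4 */
    	return 0;
    }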
@@ -1804,18 +1804,16 @@ void gfar_configure_coalescing(struct gfar_private *priv,
 	if (priv->mode == MQ_MG_MODE) {
 		baddr = &regs->txic0;
 		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
-			if (likely(priv->tx_queue[i]->txcoalescing)) {
-				gfar_write(baddr + i, 0);
+			gfar_write(baddr + i, 0);
+			if (likely(priv->tx_queue[i]->txcoalescing))
 				gfar_write(baddr + i, priv->tx_queue[i]->txic);
-			}
 		}
 
 		baddr = &regs->rxic0;
 		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
-			if (likely(priv->rx_queue[i]->rxcoalescing)) {
-				gfar_write(baddr + i, 0);
+			gfar_write(baddr + i, 0);
+			if (likely(priv->rx_queue[i]->rxcoalescing))
 				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
-			}
 		}
 	}
 }
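The gianfar rework changes write ordering rather than logic: every queue's interrupt-coalescing register is cleared first, and only queues with coalescing enabled are then programmed, so disabled queues no longer keep stale values. A user-space model of the loop (the names and register block are stand-ins, not the driver's):

    #include <stdio.h>

    #define NUM_QUEUES 4

    int main(void)
    {
    	/* Hypothetical per-queue coalescing flags and settings, plus a
    	 * simulated register block pre-filled with stale values. */
    	int coalescing_on[NUM_QUEUES] = { 1, 0, 1, 0 };
    	unsigned int ic_setting[NUM_QUEUES] = { 0x123, 0x456, 0x789, 0xabc };
    	unsigned int regs[NUM_QUEUES] = { 0xdead, 0xdead, 0xdead, 0xdead };

    	for (int i = 0; i < NUM_QUEUES; i++) {
    		regs[i] = 0;			/* always reset first */
    		if (coalescing_on[i])
    			regs[i] = ic_setting[i];/* program only if enabled */
    	}

    	for (int i = 0; i < NUM_QUEUES; i++)
    		printf("queue %d: 0x%x\n", i, regs[i]);
    	return 0;
    }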
@@ -103,6 +103,7 @@
 #define E1000_RXD_ERR_SEQ       0x04 /* Sequence Error */
 #define E1000_RXD_ERR_CXE       0x10 /* Carrier Extension Error */
 #define E1000_RXD_ERR_TCPE      0x20 /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE       0x40 /* IP Checksum Error */
 #define E1000_RXD_ERR_RXE       0x80 /* Rx Data Error */
 #define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
@@ -496,7 +496,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
  * @sk_buff: socket buffer with received data
  **/
 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
-			      __le16 csum, struct sk_buff *skb)
+			      struct sk_buff *skb)
 {
 	u16 status = (u16)status_err;
 	u8 errors = (u8)(status_err >> 24);
@@ -511,8 +511,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
 	if (status & E1000_RXD_STAT_IXSM)
 		return;
 
-	/* TCP/UDP checksum error bit is set */
-	if (errors & E1000_RXD_ERR_TCPE) {
+	/* TCP/UDP checksum error bit or IP checksum error bit is set */
+	if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
 		/* let the stack verify checksum errors */
 		adapter->hw_csum_err++;
 		return;
@@ -523,19 +523,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
 		return;
 
 	/* It must be a TCP or UDP packet with a valid checksum */
-	if (status & E1000_RXD_STAT_TCPCS) {
-		/* TCP checksum is good */
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	} else {
-		/*
-		 * IP fragment with UDP payload
-		 * Hardware complements the payload checksum, so we undo it
-		 * and then put the value in host order for further stack use.
-		 */
-		__sum16 sum = (__force __sum16)swab16((__force u16)csum);
-		skb->csum = csum_unfold(~sum);
-		skb->ip_summed = CHECKSUM_COMPLETE;
-	}
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
 	adapter->hw_csum_good++;
 }
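Two things happen in the e1000_rx_checksum() rewrite: the hardware-computed payload checksum parameter is dropped entirely, and the error test widens so an IP checksum error is treated like a TCP/UDP one, punting both to the stack. A stand-alone sketch of the widened test, reusing the bit values from the header hunk earlier:

    #include <stdio.h>

    #define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
    #define E1000_RXD_ERR_IPE  0x40 /* IP Checksum Error */

    /* Return 1 if the error byte from the Rx descriptor should make us
     * punt checksum validation to the network stack. */
    static int csum_error(unsigned char errors)
    {
    	return !!(errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE));
    }

    int main(void)
    {
    	printf("%d\n", csum_error(0x20)); /* TCP/UDP error -> 1 */
    	printf("%d\n", csum_error(0x40)); /* IP error, now also caught -> 1 */
    	printf("%d\n", csum_error(0x00)); /* clean -> 0 */
    	return 0;
    }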
@@ -954,8 +942,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
 		skb_put(skb, length);
 
 		/* Receive Checksum Offload */
-		e1000_rx_checksum(adapter, staterr,
-				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+		e1000_rx_checksum(adapter, staterr, skb);
 
 		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -1341,8 +1328,7 @@ copydone:
 	total_rx_bytes += skb->len;
 	total_rx_packets++;
 
-	e1000_rx_checksum(adapter, staterr,
-			  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+	e1000_rx_checksum(adapter, staterr, skb);
 
 	e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -1512,9 +1498,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
 		}
 	}
 
-	/* Receive Checksum Offload XXX recompute due to CRC strip? */
-	e1000_rx_checksum(adapter, staterr,
-			  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+	/* Receive Checksum Offload */
+	e1000_rx_checksum(adapter, staterr, skb);
 
 	e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -3098,19 +3083,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 
 	/* Enable Receive Checksum Offload for TCP and UDP */
 	rxcsum = er32(RXCSUM);
-	if (adapter->netdev->features & NETIF_F_RXCSUM) {
+	if (adapter->netdev->features & NETIF_F_RXCSUM)
 		rxcsum |= E1000_RXCSUM_TUOFL;
-
-		/*
-		 * IPv4 payload checksum for UDP fragments must be
-		 * used in conjunction with packet-split.
-		 */
-		if (adapter->rx_ps_pages)
-			rxcsum |= E1000_RXCSUM_IPPCSE;
-	} else {
+	else
 		rxcsum &= ~E1000_RXCSUM_TUOFL;
-		/* no need to clear IPPCSE as it defaults to 0 */
-	}
 	ew32(RXCSUM, rxcsum);
 
 	if (adapter->hw.mac.type == e1000_pch2lan) {
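e1000_configure_rx() shows the usual read-modify-write on a device register: read RXCSUM, set or clear the TCP/UDP offload bit from the netdev feature flag, write it back; the packet-split IPPCSE handling is gone along with IP payload checksumming. A minimal model of that read-modify-write (er32()/ew32() become a plain variable, and the bit value is illustrative):

    #include <stdio.h>

    #define RXCSUM_TUOFL 0x00000200u /* TCP/UDP offload enable, illustrative */

    static unsigned int update_rxcsum(unsigned int rxcsum, int feature_on)
    {
    	if (feature_on)
    		rxcsum |= RXCSUM_TUOFL;
    	else
    		rxcsum &= ~RXCSUM_TUOFL;
    	return rxcsum;
    }

    int main(void)
    {
    	printf("0x%x\n", update_rxcsum(0x0, 1));   /* 0x200 */
    	printf("0x%x\n", update_rxcsum(0x200, 0)); /* 0x0 */
    	return 0;
    }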
@@ -5241,22 +5217,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
 	/* Jumbo frame support */
-	if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
-		if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
-			e_err("Jumbo Frames not supported.\n");
-			return -EINVAL;
-		}
-
-		/*
-		 * IP payload checksum (enabled with jumbos/packet-split when
-		 * Rx checksum is enabled) and generation of RSS hash is
-		 * mutually exclusive in the hardware.
-		 */
-		if ((netdev->features & NETIF_F_RXCSUM) &&
-		    (netdev->features & NETIF_F_RXHASH)) {
-			e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled. Disable one of the receive offload features before enabling jumbos.\n");
-			return -EINVAL;
-		}
-	}
+	if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+	    !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+		e_err("Jumbo Frames not supported.\n");
+		return -EINVAL;
+	}
 
 	/* Supported frame sizes */
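The change_mtu hunk keeps only the size check, and the arithmetic is worth noting: max_frame is the on-wire size, MTU plus the 14-byte Ethernet header plus the 4-byte FCS, and anything above the standard 1518-byte maximum counts as a jumbo frame. A runnable check of that arithmetic:

    #include <stdio.h>

    #define ETH_HLEN      14   /* Ethernet header */
    #define ETH_DATA_LEN  1500 /* max standard payload (MTU) */
    #define ETH_FRAME_LEN (ETH_HLEN + ETH_DATA_LEN)
    #define ETH_FCS_LEN   4    /* frame check sequence */

    /* Same arithmetic as the hunk: a frame is "jumbo" when the on-wire
     * size for the requested MTU exceeds the standard 1518-byte max. */
    static int is_jumbo(int new_mtu)
    {
    	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
    	return max_frame > ETH_FRAME_LEN + ETH_FCS_LEN;
    }

    int main(void)
    {
    	printf("MTU 1500 -> jumbo? %d\n", is_jumbo(1500)); /* 0 */
    	printf("MTU 9000 -> jumbo? %d\n", is_jumbo(9000)); /* 1 */
    	return 0;
    }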
@@ -6030,17 +5994,6 @@ static int e1000_set_features(struct net_device *netdev,
 			 NETIF_F_RXALL)))
 		return 0;
 
-	/*
-	 * IP payload checksum (enabled with jumbos/packet-split when Rx
-	 * checksum is enabled) and generation of RSS hash is mutually
-	 * exclusive in the hardware.
-	 */
-	if (adapter->rx_ps_pages &&
-	    (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
-		e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames. Disable jumbos or enable only one of the receive offload features.\n");
-		return -EINVAL;
-	}
-
 	if (changed & NETIF_F_RXFCS) {
 		if (features & NETIF_F_RXFCS) {
 			adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
@@ -357,21 +357,28 @@ static int igbvf_set_coalesce(struct net_device *netdev,
 	struct igbvf_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) ||
-	    ((ec->rx_coalesce_usecs > 3) &&
-	     (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) ||
-	    (ec->rx_coalesce_usecs == 2))
-		return -EINVAL;
-
-	/* convert to rate of irq's per second */
-	if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
-		adapter->current_itr = IGBVF_START_ITR;
-		adapter->requested_itr = ec->rx_coalesce_usecs;
-	} else {
+	if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) &&
+	    (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
 		adapter->current_itr = ec->rx_coalesce_usecs << 2;
 		adapter->requested_itr = 1000000000 /
 					 (adapter->current_itr * 256);
-	}
+	} else if ((ec->rx_coalesce_usecs == 3) ||
+		   (ec->rx_coalesce_usecs == 2)) {
+		adapter->current_itr = IGBVF_START_ITR;
+		adapter->requested_itr = ec->rx_coalesce_usecs;
+	} else if (ec->rx_coalesce_usecs == 0) {
+		/*
+		 * The user's desire is to turn off interrupt throttling
+		 * altogether, but due to HW limitations, we can't do that.
+		 * Instead we set a very small value in EITR, which would
+		 * allow ~967k interrupts per second, but allow the adapter's
+		 * internal clocking to still function properly.
+		 */
+		adapter->current_itr = 4;
+		adapter->requested_itr = 1000000000 /
+					 (adapter->current_itr * 256);
+	} else
+		return -EINVAL;
 
 	writel(adapter->current_itr,
 	       hw->hw_addr + adapter->rx_ring->itr_register);
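The igbvf coalescing rewrite folds validation into the conversion, and the unit math is easy to verify: the ITR register counts 256 ns intervals, rx_coalesce_usecs << 2 approximates microseconds in that unit (4 x 256 ns is roughly 1 us), and the resulting interrupt rate is 10^9 / (itr x 256) per second. A quick check of those numbers:

    #include <stdio.h>

    /* Mirrors the arithmetic in the hunk above: ITR is in 256 ns units,
     * and usecs << 2 converts microseconds to that unit. */
    int main(void)
    {
    	unsigned int usecs = 100;	/* requested coalescing interval */
    	unsigned int itr = usecs << 2;	/* 400 units of 256 ns */

    	printf("itr=%u -> ~%u interrupts/s\n",
    	       itr, 1000000000u / (itr * 256u));	/* ~9765 */

    	itr = 4; /* the "throttling off" fallback from the patch */
    	printf("itr=%u -> ~%u interrupts/s\n",
    	       itr, 1000000000u / (itr * 256u));	/* ~976562 */
    	return 0;
    }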
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
+#include <linux/spinlock.h>
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/dma-mapping.h>
@@ -197,6 +197,10 @@ err:
 static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
 {
 	struct usbnet *dev = usb_get_intfdata(intf);
+
+	/* can be called while disconnecting */
+	if (!dev)
+		return 0;
 	return qmi_wwan_manage_power(dev, on);
 }
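The qmi_wwan hunk is a defensive guard: the power-management callback can run while the interface is disconnecting, after the interface data has been cleared, so a NULL private pointer is treated as a successful no-op. A sketch of the pattern with a hypothetical stand-in struct:

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical stand-in for the driver's private state. */
    struct devstate { int powered; };

    /* Mirrors the guard in the hunk above: the callback may run after
     * disconnect has cleared the interface data, so treat NULL as a
     * no-op success rather than dereferencing it. */
    static int manage_power(struct devstate *dev, int on)
    {
    	if (!dev)
    		return 0; /* disconnecting: nothing to do */
    	dev->powered = on;
    	return 0;
    }

    int main(void)
    {
    	struct devstate d = { 0 };
    	printf("%d\n", manage_power(&d, 1));   /* normal path */
    	printf("%d\n", manage_power(NULL, 1)); /* disconnect race, no crash */
    	return 0;
    }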
@@ -143,6 +143,7 @@ struct ath_common {
 	u32 keymax;
 	DECLARE_BITMAP(keymap, ATH_KEYMAX);
 	DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
+	DECLARE_BITMAP(ccmp_keymap, ATH_KEYMAX);
 	enum ath_crypt_caps crypt_caps;
 
 	unsigned int clockrate;
@@ -622,7 +622,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
 
 	if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
 		if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
-		    ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
+		    ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
 		     !ah->is_pciexpress)) {
 			ah->config.serialize_regmode =
 				SER_REG_MODE_ON;
@@ -695,9 +695,9 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
 		__skb_unlink(skb, &rx_edma->rx_fifo);
 		list_add_tail(&bf->list, &sc->rx.rxbuf);
 		ath_rx_edma_buf_link(sc, qtype);
-	} else {
-		bf = NULL;
-	}
+
+		bf = NULL;
+	}
 
 	*dest = bf;
@@ -822,7 +822,8 @@ static bool ath9k_rx_accept(struct ath_common *common,
 	 * descriptor does contain a valid key index. This has been observed
	 * mostly with CCMP encryption.
 	 */
-	if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
+	if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
+	    !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
 		rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
 
 	if (!rx_stats->rs_datalen) {
@@ -556,6 +556,9 @@ int ath_key_config(struct ath_common *common,
 		return -EIO;
 
 	set_bit(idx, common->keymap);
+	if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
+		set_bit(idx, common->ccmp_keymap);
+
 	if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
 		set_bit(idx + 64, common->keymap);
 		set_bit(idx, common->tkip_keymap);
@@ -582,6 +585,7 @@ void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
 		return;
 
 	clear_bit(key->hw_key_idx, common->keymap);
+	clear_bit(key->hw_key_idx, common->ccmp_keymap);
 	if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
 		return;
 
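The ath hunks maintain a per-slot CCMP bitmap alongside the existing keymap: set when a CCMP key is installed, cleared on delete, and consulted in ath9k_rx_accept() so a key-miss status is only cleared for slots that really hold a CCMP key. A user-space model of the bookkeeping, with bool arrays standing in for DECLARE_BITMAP()/set_bit()/test_bit():

    #include <stdio.h>
    #include <stdbool.h>

    #define KEYMAX 128 /* stand-in for ATH_KEYMAX */

    static bool keymap[KEYMAX], ccmp_keymap[KEYMAX];

    /* Install/delete mirror the set_bit/clear_bit calls in the hunks. */
    static void key_config(int idx, bool is_ccmp)
    {
    	keymap[idx] = true;
    	if (is_ccmp)
    		ccmp_keymap[idx] = true;
    }

    static void key_delete(int idx)
    {
    	keymap[idx] = false;
    	ccmp_keymap[idx] = false;
    }

    int main(void)
    {
    	key_config(5, true);
    	/* rx path: a key-miss report for slot 5 is only trusted if the
    	 * slot actually holds a CCMP key. */
    	printf("slot 5 ccmp? %d\n", ccmp_keymap[5]); /* 1 */
    	key_delete(5);
    	printf("slot 5 ccmp? %d\n", ccmp_keymap[5]); /* 0 */
    	return 0;
    }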
@@ -796,6 +796,18 @@ int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
 	switch (op) {
 	case ADD:
 		ret = iwlagn_mac_sta_add(hw, vif, sta);
+		if (ret)
+			break;
+		/*
+		 * Clear the in-progress flag, the AP station entry was added
+		 * but we'll initialize LQ only when we've associated (which
+		 * would also clear the in-progress flag). This is necessary
+		 * in case we never initialize LQ because association fails.
+		 */
+		spin_lock_bh(&priv->sta_lock);
+		priv->stations[iwl_sta_id(sta)].used &=
+			~IWL_STA_UCODE_INPROGRESS;
+		spin_unlock_bh(&priv->sta_lock);
 		break;
 	case REMOVE:
 		ret = iwlagn_mac_sta_remove(hw, vif, sta);
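The iwlwifi hunk adds an early break on a failed add and otherwise clears the station's in-progress flag under sta_lock, so a never-completed association cannot leave the flag set. A minimal pthreads model of clearing a flag bit under a lock (the flag value is illustrative, not the driver's):

    #include <pthread.h>
    #include <stdio.h>

    #define UCODE_INPROGRESS 0x2u /* illustrative flag value */

    static pthread_mutex_t sta_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int sta_used = 0x1u | UCODE_INPROGRESS;

    /* Mirrors the pattern in the hunk: after a successful add, clear the
     * in-progress bit under the station lock so a failed association
     * cannot leave it set forever. */
    static void clear_in_progress(void)
    {
    	pthread_mutex_lock(&sta_lock);
    	sta_used &= ~UCODE_INPROGRESS;
    	pthread_mutex_unlock(&sta_lock);
    }

    int main(void)
    {
    	clear_in_progress();
    	printf("used = 0x%x\n", sta_used); /* 0x1 */
    	return 0;
    }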
Some files were not shown because too many files have changed in this diff.