Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ohad/hwspinlock
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ohad/hwspinlock:
  hwspinlock: add MAINTAINERS entries
  hwspinlock/omap: omap_hwspinlock_remove should be __devexit
  hwspinlock/u8500: add hwspinlock driver
  hwspinlock/core: register a bank of hwspinlocks in a single API call
  hwspinlock/core: remove stubs for register/unregister
  hwspinlock/core: use a mutex to protect the radix tree
  hwspinlock/core/omap: fix id issues on multiple hwspinlock devices
  hwspinlock/omap: simplify allocation scheme
  hwspinlock/core: simplify 'owner' handling
  hwspinlock/core: simplify Kconfig

Fix up trivial conflicts (addition of omap_hwspinlock_pdata, removal of
omap_spinlock_latency) in arch/arm/mach-omap2/hwspinlock.c

Also, do an "evil merge" to fix a compile error in omap_hsmmc.c which for some
reason was reported in the same email thread as the "please pull hwspinlock
changes".
commit 3f8ddb032a
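For orientation, a hedged sketch of how the implementor-facing registration API changes across this series; both sets of prototypes are taken from the diffs below, while the comments are editorial:

/* Before this series: every lock was registered and unregistered one by one. */
int hwspin_lock_register(struct hwspinlock *hwlock);
struct hwspinlock *hwspin_lock_unregister(unsigned int id);

/* After this series: a whole bank of locks is handled in a single call. */
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
			 const struct hwspinlock_ops *ops, int base_id,
			 int num_locks);
int hwspin_lock_unregister(struct hwspinlock_device *bank);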
@ -39,23 +39,20 @@ independent, drivers.
in case an unused hwspinlock isn't available. Users of this
API will usually want to communicate the lock's id to the remote core
before it can be used to achieve synchronization.
Can be called from an atomic context (this function will not sleep) but
not from within interrupt context.
Should be called from a process context (might sleep).

struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
- assign a specific hwspinlock id and return its address, or NULL
if that hwspinlock is already in use. Usually board code will
be calling this function in order to reserve specific hwspinlock
ids for predefined purposes.
Can be called from an atomic context (this function will not sleep) but
not from within interrupt context.
Should be called from a process context (might sleep).

int hwspin_lock_free(struct hwspinlock *hwlock);
- free a previously-assigned hwspinlock; returns 0 on success, or an
appropriate error code on failure (e.g. -EINVAL if the hwspinlock
is already free).
Can be called from an atomic context (this function will not sleep) but
not from within interrupt context.
Should be called from a process context (might sleep).

int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout);
- lock a previously-assigned hwspinlock with a timeout limit (specified in
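To tie the caller-facing functions above together, here is a short, hedged usage sketch in the spirit of the hwspinlock_example functions referenced in the next hunk; the lock id and the 10 ms timeout are arbitrary illustrative values:

#include <linux/hwspinlock.h>

int hwspinlock_usage_sketch(void)
{
	struct hwspinlock *hwlock;
	int ret;

	/* reserve a lock id that was agreed upon with the remote core */
	hwlock = hwspin_lock_request_specific(0);
	if (!hwlock)
		return -EBUSY;

	/* take the lock, giving up after 10 ms */
	ret = hwspin_lock_timeout(hwlock, 10);
	if (ret)
		goto out_free;

	/* ... critical section shared with the remote core ... */

	hwspin_unlock(hwlock);

out_free:
	hwspin_lock_free(hwlock);
	return ret;
}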
@ -230,45 +227,62 @@ int hwspinlock_example2(void)

4. API for implementors

int hwspin_lock_register(struct hwspinlock *hwlock);
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
const struct hwspinlock_ops *ops, int base_id, int num_locks);
- to be called from the underlying platform-specific implementation, in
order to register a new hwspinlock instance. Can be called from an atomic
context (this function will not sleep) but not from within interrupt
context. Returns 0 on success, or appropriate error code on failure.
order to register a new hwspinlock device (which is usually a bank of
numerous locks). Should be called from a process context (this function
might sleep).
Returns 0 on success, or appropriate error code on failure.

struct hwspinlock *hwspin_lock_unregister(unsigned int id);
int hwspin_lock_unregister(struct hwspinlock_device *bank);
- to be called from the underlying vendor-specific implementation, in order
to unregister an existing (and unused) hwspinlock instance.
Can be called from an atomic context (will not sleep) but not from
within interrupt context.
to unregister an hwspinlock device (which is usually a bank of numerous
locks).
Should be called from a process context (this function might sleep).
Returns the address of hwspinlock on success, or NULL on error (e.g.
if the hwspinlock is still in use).
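To make the bank registration flow described above concrete, here is a minimal, hedged implementor-side sketch; the my_hsem_* names, the 32-lock count, and the 4-byte register stride are illustrative assumptions rather than part of this patch, and error handling is trimmed:

#include <linux/device.h>
#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/slab.h>

#include "hwspinlock_internal.h"

#define MY_NUM_LOCKS	32	/* assumed number of locks in this bank */

static int my_hsem_trylock(struct hwspinlock *lock)
{
	void __iomem *lock_addr = lock->priv;

	/* assumed semantics: the lock is free if the register reads back zero */
	return readl(lock_addr) == 0;
}

static void my_hsem_unlock(struct hwspinlock *lock)
{
	void __iomem *lock_addr = lock->priv;

	writel(0, lock_addr);
}

static const struct hwspinlock_ops my_hsem_ops = {
	.trylock	= my_hsem_trylock,
	.unlock		= my_hsem_unlock,
};

/* io_base is the controller's already-mapped register space */
static int my_hsem_register_bank(struct device *dev, void __iomem *io_base)
{
	struct hwspinlock_device *bank;
	int i, ret;

	/* one allocation covers the bank header plus all lock descriptors */
	bank = kzalloc(sizeof(*bank) + MY_NUM_LOCKS * sizeof(struct hwspinlock),
		       GFP_KERNEL);
	if (!bank)
		return -ENOMEM;

	/* the driver only fills in ->priv; the core initializes the rest */
	for (i = 0; i < MY_NUM_LOCKS; i++)
		bank->lock[i].priv = io_base + i * sizeof(u32);

	ret = hwspin_lock_register(bank, dev, &my_hsem_ops,
				   0 /* base_id */, MY_NUM_LOCKS);
	if (ret)
		kfree(bank);
	return ret;
}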
5. struct hwspinlock
5. Important structs

This struct represents an hwspinlock instance. It is registered by the
underlying hwspinlock implementation using the hwspin_lock_register() API.
struct hwspinlock_device is a device which usually contains a bank
of hardware locks. It is registered by the underlying hwspinlock
implementation using the hwspin_lock_register() API.

/**
 * struct hwspinlock - vendor-specific hwspinlock implementation
 *
 * @dev: underlying device, will be used with runtime PM api
 * @ops: vendor-specific hwspinlock handlers
 * @id: a global, unique, system-wide, index of the lock.
 * @lock: initialized and used by hwspinlock core
 * @owner: underlying implementation module, used to maintain module ref count
 * struct hwspinlock_device - a device which usually spans numerous hwspinlocks
 * @dev: underlying device, will be used to invoke runtime PM api
 * @ops: platform-specific hwspinlock handlers
 * @base_id: id index of the first lock in this device
 * @num_locks: number of locks in this device
 * @lock: dynamically allocated array of 'struct hwspinlock'
 */
struct hwspinlock {
struct hwspinlock_device {
	struct device *dev;
	const struct hwspinlock_ops *ops;
	int id;
	spinlock_t lock;
	struct module *owner;
	int base_id;
	int num_locks;
	struct hwspinlock lock[0];
};

The underlying implementation is responsible to assign the dev, ops, id and
owner members. The lock member, OTOH, is initialized and used by the hwspinlock
core.
struct hwspinlock_device contains an array of hwspinlock structs, each
of which represents a single hardware lock:

/**
 * struct hwspinlock - this struct represents a single hwspinlock instance
 * @bank: the hwspinlock_device structure which owns this lock
 * @lock: initialized and used by hwspinlock core
 * @priv: private data, owned by the underlying platform-specific hwspinlock drv
 */
struct hwspinlock {
	struct hwspinlock_device *bank;
	spinlock_t lock;
	void *priv;
};

When registering a bank of locks, the hwspinlock driver only needs to
set the priv members of the locks. The rest of the members are set and
initialized by the hwspinlock core itself.
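The mapping between a lock and its system-wide id follows directly from these structs; the sketch below mirrors the hwlock_to_id() helper this patch adds to hwspinlock_internal.h (the numbers in the comment are illustrative only):

/*
 * Example: a second bank registered with base_id = 32 owns global ids
 * 32..(32 + num_locks - 1); the lock at array index 2 has global id 34.
 */
static inline int hwlock_to_id(struct hwspinlock *hwlock)
{
	/* position of this lock inside its bank's lock[] array */
	int local_id = hwlock - &hwlock->bank->lock[0];

	return hwlock->bank->base_id + local_id;
}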
6. Implementation callbacks
MAINTAINERS
@ -3018,6 +3018,13 @@ F: Documentation/hw_random.txt
|
||||
F: drivers/char/hw_random/
|
||||
F: include/linux/hw_random.h
|
||||
|
||||
HARDWARE SPINLOCK CORE
|
||||
M: Ohad Ben-Cohen <ohad@wizery.com>
|
||||
S: Maintained
|
||||
F: Documentation/hwspinlock.txt
|
||||
F: drivers/hwspinlock/hwspinlock_*
|
||||
F: include/linux/hwspinlock.h
|
||||
|
||||
HARMONY SOUND DRIVER
|
||||
M: Kyle McMartin <kyle@mcmartin.ca>
|
||||
L: linux-parisc@vger.kernel.org
|
||||
@ -4714,6 +4721,13 @@ S: Maintained
|
||||
F: drivers/video/omap2/
|
||||
F: Documentation/arm/OMAP/DSS
|
||||
|
||||
OMAP HARDWARE SPINLOCK SUPPORT
|
||||
M: Ohad Ben-Cohen <ohad@wizery.com>
|
||||
L: linux-omap@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/hwspinlock/omap_hwspinlock.c
|
||||
F: arch/arm/mach-omap2/hwspinlock.c
|
||||
|
||||
OMAP MMC SUPPORT
|
||||
M: Jarkko Lavinen <jarkko.lavinen@nokia.com>
|
||||
L: linux-omap@vger.kernel.org
|
||||
|
@ -19,10 +19,15 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/hwspinlock.h>
|
||||
|
||||
#include <plat/omap_hwmod.h>
|
||||
#include <plat/omap_device.h>
|
||||
|
||||
static struct hwspinlock_pdata omap_hwspinlock_pdata __initdata = {
|
||||
.base_id = 0,
|
||||
};
|
||||
|
||||
int __init hwspinlocks_init(void)
|
||||
{
|
||||
int retval = 0;
|
||||
@ -40,7 +45,9 @@ int __init hwspinlocks_init(void)
|
||||
if (oh == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
pdev = omap_device_build(dev_name, 0, oh, NULL, 0, NULL, 0, false);
|
||||
pdev = omap_device_build(dev_name, 0, oh, &omap_hwspinlock_pdata,
|
||||
sizeof(struct hwspinlock_pdata),
|
||||
NULL, 0, false);
|
||||
if (IS_ERR(pdev)) {
|
||||
pr_err("Can't build omap_device for %s:%s\n", dev_name,
|
||||
oh_name);
|
||||
|
@ -2,22 +2,31 @@
|
||||
# Generic HWSPINLOCK framework
|
||||
#
|
||||
|
||||
# HWSPINLOCK always gets selected by whoever wants it.
|
||||
config HWSPINLOCK
|
||||
tristate "Generic Hardware Spinlock framework"
|
||||
depends on ARCH_OMAP4
|
||||
help
|
||||
Say y here to support the generic hardware spinlock framework.
|
||||
You only need to enable this if you have hardware spinlock module
|
||||
on your system (usually only relevant if your system has remote slave
|
||||
coprocessors).
|
||||
tristate
|
||||
|
||||
If unsure, say N.
|
||||
menu "Hardware Spinlock drivers"
|
||||
|
||||
config HWSPINLOCK_OMAP
|
||||
tristate "OMAP Hardware Spinlock device"
|
||||
depends on HWSPINLOCK && ARCH_OMAP4
|
||||
depends on ARCH_OMAP4
|
||||
select HWSPINLOCK
|
||||
help
|
||||
Say y here to support the OMAP Hardware Spinlock device (firstly
|
||||
introduced in OMAP4).
|
||||
|
||||
If unsure, say N.
|
||||
|
||||
config HSEM_U8500
|
||||
tristate "STE Hardware Semaphore functionality"
|
||||
depends on ARCH_U8500
|
||||
select HWSPINLOCK
|
||||
help
|
||||
Say y here to support the STE Hardware Semaphore functionality, which
|
||||
provides a synchronisation mechanism for the various processors on the
|
||||
SoC.
|
||||
|
||||
If unsure, say N.
|
||||
|
||||
endmenu
|
||||
|
@ -4,3 +4,4 @@
|
||||
|
||||
obj-$(CONFIG_HWSPINLOCK) += hwspinlock_core.o
|
||||
obj-$(CONFIG_HWSPINLOCK_OMAP) += omap_hwspinlock.o
|
||||
obj-$(CONFIG_HSEM_U8500) += u8500_hsem.o
|
||||
|
@ -26,6 +26,7 @@
|
||||
#include <linux/radix-tree.h>
|
||||
#include <linux/hwspinlock.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/mutex.h>
|
||||
|
||||
#include "hwspinlock_internal.h"
|
||||
|
||||
@ -52,10 +53,12 @@
|
||||
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);
|
||||
|
||||
/*
|
||||
* Synchronization of access to the tree is achieved using this spinlock,
|
||||
* Synchronization of access to the tree is achieved using this mutex,
|
||||
* as the radix-tree API requires that users provide all synchronisation.
|
||||
* A mutex is needed because we're using non-atomic radix tree allocations.
|
||||
*/
|
||||
static DEFINE_SPINLOCK(hwspinlock_tree_lock);
|
||||
static DEFINE_MUTEX(hwspinlock_tree_lock);
|
||||
|
||||
|
||||
/**
|
||||
* __hwspin_trylock() - attempt to lock a specific hwspinlock
|
||||
@ -114,7 +117,7 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
|
||||
return -EBUSY;
|
||||
|
||||
/* try to take the hwspinlock device */
|
||||
ret = hwlock->ops->trylock(hwlock);
|
||||
ret = hwlock->bank->ops->trylock(hwlock);
|
||||
|
||||
/* if hwlock is already taken, undo spin_trylock_* and exit */
|
||||
if (!ret) {
|
||||
@ -196,8 +199,8 @@ int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
|
||||
* Allow platform-specific relax handlers to prevent
|
||||
* hogging the interconnect (no sleeping, though)
|
||||
*/
|
||||
if (hwlock->ops->relax)
|
||||
hwlock->ops->relax(hwlock);
|
||||
if (hwlock->bank->ops->relax)
|
||||
hwlock->bank->ops->relax(hwlock);
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -242,7 +245,7 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
|
||||
*/
|
||||
mb();
|
||||
|
||||
hwlock->ops->unlock(hwlock);
|
||||
hwlock->bank->ops->unlock(hwlock);
|
||||
|
||||
/* Undo the spin_trylock{_irq, _irqsave} called while locking */
|
||||
if (mode == HWLOCK_IRQSTATE)
|
||||
@ -254,68 +257,37 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__hwspin_unlock);
|
||||
|
||||
/**
|
||||
* hwspin_lock_register() - register a new hw spinlock
|
||||
* @hwlock: hwspinlock to register.
|
||||
*
|
||||
* This function should be called from the underlying platform-specific
|
||||
* implementation, to register a new hwspinlock instance.
|
||||
*
|
||||
* Can be called from an atomic context (will not sleep) but not from
|
||||
* within interrupt context.
|
||||
*
|
||||
* Returns 0 on success, or an appropriate error code on failure
|
||||
*/
|
||||
int hwspin_lock_register(struct hwspinlock *hwlock)
|
||||
static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
|
||||
{
|
||||
struct hwspinlock *tmp;
|
||||
int ret;
|
||||
|
||||
if (!hwlock || !hwlock->ops ||
|
||||
!hwlock->ops->trylock || !hwlock->ops->unlock) {
|
||||
pr_err("invalid parameters\n");
|
||||
return -EINVAL;
|
||||
mutex_lock(&hwspinlock_tree_lock);
|
||||
|
||||
ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
|
||||
if (ret) {
|
||||
if (ret == -EEXIST)
|
||||
pr_err("hwspinlock id %d already exists!\n", id);
|
||||
goto out;
|
||||
}
|
||||
|
||||
spin_lock_init(&hwlock->lock);
|
||||
|
||||
spin_lock(&hwspinlock_tree_lock);
|
||||
|
||||
ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* mark this hwspinlock as available */
|
||||
tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
|
||||
HWSPINLOCK_UNUSED);
|
||||
tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
|
||||
|
||||
/* self-sanity check which should never fail */
|
||||
WARN_ON(tmp != hwlock);
|
||||
|
||||
out:
|
||||
spin_unlock(&hwspinlock_tree_lock);
|
||||
return ret;
|
||||
mutex_unlock(&hwspinlock_tree_lock);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hwspin_lock_register);
|
||||
|
||||
/**
|
||||
* hwspin_lock_unregister() - unregister an hw spinlock
|
||||
* @id: index of the specific hwspinlock to unregister
|
||||
*
|
||||
* This function should be called from the underlying platform-specific
|
||||
* implementation, to unregister an existing (and unused) hwspinlock.
|
||||
*
|
||||
* Can be called from an atomic context (will not sleep) but not from
|
||||
* within interrupt context.
|
||||
*
|
||||
* Returns the address of hwspinlock @id on success, or NULL on failure
|
||||
*/
|
||||
struct hwspinlock *hwspin_lock_unregister(unsigned int id)
|
||||
static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
|
||||
{
|
||||
struct hwspinlock *hwlock = NULL;
|
||||
int ret;
|
||||
|
||||
spin_lock(&hwspinlock_tree_lock);
|
||||
mutex_lock(&hwspinlock_tree_lock);
|
||||
|
||||
/* make sure the hwspinlock is not in use (tag is set) */
|
||||
ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
|
||||
@ -331,9 +303,91 @@ struct hwspinlock *hwspin_lock_unregister(unsigned int id)
|
||||
}
|
||||
|
||||
out:
|
||||
spin_unlock(&hwspinlock_tree_lock);
|
||||
mutex_unlock(&hwspinlock_tree_lock);
|
||||
return hwlock;
|
||||
}
|
||||
|
||||
/**
|
||||
* hwspin_lock_register() - register a new hw spinlock device
|
||||
* @bank: the hwspinlock device, which usually provides numerous hw locks
|
||||
* @dev: the backing device
|
||||
* @ops: hwspinlock handlers for this device
|
||||
* @base_id: id of the first hardware spinlock in this bank
|
||||
* @num_locks: number of hwspinlocks provided by this device
|
||||
*
|
||||
* This function should be called from the underlying platform-specific
|
||||
* implementation, to register a new hwspinlock device instance.
|
||||
*
|
||||
* Should be called from a process context (might sleep)
|
||||
*
|
||||
* Returns 0 on success, or an appropriate error code on failure
|
||||
*/
|
||||
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
|
||||
const struct hwspinlock_ops *ops, int base_id, int num_locks)
|
||||
{
|
||||
struct hwspinlock *hwlock;
|
||||
int ret = 0, i;
|
||||
|
||||
if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
|
||||
!ops->unlock) {
|
||||
pr_err("invalid parameters\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
bank->dev = dev;
|
||||
bank->ops = ops;
|
||||
bank->base_id = base_id;
|
||||
bank->num_locks = num_locks;
|
||||
|
||||
for (i = 0; i < num_locks; i++) {
|
||||
hwlock = &bank->lock[i];
|
||||
|
||||
spin_lock_init(&hwlock->lock);
|
||||
hwlock->bank = bank;
|
||||
|
||||
ret = hwspin_lock_register_single(hwlock, i);
|
||||
if (ret)
|
||||
goto reg_failed;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
reg_failed:
|
||||
while (--i >= 0)
|
||||
hwspin_lock_unregister_single(i);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hwspin_lock_register);
|
||||
|
||||
/**
|
||||
* hwspin_lock_unregister() - unregister an hw spinlock device
|
||||
* @bank: the hwspinlock device, which usually provides numerous hw locks
|
||||
*
|
||||
* This function should be called from the underlying platform-specific
|
||||
* implementation, to unregister an existing (and unused) hwspinlock.
|
||||
*
|
||||
* Should be called from a process context (might sleep)
|
||||
*
|
||||
* Returns 0 on success, or an appropriate error code on failure
|
||||
*/
|
||||
int hwspin_lock_unregister(struct hwspinlock_device *bank)
|
||||
{
|
||||
struct hwspinlock *hwlock, *tmp;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < bank->num_locks; i++) {
|
||||
hwlock = &bank->lock[i];
|
||||
|
||||
tmp = hwspin_lock_unregister_single(bank->base_id + i);
|
||||
if (!tmp)
|
||||
return -EBUSY;
|
||||
|
||||
/* self-sanity check that should never fail */
|
||||
WARN_ON(tmp != hwlock);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
|
||||
|
||||
/**
|
||||
@ -348,24 +402,25 @@ EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
|
||||
*/
|
||||
static int __hwspin_lock_request(struct hwspinlock *hwlock)
|
||||
{
|
||||
struct device *dev = hwlock->bank->dev;
|
||||
struct hwspinlock *tmp;
|
||||
int ret;
|
||||
|
||||
/* prevent underlying implementation from being removed */
|
||||
if (!try_module_get(hwlock->owner)) {
|
||||
dev_err(hwlock->dev, "%s: can't get owner\n", __func__);
|
||||
if (!try_module_get(dev->driver->owner)) {
|
||||
dev_err(dev, "%s: can't get owner\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* notify PM core that power is now needed */
|
||||
ret = pm_runtime_get_sync(hwlock->dev);
|
||||
ret = pm_runtime_get_sync(dev);
|
||||
if (ret < 0) {
|
||||
dev_err(hwlock->dev, "%s: can't power on device\n", __func__);
|
||||
dev_err(dev, "%s: can't power on device\n", __func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* mark hwspinlock as used, should not fail */
|
||||
tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock->id,
|
||||
tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
|
||||
HWSPINLOCK_UNUSED);
|
||||
|
||||
/* self-sanity check that should never fail */
|
||||
@ -387,7 +442,7 @@ int hwspin_lock_get_id(struct hwspinlock *hwlock)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return hwlock->id;
|
||||
return hwlock_to_id(hwlock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
|
||||
|
||||
@ -400,9 +455,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
|
||||
* to the remote core before it can be used for synchronization (to get the
|
||||
* id of a given hwlock, use hwspin_lock_get_id()).
|
||||
*
|
||||
* Can be called from an atomic context (will not sleep) but not from
|
||||
* within interrupt context (simply because there is no use case for
|
||||
* that yet).
|
||||
* Should be called from a process context (might sleep)
|
||||
*
|
||||
* Returns the address of the assigned hwspinlock, or NULL on error
|
||||
*/
|
||||
@ -411,7 +464,7 @@ struct hwspinlock *hwspin_lock_request(void)
|
||||
struct hwspinlock *hwlock;
|
||||
int ret;
|
||||
|
||||
spin_lock(&hwspinlock_tree_lock);
|
||||
mutex_lock(&hwspinlock_tree_lock);
|
||||
|
||||
/* look for an unused lock */
|
||||
ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
|
||||
@ -431,7 +484,7 @@ struct hwspinlock *hwspin_lock_request(void)
|
||||
hwlock = NULL;
|
||||
|
||||
out:
|
||||
spin_unlock(&hwspinlock_tree_lock);
|
||||
mutex_unlock(&hwspinlock_tree_lock);
|
||||
return hwlock;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hwspin_lock_request);
|
||||
@ -445,9 +498,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request);
|
||||
* Usually early board code will be calling this function in order to
|
||||
* reserve specific hwspinlock ids for predefined purposes.
|
||||
*
|
||||
* Can be called from an atomic context (will not sleep) but not from
|
||||
* within interrupt context (simply because there is no use case for
|
||||
* that yet).
|
||||
* Should be called from a process context (might sleep)
|
||||
*
|
||||
* Returns the address of the assigned hwspinlock, or NULL on error
|
||||
*/
|
||||
@ -456,7 +507,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
|
||||
struct hwspinlock *hwlock;
|
||||
int ret;
|
||||
|
||||
spin_lock(&hwspinlock_tree_lock);
|
||||
mutex_lock(&hwspinlock_tree_lock);
|
||||
|
||||
/* make sure this hwspinlock exists */
|
||||
hwlock = radix_tree_lookup(&hwspinlock_tree, id);
|
||||
@ -466,7 +517,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
|
||||
}
|
||||
|
||||
/* sanity check (this shouldn't happen) */
|
||||
WARN_ON(hwlock->id != id);
|
||||
WARN_ON(hwlock_to_id(hwlock) != id);
|
||||
|
||||
/* make sure this hwspinlock is unused */
|
||||
ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
|
||||
@ -482,7 +533,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
|
||||
hwlock = NULL;
|
||||
|
||||
out:
|
||||
spin_unlock(&hwspinlock_tree_lock);
|
||||
mutex_unlock(&hwspinlock_tree_lock);
|
||||
return hwlock;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
|
||||
@ -495,14 +546,13 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
|
||||
* Should only be called with an @hwlock that was retrieved from
|
||||
* an earlier call to omap_hwspin_lock_request{_specific}.
|
||||
*
|
||||
* Can be called from an atomic context (will not sleep) but not from
|
||||
* within interrupt context (simply because there is no use case for
|
||||
* that yet).
|
||||
* Should be called from a process context (might sleep)
|
||||
*
|
||||
* Returns 0 on success, or an appropriate error code on failure
|
||||
*/
|
||||
int hwspin_lock_free(struct hwspinlock *hwlock)
|
||||
{
|
||||
struct device *dev = hwlock->bank->dev;
|
||||
struct hwspinlock *tmp;
|
||||
int ret;
|
||||
|
||||
@ -511,34 +561,34 @@ int hwspin_lock_free(struct hwspinlock *hwlock)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
spin_lock(&hwspinlock_tree_lock);
|
||||
mutex_lock(&hwspinlock_tree_lock);
|
||||
|
||||
/* make sure the hwspinlock is used */
|
||||
ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
|
||||
ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
|
||||
HWSPINLOCK_UNUSED);
|
||||
if (ret == 1) {
|
||||
dev_err(hwlock->dev, "%s: hwlock is already free\n", __func__);
|
||||
dev_err(dev, "%s: hwlock is already free\n", __func__);
|
||||
dump_stack();
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* notify the underlying device that power is not needed */
|
||||
ret = pm_runtime_put(hwlock->dev);
|
||||
ret = pm_runtime_put(dev);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
/* mark this hwspinlock as available */
|
||||
tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
|
||||
tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
|
||||
HWSPINLOCK_UNUSED);
|
||||
|
||||
/* sanity check (this shouldn't happen) */
|
||||
WARN_ON(tmp != hwlock);
|
||||
|
||||
module_put(hwlock->owner);
|
||||
module_put(dev->driver->owner);
|
||||
|
||||
out:
|
||||
spin_unlock(&hwspinlock_tree_lock);
|
||||
mutex_unlock(&hwspinlock_tree_lock);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hwspin_lock_free);
|
||||
|
@ -21,6 +21,8 @@
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/device.h>
|
||||
|
||||
struct hwspinlock_device;
|
||||
|
||||
/**
|
||||
* struct hwspinlock_ops - platform-specific hwspinlock handlers
|
||||
*
|
||||
@ -39,23 +41,37 @@ struct hwspinlock_ops {
|
||||
|
||||
/**
|
||||
* struct hwspinlock - this struct represents a single hwspinlock instance
|
||||
*
|
||||
* @dev: underlying device, will be used to invoke runtime PM api
|
||||
* @ops: platform-specific hwspinlock handlers
|
||||
* @id: a global, unique, system-wide, index of the lock.
|
||||
* @bank: the hwspinlock_device structure which owns this lock
|
||||
* @lock: initialized and used by hwspinlock core
|
||||
* @owner: underlying implementation module, used to maintain module ref count
|
||||
*
|
||||
* Note: currently simplicity was opted for, but later we can squeeze some
|
||||
* memory bytes by grouping the dev, ops and owner members in a single
|
||||
* per-platform struct, and have all hwspinlocks point at it.
|
||||
* @priv: private data, owned by the underlying platform-specific hwspinlock drv
|
||||
*/
|
||||
struct hwspinlock {
|
||||
struct device *dev;
|
||||
const struct hwspinlock_ops *ops;
|
||||
int id;
|
||||
struct hwspinlock_device *bank;
|
||||
spinlock_t lock;
|
||||
struct module *owner;
|
||||
void *priv;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct hwspinlock_device - a device which usually spans numerous hwspinlocks
|
||||
* @dev: underlying device, will be used to invoke runtime PM api
|
||||
* @ops: platform-specific hwspinlock handlers
|
||||
* @base_id: id index of the first lock in this device
|
||||
* @num_locks: number of locks in this device
|
||||
* @lock: dynamically allocated array of 'struct hwspinlock'
|
||||
*/
|
||||
struct hwspinlock_device {
|
||||
struct device *dev;
|
||||
const struct hwspinlock_ops *ops;
|
||||
int base_id;
|
||||
int num_locks;
|
||||
struct hwspinlock lock[0];
|
||||
};
|
||||
|
||||
static inline int hwlock_to_id(struct hwspinlock *hwlock)
|
||||
{
|
||||
int local_id = hwlock - &hwlock->bank->lock[0];
|
||||
|
||||
return hwlock->bank->base_id + local_id;
|
||||
}
|
||||
|
||||
#endif /* __HWSPINLOCK_HWSPINLOCK_H */
|
||||
|
@ -41,33 +41,20 @@
|
||||
#define SPINLOCK_NOTTAKEN (0) /* free */
|
||||
#define SPINLOCK_TAKEN (1) /* locked */
|
||||
|
||||
#define to_omap_hwspinlock(lock) \
|
||||
container_of(lock, struct omap_hwspinlock, lock)
|
||||
|
||||
struct omap_hwspinlock {
|
||||
struct hwspinlock lock;
|
||||
void __iomem *addr;
|
||||
};
|
||||
|
||||
struct omap_hwspinlock_state {
|
||||
int num_locks; /* Total number of locks in system */
|
||||
void __iomem *io_base; /* Mapped base address */
|
||||
};
|
||||
|
||||
static int omap_hwspinlock_trylock(struct hwspinlock *lock)
|
||||
{
|
||||
struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock);
|
||||
void __iomem *lock_addr = lock->priv;
|
||||
|
||||
/* attempt to acquire the lock by reading its value */
|
||||
return (SPINLOCK_NOTTAKEN == readl(omap_lock->addr));
|
||||
return (SPINLOCK_NOTTAKEN == readl(lock_addr));
|
||||
}
|
||||
|
||||
static void omap_hwspinlock_unlock(struct hwspinlock *lock)
|
||||
{
|
||||
struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock);
|
||||
void __iomem *lock_addr = lock->priv;
|
||||
|
||||
/* release the lock by writing 0 to it */
|
||||
writel(SPINLOCK_NOTTAKEN, omap_lock->addr);
|
||||
writel(SPINLOCK_NOTTAKEN, lock_addr);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -93,26 +80,23 @@ static const struct hwspinlock_ops omap_hwspinlock_ops = {
|
||||
|
||||
static int __devinit omap_hwspinlock_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct omap_hwspinlock *omap_lock;
|
||||
struct omap_hwspinlock_state *state;
|
||||
struct hwspinlock *lock;
|
||||
struct hwspinlock_pdata *pdata = pdev->dev.platform_data;
|
||||
struct hwspinlock_device *bank;
|
||||
struct hwspinlock *hwlock;
|
||||
struct resource *res;
|
||||
void __iomem *io_base;
|
||||
int i, ret;
|
||||
int num_locks, i, ret;
|
||||
|
||||
if (!pdata)
|
||||
return -ENODEV;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
if (!res)
|
||||
return -ENODEV;
|
||||
|
||||
state = kzalloc(sizeof(*state), GFP_KERNEL);
|
||||
if (!state)
|
||||
return -ENOMEM;
|
||||
|
||||
io_base = ioremap(res->start, resource_size(res));
|
||||
if (!io_base) {
|
||||
ret = -ENOMEM;
|
||||
goto free_state;
|
||||
}
|
||||
if (!io_base)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Determine number of locks */
|
||||
i = readl(io_base + SYSSTATUS_OFFSET);
|
||||
@ -124,10 +108,18 @@ static int __devinit omap_hwspinlock_probe(struct platform_device *pdev)
|
||||
goto iounmap_base;
|
||||
}
|
||||
|
||||
state->num_locks = i * 32;
|
||||
state->io_base = io_base;
|
||||
num_locks = i * 32; /* actual number of locks in this device */
|
||||
|
||||
platform_set_drvdata(pdev, state);
|
||||
bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock), GFP_KERNEL);
|
||||
if (!bank) {
|
||||
ret = -ENOMEM;
|
||||
goto iounmap_base;
|
||||
}
|
||||
|
||||
platform_set_drvdata(pdev, bank);
|
||||
|
||||
for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++)
|
||||
hwlock->priv = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;
|
||||
|
||||
/*
|
||||
* runtime PM will make sure the clock of this module is
|
||||
@ -135,79 +127,46 @@ static int __devinit omap_hwspinlock_probe(struct platform_device *pdev)
|
||||
*/
|
||||
pm_runtime_enable(&pdev->dev);
|
||||
|
||||
for (i = 0; i < state->num_locks; i++) {
|
||||
omap_lock = kzalloc(sizeof(*omap_lock), GFP_KERNEL);
|
||||
if (!omap_lock) {
|
||||
ret = -ENOMEM;
|
||||
goto free_locks;
|
||||
}
|
||||
|
||||
omap_lock->lock.dev = &pdev->dev;
|
||||
omap_lock->lock.owner = THIS_MODULE;
|
||||
omap_lock->lock.id = i;
|
||||
omap_lock->lock.ops = &omap_hwspinlock_ops;
|
||||
omap_lock->addr = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;
|
||||
|
||||
ret = hwspin_lock_register(&omap_lock->lock);
|
||||
if (ret) {
|
||||
kfree(omap_lock);
|
||||
goto free_locks;
|
||||
}
|
||||
}
|
||||
ret = hwspin_lock_register(bank, &pdev->dev, &omap_hwspinlock_ops,
|
||||
pdata->base_id, num_locks);
|
||||
if (ret)
|
||||
goto reg_fail;
|
||||
|
||||
return 0;
|
||||
|
||||
free_locks:
|
||||
while (--i >= 0) {
|
||||
lock = hwspin_lock_unregister(i);
|
||||
/* this shouldn't happen, but let's give our best effort */
|
||||
if (!lock) {
|
||||
dev_err(&pdev->dev, "%s: cleanups failed\n", __func__);
|
||||
continue;
|
||||
}
|
||||
omap_lock = to_omap_hwspinlock(lock);
|
||||
kfree(omap_lock);
|
||||
}
|
||||
reg_fail:
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
kfree(bank);
|
||||
iounmap_base:
|
||||
iounmap(io_base);
|
||||
free_state:
|
||||
kfree(state);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int omap_hwspinlock_remove(struct platform_device *pdev)
|
||||
static int __devexit omap_hwspinlock_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct omap_hwspinlock_state *state = platform_get_drvdata(pdev);
|
||||
struct hwspinlock *lock;
|
||||
struct omap_hwspinlock *omap_lock;
|
||||
int i;
|
||||
struct hwspinlock_device *bank = platform_get_drvdata(pdev);
|
||||
void __iomem *io_base = bank->lock[0].priv - LOCK_BASE_OFFSET;
|
||||
int ret;
|
||||
|
||||
for (i = 0; i < state->num_locks; i++) {
|
||||
lock = hwspin_lock_unregister(i);
|
||||
/* this shouldn't happen at this point. if it does, at least
|
||||
* don't continue with the remove */
|
||||
if (!lock) {
|
||||
dev_err(&pdev->dev, "%s: failed on %d\n", __func__, i);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
omap_lock = to_omap_hwspinlock(lock);
|
||||
kfree(omap_lock);
|
||||
ret = hwspin_lock_unregister(bank);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
iounmap(state->io_base);
|
||||
kfree(state);
|
||||
iounmap(io_base);
|
||||
kfree(bank);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct platform_driver omap_hwspinlock_driver = {
|
||||
.probe = omap_hwspinlock_probe,
|
||||
.remove = omap_hwspinlock_remove,
|
||||
.remove = __devexit_p(omap_hwspinlock_remove),
|
||||
.driver = {
|
||||
.name = "omap_hwspinlock",
|
||||
.owner = THIS_MODULE,
|
||||
},
|
||||
};
|
||||
|
||||
|
drivers/hwspinlock/u8500_hsem.c (new file)
@ -0,0 +1,198 @@
|
||||
/*
|
||||
* u8500 HWSEM driver
|
||||
*
|
||||
* Copyright (C) 2010-2011 ST-Ericsson
|
||||
*
|
||||
* Implements u8500 semaphore handling for protocol 1, no interrupts.
|
||||
*
|
||||
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
|
||||
* Heavily borrowed from the work of :
|
||||
* Simon Que <sque@ti.com>
|
||||
* Hari Kanigeri <h-kanigeri2@ti.com>
|
||||
* Ohad Ben-Cohen <ohad@wizery.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/hwspinlock.h>
|
||||
#include <linux/platform_device.h>
|
||||
|
||||
#include "hwspinlock_internal.h"
|
||||
|
||||
/*
|
||||
* Implementation of STE's HSem protocol 1 without interrupts.
|
||||
* The only masterID we allow is '0x01' to force people to use
|
||||
* HSems for synchronisation between processors rather than processes
|
||||
* on the ARM core.
|
||||
*/
|
||||
|
||||
#define U8500_MAX_SEMAPHORE 32 /* a total of 32 semaphores */
|
||||
#define RESET_SEMAPHORE (0) /* free */
|
||||
|
||||
/*
|
||||
* CPU ID for master running u8500 kernel.
|
||||
* Hwspinlocks should only be used to synchronise operations
|
||||
* between the Cortex A9 core and the other CPUs. Hence
|
||||
* forcing the masterID to a preset value.
|
||||
*/
|
||||
#define HSEM_MASTER_ID 0x01
|
||||
|
||||
#define HSEM_REGISTER_OFFSET 0x08
|
||||
|
||||
#define HSEM_CTRL_REG 0x00
|
||||
#define HSEM_ICRALL 0x90
|
||||
#define HSEM_PROTOCOL_1 0x01
|
||||
|
||||
static int u8500_hsem_trylock(struct hwspinlock *lock)
|
||||
{
|
||||
void __iomem *lock_addr = lock->priv;
|
||||
|
||||
writel(HSEM_MASTER_ID, lock_addr);
|
||||
|
||||
/* get only first 4 bit and compare to masterID.
|
||||
* if equal, we have the semaphore, otherwise
|
||||
* someone else has it.
|
||||
*/
|
||||
return (HSEM_MASTER_ID == (0x0F & readl(lock_addr)));
|
||||
}
|
||||
|
||||
static void u8500_hsem_unlock(struct hwspinlock *lock)
|
||||
{
|
||||
void __iomem *lock_addr = lock->priv;
|
||||
|
||||
/* release the lock by writing 0 to it */
|
||||
writel(RESET_SEMAPHORE, lock_addr);
|
||||
}
|
||||
|
||||
/*
|
||||
* u8500: what value is recommended here ?
|
||||
*/
|
||||
static void u8500_hsem_relax(struct hwspinlock *lock)
|
||||
{
|
||||
ndelay(50);
|
||||
}
|
||||
|
||||
static const struct hwspinlock_ops u8500_hwspinlock_ops = {
|
||||
.trylock = u8500_hsem_trylock,
|
||||
.unlock = u8500_hsem_unlock,
|
||||
.relax = u8500_hsem_relax,
|
||||
};
|
||||
|
||||
static int __devinit u8500_hsem_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct hwspinlock_pdata *pdata = pdev->dev.platform_data;
|
||||
struct hwspinlock_device *bank;
|
||||
struct hwspinlock *hwlock;
|
||||
struct resource *res;
|
||||
void __iomem *io_base;
|
||||
int i, ret, num_locks = U8500_MAX_SEMAPHORE;
|
||||
ulong val;
|
||||
|
||||
if (!pdata)
|
||||
return -ENODEV;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
if (!res)
|
||||
return -ENODEV;
|
||||
|
||||
io_base = ioremap(res->start, resource_size(res));
|
||||
if (!io_base) {
|
||||
ret = -ENOMEM;
|
||||
goto free_state;
|
||||
}
|
||||
|
||||
/* make sure protocol 1 is selected */
|
||||
val = readl(io_base + HSEM_CTRL_REG);
|
||||
writel((val & ~HSEM_PROTOCOL_1), io_base + HSEM_CTRL_REG);
|
||||
|
||||
/* clear all interrupts */
|
||||
writel(0xFFFF, io_base + HSEM_ICRALL);
|
||||
|
||||
bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock), GFP_KERNEL);
|
||||
if (!bank) {
|
||||
ret = -ENOMEM;
|
||||
goto iounmap_base;
|
||||
}
|
||||
|
||||
platform_set_drvdata(pdev, bank);
|
||||
|
||||
for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++)
|
||||
hwlock->priv = io_base + HSEM_REGISTER_OFFSET + sizeof(u32) * i;
|
||||
|
||||
/* no pm needed for HSem but required to comply with hwspinlock core */
|
||||
pm_runtime_enable(&pdev->dev);
|
||||
|
||||
ret = hwspin_lock_register(bank, &pdev->dev, &u8500_hwspinlock_ops,
|
||||
pdata->base_id, num_locks);
|
||||
if (ret)
|
||||
goto reg_fail;
|
||||
|
||||
return 0;
|
||||
|
||||
reg_fail:
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
kfree(bank);
|
||||
iounmap_base:
|
||||
iounmap(io_base);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __devexit u8500_hsem_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct hwspinlock_device *bank = platform_get_drvdata(pdev);
|
||||
void __iomem *io_base = bank->lock[0].priv - HSEM_REGISTER_OFFSET;
|
||||
int ret;
|
||||
|
||||
/* clear all interrupts */
|
||||
writel(0xFFFF, io_base + HSEM_ICRALL);
|
||||
|
||||
ret = hwspin_lock_unregister(bank);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
iounmap(io_base);
|
||||
kfree(bank);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct platform_driver u8500_hsem_driver = {
|
||||
.probe = u8500_hsem_probe,
|
||||
.remove = __devexit_p(u8500_hsem_remove),
|
||||
.driver = {
|
||||
.name = "u8500_hsem",
|
||||
.owner = THIS_MODULE,
|
||||
},
|
||||
};
|
||||
|
||||
static int __init u8500_hsem_init(void)
|
||||
{
|
||||
return platform_driver_register(&u8500_hsem_driver);
|
||||
}
|
||||
/* board init code might need to reserve hwspinlocks for predefined purposes */
|
||||
postcore_initcall(u8500_hsem_init);
|
||||
|
||||
static void __exit u8500_hsem_exit(void)
|
||||
{
|
||||
platform_driver_unregister(&u8500_hsem_driver);
|
||||
}
|
||||
module_exit(u8500_hsem_exit);
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_DESCRIPTION("Hardware Spinlock driver for u8500");
|
||||
MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
|
@ -1270,7 +1270,7 @@ static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)
|
||||
}
|
||||
} else {
|
||||
if (!host->protect_card) {
|
||||
pr_info"%s: cover is open, "
|
||||
pr_info("%s: cover is open, "
|
||||
"card is now inaccessible\n",
|
||||
mmc_hostname(host->mmc));
|
||||
host->protect_card = 1;
|
||||
|
@ -20,17 +20,49 @@
|
||||
|
||||
#include <linux/err.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/device.h>
|
||||
|
||||
/* hwspinlock mode argument */
|
||||
#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
|
||||
#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
|
||||
|
||||
struct hwspinlock;
|
||||
struct hwspinlock_device;
|
||||
struct hwspinlock_ops;
|
||||
|
||||
/**
 * struct hwspinlock_pdata - platform data for hwspinlock drivers
 * @base_id: base id for this hwspinlock device
 *
 * hwspinlock devices provide system-wide hardware locks that are used
 * by remote processors that have no other way to achieve synchronization.
 *
 * To achieve that, each physical lock must have a system-wide id number
 * that is agreed upon, otherwise remote processors can't possibly assume
 * they're using the same hardware lock.
 *
 * Usually boards have a single hwspinlock device, which provides several
 * hwspinlocks, and in this case, they can be trivially numbered 0 to
 * (num-of-locks - 1).
 *
 * In case boards have several hwspinlock devices, a different base id
 * should be used for each hwspinlock device (they can't all use 0 as
 * a starting id!).
 *
 * This platform data structure should be used to provide the base id
 * for each device (which is trivially 0 when only a single hwspinlock
 * device exists). It can be shared between different platforms, hence
 * its location.
 */
struct hwspinlock_pdata {
	int base_id;
};
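As a hedged illustration of the base_id rule spelled out above, board code for a hypothetical board with two hwspinlock devices (each assumed to provide 32 locks) could supply platform data along these lines:

#include <linux/hwspinlock.h>
#include <linux/init.h>

/* first device provides global lock ids 0..31 */
static struct hwspinlock_pdata hsem0_pdata __initdata = {
	.base_id = 0,
};

/* second device must not overlap, so it starts at 32 (ids 32..63) */
static struct hwspinlock_pdata hsem1_pdata __initdata = {
	.base_id = 32,
};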
#if defined(CONFIG_HWSPINLOCK) || defined(CONFIG_HWSPINLOCK_MODULE)
|
||||
|
||||
int hwspin_lock_register(struct hwspinlock *lock);
|
||||
struct hwspinlock *hwspin_lock_unregister(unsigned int id);
|
||||
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
|
||||
const struct hwspinlock_ops *ops, int base_id, int num_locks);
|
||||
int hwspin_lock_unregister(struct hwspinlock_device *bank);
|
||||
struct hwspinlock *hwspin_lock_request(void);
|
||||
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
|
||||
int hwspin_lock_free(struct hwspinlock *hwlock);
|
||||
@ -94,16 +126,6 @@ static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int hwspin_lock_register(struct hwspinlock *hwlock)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static inline struct hwspinlock *hwspin_lock_unregister(unsigned int id)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#endif /* !CONFIG_HWSPINLOCK */
|
||||
|
||||
/**
|
||||
|