Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  lockdep: Check the depth of subclass
  lockdep: Add improved subclass caching
  affs: Use sema_init instead of init_MUTEX
  hfs: Convert tree_lock to mutex
  arm: Bcmring: semaphore cleanup
  printk: Make console_sem a semaphore not a pseudo mutex
  drivers/macintosh/adb: Do not claim that the semaphore is a mutex
  parport: Semaphore cleanup
  irda: Semaphore cleanup
  net: Wan/cosa.c: Convert "mutex" to semaphore
  net: Ppp_async: semaphore cleanup
  hamradio: Mkiss: semaphore cleanup
  hamradio: 6pack: semaphore cleanup
  net: 3c527: semaphore cleanup
  input: Serio/hp_sdc: semaphore cleanup
  input: Serio/hil_mlc: semaphore cleanup
  input: Misc/hp_sdc_rtc: semaphore cleanup
  lockup_detector: Make callback function static
  lockup detector: Fix grammar by adding a missing "to" in the comments
  lockdep: Remove __debug_show_held_locks
commit 31b7eab27a
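The whole series applies one mechanical conversion, so a minimal sketch may help (illustrative only, not part of the patch; the demo_* names are hypothetical). The old init_MUTEX() helper initialized a binary semaphore unlocked, init_MUTEX_LOCKED() initialized it locked, and DECLARE_MUTEX() was the compile-time equivalent, so each call site maps directly onto sema_init() with an explicit count or onto DEFINE_SEMAPHORE():

#include <linux/semaphore.h>

static struct semaphore demo_sem;	/* hypothetical lock, for illustration */

static void demo_init(void)
{
	/* was: init_MUTEX(&demo_sem);        count 1 = starts unlocked */
	sema_init(&demo_sem, 1);

	/* was: init_MUTEX_LOCKED(&demo_sem); count 0 = starts locked */
	sema_init(&demo_sem, 0);
}

/* was: static DECLARE_MUTEX(demo_static_sem); -- static initializer, count 1 */
static DEFINE_SEMAPHORE(demo_static_sem);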
@@ -691,7 +691,7 @@ int dma_init(void)
 
 	memset(&gDMA, 0, sizeof(gDMA));
 
-	init_MUTEX_LOCKED(&gDMA.lock);
+	sema_init(&gDMA.lock, 0);
 	init_waitqueue_head(&gDMA.freeChannelQ);
 
 	/* Initialize the Hardware */
@@ -1574,7 +1574,7 @@ int dma_init_mem_map(DMA_MemMap_t *memMap)
 {
 	memset(memMap, 0, sizeof(*memMap));
 
-	init_MUTEX(&memMap->lock);
+	sema_init(&memMap->lock, 1);
 
 	return 0;
 }
@@ -104,7 +104,7 @@ static int hp_sdc_rtc_do_read_bbrtc (struct rtc_time *rtctm)
 	t.endidx = 91;
 	t.seq = tseq;
 	t.act.semaphore = &tsem;
-	init_MUTEX_LOCKED(&tsem);
+	sema_init(&tsem, 0);
 
 	if (hp_sdc_enqueue_transaction(&t)) return -1;
 
@@ -698,7 +698,7 @@ static int __init hp_sdc_rtc_init(void)
 		return -ENODEV;
 #endif
 
-	init_MUTEX(&i8042tregs);
+	sema_init(&i8042tregs, 1);
 
 	if ((ret = hp_sdc_request_timer_irq(&hp_sdc_rtc_isr)))
 		return ret;
@@ -915,15 +915,15 @@ int hil_mlc_register(hil_mlc *mlc)
 	mlc->ostarted = 0;
 
 	rwlock_init(&mlc->lock);
-	init_MUTEX(&mlc->osem);
+	sema_init(&mlc->osem, 1);
 
-	init_MUTEX(&mlc->isem);
+	sema_init(&mlc->isem, 1);
 	mlc->icount = -1;
 	mlc->imatch = 0;
 
 	mlc->opercnt = 0;
 
-	init_MUTEX_LOCKED(&(mlc->csem));
+	sema_init(&(mlc->csem), 0);
 
 	hil_mlc_clear_di_scratch(mlc);
 	hil_mlc_clear_di_map(mlc, 0);
@@ -905,7 +905,7 @@ static int __init hp_sdc_init(void)
 	ts_sync[1] = 0x0f;
 	ts_sync[2] = ts_sync[3] = ts_sync[4] = ts_sync[5] = 0;
 	t_sync.act.semaphore = &s_sync;
-	init_MUTEX_LOCKED(&s_sync);
+	sema_init(&s_sync, 0);
 	hp_sdc_enqueue_transaction(&t_sync);
 	down(&s_sync); /* Wait for t_sync to complete */
 
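The count-0 initialization in this hunk is the signaling idiom rather than mutual exclusion: the first down() blocks until a completion path calls up(), which is exactly how hp_sdc waits for its queued transaction above. A hedged sketch of that pattern, with hypothetical demo_* names:

#include <linux/semaphore.h>

static struct semaphore demo_done;	/* hypothetical completion signal */

static void demo_complete(void)		/* completion side, e.g. an ISR */
{
	up(&demo_done);			/* count 0 -> 1: wakes the waiter */
}

static void demo_wait_for_hw(void)
{
	sema_init(&demo_done, 0);	/* nothing to consume yet */
	/* ... enqueue work whose handler ends in demo_complete() ... */
	down(&demo_done);		/* sleeps until up() is called */
}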
@@ -1039,7 +1039,7 @@ static int __init hp_sdc_register(void)
 		return hp_sdc.dev_err;
 	}
 
-	init_MUTEX_LOCKED(&tq_init_sem);
+	sema_init(&tq_init_sem, 0);
 
 	tq_init.actidx = 0;
 	tq_init.idx = 1;
@@ -83,7 +83,7 @@ static struct adb_driver *adb_controller;
 BLOCKING_NOTIFIER_HEAD(adb_client_list);
 static int adb_got_sleep;
 static int adb_inited;
-static DECLARE_MUTEX(adb_probe_mutex);
+static DEFINE_SEMAPHORE(adb_probe_mutex);
 static int sleepy_trackpad;
 static int autopoll_devs;
 int __adb_probe_sync;
@@ -522,7 +522,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
 	lp->tx_len = lp->exec_box->data[9];   /* Transmit list count */
 	lp->rx_len = lp->exec_box->data[11];  /* Receive list count */
 
-	init_MUTEX_LOCKED(&lp->cmd_mutex);
+	sema_init(&lp->cmd_mutex, 0);
 	init_completion(&lp->execution_cmd);
 	init_completion(&lp->xceiver_cmd);
 
@@ -608,7 +608,7 @@ static int sixpack_open(struct tty_struct *tty)
 
 	spin_lock_init(&sp->lock);
 	atomic_set(&sp->refcnt, 1);
-	init_MUTEX_LOCKED(&sp->dead_sem);
+	sema_init(&sp->dead_sem, 0);
 
 	/* !!! length of the buffers. MTU is IP MTU, not PACLEN! */
 
@@ -747,7 +747,7 @@ static int mkiss_open(struct tty_struct *tty)
 
 	spin_lock_init(&ax->buflock);
 	atomic_set(&ax->refcnt, 1);
-	init_MUTEX_LOCKED(&ax->dead_sem);
+	sema_init(&ax->dead_sem, 0);
 
 	ax->tty = tty;
 	tty->disc_data = ax;
@@ -909,7 +909,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
 	dev->tx_skb = NULL;
 
 	spin_lock_init(&dev->tx_lock);
-	init_MUTEX(&dev->fsm.sem);
+	sema_init(&dev->fsm.sem, 1);
 
 	dev->drv = drv;
 	dev->netdev = ndev;
@@ -184,7 +184,7 @@ ppp_asynctty_open(struct tty_struct *tty)
 	tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);
 
 	atomic_set(&ap->refcnt, 1);
-	init_MUTEX_LOCKED(&ap->dead_sem);
+	sema_init(&ap->dead_sem, 0);
 
 	ap->chan.private = ap;
 	ap->chan.ops = &async_ops;
@@ -575,7 +575,7 @@ static int cosa_probe(int base, int irq, int dma)
 
 	/* Initialize the chardev data structures */
 	mutex_init(&chan->rlock);
-	init_MUTEX(&chan->wsem);
+	sema_init(&chan->wsem, 1);
 
 	/* Register the network interface */
 	if (!(chan->netdev = alloc_hdlcdev(chan))) {
@@ -306,7 +306,7 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma,
 	spin_lock_init(&tmp->pardevice_lock);
 	tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
 	tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
-	init_MUTEX_LOCKED (&tmp->ieee1284.irq); /* actually a semaphore at 0 */
+	sema_init(&tmp->ieee1284.irq, 0);
 	tmp->spintime = parport_default_spintime;
 	atomic_set (&tmp->ref_count, 1);
 	INIT_LIST_HEAD(&tmp->full_list);
@@ -109,8 +109,8 @@ static void init_once(void *foo)
 {
 	struct affs_inode_info *ei = (struct affs_inode_info *) foo;
 
-	init_MUTEX(&ei->i_link_lock);
-	init_MUTEX(&ei->i_ext_lock);
+	sema_init(&ei->i_link_lock, 1);
+	sema_init(&ei->i_ext_lock, 1);
 	inode_init_once(&ei->vfs_inode);
 }
 
@@ -23,7 +23,7 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
 	fd->search_key = ptr;
 	fd->key = ptr + tree->max_key_len + 2;
 	dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n", tree->cnid, __builtin_return_address(0));
-	down(&tree->tree_lock);
+	mutex_lock(&tree->tree_lock);
 	return 0;
 }
 
@@ -32,7 +32,7 @@ void hfs_find_exit(struct hfs_find_data *fd)
 	hfs_bnode_put(fd->bnode);
 	kfree(fd->search_key);
 	dprint(DBG_BNODE_REFS, "find_exit: %d (%p)\n", fd->tree->cnid, __builtin_return_address(0));
-	up(&fd->tree->tree_lock);
+	mutex_unlock(&fd->tree->tree_lock);
 	fd->tree = NULL;
 }
 
@@ -27,7 +27,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
 	if (!tree)
 		return NULL;
 
-	init_MUTEX(&tree->tree_lock);
+	mutex_init(&tree->tree_lock);
 	spin_lock_init(&tree->hash_lock);
 	/* Set the correct compare function */
 	tree->sb = sb;
@@ -33,7 +33,7 @@ struct hfs_btree {
 	unsigned int depth;
 
 	//unsigned int map1_size, map_size;
-	struct semaphore tree_lock;
+	struct mutex tree_lock;
 
 	unsigned int pages_per_bnode;
 	spinlock_t hash_lock;
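Unlike the sema_init() conversions elsewhere in the series, the hfs hunks above change the lock type itself, which brings tree_lock under lockdep validation. A condensed, hypothetical sketch of the resulting usage pattern (demo_* names are not from the patch):

#include <linux/mutex.h>

struct demo_btree {
	struct mutex tree_lock;		/* was: struct semaphore tree_lock; */
};

static void demo_btree_open(struct demo_btree *tree)
{
	mutex_init(&tree->tree_lock);	/* was: init_MUTEX(&tree->tree_lock); */
}

static void demo_find(struct demo_btree *tree)
{
	mutex_lock(&tree->tree_lock);	/* was: down(&tree->tree_lock); */
	/* ... walk the tree ... */
	mutex_unlock(&tree->tree_lock);	/* was: up(&tree->tree_lock); */
}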
@@ -49,7 +49,6 @@ struct task_struct;
 
 #ifdef CONFIG_LOCKDEP
 extern void debug_show_all_locks(void);
-extern void __debug_show_held_locks(struct task_struct *task);
 extern void debug_show_held_locks(struct task_struct *task);
 extern void debug_check_no_locks_freed(const void *from, unsigned long len);
 extern void debug_check_no_locks_held(struct task_struct *task);
@@ -58,10 +57,6 @@ static inline void debug_show_all_locks(void)
 {
 }
 
-static inline void __debug_show_held_locks(struct task_struct *task)
-{
-}
-
 static inline void debug_show_held_locks(struct task_struct *task)
 {
 }
@@ -31,6 +31,17 @@ extern int lock_stat;
 
 #define MAX_LOCKDEP_SUBCLASSES		8UL
 
+/*
+ * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
+ * cached in the instance of lockdep_map
+ *
+ * Currently main class (subclass == 0) and single depth subclass
+ * are cached in lockdep_map. This optimization is mainly targeting
+ * on rq->lock. double_rq_lock() acquires this highly competitive with
+ * single depth.
+ */
+#define NR_LOCKDEP_CACHING_CLASSES	2
+
 /*
  * Lock-classes are keyed via unique addresses, by embedding the
  * lockclass-key into the kernel (or module) .data section. (For
@@ -138,7 +149,7 @@ void clear_lock_stats(struct lock_class *class);
 */
 struct lockdep_map {
 	struct lock_class_key *key;
-	struct lock_class *class_cache;
+	struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
 	const char *name;
 #ifdef CONFIG_LOCK_STAT
 	int cpu;
@@ -98,7 +98,7 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 		printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
 			" disables this message.\n");
 		sched_show_task(t);
-		__debug_show_held_locks(t);
+		debug_show_held_locks(t);
 
 		touch_nmi_watchdog();
 
@@ -111,7 +111,7 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 	 * periodically exit the critical section and enter a new one.
 	 *
 	 * For preemptible RCU it is sufficient to call rcu_read_unlock in order
-	 * exit the grace period. For classic RCU, a reschedule is required.
+	 * to exit the grace period. For classic RCU, a reschedule is required.
 	 */
 static void rcu_lock_break(struct task_struct *g, struct task_struct *t)
 {
@@ -639,6 +639,16 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	}
 #endif
 
+	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
+		debug_locks_off();
+		printk(KERN_ERR
+			"BUG: looking up invalid subclass: %u\n", subclass);
+		printk(KERN_ERR
+			"turning off the locking correctness validator.\n");
+		dump_stack();
+		return NULL;
+	}
+
 	/*
 	 * Static locks do not have their class-keys yet - for them the key
 	 * is the lock object itself:
@@ -774,7 +784,9 @@ out_unlock_set:
 	raw_local_irq_restore(flags);
 
 	if (!subclass || force)
-		lock->class_cache = class;
+		lock->class_cache[0] = class;
+	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
+		lock->class_cache[subclass] = class;
 
 	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
 		return NULL;
@@ -2679,7 +2691,11 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		      struct lock_class_key *key, int subclass)
 {
-	lock->class_cache = NULL;
+	int i;
+
+	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+		lock->class_cache[i] = NULL;
+
 #ifdef CONFIG_LOCK_STAT
 	lock->cpu = raw_smp_processor_id();
 #endif
@@ -2739,21 +2755,13 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return 0;
 
-	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
-		debug_locks_off();
-		printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
-		printk("turning off the locking correctness validator.\n");
-		dump_stack();
-		return 0;
-	}
-
 	if (lock->key == &__lockdep_no_validate__)
 		check = 1;
 
-	if (!subclass)
-		class = lock->class_cache;
+	if (subclass < NR_LOCKDEP_CACHING_CLASSES)
+		class = lock->class_cache[subclass];
 	/*
-	 * Not cached yet or subclass?
+	 * Not cached?
 	 */
 	if (unlikely(!class)) {
 		class = register_lock_class(lock, subclass, 0);
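The two lockdep hunks above implement the subclass caching that the series' comment describes: lookups for shallow subclasses hit a small per-map array, and only cache misses take the registration slow path. A hypothetical, self-contained sketch of that lookup order (demo_* names and the stub body are mine, not the kernel code):

#include <stddef.h>

#define NR_LOCKDEP_CACHING_CLASSES 2

struct lock_class;

struct demo_lockdep_map {		/* hypothetical mirror of lockdep_map */
	struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
};

static struct lock_class *demo_register_class(struct demo_lockdep_map *lock,
					      unsigned int subclass)
{
	/* stand-in for register_lock_class(): resolve, cache, and return */
	return NULL;
}

static struct lock_class *demo_look_up(struct demo_lockdep_map *lock,
				       unsigned int subclass)
{
	struct lock_class *class = NULL;

	if (subclass < NR_LOCKDEP_CACHING_CLASSES)	/* fast path: cached */
		class = lock->class_cache[subclass];
	if (!class)					/* miss: slow path */
		class = demo_register_class(lock, subclass);
	return class;
}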
@@ -2918,7 +2926,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 		return 1;
 
 	if (hlock->references) {
-		struct lock_class *class = lock->class_cache;
+		struct lock_class *class = lock->class_cache[0];
 
 		if (!class)
 			class = look_up_lock_class(lock, 0);
@@ -3559,7 +3567,12 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 		if (list_empty(head))
 			continue;
 		list_for_each_entry_safe(class, next, head, hash_entry) {
-			if (unlikely(class == lock->class_cache)) {
+			int match = 0;
+
+			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
+				match |= class == lock->class_cache[j];
+
+			if (unlikely(match)) {
 				if (debug_locks_off_graph_unlock())
 					WARN_ON(1);
 				goto out_restore;
@@ -3775,7 +3788,7 @@ EXPORT_SYMBOL_GPL(debug_show_all_locks);
  * Careful: only use this function if you are sure that
  * the task cannot run in parallel!
  */
-void __debug_show_held_locks(struct task_struct *task)
+void debug_show_held_locks(struct task_struct *task)
 {
 	if (unlikely(!debug_locks)) {
 		printk("INFO: lockdep is turned off.\n");
@@ -3783,12 +3796,6 @@ void __debug_show_held_locks(struct task_struct *task)
 	}
 	lockdep_print_held_locks(task);
 }
-EXPORT_SYMBOL_GPL(__debug_show_held_locks);
-
-void debug_show_held_locks(struct task_struct *task)
-{
-	__debug_show_held_locks(task);
-}
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
 void lockdep_sys_exit(void)
@@ -85,7 +85,7 @@ EXPORT_SYMBOL(oops_in_progress);
  * provides serialisation for access to the entire console
  * driver system.
  */
-static DECLARE_MUTEX(console_sem);
+static DEFINE_SEMAPHORE(console_sem);
 struct console *console_drivers;
 EXPORT_SYMBOL_GPL(console_drivers);
 
@@ -556,7 +556,7 @@ static void zap_locks(void)
 	/* If a crash is occurring, make sure we can't deadlock */
 	spin_lock_init(&logbuf_lock);
 	/* And make sure that we print immediately */
-	init_MUTEX(&console_sem);
+	sema_init(&console_sem, 1);
 }
 
 #if defined(CONFIG_PRINTK_TIME)
@@ -209,7 +209,7 @@ static struct perf_event_attr wd_hw_attr = {
 };
 
 /* Callback function for perf event subsystem */
-void watchdog_overflow_callback(struct perf_event *event, int nmi,
+static void watchdog_overflow_callback(struct perf_event *event, int nmi,
 		 struct perf_sample_data *data,
 		 struct pt_regs *regs)
 {