mm, slub: make slab_lock() disable irqs with PREEMPT_RT
We need to disable irqs around slab_lock() (a bit spinlock) to make it irq-safe. Most calls to slab_lock() are nested under spin_lock_irqsave() which doesn't disable irqs on PREEMPT_RT, so add explicit disabling with PREEMPT_RT.

The exception is cmpxchg_double_slab() which already disables irqs, so use a __slab_[un]lock() variant without irq disable there.

slab_[un]lock() thus needs a flags pointer parameter, which is unused on !RT. free_debug_processing() now has two flags variables, which looks odd, but only one is actually used - the one used in spin_lock_irqsave() on !RT and the one used in slab_lock() on RT.

As a result, __cmpxchg_double_slab() and cmpxchg_double_slab() become effectively identical on RT, as both will disable irqs, which is necessary on RT as most callers of this function also rely on irqsaving lock operations. Thus, assert that irqs are already disabled in __cmpxchg_double_slab() only on !RT and also change the VM_BUG_ON assertion to the more standard lockdep_assert one.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent 94ef0304e2
commit a2b4ae8bfd

mm/slub.c | 58
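Before the diff itself, here is a minimal, self-contained userspace sketch of the pattern the patch relies on: a flags pointer that is only touched when the RT switch is compiled in, so that on !RT builds the wrapper collapses to the plain bit spinlock and the parameter stays unused. Everything here is illustrative, not kernel code: the *_sketch names, the fake_irq_state variable, and the stubbed CONFIG_PREEMPT_RT / IS_ENABLED macros are assumptions made for the example.

#include <stdio.h>

/* Stand-ins for the kernel config switch and helper; flip to 1 to mimic an RT build. */
#define CONFIG_PREEMPT_RT 0
#define IS_ENABLED(option) (option)

static unsigned long fake_irq_state = 1;        /* 1 = "interrupts enabled" */

static void local_irq_save_sketch(unsigned long *flags)
{
        *flags = fake_irq_state;                /* remember previous state */
        fake_irq_state = 0;                     /* "disable interrupts" */
}

static void local_irq_restore_sketch(unsigned long *flags)
{
        fake_irq_state = *flags;                /* put the previous state back */
}

/* Plain lock/unlock, standing in for bit_spin_lock()/__bit_spin_unlock() on PG_locked. */
static void __slab_lock_sketch(void)   { }
static void __slab_unlock_sketch(void) { }

static void slab_lock_sketch(unsigned long *flags)
{
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
                local_irq_save_sketch(flags);   /* RT: the lock itself disables irqs */
        __slab_lock_sketch();
}

static void slab_unlock_sketch(unsigned long *flags)
{
        __slab_unlock_sketch();
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
                local_irq_restore_sketch(flags); /* RT: re-enable on unlock */
}

int main(void)
{
        unsigned long flags = 0;        /* init to 0, as the patch does, to avoid spurious warnings */

        slab_lock_sketch(&flags);
        printf("irqs %s inside the lock\n", fake_irq_state ? "enabled" : "disabled");
        slab_unlock_sketch(&flags);
        return 0;
}

With CONFIG_PREEMPT_RT stubbed to 0 the conditional folds away at compile time and flags is never read or written, which is why the real slab_[un]lock() can carry the parameter at no cost on !RT; with it set to 1 the lock and unlock bracket the irq-disabled region, mirroring what the patch needs because spin_lock_irqsave() does not disable irqs on PREEMPT_RT.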
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -359,25 +359,44 @@ static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
 /*
  * Per slab locking using the pagelock
  */
-static __always_inline void slab_lock(struct page *page)
+static __always_inline void __slab_lock(struct page *page)
 {
         VM_BUG_ON_PAGE(PageTail(page), page);
         bit_spin_lock(PG_locked, &page->flags);
 }
 
-static __always_inline void slab_unlock(struct page *page)
+static __always_inline void __slab_unlock(struct page *page)
 {
         VM_BUG_ON_PAGE(PageTail(page), page);
         __bit_spin_unlock(PG_locked, &page->flags);
 }
 
-/* Interrupts must be disabled (for the fallback code to work right) */
+static __always_inline void slab_lock(struct page *page, unsigned long *flags)
+{
+        if (IS_ENABLED(CONFIG_PREEMPT_RT))
+                local_irq_save(*flags);
+        __slab_lock(page);
+}
+
+static __always_inline void slab_unlock(struct page *page, unsigned long *flags)
+{
+        __slab_unlock(page);
+        if (IS_ENABLED(CONFIG_PREEMPT_RT))
+                local_irq_restore(*flags);
+}
+
+/*
+ * Interrupts must be disabled (for the fallback code to work right), typically
+ * by an _irqsave() lock variant. Except on PREEMPT_RT where locks are different
+ * so we disable interrupts as part of slab_[un]lock().
+ */
 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
                 void *freelist_old, unsigned long counters_old,
                 void *freelist_new, unsigned long counters_new,
                 const char *n)
 {
-        VM_BUG_ON(!irqs_disabled());
+        if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+                lockdep_assert_irqs_disabled();
 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
         if (s->flags & __CMPXCHG_DOUBLE) {
@@ -388,15 +407,18 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
         } else
 #endif
         {
-                slab_lock(page);
+                /* init to 0 to prevent spurious warnings */
+                unsigned long flags = 0;
+
+                slab_lock(page, &flags);
                 if (page->freelist == freelist_old &&
                                         page->counters == counters_old) {
                         page->freelist = freelist_new;
                         page->counters = counters_new;
-                        slab_unlock(page);
+                        slab_unlock(page, &flags);
                         return true;
                 }
-                slab_unlock(page);
+                slab_unlock(page, &flags);
         }
 
         cpu_relax();
@@ -427,16 +449,16 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
                 unsigned long flags;
 
                 local_irq_save(flags);
-                slab_lock(page);
+                __slab_lock(page);
                 if (page->freelist == freelist_old &&
                                         page->counters == counters_old) {
                         page->freelist = freelist_new;
                         page->counters = counters_new;
-                        slab_unlock(page);
+                        __slab_unlock(page);
                         local_irq_restore(flags);
                         return true;
                 }
-                slab_unlock(page);
+                __slab_unlock(page);
                 local_irq_restore(flags);
         }
 
@@ -1269,11 +1291,11 @@ static noinline int free_debug_processing(
         struct kmem_cache_node *n = get_node(s, page_to_nid(page));
         void *object = head;
         int cnt = 0;
-        unsigned long flags;
+        unsigned long flags, flags2;
         int ret = 0;
 
         spin_lock_irqsave(&n->list_lock, flags);
-        slab_lock(page);
+        slab_lock(page, &flags2);
 
         if (s->flags & SLAB_CONSISTENCY_CHECKS) {
                 if (!check_slab(s, page))
@@ -1306,7 +1328,7 @@ out:
                 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
                          bulk_cnt, cnt);
 
-        slab_unlock(page);
+        slab_unlock(page, &flags2);
         spin_unlock_irqrestore(&n->list_lock, flags);
         if (!ret)
                 slab_fix(s, "Object at 0x%p not freed", object);
@@ -4087,11 +4109,12 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 {
 #ifdef CONFIG_SLUB_DEBUG
         void *addr = page_address(page);
+        unsigned long flags;
         unsigned long *map;
         void *p;
 
         slab_err(s, page, text, s->name);
-        slab_lock(page);
+        slab_lock(page, &flags);
 
         map = get_map(s, page);
         for_each_object(p, s, addr, page->objects) {
@@ -4102,7 +4125,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
                 }
         }
         put_map(map);
-        slab_unlock(page);
+        slab_unlock(page, &flags);
 #endif
 }
 
@@ -4834,8 +4857,9 @@ static void validate_slab(struct kmem_cache *s, struct page *page,
 {
         void *p;
         void *addr = page_address(page);
+        unsigned long flags;
 
-        slab_lock(page);
+        slab_lock(page, &flags);
 
         if (!check_slab(s, page) || !on_freelist(s, page, NULL))
                 goto unlock;
@@ -4850,7 +4874,7 @@ static void validate_slab(struct kmem_cache *s, struct page *page,
                 break;
         }
 unlock:
-        slab_unlock(page);
+        slab_unlock(page, &flags);
 }
 
 static int validate_slab_node(struct kmem_cache *s,