mm/slub: Convert detached_freelist to use a struct slab
This gives us a little bit of extra typesafety as we know that nobody
called virt_to_page() instead of virt_to_head_page().

[ vbabka@suse.cz: Use folio as intermediate step when filtering out
  large kmalloc pages ]

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
commit cc465c3b23
parent 0b3eb091d5

mm/slub.c | 31 +++++++++++++++++--------------
1 file changed, 17 insertions(+), 14 deletions(-)
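Note: the typesafety the message refers to comes from giving slab pages their
own pointer type, so the compiler rejects code that hands a raw struct page *
(possibly a tail page) to a function expecting a slab. Below is a minimal,
self-contained user-space sketch of that idea; the simplified types and the
page_slab_sketch() helper are invented for illustration and are not the
kernel's real definitions (the real struct slab overlays struct page and the
conversion is hidden behind folio_slab()/virt_to_slab()).

        /* Sketch only: a toy "struct slab" distinct from "struct page". */
        #include <stdio.h>

        struct page {
                struct page *compound_head;     /* NULL for a head page (simplified) */
        };

        struct slab {
                struct page page;
        };

        /*
         * The only way to obtain a struct slab * is through a helper that
         * resolves tail pages to the head page first, so the old mistake of
         * calling virt_to_page() where virt_to_head_page() was needed can no
         * longer type-check.
         */
        static struct slab *page_slab_sketch(struct page *page)
        {
                if (page->compound_head)
                        page = page->compound_head;
                return (struct slab *)page;
        }

        int main(void)
        {
                struct page head = { .compound_head = NULL };
                struct page tail = { .compound_head = &head };

                /* Both resolve to the same slab, whichever page we held. */
                printf("%s\n",
                       page_slab_sketch(&head) == page_slab_sketch(&tail)
                       ? "same slab" : "bug");

                /* struct slab *s = &tail;  -- would now fail to compile */
                return 0;
        }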
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3532,7 +3532,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 EXPORT_SYMBOL(kmem_cache_free);
 
 struct detached_freelist {
-	struct page *page;
+	struct slab *slab;
 	void *tail;
 	void *freelist;
 	int cnt;
@@ -3554,8 +3554,8 @@ static inline void free_nonslab_page(struct page *page, void *object)
 /*
  * This function progressively scans the array with free objects (with
  * a limited look ahead) and extract objects belonging to the same
- * page. It builds a detached freelist directly within the given
- * page/objects. This can happen without any need for
+ * slab. It builds a detached freelist directly within the given
+ * slab/objects. This can happen without any need for
  * synchronization, because the objects are owned by running process.
  * The freelist is build up as a single linked list in the objects.
  * The idea is, that this detached freelist can then be bulk
@@ -3570,10 +3570,11 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	size_t first_skipped_index = 0;
 	int lookahead = 3;
 	void *object;
-	struct page *page;
+	struct folio *folio;
+	struct slab *slab;
 
 	/* Always re-init detached_freelist */
-	df->page = NULL;
+	df->slab = NULL;
 
 	do {
 		object = p[--size];
@@ -3583,17 +3584,19 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	if (!object)
 		return 0;
 
-	page = virt_to_head_page(object);
+	folio = virt_to_folio(object);
 	if (!s) {
 		/* Handle kalloc'ed objects */
-		if (unlikely(!PageSlab(page))) {
-			free_nonslab_page(page, object);
+		if (unlikely(!folio_test_slab(folio))) {
+			free_nonslab_page(folio_page(folio, 0), object);
 			p[size] = NULL; /* mark object processed */
 			return size;
 		}
 		/* Derive kmem_cache from object */
-		df->s = page->slab_cache;
+		slab = folio_slab(folio);
+		df->s = slab->slab_cache;
 	} else {
+		slab = folio_slab(folio);
 		df->s = cache_from_obj(s, object); /* Support for memcg */
 	}
 
@@ -3605,7 +3608,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	}
 
 	/* Start new detached freelist */
-	df->page = page;
+	df->slab = slab;
 	set_freepointer(df->s, object, NULL);
 	df->tail = object;
 	df->freelist = object;
@@ -3617,8 +3620,8 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 		if (!object)
 			continue; /* Skip processed objects */
 
-		/* df->page is always set at this point */
-		if (df->page == virt_to_head_page(object)) {
+		/* df->slab is always set at this point */
+		if (df->slab == virt_to_slab(object)) {
 			/* Opportunity build freelist */
 			set_freepointer(df->s, object, df->freelist);
 			df->freelist = object;
@@ -3650,10 +3653,10 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 		struct detached_freelist df;
 
 		size = build_detached_freelist(s, size, p, &df);
-		if (!df.page)
+		if (!df.slab)
 			continue;
 
-		slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
+		slab_free(df.s, slab_page(df.slab), df.freelist, df.tail, df.cnt, _RET_IP_);
 	} while (likely(size));
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
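The comment block rewritten in the second hunk describes the algorithm that
build_detached_freelist() implements and that the kmem_cache_free_bulk() loop
in the final hunk drives. As a worked example, here is a user-space
simulation of that walk: scan the array from the end, chain objects belonging
to the same slab into a freelist stored inside the objects themselves, NULL
out processed slots, and return where the next pass should resume. Slab
membership is modelled with an integer id (the kernel compares struct slab
pointers), and struct obj / detach_freelist() are invented names for the
sketch, not kernel API.

        #include <stddef.h>
        #include <stdio.h>

        struct obj {
                struct obj *next;       /* plays the role of the free pointer */
                int slab_id;            /* stands in for virt_to_slab(object) */
        };

        /*
         * Returns the index the next pass should restart from (like
         * first_skipped_index in the kernel); *head/*cnt describe the
         * freelist detached on this pass, like df->freelist and df->cnt.
         */
        static size_t detach_freelist(struct obj **p, size_t size,
                                      struct obj **head, int *cnt)
        {
                size_t first_skipped_index = 0;
                int lookahead = 3;      /* same limited look ahead */
                struct obj *o;
                int slab_id;

                *head = NULL;
                *cnt = 0;
                if (!size)              /* kernel caller WARNs on size == 0 */
                        return 0;

                /* Find the last unprocessed object; it starts a new freelist. */
                do {
                        o = p[--size];
                } while (!o && size);

                if (!o)
                        return 0;

                slab_id = o->slab_id;
                o->next = NULL;
                *head = o;
                *cnt = 1;
                p[size] = NULL;         /* mark object processed */

                while (size) {
                        o = p[--size];
                        if (!o)
                                continue;       /* skip processed objects */

                        if (o->slab_id == slab_id) {
                                o->next = *head;        /* chain into freelist */
                                *head = o;
                                (*cnt)++;
                                p[size] = NULL;
                        } else {
                                if (!--lookahead)
                                        break;  /* limit look ahead search */
                                if (!first_skipped_index)
                                        first_skipped_index = size + 1;
                        }
                }
                /* Next pass rescans from the first object we had to skip. */
                return first_skipped_index;
        }

        int main(void)
        {
                struct obj objs[6] = {
                        { .slab_id = 1 }, { .slab_id = 2 }, { .slab_id = 1 },
                        { .slab_id = 2 }, { .slab_id = 1 }, { .slab_id = 2 },
                };
                struct obj *p[6] = { &objs[0], &objs[1], &objs[2],
                                     &objs[3], &objs[4], &objs[5] };
                size_t size = 6;

                /* Same shape as the kmem_cache_free_bulk() driver loop:
                 * detach one freelist per pass, "free" it, repeat. */
                do {
                        struct obj *head;
                        int cnt;

                        size = detach_freelist(p, size, &head, &cnt);
                        if (!head)
                                continue;
                        printf("freed %d object(s) from slab %d\n",
                               cnt, head->slab_id);
                } while (size);
                return 0;
        }

With the alternating ids above, the first pass detaches the three slab-2
objects and returns a restart index; the second pass collects the three
slab-1 objects and returns 0, ending the loop. This is why a mixed array
still results in one slab_free() call per slab rather than per object.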