Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  slab: document SLAB_DESTROY_BY_RCU
  Kconfig: SLUB is the default slab allocator
commit 2d1595ad87
include/linux/slab.h
@@ -23,6 +23,34 @@
 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
 #define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
+/*
+ * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
+ *
+ * This delays freeing the SLAB page by a grace period, it does _NOT_
+ * delay object freeing. This means that if you do kmem_cache_free()
+ * that memory location is free to be reused at any time. Thus it may
+ * be possible to see another object there in the same RCU grace period.
+ *
+ * This feature only ensures the memory location backing the object
+ * stays valid, the trick to using this is relying on an independent
+ * object validation pass. Something like:
+ *
+ *  rcu_read_lock()
+ * again:
+ *  obj = lockless_lookup(key);
+ *  if (obj) {
+ *    if (!try_get_ref(obj)) // might fail for free objects
+ *      goto again;
+ *
+ *    if (obj->key != key) { // not the object we expected
+ *      put_ref(obj);
+ *      goto again;
+ *    }
+ *  }
+ *  rcu_read_unlock();
+ *
+ * See also the comment on struct slab_rcu in mm/slab.c.
+ */
 #define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
 #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
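The pseudocode in the new comment leaves lockless_lookup() and try_get_ref() abstract. Below is a minimal C sketch of the same validation pattern, not code from this commit: every identifier (struct my_obj, my_cache, lockless_lookup(), get_obj()) is made up for illustration, and try_get_ref() is assumed to be built on atomic_inc_not_zero() so that it fails once an object's refcount has already dropped to zero.

/*
 * Hypothetical example mirroring the comment's pseudocode; none of
 * these names exist in the kernel.
 */
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <asm/atomic.h>	/* atomic_inc_not_zero() on kernels of this vintage */

struct my_obj {
	unsigned long		key;	/* what lookups match against */
	atomic_t		refcnt;	/* drops to 0 when the object is freed */
	struct hlist_node	node;
};

static struct kmem_cache *my_cache;

/* Assumed RCU-safe hash walk; returns a candidate without taking a reference. */
static struct my_obj *lockless_lookup(unsigned long key);

static int my_cache_init(void)
{
	/* SLAB_DESTROY_BY_RCU defers freeing of the backing pages, not objects. */
	my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
				     SLAB_DESTROY_BY_RCU, NULL);
	return my_cache ? 0 : -ENOMEM;
}

/* Might fail: a freed object's refcount stays 0 until the slot is reused. */
static int try_get_ref(struct my_obj *obj)
{
	return atomic_inc_not_zero(&obj->refcnt);
}

static void put_ref(struct my_obj *obj)
{
	if (atomic_dec_and_test(&obj->refcnt))
		kmem_cache_free(my_cache, obj);	/* the slot may be reused at once */
}

static struct my_obj *get_obj(unsigned long key)
{
	struct my_obj *obj;

	rcu_read_lock();
again:
	obj = lockless_lookup(key);
	if (obj) {
		if (!try_get_ref(obj))		/* might fail for free objects */
			goto again;

		if (obj->key != key) {		/* not the object we expected */
			put_ref(obj);
			goto again;
		}
	}
	rcu_read_unlock();
	return obj;	/* caller holds a reference, or obj is NULL */
}

The point the comment makes is that SLAB_DESTROY_BY_RCU only keeps the slot a valid struct my_obj for this cache during the grace period; the refcount and the key re-check are what actually decide whether the looked-up object is still the one we wanted.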
init/Kconfig
@@ -771,8 +771,7 @@ config SLAB
 	help
 	  The regular slab allocator that is established and known to work
 	  well in all environments. It organizes cache hot objects in
-	  per cpu and per node queues. SLAB is the default choice for
-	  a slab allocator.
+	  per cpu and per node queues.
 
 config SLUB
 	bool "SLUB (Unqueued Allocator)"
@@ -781,7 +780,8 @@ config SLUB
 	   instead of managing queues of cached objects (SLAB approach).
 	   Per cpu caching is realized using slabs of objects instead
 	   of queues of objects. SLUB can use memory efficiently
-	   and has enhanced diagnostics.
+	   and has enhanced diagnostics. SLUB is the default choice for
+	   a slab allocator.
 
 config SLOB
 	depends on EMBEDDED
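Both help texts describe allocator internals (per-cpu/per-node object queues for SLAB, per-cpu slabs for SLUB), but the selection is a one-time Kconfig decision that callers never see: the kmem_cache interface is the same whichever of SLAB, SLUB or SLOB is built in. A minimal caller-side sketch follows, with a made-up "widget" cache; nothing in it is from this commit.

#include <linux/slab.h>
#include <linux/gfp.h>

/* Made-up object type; nothing below depends on which allocator backs it. */
struct widget {
	int id;
};

static struct kmem_cache *widget_cache;

static int widget_cache_init(void)
{
	/* SLAB_PANIC: panic instead of returning NULL if creation fails. */
	widget_cache = kmem_cache_create("widget", sizeof(struct widget),
					 0, SLAB_PANIC, NULL);
	return 0;
}

static struct widget *widget_alloc(void)
{
	return kmem_cache_alloc(widget_cache, GFP_KERNEL);
}

static void widget_free(struct widget *w)
{
	kmem_cache_free(widget_cache, w);
}

Selecting CONFIG_SLAB, CONFIG_SLUB or CONFIG_SLOB only changes what sits behind these calls, which is why flipping the default in Kconfig needs no changes to users of the API.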