mm/slub.c: trivial typo fixes
s/operatios/operations/
s/Mininum/Minimum/
s/mininum/minimum/ ......two different places.

Link: https://lkml.kernel.org/r/20210325044940.14516-1-unixbhaskar@gmail.com
Signed-off-by: Bhaskar Chowdhury <unixbhaskar@gmail.com>
Acked-by: Randy Dunlap <rdunlap@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1f0723a4c0
commit dc84207d00
@@ -3,7 +3,7 @@
  * SLUB: A slab allocator that limits cache line use instead of queuing
  * objects in per cpu and per node lists.
  *
- * The allocator synchronizes using per slab locks or atomic operatios
+ * The allocator synchronizes using per slab locks or atomic operations
  * and only uses a centralized lock to manage a pool of partial slabs.
  *
  * (C) 2007 SGI, Christoph Lameter
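For context on the line corrected above: the "centralized lock" the comment mentions is the per-node list_lock that guards the pool of partial slabs. A pared-down sketch of the fields involved (field names follow the kernel's per-node structure, but most members are omitted; see mm/slub.c for the real definitions):

/*
 * Pared-down sketch of the per-node structure the comment refers to;
 * this is an illustration, not the full kernel definition.
 */
struct kmem_cache_node_sketch {
	spinlock_t list_lock;		/* the "centralized lock" for the partial pool */
	unsigned long nr_partial;	/* slabs currently on the partial list */
	struct list_head partial;	/* pool of partially filled slabs */
};

Per-slab synchronization, by contrast, uses the slab's own lock bit or an atomic cmpxchg on its freelist, so the list_lock is only taken when slabs move on or off this list.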
@@ -160,7 +160,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 #undef SLUB_DEBUG_CMPXCHG
 
 /*
- * Mininum number of partial slabs. These will be left on the partial
+ * Minimum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
  */
 #define MIN_PARTIAL 5
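The constant fixed in this hunk bounds how aggressively empty slabs are released back to the page allocator. A hedged, self-contained illustration of the decision it drives (not the kernel's literal code, which works through s->min_partial on the free path):

#define MIN_PARTIAL 5	/* minimum number of partial slabs kept per node */

/*
 * Illustrative decision only: keep a slab that just became empty on
 * the node's partial list while the node holds fewer than MIN_PARTIAL
 * partial slabs; kmem_cache_shrink() may still reclaim it later,
 * exactly as the comment says.
 */
static int keep_empty_slab(unsigned long nr_partial_on_node)
{
	return nr_partial_on_node < MIN_PARTIAL;
}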
@@ -833,7 +833,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
  *
  * A. Free pointer (if we cannot overwrite object on free)
  * B. Tracking data for SLAB_STORE_USER
- * C. Padding to reach required alignment boundary or at mininum
+ * C. Padding to reach required alignment boundary or at minimum
  * 	one word if debugging is on to be able to detect writes
  * 	before the word boundary.
  *
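The list corrected here enumerates the debug metadata placed after each object. A rough visualization of one slot in that order (the real layout is computed at runtime from offsets in mm/slub.c, not declared as a struct, and the field sizes below are invented for illustration):

/*
 * Hypothetical picture of one debugged slot, following the A/B/C
 * list above; purely a visualization aid.
 */
struct debug_slot_sketch {
	unsigned char object[32];	/* the object itself (example size) */
	void *free_pointer;		/* A: kept outside the object when it
					 *    cannot be overwritten on free */
	unsigned long alloc_track[4];	/* B: SLAB_STORE_USER tracking data */
	unsigned long free_track[4];
	unsigned char padding[8];	/* C: alignment, at minimum one word
					 *    with debugging on */
};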
@@ -3422,7 +3422,7 @@ static unsigned int slub_min_objects;
  *
  * Higher order allocations also allow the placement of more objects in a
  * slab and thereby reduce object handling overhead. If the user has
- * requested a higher mininum order then we start with that one instead of
+ * requested a higher minimum order then we start with that one instead of
  * the smallest order which will fit the object.
  */
 static inline unsigned int slab_order(unsigned int size,
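This last hunk touches the comment above slab_order(). A standalone userspace sketch of the search that comment describes, assuming a 4096-byte page and omitting the kernel's MAX_OBJS_PER_PAGE clamp (simplified from mm/slub.c; not the literal kernel code):

#define SKETCH_PAGE_SIZE 4096u	/* assumed page size */

/* smallest order whose slab holds at least 'bytes' bytes */
static unsigned int order_for(unsigned int bytes)
{
	unsigned int order = 0;

	while ((SKETCH_PAGE_SIZE << order) < bytes)
		order++;
	return order;
}

static unsigned int sketch_slab_order(unsigned int size,
				      unsigned int min_objects,
				      unsigned int min_order,
				      unsigned int max_order,
				      unsigned int fract_leftover)
{
	unsigned int order = order_for(min_objects * size);

	if (order < min_order)	/* user requested a higher minimum order */
		order = min_order;

	for (; order <= max_order; order++) {
		unsigned int slab_size = SKETCH_PAGE_SIZE << order;
		unsigned int rem = slab_size % size;

		/* accept once waste is within 1/fract_leftover of the slab */
		if (rem <= slab_size / fract_leftover)
			break;
	}
	return order;
}

For example, with size = 700, min_objects = 8 and fract_leftover = 16, the search starts at order 1 (8 * 700 = 5600 bytes needs two pages) and accepts it immediately, since 8192 % 700 = 492 is below 8192 / 16 = 512.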