Mirror of https://github.com/torvalds/linux.git (synced 2024-11-30 16:11:38 +00:00)
Commit ce91f6ee5b
kvmalloc warned about an incompatible gfp_mask to catch abusers (mostly GFP_NOFS), with the intention that this would motivate authors of the code to fix those. Linus argues that this just motivates people to do even more hacks like

	if (gfp == GFP_KERNEL)
		kvmalloc
	else
		kmalloc

I haven't seen this happening much (Linus pointed to bucket_lock, which special-cases an atomic allocation, but my git foo hasn't found much more), but it is true that we could grow more of those in the future. Therefore Linus suggested to simply not fall back to vmalloc for incompatible gfp flags and rather stick with the kmalloc path.

Link: http://lkml.kernel.org/r/20180601115329.27807-1-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Tom Herbert <tom@quantonium.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
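To illustrate the point, here is a rough caller-side sketch (the function names are made up for this sketch and are not part of the commit): before the change, a caller that could not guarantee GFP_KERNEL tended to pick the allocator by hand; after it, the unconditional call is enough, because kvmalloc_array() sticks to the kmalloc path and never falls back to vmalloc for a gfp mask that is not GFP_KERNEL-compatible.

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Before: callers special-cased atomic allocations by hand. */
static spinlock_t *alloc_locks_old(size_t n, gfp_t gfp)
{
	if (gfp != GFP_KERNEL)
		return kmalloc_array(n, sizeof(spinlock_t), gfp);
	return kvmalloc_array(n, sizeof(spinlock_t), gfp);
}

/* After: one unconditional call; for an incompatible gfp mask
 * kvmalloc_array() stays on the kmalloc path instead of falling
 * back to vmalloc. */
static spinlock_t *alloc_locks_new(size_t n, gfp_t gfp)
{
	return kvmalloc_array(n, sizeof(spinlock_t), gfp);
}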
52 lines · 1.3 KiB · C
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Allocate an array of spinlocks to be accessed by a hash. Two arguments
 * indicate the number of elements to allocate in the array. max_size
 * gives the maximum number of elements to allocate. cpu_mult gives
 * the number of locks per CPU to allocate. The size is rounded up
 * to a power of 2 to be suitable as a hash table.
 */

int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
			   size_t max_size, unsigned int cpu_mult, gfp_t gfp)
{
	spinlock_t *tlocks = NULL;
	unsigned int i, size;
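	/*
	 * Under lockdep (CONFIG_PROVE_LOCKING) spinlocks carry extra tracking
	 * state, so assume only two CPUs to keep the lock array small;
	 * otherwise scale with the number of possible CPUs.
	 */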
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

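	/*
	 * If a per-CPU multiplier was requested, allocate cpu_mult locks per
	 * CPU (capping the CPU count at 64), but never more than max_size.
	 * A zero cpu_mult means allocate exactly max_size locks.
	 */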
	if (cpu_mult) {
		nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
		size = min_t(unsigned int, nr_pcpus * cpu_mult, max_size);
	} else {
		size = max_size;
	}

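	/*
	 * spinlock_t can be zero-sized (e.g. on !SMP builds without spinlock
	 * debugging), in which case there is nothing to allocate or
	 * initialize.
	 */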
	if (sizeof(spinlock_t) != 0) {
		tlocks = kvmalloc_array(size, sizeof(spinlock_t), gfp);
		if (!tlocks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tlocks[i]);
	}

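	/*
	 * Hand the array back together with its mask; size is expected to be
	 * a power of two, so size - 1 lets callers index the array directly
	 * from a hash value.
	 */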
	*locks = tlocks;
	*locks_mask = size - 1;

	return 0;
}
EXPORT_SYMBOL(alloc_bucket_spinlocks);

void free_bucket_spinlocks(spinlock_t *locks)
{
	kvfree(locks);
}
EXPORT_SYMBOL(free_bucket_spinlocks);
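For reference, a minimal caller-side sketch of how these helpers might be used, assuming the prototypes are visible to the caller (e.g. via linux/spinlock.h); the struct and function names below are illustrative and not taken from the kernel tree:

#include <linux/spinlock.h>
#include <linux/gfp.h>

/* Illustrative example; names are made up. */
struct example_hash {
	spinlock_t *bucket_locks;
	unsigned int lock_mask;
};

static int example_hash_init(struct example_hash *h)
{
	/*
	 * Ask for exactly 1024 locks (a power of two); cpu_mult == 0 means
	 * "do not scale by the number of CPUs".
	 */
	return alloc_bucket_spinlocks(&h->bucket_locks, &h->lock_mask,
				      1024, 0, GFP_KERNEL);
}

static spinlock_t *example_bucket_lock(struct example_hash *h,
				       unsigned int hash)
{
	/* lock_mask is size - 1, so this maps a hash to one of the locks. */
	return &h->bucket_locks[hash & h->lock_mask];
}

static void example_hash_destroy(struct example_hash *h)
{
	free_bucket_spinlocks(h->bucket_locks);
}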