percpu: kill percpu_alloc() and friends
Impact: kill unused functions

percpu_alloc() and its friends never saw much action. It was supposed to replace the cpu-mask-unaware __alloc_percpu(), but that never happened, and in fact __percpu_alloc_mask() itself never grew a proper up/down handling interface either (no exported interface for populate/depopulate). percpu allocation is about to go through a major reimplementation, and there is no reason to carry this unused interface around. Replace it with __alloc_percpu() and free_percpu().

Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in: commit f2a8205c4e (parent 313e458f81)
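To make the interface change concrete, here is a hypothetical caller-side sketch, not part of the patch: struct foo, foo_init() and foo_exit() are invented for illustration. It shows what allocation and teardown look like once only __alloc_percpu() and free_percpu() remain; alloc_percpu(type) keeps working unchanged, while percpu_alloc(), percpu_alloc_mask() and percpu_free() disappear.

#include <linux/percpu.h>
#include <linux/errno.h>

/* "struct foo" and the functions below are made up for illustration. */
struct foo {
        unsigned long   hits;
};

static struct foo *foo_counters;

static int foo_init(void)
{
        /* was: foo_counters = percpu_alloc(sizeof(struct foo), GFP_KERNEL); */
        foo_counters = alloc_percpu(struct foo);
        if (!foo_counters)
                return -ENOMEM;
        return 0;
}

static void foo_exit(void)
{
        /* was: percpu_free(foo_counters); free_percpu() is now a real function */
        free_percpu(foo_counters);
}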
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -82,41 +82,10 @@ struct percpu_data {
 
 #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
 
-extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
-extern void percpu_free(void *__pdata);
-
-#else /* CONFIG_SMP */
-
-#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
-
-static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
-{
-        return kzalloc(size, gfp);
-}
-
-static inline void percpu_free(void *__pdata)
-{
-        kfree(__pdata);
-}
-
-#endif /* CONFIG_SMP */
-
-#define percpu_alloc_mask(size, gfp, mask) \
-        __percpu_alloc_mask((size), (gfp), &(mask))
-
-#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)
-
-/* (legacy) interface for use without CPU hotplug handling */
-
-#define __alloc_percpu(size, align)     percpu_alloc_mask((size), GFP_KERNEL, \
-                                                cpu_possible_map)
-#define alloc_percpu(type)      (type *)__alloc_percpu(sizeof(type), \
-                                                       __alignof__(type))
-#define free_percpu(ptr)        percpu_free((ptr))
 /*
- * Use this to get to a cpu's version of the per-cpu object dynamically
- * allocated. Non-atomic access to the current CPU's version should
- * probably be combined with get_cpu()/put_cpu().
+ * Use this to get to a cpu's version of the per-cpu object
+ * dynamically allocated. Non-atomic access to the current CPU's
+ * version should probably be combined with get_cpu()/put_cpu().
 */
 #define per_cpu_ptr(ptr, cpu) \
 ({ \
@@ -124,4 +93,32 @@ static inline void percpu_free(void *__pdata)
         (__typeof__(ptr))__p->ptrs[(cpu)]; \
 })
 
+extern void *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void *__pdata);
+
+#else /* CONFIG_SMP */
+
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
+
+static inline void *__alloc_percpu(size_t size, size_t align)
+{
+        /*
+         * Can't easily make larger alignment work with kmalloc.  WARN
+         * on it.  Larger alignment should only be used for module
+         * percpu sections on SMP for which this path isn't used.
+         */
+        WARN_ON_ONCE(align > __alignof__(unsigned long long));
+        return kzalloc(size, GFP_KERNEL);
+}
+
+static inline void free_percpu(void *p)
+{
+        kfree(p);
+}
+
+#endif /* CONFIG_SMP */
+
+#define alloc_percpu(type)      (type *)__alloc_percpu(sizeof(type), \
+                                                       __alignof__(type))
+
 #endif /* __LINUX_PERCPU_H */
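As a usage illustration only (not from the patch; the foo counter and foo_account() are hypothetical), the per_cpu_ptr() comment above suggests pairing non-atomic access to the current CPU's copy with get_cpu()/put_cpu(), roughly like this:

#include <linux/percpu.h>
#include <linux/smp.h>

struct foo {
        unsigned long   hits;
};

/* "counters" would come from alloc_percpu(struct foo) */
static void foo_account(struct foo *counters)
{
        int cpu = get_cpu();                    /* returns this CPU's id, disables preemption */
        struct foo *c = per_cpu_ptr(counters, cpu);

        c->hits++;                              /* non-atomic, safe while preemption is off */
        put_cpu();                              /* re-enables preemption */
}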
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -99,45 +99,51 @@ static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
         __percpu_populate_mask((__pdata), (size), (gfp), &(mask))
 
 /**
- * percpu_alloc_mask - initial setup of per-cpu data
+ * alloc_percpu - initial setup of per-cpu data
  * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @mask: populate per-data for cpu's selected through mask bits
+ * @align: alignment
  *
- * Populating per-cpu data for all online cpu's would be a typical use case,
- * which is simplified by the percpu_alloc() wrapper.
- * Per-cpu objects are populated with zeroed buffers.
+ * Allocate dynamic percpu area.  Percpu objects are populated with
+ * zeroed buffers.
  */
-void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+void *__alloc_percpu(size_t size, size_t align)
 {
         /*
          * We allocate whole cache lines to avoid false sharing
          */
         size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
-        void *pdata = kzalloc(sz, gfp);
+        void *pdata = kzalloc(sz, GFP_KERNEL);
         void *__pdata = __percpu_disguise(pdata);
 
+        /*
+         * Can't easily make larger alignment work with kmalloc.  WARN
+         * on it.  Larger alignment should only be used for module
+         * percpu sections on SMP for which this path isn't used.
+         */
+        WARN_ON_ONCE(align > __alignof__(unsigned long long));
+
         if (unlikely(!pdata))
                 return NULL;
-        if (likely(!__percpu_populate_mask(__pdata, size, gfp, mask)))
+        if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
+                                           &cpu_possible_map)))
                 return __pdata;
         kfree(pdata);
         return NULL;
 }
-EXPORT_SYMBOL_GPL(__percpu_alloc_mask);
+EXPORT_SYMBOL_GPL(__alloc_percpu);
 
 /**
- * percpu_free - final cleanup of per-cpu data
+ * free_percpu - final cleanup of per-cpu data
  * @__pdata: object to clean up
  *
  * We simply clean up any per-cpu object left. No need for the client to
  * track and specify through a bis mask which per-cpu objects are to free.
  */
-void percpu_free(void *__pdata)
+void free_percpu(void *__pdata)
 {
         if (unlikely(!__pdata))
                 return;
         __percpu_depopulate_mask(__pdata, &cpu_possible_map);
         kfree(__percpu_disguise(__pdata));
 }
-EXPORT_SYMBOL_GPL(percpu_free);
+EXPORT_SYMBOL_GPL(free_percpu);
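For readers unfamiliar with the pointer-disguising trick kept by this patch, here is a minimal user-space mock (assumptions: NR_CPUS fixed at 4, error paths and frees omitted, the cast via __typeof__ from the real per_cpu_ptr() dropped) of how the disguised handle returned by __alloc_percpu() relates to the real struct percpu_data and its ptrs[] array:

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct percpu_data {
        void *ptrs[NR_CPUS];
};

/* the handle handed to callers is the bitwise complement of the real pointer */
#define __percpu_disguise(pdata) \
        ((struct percpu_data *)~(unsigned long)(pdata))

/* per_cpu_ptr() undoes the disguise and indexes this CPU's slot */
#define per_cpu_ptr(ptr, cpu) \
        (((struct percpu_data *)~(unsigned long)(ptr))->ptrs[(cpu)])

static void *mock_alloc_percpu(size_t size)
{
        struct percpu_data *pdata = calloc(1, sizeof(*pdata));
        int cpu;

        if (!pdata)
                return NULL;
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                pdata->ptrs[cpu] = calloc(1, size);     /* zeroed, like kzalloc */
        return __percpu_disguise(pdata);                /* hand out the disguised handle */
}

int main(void)
{
        long *counters = mock_alloc_percpu(sizeof(long));
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                *(long *)per_cpu_ptr(counters, cpu) = cpu * 10;
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                printf("cpu%d: %ld\n", cpu, *(long *)per_cpu_ptr(counters, cpu));
        return 0;
}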