mirror of https://github.com/torvalds/linux.git

commit 554fef1c39
The prior patch added support for passing gfp flags through to the
underlying allocators. This patch allows users to pass along gfp flags
(currently only __GFP_NORETRY and __GFP_NOWARN) to the underlying
allocators. This should allow users to decide if they are ok with failing
allocations recovering in a more graceful way.

Additionally, gfp passing was done as additional flags in the previous
patch. Instead, change this to caller passed semantics. GFP_KERNEL is
also removed as the default flag. It continues to be used for internally
caused underlying percpu allocations.

V2: Removed gfp_percpu_mask in favor of doing it inline.
    Removed GFP_KERNEL as a default flag for __alloc_percpu_gfp.

Signed-off-by: Dennis Zhou <dennisszhou@gmail.com>
Suggested-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
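A minimal sketch of the caller-passed gfp semantics described above; the
example_alloc_counters() wrapper is hypothetical, while __alloc_percpu_gfp()
is the existing percpu API this commit feeds the flags through:

#include <linux/percpu.h>

/*
 * Hypothetical caller: an opportunistic percpu allocation that neither
 * retries hard nor warns on failure, letting the caller fall back
 * gracefully when memory is tight.  GFP_KERNEL is now passed explicitly
 * since it is no longer applied as a default flag.
 */
static u64 __percpu *example_alloc_counters(void)
{
	return __alloc_percpu_gfp(sizeof(u64), __alignof__(u64),
				  GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
}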
380 lines
10 KiB
C
/*
 * mm/percpu-vm.c - vmalloc area based chunk allocation
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * Chunks are mapped into vmalloc areas and populated page by page.
 * This is the default chunk allocator.
 */

static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
				    unsigned int cpu, int page_idx)
{
	/* must not be used on pre-mapped chunk */
	WARN_ON(chunk->immutable);

	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}

/**
 * pcpu_get_pages - get temp pages array
 *
 * Returns pointer to array of pointers to struct page which can be indexed
 * with pcpu_page_idx().  Note that there is only one array and accesses
 * should be serialized by pcpu_alloc_mutex.
 *
 * RETURNS:
 * Pointer to temp pages array on success.
 */
static struct page **pcpu_get_pages(void)
{
	static struct page **pages;
	size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);

	lockdep_assert_held(&pcpu_alloc_mutex);

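	/*
	 * Internally caused allocation, hence plain GFP_KERNEL rather
	 * than a caller-passed gfp.
	 */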
	if (!pages)
		pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL);
	return pages;
}

/**
 * pcpu_free_pages - free pages which were allocated for @chunk
 * @chunk: chunk pages were allocated for
 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be freed
 * @page_end: page index of the last page to be freed + 1
 *
 * Free pages [@page_start, @page_end) in @pages for all units.
 * The pages were allocated for @chunk.
 */
static void pcpu_free_pages(struct pcpu_chunk *chunk,
			    struct page **pages, int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page = pages[pcpu_page_idx(cpu, i)];

			if (page)
				__free_page(page);
		}
	}
}

/**
 * pcpu_alloc_pages - allocates pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 * @gfp: allocation flags passed to the underlying allocator
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * The allocation is for @chunk.  Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, int page_start, int page_end,
			    gfp_t gfp)
{
	unsigned int cpu, tcpu;
	int i;

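	/*
	 * Chunk pages are accessed only through the vmalloc mapping set up
	 * by pcpu_map_pages(), never through the linear map, so highmem
	 * pages are acceptable here.
	 */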
	gfp |= __GFP_HIGHMEM;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
			if (!*pagep)
				goto err;
		}
	}
	return 0;

err:
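	/*
	 * Roll back: free the partial allocation on the failing cpu, then
	 * everything allocated for the cpus iterated before it.
	 */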
	while (--i >= page_start)
		__free_page(pages[pcpu_page_idx(cpu, i)]);

	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		for (i = page_start; i < page_end; i++)
			__free_page(pages[pcpu_page_idx(tcpu, i)]);
	}
	return -ENOMEM;
}

/**
 * pcpu_pre_unmap_flush - flush cache prior to unmapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages in [@page_start,@page_end) of @chunk are about to be
 * unmapped.  Flush cache.  As each flushing trial can be very
 * expensive, issue flush on the whole region at once rather than
 * doing it for each cpu.  This could be an overkill but is more
 * scalable.
 */
static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	flush_cache_vunmap(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
	unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
}

/**
 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array which can be used to pass information to free
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * Corresponding elements in @pages were cleared by the caller and can
 * be used to carry information to pcpu_free_pages() which will be
 * called after all unmaps are finished.  The caller should call
 * proper pre/post flush functions.
 */
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
			     struct page **pages, int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page;

			page = pcpu_chunk_page(chunk, cpu, i);
			WARN_ON(!page);
			pages[pcpu_page_idx(cpu, i)] = page;
		}
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				   page_end - page_start);
	}
}

/**
 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush
 * TLB for the regions.  This can be skipped if the area is to be
 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
 *
 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
 * for the whole region.
 */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
				      int page_start, int page_end)
{
	flush_tlb_kernel_range(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

static int __pcpu_map_pages(unsigned long addr, struct page **pages,
			    int nr_pages)
{
	return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
					PAGE_KERNEL, pages);
}

/**
 * pcpu_map_pages - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array containing pages to be mapped
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.  The
 * caller is responsible for calling pcpu_post_map_flush() after all
 * mappings are complete.
 *
 * This function is responsible for setting up whatever is necessary for
 * reverse lookup (addr -> chunk).
 */
static int pcpu_map_pages(struct pcpu_chunk *chunk,
			  struct page **pages, int page_start, int page_end)
{
	unsigned int cpu, tcpu;
	int i, err;

	for_each_possible_cpu(cpu) {
		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				       &pages[pcpu_page_idx(cpu, page_start)],
				       page_end - page_start);
		if (err < 0)
			goto err;

		for (i = page_start; i < page_end; i++)
			pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
					    chunk);
	}
	return 0;
err:
	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
				   page_end - page_start);
	}
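	/*
	 * The partially established mappings were torn down above; flush
	 * the TLB so no stale entries survive for addresses that may be
	 * remapped or reused later.
	 */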
	pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
	return err;
}

/**
 * pcpu_post_map_flush - flush cache after mapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been mapped.  Flush
 * cache.
 *
 * As with pcpu_pre_unmap_flush(), cache flushing also is done at once
 * for the whole region.
 */
static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
				int page_start, int page_end)
{
	flush_cache_vmap(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: the start page
 * @page_end: the end page
 * @gfp: allocation flags passed to the underlying memory allocator
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
			       int page_start, int page_end, gfp_t gfp)
{
	struct page **pages;

	pages = pcpu_get_pages();
	if (!pages)
		return -ENOMEM;

	if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp))
		return -ENOMEM;

	if (pcpu_map_pages(chunk, pages, page_start, page_end)) {
		pcpu_free_pages(chunk, pages, page_start, page_end);
		return -ENOMEM;
	}
	pcpu_post_map_flush(chunk, page_start, page_end);

	return 0;
}

/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @page_start: the start page
 * @page_end: the end page
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
				  int page_start, int page_end)
{
	struct page **pages;

	/*
	 * If control reaches here, there must have been at least one
	 * successful population attempt so the temp pages array must
	 * be available now.
	 */
	pages = pcpu_get_pages();
	BUG_ON(!pages);

	/* unmap and free */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_unmap_pages(chunk, pages, page_start, page_end);

	/* no need to flush tlb, vmalloc will handle it lazily */

	pcpu_free_pages(chunk, pages, page_start, page_end);
}

static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
{
	struct pcpu_chunk *chunk;
	struct vm_struct **vms;

	chunk = pcpu_alloc_chunk(gfp);
	if (!chunk)
		return NULL;

	vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
				pcpu_nr_groups, pcpu_atom_size);
	if (!vms) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

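	/*
	 * vms[0] is group 0's vm area, which starts pcpu_group_offsets[0]
	 * bytes into the chunk; subtracting the offset yields the chunk's
	 * base address.
	 */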
	chunk->data = vms;
	chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];

	pcpu_stats_chunk_alloc();
	trace_percpu_create_chunk(chunk->base_addr);

	return chunk;
}

static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;

	pcpu_stats_chunk_dealloc();
	trace_percpu_destroy_chunk(chunk->base_addr);

	if (chunk->data)
		pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
	pcpu_free_chunk(chunk);
}

static struct page *pcpu_addr_to_page(void *addr)
{
	return vmalloc_to_page(addr);
}

static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
	/* no extra restriction */
	return 0;
}