[PATCH] page_alloc: fix kernel-doc and function declaration

Fix kernel-doc and function declaration (missing "void") in
mm/page_alloc.c.

Add mm/page_alloc.c to kernel-api.tmpl in DocBook.

mm/page_alloc.c:2589:38: warning: non-ANSI function declaration of function 'remove_all_active_ranges'

Signed-off-by: Randy Dunlap <rdunlap@xenotime.net>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
Randy Dunlap 2006-10-04 02:15:25 -07:00 committed by Linus Torvalds
parent fe1668ae5b
commit 88ca3b94e8
2 changed files with 26 additions and 25 deletions

View File

@@ -158,6 +158,7 @@ X!Ilib/string.c
!Emm/filemap.c !Emm/filemap.c
!Emm/memory.c !Emm/memory.c
!Emm/vmalloc.c !Emm/vmalloc.c
!Imm/page_alloc.c
!Emm/mempool.c !Emm/mempool.c
!Emm/page-writeback.c !Emm/page-writeback.c
!Emm/truncate.c !Emm/truncate.c

View File

@@ -2051,8 +2051,8 @@ int __init early_pfn_to_nid(unsigned long pfn)
/** /**
* free_bootmem_with_active_regions - Call free_bootmem_node for each active range * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
* @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
* @max_low_pfn: The highest PFN that till be passed to free_bootmem_node * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
* *
* If an architecture guarantees that all ranges registered with * If an architecture guarantees that all ranges registered with
* add_active_ranges() contain no holes and may be freed, this * add_active_ranges() contain no holes and may be freed, this
@@ -2082,11 +2082,11 @@ void __init free_bootmem_with_active_regions(int nid,
/** /**
* sparse_memory_present_with_active_regions - Call memory_present for each active range * sparse_memory_present_with_active_regions - Call memory_present for each active range
* @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
* *
* If an architecture guarantees that all ranges registered with * If an architecture guarantees that all ranges registered with
* add_active_ranges() contain no holes and may be freed, this * add_active_ranges() contain no holes and may be freed, this
* this function may be used instead of calling memory_present() manually. * function may be used instead of calling memory_present() manually.
*/ */
void __init sparse_memory_present_with_active_regions(int nid) void __init sparse_memory_present_with_active_regions(int nid)
{ {
@@ -2156,14 +2156,14 @@ static void __init account_node_boundary(unsigned int nid,
/** /**
* get_pfn_range_for_nid - Return the start and end page frames for a node * get_pfn_range_for_nid - Return the start and end page frames for a node
* @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
* @start_pfn: Passed by reference. On return, it will have the node start_pfn * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
* @end_pfn: Passed by reference. On return, it will have the node end_pfn * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
* *
* It returns the start and end page frame of a node based on information * It returns the start and end page frame of a node based on information
* provided by an arch calling add_active_range(). If called for a node * provided by an arch calling add_active_range(). If called for a node
* with no available memory, a warning is printed and the start and end * with no available memory, a warning is printed and the start and end
* PFNs will be 0 * PFNs will be 0.
*/ */
void __init get_pfn_range_for_nid(unsigned int nid, void __init get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn) unsigned long *start_pfn, unsigned long *end_pfn)
@@ -2216,7 +2216,7 @@ unsigned long __init zone_spanned_pages_in_node(int nid,
/* /*
* Return the number of holes in a range on a node. If nid is MAX_NUMNODES, * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
* then all holes in the requested range will be accounted for * then all holes in the requested range will be accounted for.
*/ */
unsigned long __init __absent_pages_in_range(int nid, unsigned long __init __absent_pages_in_range(int nid,
unsigned long range_start_pfn, unsigned long range_start_pfn,
@@ -2269,7 +2269,7 @@ unsigned long __init __absent_pages_in_range(int nid,
* @start_pfn: The start PFN to start searching for holes * @start_pfn: The start PFN to start searching for holes
* @end_pfn: The end PFN to stop searching for holes * @end_pfn: The end PFN to stop searching for holes
* *
* It returns the number of pages frames in memory holes within a range * It returns the number of pages frames in memory holes within a range.
*/ */
unsigned long __init absent_pages_in_range(unsigned long start_pfn, unsigned long __init absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn) unsigned long end_pfn)
@@ -2583,11 +2583,12 @@ void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
/** /**
* remove_all_active_ranges - Remove all currently registered regions * remove_all_active_ranges - Remove all currently registered regions
*
* During discovery, it may be found that a table like SRAT is invalid * During discovery, it may be found that a table like SRAT is invalid
* and an alternative discovery method must be used. This function removes * and an alternative discovery method must be used. This function removes
* all currently registered regions. * all currently registered regions.
*/ */
void __init remove_all_active_ranges() void __init remove_all_active_ranges(void)
{ {
memset(early_node_map, 0, sizeof(early_node_map)); memset(early_node_map, 0, sizeof(early_node_map));
nr_nodemap_entries = 0; nr_nodemap_entries = 0;
@@ -2637,7 +2638,7 @@ unsigned long __init find_min_pfn_for_node(unsigned long nid)
* find_min_pfn_with_active_regions - Find the minimum PFN registered * find_min_pfn_with_active_regions - Find the minimum PFN registered
* *
* It returns the minimum PFN based on information provided via * It returns the minimum PFN based on information provided via
* add_active_range() * add_active_range().
*/ */
unsigned long __init find_min_pfn_with_active_regions(void) unsigned long __init find_min_pfn_with_active_regions(void)
{ {
@@ -2648,7 +2649,7 @@ unsigned long __init find_min_pfn_with_active_regions(void)
* find_max_pfn_with_active_regions - Find the maximum PFN registered * find_max_pfn_with_active_regions - Find the maximum PFN registered
* *
* It returns the maximum PFN based on information provided via * It returns the maximum PFN based on information provided via
* add_active_range() * add_active_range().
*/ */
unsigned long __init find_max_pfn_with_active_regions(void) unsigned long __init find_max_pfn_with_active_regions(void)
{ {
@@ -2663,10 +2664,7 @@ unsigned long __init find_max_pfn_with_active_regions(void)
/** /**
* free_area_init_nodes - Initialise all pg_data_t and zone data * free_area_init_nodes - Initialise all pg_data_t and zone data
* @arch_max_dma_pfn: The maximum PFN usable for ZONE_DMA * @max_zone_pfn: an array of max PFNs for each zone
* @arch_max_dma32_pfn: The maximum PFN usable for ZONE_DMA32
* @arch_max_low_pfn: The maximum PFN usable for ZONE_NORMAL
* @arch_max_high_pfn: The maximum PFN usable for ZONE_HIGHMEM
* *
* This will call free_area_init_node() for each active node in the system. * This will call free_area_init_node() for each active node in the system.
* Using the page ranges provided by add_active_range(), the size of each * Using the page ranges provided by add_active_range(), the size of each
@@ -2724,14 +2722,15 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
/** /**
* set_dma_reserve - Account the specified number of pages reserved in ZONE_DMA * set_dma_reserve - set the specified number of pages reserved in the first zone
* @new_dma_reserve - The number of pages to mark reserved * @new_dma_reserve: The number of pages to mark reserved
* *
* The per-cpu batchsize and zone watermarks are determined by present_pages. * The per-cpu batchsize and zone watermarks are determined by present_pages.
* In the DMA zone, a significant percentage may be consumed by kernel image * In the DMA zone, a significant percentage may be consumed by kernel image
* and other unfreeable allocations which can skew the watermarks badly. This * and other unfreeable allocations which can skew the watermarks badly. This
* function may optionally be used to account for unfreeable pages in * function may optionally be used to account for unfreeable pages in the
* ZONE_DMA. The effect will be lower watermarks and smaller per-cpu batchsize * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
* smaller per-cpu batchsize.
*/ */
void __init set_dma_reserve(unsigned long new_dma_reserve) void __init set_dma_reserve(unsigned long new_dma_reserve)
{ {
@@ -2844,10 +2843,11 @@ static void setup_per_zone_lowmem_reserve(void)
calculate_totalreserve_pages(); calculate_totalreserve_pages();
} }
/* /**
* setup_per_zone_pages_min - called when min_free_kbytes changes. Ensures * setup_per_zone_pages_min - called when min_free_kbytes changes.
* that the pages_{min,low,high} values for each zone are set correctly *
* with respect to min_free_kbytes. * Ensures that the pages_{min,low,high} values for each zone are set correctly
* with respect to min_free_kbytes.
*/ */
void setup_per_zone_pages_min(void) void setup_per_zone_pages_min(void)
{ {