mirror of https://github.com/torvalds/linux.git
90572890d2
Change the per-page last fault tracking to use cpu,pid instead of
nid,pid. This will allow us to try and look up the alternate task more
easily. Note that even though it is the cpu that is stored in the page
flags, the mpol_misplaced() decision is still based on the node.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1381141781-10992-43-git-send-email-mgorman@suse.de
[ Fixed build failure on 32-bit systems. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
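As a rough illustration of the cpu,pid packing described above, here is a
minimal stand-alone C sketch. The SKETCH_* names, the 8-bit pid field, and
the helper functions are assumptions made up for this example, not the
kernel's real LAST_CPUPID_* definitions.

/*
 * Stand-alone sketch of the cpu,pid packing idea. Field widths and
 * names here are illustrative assumptions only.
 */
#include <stdio.h>

#define SKETCH_PID_BITS        8
#define SKETCH_PID_MASK        ((1 << SKETCH_PID_BITS) - 1)

/* Pack a cpu number and the low bits of a pid into one integer. */
static int sketch_cpu_pid_to_cpupid(int cpu, int pid)
{
        return (cpu << SKETCH_PID_BITS) | (pid & SKETCH_PID_MASK);
}

static int sketch_cpupid_to_cpu(int cpupid)
{
        return cpupid >> SKETCH_PID_BITS;
}

static int sketch_cpupid_to_pid(int cpupid)
{
        return cpupid & SKETCH_PID_MASK;
}

int main(void)
{
        int cpupid = sketch_cpu_pid_to_cpupid(3, 4242);

        /* Only the pid's low bits survive; that is enough for a
         * "probably the same task" comparison. */
        printf("cpu=%d pid(low bits)=%d\n",
               sketch_cpupid_to_cpu(cpupid), sketch_cpupid_to_pid(cpupid));
        return 0;
}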
117 lines
2.4 KiB
C
/*
 *  linux/mm/mmzone.c
 *
 * management codes for pgdats, zones and page flags
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

struct pglist_data *first_online_pgdat(void)
{
        return NODE_DATA(first_online_node);
}

struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
        int nid = next_online_node(pgdat->node_id);

        if (nid == MAX_NUMNODES)
                return NULL;
        return NODE_DATA(nid);
}

/*
 * next_zone - helper magic for for_each_zone()
 */
struct zone *next_zone(struct zone *zone)
{
        pg_data_t *pgdat = zone->zone_pgdat;

        if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
                zone++;
        else {
                pgdat = next_online_pgdat(pgdat);
                if (pgdat)
                        zone = pgdat->node_zones;
                else
                        zone = NULL;
        }
        return zone;
}

static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
{
#ifdef CONFIG_NUMA
        return node_isset(zonelist_node_idx(zref), *nodes);
#else
        return 1;
#endif /* CONFIG_NUMA */
}

/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *next_zones_zonelist(struct zoneref *z,
                                        enum zone_type highest_zoneidx,
                                        nodemask_t *nodes,
                                        struct zone **zone)
{
        /*
         * Find the next suitable zone to use for the allocation.
         * Only filter based on nodemask if it's set
         */
        if (likely(nodes == NULL))
                while (zonelist_zone_idx(z) > highest_zoneidx)
                        z++;
        else
                while (zonelist_zone_idx(z) > highest_zoneidx ||
                                (z->zone && !zref_in_nodemask(z, nodes)))
                        z++;

        *zone = zonelist_zone(z);
        return z;
}

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
int memmap_valid_within(unsigned long pfn,
                                        struct page *page, struct zone *zone)
{
        if (page_to_pfn(page) != pfn)
                return 0;

        if (page_zone(page) != zone)
                return 0;

        return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

void lruvec_init(struct lruvec *lruvec)
{
        enum lru_list lru;

        memset(lruvec, 0, sizeof(struct lruvec));

        for_each_lru(lru)
                INIT_LIST_HEAD(&lruvec->lists[lru]);
}

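/*
 * Swap the last-cpupid tracking bits in page->flags for a new value
 * and return the old one. page->flags is updated concurrently by other
 * paths, so a cmpxchg() retry loop is used to avoid clobbering
 * unrelated flag bits.
 */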
#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
int page_cpupid_xchg_last(struct page *page, int cpupid)
{
        unsigned long old_flags, flags;
        int last_cpupid;

        do {
                old_flags = flags = page->flags;
                last_cpupid = page_cpupid_last(page);

                flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
                flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
        } while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));

        return last_cpupid;
}
#endif
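
For readers outside the kernel tree, the lock-free read-modify-write
pattern used by page_cpupid_xchg_last() can be sketched with portable C11
atomics. The 8-bit field width and its placement below are assumptions for
illustration, not the kernel's page-flags layout.

/* C11 sketch of the same cmpxchg retry loop idiom. */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long flags;

/* Replace the 8-bit field at bit 8 of 'flags'; return the old field. */
static unsigned long xchg_field(unsigned long new_val)
{
        unsigned long old, updated;

        old = atomic_load(&flags);
        do {
                updated = old & ~(0xfful << 8);         /* clear the field */
                updated |= (new_val & 0xfful) << 8;     /* install new value */
                /* on failure, 'old' is refreshed and we retry */
        } while (!atomic_compare_exchange_weak(&flags, &old, updated));

        return (old >> 8) & 0xff;
}

int main(void)
{
        atomic_store(&flags, 0x1ul | (0x42ul << 8));
        printf("old field: %#lx\n", xchg_field(0x7f));    /* 0x42 */
        printf("flags now: %#lx\n", atomic_load(&flags)); /* 0x7f01 */
        return 0;
}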