mirror of https://github.com/torvalds/linux.git
ee99c71c59
Impact: cleanup

In almost all cases, for_each_zone() is used together with
populated_zone(), because most callers don't need information about
memoryless nodes.  for_each_populated_zone() therefore helps simplify
such code.  This patch has no functional change.

[akpm@linux-foundation.org: small cleanup]
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
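An illustrative sketch of the pattern this cleanup targets (not a hunk
from the patch itself; the loop body is a placeholder):

	struct zone *zone;

	/* Before: callers open-code the populated_zone() check. */
	for_each_zone(zone) {
		if (!populated_zone(zone))
			continue;
		/* ... per-zone work ... */
	}

	/* After: for_each_populated_zone() skips empty zones itself. */
	for_each_populated_zone(zone) {
		/* ... per-zone work ... */
	}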
386 lines
9.2 KiB
C
/*
 * linux/kernel/power/swsusp.c
 *
 * This file provides code to write suspend image to swap and read it back.
 *
 * Copyright (C) 1998-2001 Gabor Kuti <seasons@fornax.hu>
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2.
 *
 * I'd like to thank the following people for their work:
 *
 * Pavel Machek <pavel@ucw.cz>:
 * Modifications, defectiveness pointing, being with me at the very beginning,
 * suspend to swap space, stop all tasks. Port to 2.4.18-ac and 2.5.17.
 *
 * Steve Doddi <dirk@loth.demon.co.uk>:
 * Support the possibility of hardware state restoring.
 *
 * Raph <grey.havens@earthling.net>:
 * Support for preserving states of network devices and virtual console
 * (including X and svgatextmode)
 *
 * Kurt Garloff <garloff@suse.de>:
 * Straightened the critical function in order to prevent compilers from
 * playing tricks with local variables.
 *
 * Andreas Mohr <a.mohr@mailto.de>
 *
 * Alex Badea <vampire@go.ro>:
 * Fixed runaway init
 *
 * Rafael J. Wysocki <rjw@sisk.pl>
 * Reworked the freeing of memory and the handling of swap
 *
 * More state savers are welcome. Especially for the scsi layer...
 *
 * For TODOs, FIXMEs also look in Documentation/power/swsusp.txt
 */

#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/swap.h>
#include <linux/pm.h>
#include <linux/swapops.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/rbtree.h>

#include "power.h"

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size = 500 * 1024 * 1024;
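
/*
 * For example, the preferred image size can be lowered at run time
 * with something like:
 *
 *	echo 104857600 > /sys/power/image_size
 *
 * (the value is in bytes, here 100 MB).
 */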

int in_suspend __nosavedata = 0;

/**
 * The following functions are used for tracing the allocated
 * swap pages, so that they can be freed in case of an error.
 */

struct swsusp_extent {
	struct rb_node node;
	unsigned long start;
	unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;

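/*
 * Register the given swap offset in the rb-tree of extents, extending
 * an adjacent extent when possible.  Returns 0 on success, -EINVAL if
 * the offset is already registered and -ENOMEM if a new extent cannot
 * be allocated.
 */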
static int swsusp_extents_insert(unsigned long swap_offset)
{
	struct rb_node **new = &(swsusp_extents.rb_node);
	struct rb_node *parent = NULL;
	struct swsusp_extent *ext;

	/* Figure out where to put the new node */
	while (*new) {
		ext = container_of(*new, struct swsusp_extent, node);
		parent = *new;
		if (swap_offset < ext->start) {
			/* Try to merge */
			if (swap_offset == ext->start - 1) {
				ext->start--;
				return 0;
			}
			new = &((*new)->rb_left);
		} else if (swap_offset > ext->end) {
			/* Try to merge */
			if (swap_offset == ext->end + 1) {
				ext->end++;
				return 0;
			}
			new = &((*new)->rb_right);
		} else {
			/* It already is in the tree */
			return -EINVAL;
		}
	}
	/* Add the new node and rebalance the tree. */
	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
	if (!ext)
		return -ENOMEM;

	ext->start = swap_offset;
	ext->end = swap_offset;
	rb_link_node(&ext->node, parent, new);
	rb_insert_color(&ext->node, &swsusp_extents);
	return 0;
}

/**
 * alloc_swapdev_block - allocate a swap page and register that it has
 * been allocated, so that it can be freed in case of an error.
 */

sector_t alloc_swapdev_block(int swap)
{
	unsigned long offset;

	offset = swp_offset(get_swap_page_of_type(swap));
	if (offset) {
		if (swsusp_extents_insert(offset))
			swap_free(swp_entry(swap, offset));
		else
			return swapdev_block(swap, offset);
	}
	return 0;
}

/**
 * free_all_swap_pages - free swap pages allocated for saving image data.
 * It also frees the extents used to register which swap entries had been
 * allocated.
 */

void free_all_swap_pages(int swap)
{
	struct rb_node *node;

	while ((node = swsusp_extents.rb_node)) {
		struct swsusp_extent *ext;
		unsigned long offset;

		ext = container_of(node, struct swsusp_extent, node);
		rb_erase(node, &swsusp_extents);
		for (offset = ext->start; offset <= ext->end; offset++)
			swap_free(swp_entry(swap, offset));

		kfree(ext);
	}
}

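/*
 * swsusp_swap_in_use - report whether any swap pages allocated above
 * are still registered in the extents tree.
 */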
int swsusp_swap_in_use(void)
{
	return (swsusp_extents.rb_node != NULL);
}

/**
 * swsusp_show_speed - print the time elapsed between two events represented by
 * @start and @stop
 *
 * @nr_pages - number of pages processed between @start and @stop
 * @msg - introductory message to print
 */

void swsusp_show_speed(struct timeval *start, struct timeval *stop,
			unsigned nr_pages, char *msg)
{
	s64 elapsed_centisecs64;
	int centisecs;
	int k;
	int kps;

	elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start);
	do_div(elapsed_centisecs64, NSEC_PER_SEC / 100);
	centisecs = elapsed_centisecs64;
	if (centisecs == 0)
		centisecs = 1;	/* avoid div-by-zero */
	k = nr_pages * (PAGE_SIZE / 1024);
	kps = (k * 100) / centisecs;
	printk(KERN_INFO "PM: %s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n",
			msg, k,
			centisecs / 100, centisecs % 100,
			kps / 1000, (kps % 1000) / 10);
}

/**
 * swsusp_shrink_memory - Try to free as much memory as needed
 *
 * ... but do not OOM-kill anyone
 *
 * Notice: all userland should be stopped before it is called, or
 * livelock is possible.
 */

#define SHRINK_BITE 10000
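/*
 * Reclaim at most SHRINK_BITE pages at a time; the caller loops until
 * enough memory has been freed or no more progress can be made.
 */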
static inline unsigned long __shrink_memory(long tmp)
{
	if (tmp > SHRINK_BITE)
		tmp = SHRINK_BITE;
	return shrink_all_memory(tmp);
}

int swsusp_shrink_memory(void)
{
	long tmp;
	struct zone *zone;
	unsigned long pages = 0;
	unsigned int i = 0;
	char *p = "-\\|/";
	struct timeval start, stop;

	printk(KERN_INFO "PM: Shrinking memory... ");
	do_gettimeofday(&start);
	do {
		long size, highmem_size;

		highmem_size = count_highmem_pages();
		size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
		tmp = size;
		size += highmem_size;
		for_each_populated_zone(zone) {
			tmp += snapshot_additional_pages(zone);
			if (is_highmem(zone)) {
				highmem_size -=
					zone_page_state(zone, NR_FREE_PAGES);
			} else {
				tmp -= zone_page_state(zone, NR_FREE_PAGES);
				tmp += zone->lowmem_reserve[ZONE_NORMAL];
			}
		}

		if (highmem_size < 0)
			highmem_size = 0;

		tmp += highmem_size;
		if (tmp > 0) {
			tmp = __shrink_memory(tmp);
			if (!tmp)
				return -ENOMEM;
			pages += tmp;
		} else if (size > image_size / PAGE_SIZE) {
			tmp = __shrink_memory(size - (image_size / PAGE_SIZE));
			pages += tmp;
		}
		printk("\b%c", p[i++%4]);
	} while (tmp > 0);
	do_gettimeofday(&stop);
	printk("\bdone (%lu pages freed)\n", pages);
	swsusp_show_speed(&start, &stop, pages, "Freed");

	return 0;
}

/*
 * Platforms, like ACPI, may want us to save some memory used by them during
 * hibernation and to restore the contents of this memory during the subsequent
 * resume.  The code below implements a mechanism allowing us to do that.
 */

struct nvs_page {
	unsigned long phys_start;
	unsigned int size;
	void *kaddr;
	void *data;
	struct list_head node;
};

static LIST_HEAD(nvs_list);

/**
 * hibernate_nvs_register - register platform NVS memory region to save
 * @start - physical address of the region
 * @size - size of the region
 *
 * The NVS region need not be page-aligned (both ends) and we arrange
 * things so that the data from page-aligned addresses in this region will
 * be copied into separate RAM pages.
 */
int hibernate_nvs_register(unsigned long start, unsigned long size)
{
	struct nvs_page *entry, *next;

	while (size > 0) {
		unsigned int nr_bytes;

		entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
		if (!entry)
			goto Error;

		list_add_tail(&entry->node, &nvs_list);
		entry->phys_start = start;
		nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
		entry->size = (size < nr_bytes) ? size : nr_bytes;

		start += entry->size;
		size -= entry->size;
	}
	return 0;

 Error:
	list_for_each_entry_safe(entry, next, &nvs_list, node) {
		list_del(&entry->node);
		kfree(entry);
	}
	return -ENOMEM;
}

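/*
 * Illustrative (hypothetical) caller: a platform driver that finds an
 * NVS region in the firmware memory map would register it once during
 * boot, e.g.
 *
 *	if (hibernate_nvs_register(nvs_start, nvs_size))
 *		pr_err("PM: Failed to register NVS region\n");
 *
 * where nvs_start and nvs_size stand for values taken from the
 * platform's tables.  The hibernation core then allocates, saves and
 * restores the region via the functions below.
 */
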
/**
 * hibernate_nvs_free - free data pages allocated for saving NVS regions
 */
void hibernate_nvs_free(void)
{
	struct nvs_page *entry;

	list_for_each_entry(entry, &nvs_list, node)
		if (entry->data) {
			free_page((unsigned long)entry->data);
			entry->data = NULL;
			if (entry->kaddr) {
				iounmap(entry->kaddr);
				entry->kaddr = NULL;
			}
		}
}

/**
 * hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
 */
int hibernate_nvs_alloc(void)
{
	struct nvs_page *entry;

	list_for_each_entry(entry, &nvs_list, node) {
		entry->data = (void *)__get_free_page(GFP_KERNEL);
		if (!entry->data) {
			hibernate_nvs_free();
			return -ENOMEM;
		}
	}
	return 0;
}

/**
 * hibernate_nvs_save - save NVS memory regions
 */
void hibernate_nvs_save(void)
{
	struct nvs_page *entry;

	printk(KERN_INFO "PM: Saving platform NVS memory\n");

	list_for_each_entry(entry, &nvs_list, node)
		if (entry->data) {
			entry->kaddr = ioremap(entry->phys_start, entry->size);
			memcpy(entry->data, entry->kaddr, entry->size);
		}
}

/**
 * hibernate_nvs_restore - restore NVS memory regions
 *
 * This function is going to be called with interrupts disabled, so it
 * cannot iounmap the virtual addresses used to access the NVS region.
 */
void hibernate_nvs_restore(void)
{
	struct nvs_page *entry;

	printk(KERN_INFO "PM: Restoring platform NVS memory\n");

	list_for_each_entry(entry, &nvs_list, node)
		if (entry->data)
			memcpy(entry->kaddr, entry->data, entry->size);
}