#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
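
/*
 * Illustrative sketch only, not part of this file's API: a caller in
 * process context copying a buffer into a highmem page with
 * kmap()/kunmap().  kmap() may sleep waiting for a free slot in the
 * kmap pool, so this must never run in atomic context.  The helper
 * name and parameters are hypothetical.
 */
static void __maybe_unused highmem_copy_example(struct page *page,
						const void *buf, size_t len)
{
	char *vaddr = kmap(page);	/* may sleep */

	memcpy(vaddr, buf, min_t(size_t, len, PAGE_SIZE));
	kunmap(page);
}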

/*
 * kmap_atomic/kunmap_atomic are significantly faster than kmap/kunmap
 * because no global lock is needed and because the kmap code must perform
 * a global TLB invalidation when the kmap pool wraps.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
EXPORT_SYMBOL(kmap_atomic);
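
/*
 * Illustrative sketch only, mirroring what clear_highpage() does: zero a
 * page through a short-lived atomic mapping.  Nothing between
 * kmap_atomic() and kunmap_atomic() may sleep, and nested atomic kmaps
 * must be released in LIFO order because the per-CPU slot index is a
 * stack.  The helper name is hypothetical.
 */
static void __maybe_unused zero_page_example(struct page *page)
{
	void *vaddr = kmap_atomic(page);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);	/* takes the vaddr, not the page */
}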

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
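
/*
 * Illustrative sketch only: read one word from memory identified solely
 * by its page frame number (e.g. a reserved region with no struct page).
 * The pfn and helper name are hypothetical; the mapping follows the same
 * atomic rules as kmap_atomic() and is torn down with kunmap_atomic()
 * on the returned vaddr.
 */
static u32 __maybe_unused peek_pfn_example(unsigned long pfn)
{
	u32 *vaddr = kmap_atomic_pfn(pfn);
	u32 val = vaddr[0];

	kunmap_atomic(vaddr);
	return val;
}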

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte-idx, vaddr);
		kmap_atomic_idx_pop();
		arch_flush_lazy_mmu_mode();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
EXPORT_SYMBOL(kmap_atomic_to_page);

void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
	totalram_pages += totalhigh_pages;
}