9c312058b2
I believe http://bugzilla.kernel.org/show_bug.cgi?id=10318 is a false
positive. There's no way in which networking will be using highmem pages
here, so it won't be taking the KM_USER0 kmap slot, so there's no point
in performing these checks.

Cc: Pawel Staszewski <pstaszewski@artcom.pl>
Cc: Ingo Molnar <mingo@elte.hu>
Acked-by: Christoph Lameter <clameter@sgi.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

[ Really sad. We lose almost all real-life coverage of the debug tests
  with this patch. Now it will only report problems for the cases where
  people actually end up using a HIGHMEM page, not when they just
  _might_ use one. - Linus ]

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
159 lines
3.8 KiB
C
#include <linux/highmem.h>
#include <linux/module.h>

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
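
/*
 * Editor's note: a minimal usage sketch for kmap()/kunmap(), not part
 * of the original file. It assumes a process context that may sleep;
 * the freshly allocated page here is hypothetical.
 *
 *	struct page *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
 *
 *	if (page) {
 *		char *addr = kmap(page);
 *		memset(addr, 0, PAGE_SIZE);
 *		kunmap(page);
 *		__free_page(page);
 *	}
 */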

static void debug_kmap_atomic_prot(enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	static unsigned warn_count = 10;

	if (unlikely(warn_count == 0))
		return;

	if (unlikely(in_interrupt())) {
		if (in_irq()) {
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		} else if (!irqs_disabled()) {	/* softirq */
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
			    type != KM_SKB_SUNRPC_DATA &&
			    type != KM_SKB_DATA_SOFTIRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		}
	}

	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
	    type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
		if (!irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
		if (irq_count() == 0 && !irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	}
#endif
}
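
/*
 * Editor's note (summary of the checks above, added for clarity): in
 * hardirq context only the KM_IRQ*, KM_BIO_*_IRQ and KM_BOUNCE_READ
 * slots are expected; in softirq context the KM_SOFTIRQ* and SKB
 * slots are allowed as well. Conversely, using an IRQ-class slot with
 * interrupts enabled, or a softirq slot outside interrupt context
 * with interrupts enabled, also triggers the warning.
 */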

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because no global lock is needed and because the kmap code must
 * perform a global TLB invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so
 * atomic kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic_prot(type);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

void *kmap_atomic(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without remapping it first. Keeping stale mappings around is
	 * also a bad idea, in case the page changes cacheability
	 * attributes or becomes a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	else {
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}
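
/*
 * Editor's note: an illustrative sketch, not part of the original
 * file. Atomic kmaps must not sleep between map and unmap, and each
 * concurrent mapping needs its own km_type slot. Copying between two
 * hypothetical highmem pages 'dst' and 'src' might look like this:
 *
 *	void *vto = kmap_atomic(dst, KM_USER0);
 *	void *vfrom = kmap_atomic(src, KM_USER1);
 *	memcpy(vto, vfrom, PAGE_SIZE);
 *	kunmap_atomic(vfrom, KM_USER1);
 *	kunmap_atomic(vto, KM_USER0);
 */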

/* This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
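
/*
 * Editor's note: an illustrative sketch, not part of the original
 * file. kmap_atomic_pfn() is useful for memory that has no struct
 * page, e.g. a device aperture; 'pfn' and 'buf' here are hypothetical.
 *
 *	void *src = kmap_atomic_pfn(pfn, KM_USER0);
 *	memcpy(buf, src, PAGE_SIZE);
 *	kunmap_atomic(src, KM_USER0);
 */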

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
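
/*
 * Editor's note: an illustrative round trip, not part of the original
 * file; 'page' is hypothetical. In both the lowmem and the fixmap
 * case, the reverse lookup recovers the originally mapped page:
 *
 *	void *addr = kmap_atomic(page, KM_USER0);
 *	BUG_ON(kmap_atomic_to_page(addr) != page);
 *	kunmap_atomic(addr, KM_USER0);
 */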

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_to_page);