mirror of
https://github.com/torvalds/linux.git
synced 2024-11-15 00:21:59 +00:00
59bd00c850
The patch added a special get_unmapped_area for framebuffer which was hooked to the file ops in drivers/video/fbmem.c. This is needed since v2.6.29-rc1 where nommu vma management was updated, and mmap of framebuffer caused kernel BUG panic. You may turn on the "Debug the global anon/private NOMMU mapping region tree" config option to see such a message. As Documentation/nommu-mmap.txt says, "To provide shareable character device support, a driver must provide a file->f_op->get_unmapped_area() operation. The mmap() routines will call this to get a proposed address for the mapping." With this change, user space should call mmap for the framebuffer using a shared mapping. Or it can try a shared mapping first, then fall back to a private mapping if that fails. This shared-mapping usage is now consistent between mmu and nommu. The sys_ file may not be a good place for this patch, but there is a similar one for sparc. I tested a similar patch on nios2nommu, though I don't have a blackfin board to test on. Signed-off-by: Thomas Chou <thomas@wytron.com.tw> Signed-off-by: Graf Yang <graf.yang@analog.com> Signed-off-by: Michael Hennerich <michael.hennerich@analog.com> Signed-off-by: Mike Frysinger <vapier@gentoo.org>
110 lines
3.1 KiB
C
110 lines
3.1 KiB
C
/*
 * Blackfin page-table definitions.
 *
 * Blackfin is a NOMMU architecture: there are no real page tables, so
 * almost everything here is a stub that satisfies the generic MM code.
 * Page protection is instead handled by CPLB (Cacheability Protection
 * Lookaside Buffer) management — see the _PAGE_* bits below.
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef _BLACKFIN_PGTABLE_H
#define _BLACKFIN_PGTABLE_H

#include <asm-generic/4level-fixup.h>

#include <asm/page.h>
#include <asm/def_LPBlackfin.h>

typedef pte_t *pte_addr_t;

/*
 * Trivial page table functions — every pgd is "present", none is bad,
 * and all kernel addresses are valid, since there is no MMU to consult.
 */
#define pgd_present(pgd) (1)
#define pgd_none(pgd) (0)
#define pgd_bad(pgd) (0)
#define pgd_clear(pgdp)
#define kern_addr_valid(addr) (1)

#define pmd_offset(a, b) ((void *)0)
#define pmd_none(x) (!pmd_val(x))
#define pmd_present(x) (pmd_val(x))
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x) (pmd_val(x) & ~PAGE_MASK)

#define PAGE_NONE __pgprot(0) /* these mean nothing to NO_MM */
#define PAGE_SHARED __pgprot(0) /* these mean nothing to NO_MM */
#define PAGE_COPY __pgprot(0) /* these mean nothing to NO_MM */
#define PAGE_READONLY __pgprot(0) /* these mean nothing to NO_MM */
#define PAGE_KERNEL __pgprot(0) /* these mean nothing to NO_MM */

#define pgprot_noncached(prot) (prot)

extern void paging_init(void);

/* Swap is not supported without an MMU; these are inert stubs. */
#define __swp_type(x) (0)
#define __swp_offset(x) (0)
#define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })

/* No nonlinear (file-backed) PTEs on NOMMU — never a file pte. */
static inline int pte_file(pte_t pte)
{
	return 0;
}

#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

/*
 * Page access control based on Blackfin CPLB management
 */
#define _PAGE_RD (CPLB_USER_RD)
#define _PAGE_WR (CPLB_USER_WR)
#define _PAGE_USER (CPLB_USER_RD | CPLB_USER_WR)
#define _PAGE_ACCESSED CPLB_ALL_ACCESS
#define _PAGE_DIRTY (CPLB_DIRTY)

/* Generate the standard pte_*() accessors that flip one _PAGE_* bit. */
#define PTE_BIT_FUNC(fn, op) \
	static inline pte_t pte_##fn(pte_t _pte) { _pte.pte op; return _pte; }

PTE_BIT_FUNC(rdprotect, &= ~_PAGE_RD);
PTE_BIT_FUNC(mkread, |= _PAGE_RD);
PTE_BIT_FUNC(wrprotect, &= ~_PAGE_WR);
PTE_BIT_FUNC(mkwrite, |= _PAGE_WR);
PTE_BIT_FUNC(exprotect, &= ~_PAGE_USER);
PTE_BIT_FUNC(mkexec, |= _PAGE_USER);
PTE_BIT_FUNC(mkclean, &= ~_PAGE_DIRTY);
PTE_BIT_FUNC(mkdirty, |= _PAGE_DIRTY);
PTE_BIT_FUNC(mkold, &= ~_PAGE_ACCESSED);
PTE_BIT_FUNC(mkyoung, |= _PAGE_ACCESSED);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) (virt_to_page(0))

extern unsigned int kobjsize(const void *objp);

#define swapper_pg_dir ((pgd_t *) 0)

/*
 * No page table caches to initialise.
 */
#define pgtable_cache_init() do { } while (0)
#define io_remap_pfn_range remap_pfn_range

/*
 * All 32bit addresses are effectively valid for vmalloc...
 * Sort of meaningless for non-VM targets.
 */
#define VMALLOC_START 0
#define VMALLOC_END 0xffffffff

/* provide a special get_unmapped_area for framebuffer mmaps of nommu */
extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
					  unsigned long, unsigned long,
					  unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA

#include <asm-generic/pgtable.h>

#endif /* _BLACKFIN_PGTABLE_H */