x86: Use new cache mode type in asm/pgtable.h
Instead of directly using the cache mode bits in the pte, switch to using the cache mode type. This requires changing some callers of is_new_memtype_allowed() as well.

Based-on-patch-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stefan.bader@canonical.com
Cc: xen-devel@lists.xensource.com
Cc: konrad.wilk@oracle.com
Cc: ville.syrjala@linux.intel.com
Cc: david.vrabel@citrix.com
Cc: jbeulich@suse.com
Cc: toshi.kani@hp.com
Cc: plagnioj@jcrosoft.com
Cc: tomi.valkeinen@ti.com
Cc: bhelgaas@google.com
Link: http://lkml.kernel.org/r/1415019724-4317-8-git-send-email-jgross@suse.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit d85f33342a
parent 2df58b6d35
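For context: the cache mode type and the conversion helpers used in the hunks below (enum page_cache_mode, cachemode2protval(), pgprot2cachemode()) come from earlier patches in this series. The standalone C sketch below only illustrates the idea of translating between an abstract cache mode and pte protection bits; the bit values and table contents are made-up placeholders, and protval2cachemode() is a simplified stand-in for the real pgprot2cachemode(), which takes a pgprot_t.

#include <stdio.h>

/* Cache mode type (names as used by this series). */
enum page_cache_mode {
	_PAGE_CACHE_MODE_WB,
	_PAGE_CACHE_MODE_WC,
	_PAGE_CACHE_MODE_UC_MINUS,
	_PAGE_CACHE_MODE_UC,
	_PAGE_CACHE_MODE_NUM
};

/* Illustrative PTE cache-control bits -- placeholder values only. */
#define _PAGE_PWT	0x008UL
#define _PAGE_PCD	0x010UL
#define _PAGE_CACHE_MASK	(_PAGE_PWT | _PAGE_PCD)

/* Cache mode -> pte protection bits (illustrative mapping). */
static const unsigned long cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
	[_PAGE_CACHE_MODE_WB]       = 0,
	[_PAGE_CACHE_MODE_WC]       = _PAGE_PWT,
	[_PAGE_CACHE_MODE_UC_MINUS] = _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC]       = _PAGE_PCD | _PAGE_PWT,
};

static unsigned long cachemode2protval(enum page_cache_mode pcm)
{
	return cachemode2pte_tbl[pcm];
}

/* Simplified stand-in for pgprot2cachemode(): reverse lookup by bits. */
static enum page_cache_mode protval2cachemode(unsigned long prot)
{
	int i;

	for (i = 0; i < _PAGE_CACHE_MODE_NUM; i++)
		if (cachemode2pte_tbl[i] == (prot & _PAGE_CACHE_MASK))
			return i;
	return _PAGE_CACHE_MODE_WB;
}

int main(void)
{
	unsigned long prot = cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS);

	/* Callers now compare abstract cache modes, not raw pte bits. */
	printf("UC- protval: %#lx, mode back: %d\n",
	       prot, protval2cachemode(prot));
	return 0;
}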
@@ -9,9 +9,10 @@
 /*
  * Macro to mark a page protection value as UC-
  */
 #define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
-	 ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))		\
+	 ? (__pgprot(pgprot_val(prot) |					\
+		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))

 #ifndef __ASSEMBLY__
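The interface of pgprot_noncached() is unchanged by this hunk; only how the UC- bits are obtained changes. As an illustration only (not part of this patch, mydrv_mmap is a hypothetical driver function), the usual driver-side pattern for mapping device memory uncached looks like this:

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver mmap handler: mark the mapping uncached with
 * pgprot_noncached(), then remap the physical pages into user space. */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       size, vma->vm_page_prot);
}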
@@ -404,8 +405,8 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 #define canon_pgprot(p) __pgprot(massage_pgprot(p))

 static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
-					 unsigned long flags,
-					 unsigned long new_flags)
+					 enum page_cache_mode pcm,
+					 enum page_cache_mode new_pcm)
 {
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
@@ -419,10 +420,10 @@ static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 */
-	if ((flags == _PAGE_CACHE_UC_MINUS &&
-	     new_flags == _PAGE_CACHE_WB) ||
-	    (flags == _PAGE_CACHE_WC &&
-	     new_flags == _PAGE_CACHE_WB)) {
+	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
+	     new_pcm == _PAGE_CACHE_MODE_WB) ||
+	    (pcm == _PAGE_CACHE_MODE_WC &&
+	     new_pcm == _PAGE_CACHE_MODE_WB)) {
		return 0;
	}

@@ -142,7 +142,8 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
-					    prot_val, new_prot_val)) {
+					    pgprot2cachemode(__pgprot(prot_val)),
+					    pgprot2cachemode(__pgprot(new_prot_val)))) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
@@ -455,7 +455,9 @@ int io_reserve_memtype(resource_size_t start, resource_size_t end,
	if (ret)
		goto out_err;

-	if (!is_new_memtype_allowed(start, size, req_type, new_type))
+	if (!is_new_memtype_allowed(start, size,
+				    pgprot2cachemode(__pgprot(req_type)),
+				    pgprot2cachemode(__pgprot(new_type))))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
@@ -630,7 +632,9 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,

	if (flags != want_flags) {
		if (strict_prot ||
-		    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
+		    !is_new_memtype_allowed(paddr, size,
+				pgprot2cachemode(__pgprot(want_flags)),
+				pgprot2cachemode(__pgprot(flags)))) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
				" for [mem %#010Lx-%#010Lx], got %s\n",