x86/mm/pat: Drop the rbt_ prefix from external memtype calls
Drop the rbt_ prefix from the rbt_memtype_*() calls, as we no longer use an
rbtree directly.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lkml.kernel.org/r/20191121011601.20611-4-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 511aaca834 (parent 6a9930b1c5)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -603,7 +603,7 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
 
 	spin_lock(&memtype_lock);
 
-	err = rbt_memtype_check_insert(new, new_type);
+	err = memtype_check_insert(new, new_type);
 	if (err) {
 		pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
 			start, end - 1,
@@ -650,7 +650,7 @@ int free_memtype(u64 start, u64 end)
 	}
 
 	spin_lock(&memtype_lock);
-	entry = rbt_memtype_erase(start, end);
+	entry = memtype_erase(start, end);
 	spin_unlock(&memtype_lock);
 
 	if (IS_ERR(entry)) {
@@ -693,7 +693,7 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
 
 	spin_lock(&memtype_lock);
 
-	entry = rbt_memtype_lookup(paddr);
+	entry = memtype_lookup(paddr);
 	if (entry != NULL)
 		rettype = entry->type;
 	else
@@ -1109,7 +1109,7 @@ static struct memtype *memtype_get_idx(loff_t pos)
 		return NULL;
 
 	spin_lock(&memtype_lock);
-	ret = rbt_memtype_copy_nth_element(print_entry, pos);
+	ret = memtype_copy_nth_element(print_entry, pos);
 	spin_unlock(&memtype_lock);
 
 	if (!ret) {
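Note how the free_memtype() hunk above tests the pointer returned by memtype_erase() with IS_ERR(): failure is reported by encoding a negative errno in the returned pointer rather than by returning NULL. Below is a minimal standalone model of that convention; ERR_PTR()/PTR_ERR()/IS_ERR() are re-created here in userspace for illustration, and erase_model() is an invented stand-in, not the kernel function.

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

/* Userspace re-creations of the kernel's pointer-error helpers. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	/* The top 4095 pointer values are reserved for errno codes. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Invented stand-in for memtype_erase(): reject a backwards range. */
static void *erase_model(unsigned long long start, unsigned long long end)
{
	static int dummy_entry;

	if (start >= end)
		return ERR_PTR(-EINVAL);
	return &dummy_entry;
}

int main(void)
{
	void *entry = erase_model(0x2000, 0x1000);

	if (IS_ERR(entry))
		printf("erase failed: %ld\n", PTR_ERR(entry));
	return 0;
}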
diff --git a/arch/x86/mm/pat_internal.h b/arch/x86/mm/pat_internal.h
--- a/arch/x86/mm/pat_internal.h
+++ b/arch/x86/mm/pat_internal.h
@@ -29,20 +29,20 @@ static inline char *cattr_name(enum page_cache_mode pcm)
 }
 
 #ifdef CONFIG_X86_PAT
-extern int rbt_memtype_check_insert(struct memtype *new,
-				    enum page_cache_mode *new_type);
-extern struct memtype *rbt_memtype_erase(u64 start, u64 end);
-extern struct memtype *rbt_memtype_lookup(u64 addr);
-extern int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos);
+extern int memtype_check_insert(struct memtype *new,
+				enum page_cache_mode *new_type);
+extern struct memtype *memtype_erase(u64 start, u64 end);
+extern struct memtype *memtype_lookup(u64 addr);
+extern int memtype_copy_nth_element(struct memtype *out, loff_t pos);
 #else
-static inline int rbt_memtype_check_insert(struct memtype *new,
-					   enum page_cache_mode *new_type)
+static inline int memtype_check_insert(struct memtype *new,
+				       enum page_cache_mode *new_type)
 { return 0; }
-static inline struct memtype *rbt_memtype_erase(u64 start, u64 end)
+static inline struct memtype *memtype_erase(u64 start, u64 end)
 { return NULL; }
-static inline struct memtype *rbt_memtype_lookup(u64 addr)
+static inline struct memtype *memtype_lookup(u64 addr)
 { return NULL; }
-static inline int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
+static inline int memtype_copy_nth_element(struct memtype *out, loff_t pos)
 { return 0; }
 #endif
 
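The header keeps the long-standing CONFIG_X86_PAT split: when PAT is compiled out, the static inline stubs in the #else branch (returning 0 or NULL) stand in for the real implementations, so call sites build without conditionals of their own. A standalone sketch of that pattern follows; HAVE_FEATURE, the struct layout, and main() are simplifications for illustration, not kernel code. As written it compiles and runs against the stub; building with -DHAVE_FEATURE would instead require linking a real memtype_lookup().

#include <stdio.h>
#include <stddef.h>

struct memtype {
	unsigned long long start, end;
	int type;
};

#ifdef HAVE_FEATURE
/* The real implementation would live in a separate translation unit. */
extern struct memtype *memtype_lookup(unsigned long long addr);
#else
/* Feature compiled out: a no-op stub, mirroring the #else branch above. */
static inline struct memtype *memtype_lookup(unsigned long long addr)
{ return NULL; }
#endif

int main(void)
{
	struct memtype *entry = memtype_lookup(0x1000);

	/* With the stub, the fallback path is taken unconditionally. */
	printf("type = %d\n", entry ? entry->type : -1);
	return 0;
}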
diff --git a/arch/x86/mm/pat_interval.c b/arch/x86/mm/pat_interval.c
--- a/arch/x86/mm/pat_interval.c
+++ b/arch/x86/mm/pat_interval.c
@@ -109,8 +109,8 @@ failure:
 	return -EBUSY;
 }
 
-int rbt_memtype_check_insert(struct memtype *new,
-			     enum page_cache_mode *ret_type)
+int memtype_check_insert(struct memtype *new,
+			 enum page_cache_mode *ret_type)
 {
 	int err = 0;
 
@@ -125,13 +125,13 @@ int rbt_memtype_check_insert(struct memtype *new,
 	return 0;
 }
 
-struct memtype *rbt_memtype_erase(u64 start, u64 end)
+struct memtype *memtype_erase(u64 start, u64 end)
 {
 	struct memtype *data;
 
 	/*
 	 * Since the memtype_rbroot tree allows overlapping ranges,
-	 * rbt_memtype_erase() checks with EXACT_MATCH first, i.e. free
+	 * memtype_erase() checks with EXACT_MATCH first, i.e. free
 	 * a whole node for the munmap case. If no such entry is found,
 	 * it then checks with END_MATCH, i.e. shrink the size of a node
 	 * from the end for the mremap case.
@@ -157,14 +157,14 @@ struct memtype *rbt_memtype_erase(u64 start, u64 end)
 	return data;
 }
 
-struct memtype *rbt_memtype_lookup(u64 addr)
+struct memtype *memtype_lookup(u64 addr)
 {
 	return memtype_interval_iter_first(&memtype_rbroot, addr,
 					   addr + PAGE_SIZE);
 }
 
 #if defined(CONFIG_DEBUG_FS)
-int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
+int memtype_copy_nth_element(struct memtype *out, loff_t pos)
 {
 	struct memtype *match;
 	int i = 1;
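The comment preserved in memtype_erase() spells out its two-step matching policy: try EXACT_MATCH first (munmap frees a whole tracked range), and only then END_MATCH (mremap shrinks a tracked range from its tail). Below is a standalone model of that policy on a single range; erase_range() and struct range are invented names, and the real function additionally walks the interval tree and re-inserts the shrunk node.

#include <stdio.h>

struct range {
	unsigned long long start, end;	/* tracked as [start, end) */
};

/*
 * Model of the EXACT-then-END policy: returns 1 and updates *r when
 * [start, end) can be erased, 0 when neither rule applies.
 */
static int erase_range(struct range *r, unsigned long long start,
		       unsigned long long end)
{
	if (r->start == start && r->end == end) {
		/* EXACT_MATCH: munmap frees the whole node. */
		r->start = r->end = 0;
		return 1;
	}
	if (r->end == end && r->start < start) {
		/* END_MATCH: mremap shrinks the node from its end. */
		r->end = start;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct range a = { 0x1000, 0x3000 };

	/* Shrink from the end: a becomes [0x1000, 0x2000). */
	printf("end match:   %d\n", erase_range(&a, 0x2000, 0x3000));
	/* Now an exact match frees what is left. */
	printf("exact match: %d\n", erase_range(&a, 0x1000, 0x2000));
	return 0;
}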