mm: remove checks for pte_index
Since pte_index is always defined, we don't need to check whether it's
defined or not.  Delete the slow version that doesn't depend on it and
remove the #define since nobody needs to test for it.

Link: https://lkml.kernel.org/r/20230819031837.3160096-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Christian Dietrich <stettberger@dokucode.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit bb7dbaafff
parent 14a405c3a9
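For context, "#define pte_index pte_index" is the kernel's self-define
idiom: defining an inline helper and then #defining its own name makes the
helper detectable by the preprocessor, so consumers can probe for it with
#ifdef and keep a fallback for architectures that lack it. A minimal sketch
of the idiom this commit retires (the branch bodies below are placeholder
comments, not kernel code):

/* In a header: define the helper, then self-#define it so that
 * other code can detect it with the preprocessor. */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
#define pte_index pte_index

/* In a consumer: probe for the helper. This is exactly the kind of
 * check the commit deletes, since pte_index is now always defined. */
#ifdef pte_index
	/* fast path: batch PTE insertions using pte_index() */
#else
	/* slow path: insert one page at a time */
#endif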
include/linux/pgtable.h
@@ -63,7 +63,6 @@ static inline unsigned long pte_index(unsigned long address)
 {
 	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 }
-#define pte_index pte_index
 
 #ifndef pmd_index
 static inline unsigned long pmd_index(unsigned long address)
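As a concrete illustration of the arithmetic in the helper kept by this
hunk, here is a self-contained userspace rendition (PAGE_SHIFT and
PTRS_PER_PTE are assumed to be the common 4 KiB / 512-entry values; the
kernel's real values are per-architecture):

#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed: 4 KiB pages */
#define PTRS_PER_PTE	512	/* assumed: 512 PTEs per page table */

/* Same arithmetic as the kernel's pte_index(): which slot of the
 * last-level page table covers this virtual address? */
static unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

int main(void)
{
	unsigned long addr = 0x7f1234567000UL;

	/* 0x7f1234567000 >> 12 = 0x7f1234567; low 9 bits = 0x167 = 359 */
	printf("pte_index(%#lx) = %lu\n", addr, pte_index(addr));
	return 0;
}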
mm/memory.c (17 changed lines)
@@ -1869,7 +1869,6 @@ out:
 	return retval;
 }
 
-#ifdef pte_index
 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
 			unsigned long addr, struct page *page, pgprot_t prot)
 {
@@ -1884,7 +1883,7 @@ static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
 }
 
 /* insert_pages() amortizes the cost of spinlock operations
- * when inserting pages in a loop. Arch *must* define pte_index.
+ * when inserting pages in a loop.
  */
 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
 			struct page **pages, unsigned long *num, pgprot_t prot)
@@ -1943,7 +1942,6 @@ out:
 	*num = remaining_pages_total;
 	return ret;
 }
-#endif /* ifdef pte_index */
 
 /**
  * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
@@ -1963,7 +1961,6 @@
 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
 		struct page **pages, unsigned long *num)
 {
-#ifdef pte_index
 	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
 
 	if (addr < vma->vm_start || end_addr >= vma->vm_end)
@@ -1975,18 +1972,6 @@ int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
 	}
 	/* Defer page refcount checking till we're about to map that page. */
 	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
-#else
-	unsigned long idx = 0, pgcount = *num;
-	int err = -EINVAL;
-
-	for (; idx < pgcount; ++idx) {
-		err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
-		if (err)
-			break;
-	}
-	*num = pgcount - idx;
-	return err;
-#endif /* ifdef pte_index */
 }
 EXPORT_SYMBOL(vm_insert_pages);
 
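With the fallback gone, every caller of vm_insert_pages() takes the batched
path. A hedged sketch of how a driver's ->mmap handler might use the API
(the driver structure and field names are hypothetical; only the
vm_insert_pages() signature and the "pages not yet inserted" semantics of
*num come from the code above):

/* Hypothetical driver ->mmap: map a preallocated page array into
 * userspace with one batched call instead of per-page vm_insert_page(),
 * so the pmd lock is taken once per page table rather than per page. */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydrv *drv = file->private_data;	/* hypothetical */
	unsigned long num = drv->npages;	/* hypothetical */
	int err;

	/* On return, num holds how many pages were not inserted. */
	err = vm_insert_pages(vma, vma->vm_start, drv->pages, &num);
	if (err)
		pr_err("mydrv: %lu pages left unmapped: %d\n", num, err);
	return err;
}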