mm/hugetlb: avoid calculating fault_mutex_hash in truncate_op case
The fault_mutex hashing overhead can be avoided in the truncate_op case because page faults cannot race with truncation in this routine. So calculate the fault_mutex hash only in the !truncate_op case to save some CPU cycles.

Link: https://lkml.kernel.org/r/20210308112809.26107-6-linmiaohe@huawei.com
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
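For context, a minimal sketch of how the affected loop in remove_inode_hugepages() reads after this change (simplified; the real loop body also unmaps the page and removes it from the page cache, which is elided here):

	for (i = 0; i < pagevec_count(&pvec); ++i) {
		struct page *page = pvec.pages[i];
		u32 hash = 0;	/* only computed and used when !truncate_op */

		index = page->index;
		if (!truncate_op) {
			/*
			 * Hole punch can race with page faults, so take the
			 * fault mutex.  Truncation cannot race here, so the
			 * hash calculation is skipped entirely in that case.
			 */
			hash = hugetlb_fault_mutex_hash(mapping, index);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);
		}

		/* ... unmap and remove the page from the page cache ... */

		if (!truncate_op)
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	}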
commit d4241a049a
parent d83e6c8a9b
@@ -482,10 +482,9 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 
 		for (i = 0; i < pagevec_count(&pvec); ++i) {
 			struct page *page = pvec.pages[i];
-			u32 hash;
+			u32 hash = 0;
 
 			index = page->index;
-			hash = hugetlb_fault_mutex_hash(mapping, index);
 			if (!truncate_op) {
 				/*
 				 * Only need to hold the fault mutex in the
@@ -493,6 +492,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 				 * page faults. Races are not possible in the
 				 * case of truncation.
 				 */
+				hash = hugetlb_fault_mutex_hash(mapping, index);
 				mutex_lock(&hugetlb_fault_mutex_table[hash]);
 			}
 