Merge tag 'mm-hotfixes-stable-2024-11-12-16-39' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "10 hotfixes, 7 of which are cc:stable. 7 are MM, 3 are not. All
  singletons"

* tag 'mm-hotfixes-stable-2024-11-12-16-39' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm: swapfile: fix cluster reclaim work crash on rotational devices
  selftests: hugetlb_dio: fixup check for initial conditions to skip in the start
  mm/thp: fix deferred split queue not partially_mapped: fix
  mm/gup: avoid an unnecessary allocation call for FOLL_LONGTERM cases
  nommu: pass NULL argument to vma_iter_prealloc()
  ocfs2: fix UBSAN warning in ocfs2_verify_volume()
  nilfs2: fix null-ptr-deref in block_dirty_buffer tracepoint
  nilfs2: fix null-ptr-deref in block_touch_buffer tracepoint
  mm: page_alloc: move mlocked flag clearance into free_pages_prepare()
  mm: count zeromap read and set for swapout and swapin
commit 4b49c0ba4e (Linus Torvalds, 2024-11-13 08:58:11 -08:00)
19 changed files with 158 additions and 75 deletions

Documentation/admin-guide/cgroup-v2.rst

@@ -1599,6 +1599,15 @@ The following nested keys are defined.
   pglazyfreed (npn)
     Amount of reclaimed lazyfree pages
 
+  swpin_zero
+    Number of pages swapped into memory and filled with zero, where I/O
+    was optimized out because the page content was detected to be zero
+    during swapout.
+
+  swpout_zero
+    Number of zero-filled pages swapped out with I/O skipped due to the
+    content being detected as zero.
+
   zswpin
     Number of pages moved in to memory from zswap.
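
These counters are reported through each cgroup's memory.stat file. A minimal userspace sketch, not part of this patch, that prints the two new keys; the cgroup path is an assumption and should be replaced with a real cgroup on a system with cgroup v2 mounted at /sys/fs/cgroup:

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Assumed path: any cgroup v2 directory with the memory controller enabled. */
    const char *path = "/sys/fs/cgroup/user.slice/memory.stat";
    char line[256];
    FILE *f = fopen(path, "r");

    if (!f) {
        perror("fopen");
        return 1;
    }
    while (fgets(line, sizeof(line), f)) {
        /* Print only the new zero-swap counters, e.g. "swpin_zero 42". */
        if (!strncmp(line, "swpin_zero ", 11) ||
            !strncmp(line, "swpout_zero ", 12))
            fputs(line, stdout);
    }
    fclose(f);
    return 0;
}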

fs/nilfs2/btnode.c

@@ -68,7 +68,6 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
         goto failed;
     }
     memset(bh->b_data, 0, i_blocksize(inode));
-    bh->b_bdev = inode->i_sb->s_bdev;
     bh->b_blocknr = blocknr;
     set_buffer_mapped(bh);
     set_buffer_uptodate(bh);
@@ -133,7 +132,6 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
         goto found;
     }
     set_buffer_mapped(bh);
-    bh->b_bdev = inode->i_sb->s_bdev;
     bh->b_blocknr = pblocknr; /* set block address for read */
     bh->b_end_io = end_buffer_read_sync;
     get_bh(bh);

fs/nilfs2/gcinode.c

@@ -83,10 +83,8 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
             goto out;
         }
 
-    if (!buffer_mapped(bh)) {
-        bh->b_bdev = inode->i_sb->s_bdev;
+    if (!buffer_mapped(bh))
         set_buffer_mapped(bh);
-    }
     bh->b_blocknr = pbn;
     bh->b_end_io = end_buffer_read_sync;
     get_bh(bh);

fs/nilfs2/mdt.c

@@ -89,7 +89,6 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
     if (buffer_uptodate(bh))
         goto failed_bh;
 
-    bh->b_bdev = sb->s_bdev;
     err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
     if (likely(!err)) {
         get_bh(bh);

fs/nilfs2/page.c

@@ -39,7 +39,6 @@ static struct buffer_head *__nilfs_get_folio_block(struct folio *folio,
     first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
     bh = get_nth_bh(bh, block - first_block);
 
-    touch_buffer(bh);
     wait_on_buffer(bh);
     return bh;
 }
@@ -64,6 +63,7 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode,
         folio_put(folio);
         return NULL;
     }
+    bh->b_bdev = inode->i_sb->s_bdev;
     return bh;
 }

fs/ocfs2/super.c

@@ -2319,6 +2319,7 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di,
                                struct ocfs2_blockcheck_stats *stats)
 {
     int status = -EAGAIN;
+    u32 blksz_bits;
 
     if (memcmp(di->i_signature, OCFS2_SUPER_BLOCK_SIGNATURE,
                strlen(OCFS2_SUPER_BLOCK_SIGNATURE)) == 0) {
@@ -2333,11 +2334,15 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di,
             goto out;
         }
         status = -EINVAL;
-        if ((1 << le32_to_cpu(di->id2.i_super.s_blocksize_bits)) != blksz) {
+        /* Acceptable block sizes are 512 bytes, 1K, 2K and 4K. */
+        blksz_bits = le32_to_cpu(di->id2.i_super.s_blocksize_bits);
+        if (blksz_bits < 9 || blksz_bits > 12) {
             mlog(ML_ERROR, "found superblock with incorrect block "
-                 "size: found %u, should be %u\n",
-                 1 << le32_to_cpu(di->id2.i_super.s_blocksize_bits),
-                 blksz);
+                 "size bits: found %u, should be 9, 10, 11, or 12\n",
+                 blksz_bits);
+        } else if ((1 << le32_to_cpu(blksz_bits)) != blksz) {
+            mlog(ML_ERROR, "found superblock with incorrect block "
+                 "size: found %u, should be %u\n", 1 << blksz_bits, blksz);
         } else if (le16_to_cpu(di->id2.i_super.s_major_rev_level) !=
                    OCFS2_MAJOR_REV_LEVEL ||
                    le16_to_cpu(di->id2.i_super.s_minor_rev_level) !=
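
The reason for the range check: in C, shifting by an exponent at or beyond the width of int is undefined behaviour, which is what UBSAN reports when a corrupted superblock carries an oversized s_blocksize_bits. A standalone sketch, not the kernel code, using a hypothetical blocksize_from_bits() helper that mirrors the validation added above:

#include <stdint.h>
#include <stdio.h>

static int blocksize_from_bits(uint32_t blksz_bits)
{
    /* Acceptable ocfs2 block sizes are 512 bytes, 1K, 2K and 4K. */
    if (blksz_bits < 9 || blksz_bits > 12)
        return -1;              /* reject corrupted on-disk values */
    return 1 << blksz_bits;     /* shift count now provably in range */
}

int main(void)
{
    printf("%d\n", blocksize_from_bits(12));   /* 4096 */
    printf("%d\n", blocksize_from_bits(4096)); /* -1; an unchecked shift here would be UB */
    return 0;
}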

include/linux/memcontrol.h

@@ -1760,8 +1760,9 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg)
 
 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
 
-static inline void count_objcg_event(struct obj_cgroup *objcg,
-                                     enum vm_event_item idx)
+static inline void count_objcg_events(struct obj_cgroup *objcg,
+                                      enum vm_event_item idx,
+                                      unsigned long count)
 {
     struct mem_cgroup *memcg;
@@ -1770,7 +1771,7 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
 
     rcu_read_lock();
     memcg = obj_cgroup_memcg(objcg);
-    count_memcg_events(memcg, idx, 1);
+    count_memcg_events(memcg, idx, count);
     rcu_read_unlock();
 }
@@ -1825,8 +1826,9 @@ static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
     return NULL;
 }
 
-static inline void count_objcg_event(struct obj_cgroup *objcg,
-                                     enum vm_event_item idx)
+static inline void count_objcg_events(struct obj_cgroup *objcg,
+                                      enum vm_event_item idx,
+                                      unsigned long count)
 {
 }

include/linux/vm_event_item.h

@@ -134,6 +134,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #ifdef CONFIG_SWAP
         SWAP_RA,
         SWAP_RA_HIT,
+        SWPIN_ZERO,
+        SWPOUT_ZERO,
 #ifdef CONFIG_KSM
         KSM_SWPIN_COPY,
 #endif

mm/gup.c

@@ -2273,20 +2273,57 @@ struct page *get_dump_page(unsigned long addr)
 #endif /* CONFIG_ELF_CORE */
 
 #ifdef CONFIG_MIGRATION
+
+/*
+ * An array of either pages or folios ("pofs"). Although it may seem tempting to
+ * avoid this complication, by simply interpreting a list of folios as a list of
+ * pages, that approach won't work in the longer term, because eventually the
+ * layouts of struct page and struct folio will become completely different.
+ * Furthermore, this pof approach avoids excessive page_folio() calls.
+ */
+struct pages_or_folios {
+    union {
+        struct page **pages;
+        struct folio **folios;
+        void **entries;
+    };
+    bool has_folios;
+    long nr_entries;
+};
+
+static struct folio *pofs_get_folio(struct pages_or_folios *pofs, long i)
+{
+    if (pofs->has_folios)
+        return pofs->folios[i];
+    return page_folio(pofs->pages[i]);
+}
+
+static void pofs_clear_entry(struct pages_or_folios *pofs, long i)
+{
+    pofs->entries[i] = NULL;
+}
+
+static void pofs_unpin(struct pages_or_folios *pofs)
+{
+    if (pofs->has_folios)
+        unpin_folios(pofs->folios, pofs->nr_entries);
+    else
+        unpin_user_pages(pofs->pages, pofs->nr_entries);
+}
+
 /*
  * Returns the number of collected folios. Return value is always >= 0.
  */
 static unsigned long collect_longterm_unpinnable_folios(
         struct list_head *movable_folio_list,
-        unsigned long nr_folios,
-        struct folio **folios)
+        struct pages_or_folios *pofs)
 {
     unsigned long i, collected = 0;
     struct folio *prev_folio = NULL;
     bool drain_allow = true;
 
-    for (i = 0; i < nr_folios; i++) {
-        struct folio *folio = folios[i];
+    for (i = 0; i < pofs->nr_entries; i++) {
+        struct folio *folio = pofs_get_folio(pofs, i);
 
         if (folio == prev_folio)
             continue;
@@ -2327,16 +2364,15 @@ static unsigned long collect_longterm_unpinnable_folios(
  * Returns -EAGAIN if all folios were successfully migrated or -errno for
  * failure (or partial success).
  */
-static int migrate_longterm_unpinnable_folios(
-        struct list_head *movable_folio_list,
-        unsigned long nr_folios,
-        struct folio **folios)
+static int
+migrate_longterm_unpinnable_folios(struct list_head *movable_folio_list,
+                                   struct pages_or_folios *pofs)
 {
     int ret;
     unsigned long i;
 
-    for (i = 0; i < nr_folios; i++) {
-        struct folio *folio = folios[i];
+    for (i = 0; i < pofs->nr_entries; i++) {
+        struct folio *folio = pofs_get_folio(pofs, i);
 
         if (folio_is_device_coherent(folio)) {
             /*
@@ -2344,7 +2380,7 @@ static int migrate_longterm_unpinnable_folios(
              * convert the pin on the source folio to a normal
              * reference.
              */
-            folios[i] = NULL;
+            pofs_clear_entry(pofs, i);
             folio_get(folio);
             gup_put_folio(folio, 1, FOLL_PIN);
@@ -2363,8 +2399,8 @@ static int migrate_longterm_unpinnable_folios(
          * calling folio_isolate_lru() which takes a reference so the
          * folio won't be freed if it's migrating.
          */
-        unpin_folio(folios[i]);
-        folios[i] = NULL;
+        unpin_folio(folio);
+        pofs_clear_entry(pofs, i);
     }
 
     if (!list_empty(movable_folio_list)) {
@@ -2387,12 +2423,26 @@ static int migrate_longterm_unpinnable_folios(
     return -EAGAIN;
 
 err:
-    unpin_folios(folios, nr_folios);
+    pofs_unpin(pofs);
     putback_movable_pages(movable_folio_list);
 
     return ret;
 }
 
+static long
+check_and_migrate_movable_pages_or_folios(struct pages_or_folios *pofs)
+{
+    LIST_HEAD(movable_folio_list);
+    unsigned long collected;
+
+    collected = collect_longterm_unpinnable_folios(&movable_folio_list,
+                                                   pofs);
+    if (!collected)
+        return 0;
+
+    return migrate_longterm_unpinnable_folios(&movable_folio_list, pofs);
+}
+
 /*
  * Check whether all folios are *allowed* to be pinned indefinitely (long term).
  * Rather confusingly, all folios in the range are required to be pinned via
@@ -2417,16 +2467,13 @@ err:
 static long check_and_migrate_movable_folios(unsigned long nr_folios,
                                              struct folio **folios)
 {
-    unsigned long collected;
-    LIST_HEAD(movable_folio_list);
+    struct pages_or_folios pofs = {
+        .folios = folios,
+        .has_folios = true,
+        .nr_entries = nr_folios,
+    };
 
-    collected = collect_longterm_unpinnable_folios(&movable_folio_list,
-                                                   nr_folios, folios);
-    if (!collected)
-        return 0;
-
-    return migrate_longterm_unpinnable_folios(&movable_folio_list,
-                                              nr_folios, folios);
+    return check_and_migrate_movable_pages_or_folios(&pofs);
 }
 
 /*
@@ -2436,22 +2483,13 @@ static long check_and_migrate_movable_folios(unsigned long nr_folios,
 static long check_and_migrate_movable_pages(unsigned long nr_pages,
                                             struct page **pages)
 {
-    struct folio **folios;
-    long i, ret;
+    struct pages_or_folios pofs = {
+        .pages = pages,
+        .has_folios = false,
+        .nr_entries = nr_pages,
+    };
 
-    folios = kmalloc_array(nr_pages, sizeof(*folios), GFP_KERNEL);
-    if (!folios) {
-        unpin_user_pages(pages, nr_pages);
-        return -ENOMEM;
-    }
-
-    for (i = 0; i < nr_pages; i++)
-        folios[i] = page_folio(pages[i]);
-
-    ret = check_and_migrate_movable_folios(nr_pages, folios);
-    kfree(folios);
-
-    return ret;
+    return check_and_migrate_movable_pages_or_folios(&pofs);
 }
 
 #else
 static long check_and_migrate_movable_pages(unsigned long nr_pages,

mm/huge_memory.c

@@ -3790,7 +3790,9 @@ next:
          * in the case it was underused, then consider it used and
          * don't add it back to split_queue.
          */
-        if (!did_split && !folio_test_partially_mapped(folio)) {
+        if (did_split) {
+            ; /* folio already removed from list */
+        } else if (!folio_test_partially_mapped(folio)) {
             list_del_init(&folio->_deferred_list);
             removed++;
         } else {

mm/memcontrol.c

@@ -431,6 +431,10 @@ static const unsigned int memcg_vm_event_stat[] = {
     PGDEACTIVATE,
     PGLAZYFREE,
     PGLAZYFREED,
+#ifdef CONFIG_SWAP
+    SWPIN_ZERO,
+    SWPOUT_ZERO,
+#endif
 #ifdef CONFIG_ZSWAP
     ZSWPIN,
     ZSWPOUT,

mm/nommu.c

@@ -573,7 +573,7 @@ static int delete_vma_from_mm(struct vm_area_struct *vma)
     VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start);
 
     vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
-    if (vma_iter_prealloc(&vmi, vma)) {
+    if (vma_iter_prealloc(&vmi, NULL)) {
         pr_warn("Allocation of vma tree for process %d failed\n",
                 current->pid);
         return -ENOMEM;

mm/page_alloc.c

@@ -1048,6 +1048,7 @@ __always_inline bool free_pages_prepare(struct page *page,
     bool skip_kasan_poison = should_skip_kasan_poison(page);
     bool init = want_init_on_free();
     bool compound = PageCompound(page);
+    struct folio *folio = page_folio(page);
 
     VM_BUG_ON_PAGE(PageTail(page), page);
@@ -1057,6 +1058,20 @@ __always_inline bool free_pages_prepare(struct page *page,
     if (memcg_kmem_online() && PageMemcgKmem(page))
         __memcg_kmem_uncharge_page(page, order);
 
+    /*
+     * In rare cases, when truncation or holepunching raced with
+     * munlock after VM_LOCKED was cleared, Mlocked may still be
+     * found set here. This does not indicate a problem, unless
+     * "unevictable_pgs_cleared" appears worryingly large.
+     */
+    if (unlikely(folio_test_mlocked(folio))) {
+        long nr_pages = folio_nr_pages(folio);
+
+        __folio_clear_mlocked(folio);
+        zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
+        count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
+    }
+
     if (unlikely(PageHWPoison(page)) && !order) {
         /* Do not let hwpoison pages hit pcplists/buddy */
         reset_page_owner(page, order);

mm/page_io.c

@@ -204,7 +204,9 @@ static bool is_folio_zero_filled(struct folio *folio)
 
 static void swap_zeromap_folio_set(struct folio *folio)
 {
+    struct obj_cgroup *objcg = get_obj_cgroup_from_folio(folio);
     struct swap_info_struct *sis = swp_swap_info(folio->swap);
+    int nr_pages = folio_nr_pages(folio);
     swp_entry_t entry;
     unsigned int i;
@@ -212,6 +214,12 @@ static void swap_zeromap_folio_set(struct folio *folio)
         entry = page_swap_entry(folio_page(folio, i));
         set_bit(swp_offset(entry), sis->zeromap);
     }
+
+    count_vm_events(SWPOUT_ZERO, nr_pages);
+    if (objcg) {
+        count_objcg_events(objcg, SWPOUT_ZERO, nr_pages);
+        obj_cgroup_put(objcg);
+    }
 }
 
 static void swap_zeromap_folio_clear(struct folio *folio)
@@ -503,6 +511,7 @@ static void sio_read_complete(struct kiocb *iocb, long ret)
 static bool swap_read_folio_zeromap(struct folio *folio)
 {
     int nr_pages = folio_nr_pages(folio);
+    struct obj_cgroup *objcg;
     bool is_zeromap;
 
     /*
@@ -517,6 +526,13 @@ static bool swap_read_folio_zeromap(struct folio *folio)
     if (!is_zeromap)
         return false;
 
+    objcg = get_obj_cgroup_from_folio(folio);
+    count_vm_events(SWPIN_ZERO, nr_pages);
+    if (objcg) {
+        count_objcg_events(objcg, SWPIN_ZERO, nr_pages);
+        obj_cgroup_put(objcg);
+    }
+
     folio_zero_range(folio, 0, folio_size(folio));
     folio_mark_uptodate(folio);
     return true;

mm/swap.c

@@ -78,20 +78,6 @@ static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,
         lruvec_del_folio(*lruvecp, folio);
         __folio_clear_lru_flags(folio);
     }
-
-    /*
-     * In rare cases, when truncation or holepunching raced with
-     * munlock after VM_LOCKED was cleared, Mlocked may still be
-     * found set here. This does not indicate a problem, unless
-     * "unevictable_pgs_cleared" appears worryingly large.
-     */
-    if (unlikely(folio_test_mlocked(folio))) {
-        long nr_pages = folio_nr_pages(folio);
-
-        __folio_clear_mlocked(folio);
-        zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
-        count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
-    }
 }
 
 /*

mm/swapfile.c

@@ -929,7 +929,7 @@ static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
         si->highest_bit = 0;
         del_from_avail_list(si);
 
-        if (vm_swap_full())
+        if (si->cluster_info && vm_swap_full())
             schedule_work(&si->reclaim_work);
     }
 }

mm/vmstat.c

@@ -1415,6 +1415,8 @@ const char * const vmstat_text[] = {
 #ifdef CONFIG_SWAP
     "swap_ra",
     "swap_ra_hit",
+    "swpin_zero",
+    "swpout_zero",
 #ifdef CONFIG_KSM
     "ksm_swpin_copy",
 #endif

mm/zswap.c

@@ -1053,7 +1053,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 
     count_vm_event(ZSWPWB);
     if (entry->objcg)
-        count_objcg_event(entry->objcg, ZSWPWB);
+        count_objcg_events(entry->objcg, ZSWPWB, 1);
 
     zswap_entry_free(entry);
@@ -1483,7 +1483,7 @@ bool zswap_store(struct folio *folio)
 
     if (objcg) {
         obj_cgroup_charge_zswap(objcg, entry->length);
-        count_objcg_event(objcg, ZSWPOUT);
+        count_objcg_events(objcg, ZSWPOUT, 1);
     }
 
     /*
@@ -1577,7 +1577,7 @@ bool zswap_load(struct folio *folio)
 
     count_vm_event(ZSWPIN);
     if (entry->objcg)
-        count_objcg_event(entry->objcg, ZSWPIN);
+        count_objcg_events(entry->objcg, ZSWPIN, 1);
 
     if (swapcache) {
         zswap_entry_free(entry);

tools/testing/selftests/mm/hugetlb_dio.c

@@ -44,6 +44,13 @@ void run_dio_using_hugetlb(unsigned int start_off, unsigned int end_off)
     if (fd < 0)
         ksft_exit_fail_perror("Error opening file\n");
 
+    /* Get the free huge pages before allocation */
+    free_hpage_b = get_free_hugepages();
+    if (free_hpage_b == 0) {
+        close(fd);
+        ksft_exit_skip("No free hugepage, exiting!\n");
+    }
+
     /* Allocate a hugetlb page */
     orig_buffer = mmap(NULL, h_pagesize, mmap_prot, mmap_flags, -1, 0);
     if (orig_buffer == MAP_FAILED) {
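
For reference, a standalone sketch of the kind of up-front check the test now performs. It is not the selftest's actual get_free_hugepages() implementation, just an illustration that parses HugePages_Free from /proc/meminfo and skips when no huge pages are free:

#include <stdio.h>

static long free_hugepages(void)
{
    char line[128];
    long n = -1;
    FILE *f = fopen("/proc/meminfo", "r");

    if (!f)
        return -1;
    while (fgets(line, sizeof(line), f)) {
        /* Line looks like "HugePages_Free:       2" */
        if (sscanf(line, "HugePages_Free: %ld", &n) == 1)
            break;
    }
    fclose(f);
    return n;
}

int main(void)
{
    long n = free_hugepages();

    if (n <= 0) {
        puts("no free hugepages, skipping");
        return 0;
    }
    printf("%ld free hugepages\n", n);
    return 0;
}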