22 hotfixes. 8 are cc:stable and the remainder address issues which were
introduced post-6.0 or which aren't considered serious enough to justify
a -stable backport.
-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCY27xPAAKCRDdBJ7gKXxA
juFXAP4tSmfNDrT6khFhV0l4cS43bluErVNLh32RfXBqse8GYgEA5EPvZkOssLqY
86ejRXFgAArxYC4caiNURUQL+IASvQo=
=YVOx
-----END PGP SIGNATURE-----

Merge tag 'mm-hotfixes-stable-2022-11-11' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc hotfixes from Andrew Morton:
 "22 hotfixes. Eight are cc:stable and the remainder address issues
  which were introduced post-6.0 or which aren't considered serious
  enough to justify a -stable backport"

* tag 'mm-hotfixes-stable-2022-11-11' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (22 commits)
  docs: kmsan: fix formatting of "Example report"
  mm/damon/dbgfs: check if rm_contexts input is for a real context
  maple_tree: don't set a new maximum on the node when not reusing nodes
  maple_tree: fix depth tracking in maple_state
  arch/x86/mm/hugetlbpage.c: pud_huge() returns 0 when using 2-level paging
  fs: fix leaked psi pressure state
  nilfs2: fix use-after-free bug of ns_writer on remount
  x86/traps: avoid KMSAN bugs originating from handle_bug()
  kmsan: make sure PREEMPT_RT is off
  Kconfig.debug: ensure early check for KMSAN in CONFIG_KMSAN_WARN
  x86/uaccess: instrument copy_from_user_nmi()
  kmsan: core: kmsan_in_runtime() should return true in NMI context
  mm: hugetlb_vmemmap: include missing linux/moduleparam.h
  mm/shmem: use page_mapping() to detect page cache for uffd continue
  mm/memremap.c: map FS_DAX device memory as decrypted
  Partly revert "mm/thp: carry over dirty bit when thp splits on pmd"
  nilfs2: fix deadlock in nilfs_count_free_blocks()
  mm/mmap: fix memory leak in mmap_region()
  hugetlbfs: don't delete error page from pagecache
  maple_tree: reorganize testing to restore module testing
  ...
commit d7c2b1f64e
@@ -67,6 +67,7 @@ uninitialized in the local variable, as well as the stack where the value was
 copied to another memory location before use.
 
 A use of uninitialized value ``v`` is reported by KMSAN in the following cases:
+
 - in a condition, e.g. ``if (v) { ... }``;
 - in an indexing or pointer dereferencing, e.g. ``array[v]`` or ``*v``;
 - when it is copied to userspace or hardware, e.g. ``copy_to_user(..., &v, ...)``;
@@ -15,6 +15,7 @@
 #include <linux/context_tracking.h>
 #include <linux/interrupt.h>
 #include <linux/kallsyms.h>
+#include <linux/kmsan.h>
 #include <linux/spinlock.h>
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
@@ -301,6 +302,12 @@ static noinstr bool handle_bug(struct pt_regs *regs)
 {
        bool handled = false;
 
+       /*
+        * Normally @regs are unpoisoned by irqentry_enter(), but handle_bug()
+        * is a rare case that uses @regs without passing them to
+        * irqentry_enter().
+        */
+       kmsan_unpoison_entry_regs(regs);
        if (!is_valid_bugaddr(regs->ip))
                return handled;
 
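Note on the hunk above: KMSAN tracks initialization state in shadow memory, and a register snapshot that bypasses irqentry_enter() would otherwise look uninitialized when the handler reads it. A minimal userspace sketch of the poison/unpoison idea follows; the shadow map and helper names are illustrative stand-ins, not kernel APIs.

    #include <stdio.h>
    #include <string.h>

    /* 1 shadow byte per data byte: nonzero means "uninitialized" (poisoned). */
    static unsigned char shadow[64];

    static void poison(size_t off, size_t len)   { memset(shadow + off, 0xff, len); }
    static void unpoison(size_t off, size_t len) { memset(shadow + off, 0x00, len); }

    static int check(size_t off, size_t len)
    {
            for (size_t i = 0; i < len; i++)
                    if (shadow[off + i])
                            return 1;       /* would trigger a KMSAN report */
            return 0;
    }

    int main(void)
    {
            poison(0, 16);          /* fresh register snapshot: contents unknown */
            printf("use before unpoison: %s\n", check(0, 16) ? "report" : "ok");
            unpoison(0, 16);        /* role of kmsan_unpoison_entry_regs(regs)   */
            printf("use after unpoison:  %s\n", check(0, 16) ? "report" : "ok");
            return 0;
    }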
@@ -6,6 +6,7 @@
 
 #include <linux/uaccess.h>
 #include <linux/export.h>
+#include <linux/instrumented.h>
 
 #include <asm/tlbflush.h>
 
@@ -44,7 +45,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
         * called from other contexts.
         */
        pagefault_disable();
+       instrument_copy_from_user_before(to, from, n);
        ret = raw_copy_from_user(to, from, n);
+       instrument_copy_from_user_after(to, from, n, ret);
        pagefault_enable();
 
        return ret;
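The instrumentation hooks bracket the raw copy so the sanitizer can check the arguments beforehand and mark the copied bytes initialized afterwards. A rough stand-alone model of the calling pattern; only the hook names come from the diff, the stub bodies are assumptions for illustration.

    #include <stdio.h>
    #include <string.h>

    /* Stand-ins for the kernel's instrumentation hooks. */
    static void instrument_copy_from_user_before(void *to, const void *from,
                                                 unsigned long n)
    {
            printf("validate buffers before copying %lu bytes\n", n);
    }
    static void instrument_copy_from_user_after(void *to, const void *from,
                                                unsigned long n, unsigned long left)
    {
            printf("mark %lu copied bytes initialized (%lu not copied)\n",
                   n - left, left);
    }

    /* Returns the number of bytes *not* copied, like raw_copy_from_user(). */
    static unsigned long raw_copy(void *to, const void *from, unsigned long n)
    {
            memcpy(to, from, n);
            return 0;
    }

    int main(void)
    {
            char src[8] = "abcdefg", dst[8];
            instrument_copy_from_user_before(dst, src, sizeof(dst));
            unsigned long left = raw_copy(dst, src, sizeof(dst));
            instrument_copy_from_user_after(dst, src, sizeof(dst), left);
            return 0;
    }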
@@ -37,8 +37,12 @@ int pmd_huge(pmd_t pmd)
  */
 int pud_huge(pud_t pud)
 {
+#if CONFIG_PGTABLE_LEVELS > 2
        return !pud_none(pud) &&
                (pud_val(pud) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
+#else
+       return 0;
+#endif
 }
 
 #ifdef CONFIG_HUGETLB_PAGE
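Background for the added #if: with 2-level paging there is no real PUD level, so the PSE test is meaningless there and pud_huge() must simply return 0. The flag test itself distinguishes a normal table pointer ("present and not PSE") from everything else; a small sketch of that expression, using the x86 bit values (the helper function is hypothetical):

    #include <stdio.h>

    #define _PAGE_PRESENT 0x001UL
    #define _PAGE_PSE     0x080UL

    static int entry_is_huge(unsigned long val)
    {
            /* Huge if PSE is set, or if the entry is non-none but not
             * present (e.g. a migration entry): only "present and not
             * PSE" is a regular page-table pointer. */
            return val && (val & (_PAGE_PRESENT | _PAGE_PSE)) != _PAGE_PRESENT;
    }

    int main(void)
    {
            printf("table ptr : %d\n", entry_is_huge(_PAGE_PRESENT));              /* 0 */
            printf("huge page : %d\n", entry_is_huge(_PAGE_PRESENT | _PAGE_PSE));  /* 1 */
            printf("migration : %d\n", entry_is_huge(0x100));                      /* 1 */
            return 0;
    }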
@@ -512,7 +512,7 @@ static u64 bio_end_offset(struct bio *bio)
 static noinline int add_ra_bio_pages(struct inode *inode,
                                     u64 compressed_end,
                                     struct compressed_bio *cb,
-                                    unsigned long *pflags)
+                                    int *memstall, unsigned long *pflags)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        unsigned long end_index;
@@ -581,8 +581,10 @@ static noinline int add_ra_bio_pages(struct inode *inode,
                        continue;
                }
 
-               if (PageWorkingset(page))
+               if (!*memstall && PageWorkingset(page)) {
                        psi_memstall_enter(pflags);
+                       *memstall = 1;
+               }
 
                ret = set_page_extent_mapped(page);
                if (ret < 0) {
@@ -670,8 +672,8 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        u64 em_len;
        u64 em_start;
        struct extent_map *em;
-       /* Initialize to 1 to make skip psi_memstall_leave unless needed */
-       unsigned long pflags = 1;
+       unsigned long pflags;
+       int memstall = 0;
        blk_status_t ret;
        int ret2;
        int i;
@@ -727,7 +729,7 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                goto fail;
        }
 
-       add_ra_bio_pages(inode, em_start + em_len, cb, &pflags);
+       add_ra_bio_pages(inode, em_start + em_len, cb, &memstall, &pflags);
 
        /* include any pages we added in add_ra-bio_pages */
        cb->len = bio->bi_iter.bi_size;
@@ -807,7 +809,7 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                }
        }
 
-       if (!pflags)
+       if (memstall)
                psi_memstall_leave(&pflags);
 
        if (refcount_dec_and_test(&cb->pending_ios))
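Why the dedicated memstall flag in these hunks: psi_memstall_enter() saves the task's previous in_memstall state into *pflags, so calling it once per matching page from the readahead loop clobbers the saved value, and the old `if (!pflags)` sentinel could then skip psi_memstall_leave(), leaking pressure state. A toy model of the failure and the fix; the enter/leave stubs only mimic the save/restore behavior, nothing here is kernel API.

    #include <stdio.h>

    static int in_memstall;                         /* per-task state */

    static void memstall_enter(unsigned long *f) { *f = in_memstall; in_memstall = 1; }
    static void memstall_leave(unsigned long *f) { in_memstall = *f; }

    int main(void)
    {
            /* Old scheme: pflags == 0 doubled as "we entered memstall". */
            unsigned long pflags = 1;
            memstall_enter(&pflags);        /* first page: saves 0, pflags = 0     */
            memstall_enter(&pflags);        /* second page: saves 1, pflags = 1 (!) */
            if (!pflags)
                    memstall_leave(&pflags);
            printf("old: in_memstall=%d (leaked, should be 0)\n", in_memstall);

            /* New scheme: enter at most once, track it in a separate flag. */
            in_memstall = 0;
            int memstall = 0;
            if (!memstall) {
                    memstall_enter(&pflags);
                    memstall = 1;
            }
            if (memstall)
                    memstall_leave(&pflags);
            printf("new: in_memstall=%d\n", in_memstall);
            return 0;
    }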
@@ -1412,8 +1412,8 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
        struct block_device *last_bdev;
        unsigned int nr_bios = 0;
        struct bio *bio = NULL;
-       /* initialize to 1 to make skip psi_memstall_leave unless needed */
-       unsigned long pflags = 1;
+       unsigned long pflags;
+       int memstall = 0;
 
        bi_private = jobqueueset_init(sb, q, fgq, force_fg);
        qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
@@ -1463,14 +1463,18 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
                if (bio && (cur != last_index + 1 ||
                            last_bdev != mdev.m_bdev)) {
 submit_bio_retry:
-                       if (!pflags)
-                               psi_memstall_leave(&pflags);
                        submit_bio(bio);
+                       if (memstall) {
+                               psi_memstall_leave(&pflags);
+                               memstall = 0;
+                       }
                        bio = NULL;
                }
 
-               if (unlikely(PageWorkingset(page)))
+               if (unlikely(PageWorkingset(page)) && !memstall) {
                        psi_memstall_enter(&pflags);
+                       memstall = 1;
+               }
 
                if (!bio) {
                        bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
@@ -1500,9 +1504,9 @@ submit_bio_retry:
        } while (owned_head != Z_EROFS_PCLUSTER_TAIL);
 
        if (bio) {
-               if (!pflags)
-                       psi_memstall_leave(&pflags);
                submit_bio(bio);
+               if (memstall)
+                       psi_memstall_leave(&pflags);
        }
 
        /*
@@ -328,6 +328,12 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
                } else {
                        unlock_page(page);
 
+                       if (PageHWPoison(page)) {
+                               put_page(page);
+                               retval = -EIO;
+                               break;
+                       }
+
                        /*
                         * We have the page, copy it to user space buffer.
                         */
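Since error pages now stay in the page cache (see the hugetlbfs_error_remove_page hunk below), the read path has to refuse them explicitly instead of copying out poisoned data. A userspace analog of the added check; the struct and helper are invented for illustration.

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <sys/types.h>

    struct page { int hwpoison; char data[16]; };

    static ssize_t read_page(struct page *p, char *buf, size_t len)
    {
            if (p->hwpoison)
                    return -EIO;            /* don't hand out poisoned data */
            size_t n = len < sizeof(p->data) ? len : sizeof(p->data);
            memcpy(buf, p->data, n);
            return (ssize_t)n;
    }

    int main(void)
    {
            struct page good = { 0, "hello" }, bad = { 1, "???" };
            char buf[16];
            printf("good: %zd\n", read_page(&good, buf, sizeof(buf)));
            printf("bad : %zd (i.e. -EIO)\n", read_page(&bad, buf, sizeof(buf)));
            return 0;
    }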
@@ -1111,13 +1117,6 @@ static int hugetlbfs_migrate_folio(struct address_space *mapping,
 static int hugetlbfs_error_remove_page(struct address_space *mapping,
                                struct page *page)
 {
-       struct inode *inode = mapping->host;
-       pgoff_t index = page->index;
-
-       hugetlb_delete_from_page_cache(page);
-       if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
-               hugetlb_fix_reserve_counts(inode);
-
        return 0;
 }
 
@@ -317,7 +317,7 @@ void nilfs_relax_pressure_in_lock(struct super_block *sb)
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct nilfs_sc_info *sci = nilfs->ns_writer;
 
-       if (!sci || !sci->sc_flush_request)
+       if (sb_rdonly(sb) || unlikely(!sci) || !sci->sc_flush_request)
                return;
 
        set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
@@ -2242,7 +2242,7 @@ int nilfs_construct_segment(struct super_block *sb)
        struct nilfs_sc_info *sci = nilfs->ns_writer;
        struct nilfs_transaction_info *ti;
 
-       if (!sci)
+       if (sb_rdonly(sb) || unlikely(!sci))
                return -EROFS;
 
        /* A call inside transactions causes a deadlock. */
@@ -2280,7 +2280,7 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
        struct nilfs_transaction_info ti;
        int err = 0;
 
-       if (!sci)
+       if (sb_rdonly(sb) || unlikely(!sci))
                return -EROFS;
 
        nilfs_transaction_lock(sb, &ti, 0);
@@ -2776,11 +2776,12 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
 
        if (nilfs->ns_writer) {
                /*
-                * This happens if the filesystem was remounted
-                * read/write after nilfs_error degenerated it into a
-                * read-only mount.
+                * This happens if the filesystem is made read-only by
+                * __nilfs_error or nilfs_remount and then remounted
+                * read/write.  In these cases, reuse the existing
+                * writer.
                 */
-               nilfs_detach_log_writer(sb);
+               return 0;
        }
 
        nilfs->ns_writer = nilfs_segctor_new(sb, root);
@@ -1133,8 +1133,6 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
        if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                goto out;
        if (*flags & SB_RDONLY) {
-               /* Shutting down log writer */
-               nilfs_detach_log_writer(sb);
                sb->s_flags |= SB_RDONLY;
 
                /*
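The theme of these nilfs2 hunks: the log writer is no longer torn down on a read-only remount, so every entry point that may run against a read-only superblock now bails out early rather than touching a writer that could be freed under it. A toy model of the guard-clause pattern; the types and the error value standing in for -EROFS are illustrative only, and the real locking story is not captured here.

    #include <stdio.h>
    #include <stdlib.h>

    struct writer { int flush_request; };

    struct fs {
            int rdonly;
            struct writer *writer;          /* must not be used when rdonly */
    };

    static int construct_segment(struct fs *fs)
    {
            if (fs->rdonly || !fs->writer)  /* the added sb_rdonly()-style guard */
                    return -30;             /* stand-in for -EROFS */
            fs->writer->flush_request = 1;
            return 0;
    }

    int main(void)
    {
            struct fs fs = { 0, malloc(sizeof(struct writer)) };
            printf("rw: %d\n", construct_segment(&fs));
            fs.rdonly = 1;                  /* ro remount: guard takes effect */
            printf("ro: %d\n", construct_segment(&fs));
            free(fs.writer);
            return 0;
    }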
@@ -690,9 +690,7 @@ int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks)
 {
        unsigned long ncleansegs;
 
-       down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
-       up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        *nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment;
        return 0;
 }
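Dropping the mi_sem acquisition removes one edge of a lock-ordering cycle: the counter read does not actually need the DAT's semaphore. A stand-alone sketch of the ABBA shape such a cycle takes; trylock is used so the demo reports the inversion instead of hanging (compile with -pthread).

    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t L1 = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t L2 = PTHREAD_MUTEX_INITIALIZER;

    static void *thread_b(void *arg)
    {
            pthread_mutex_lock(&L2);
            if (pthread_mutex_trylock(&L1) != 0)
                    printf("B: would block on L1 while holding L2 (ABBA)\n");
            else
                    pthread_mutex_unlock(&L1);
            pthread_mutex_unlock(&L2);
            return NULL;
    }

    int main(void)
    {
            pthread_t b;
            pthread_mutex_lock(&L1);        /* A holds L1 */
            pthread_create(&b, NULL, thread_b, NULL);
            pthread_join(b, NULL);          /* B holds L2, wants L1 */
            /* If A next wanted L2, the cycle would close. Removing the
             * unneeded acquisition (as the fix does with mi_sem) breaks it. */
            pthread_mutex_unlock(&L1);
            return 0;
    }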
@@ -638,6 +638,12 @@ static inline void mt_set_in_rcu(struct maple_tree *mt)
        }
 }
 
+static inline unsigned int mt_height(const struct maple_tree *mt)
+{
+       return (mt->ma_flags & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET;
+}
+
 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max);
 void *mt_find_after(struct maple_tree *mt, unsigned long *index,
                    unsigned long max);
@@ -664,6 +670,7 @@ extern atomic_t maple_tree_tests_passed;
 
 void mt_dump(const struct maple_tree *mt);
 void mt_validate(struct maple_tree *mt);
+void mt_cache_shrink(void);
 #define MT_BUG_ON(__tree, __x) do {                                    \
        atomic_inc(&maple_tree_tests_run);                              \
        if (__x) {                                                      \
@@ -395,12 +395,12 @@ endif # DEBUG_INFO
 config FRAME_WARN
        int "Warn for stack frames larger than"
        range 0 8192
+       default 0 if KMSAN
        default 2048 if GCC_PLUGIN_LATENT_ENTROPY
        default 2048 if PARISC
        default 1536 if (!64BIT && XTENSA)
        default 1024 if !64BIT
        default 2048 if 64BIT
-       default 0 if KMSAN
        help
          Tell the compiler to warn at build time for stack frames larger than this.
          Setting this too low will cause a lot of warnings.
|
|||||||
config TEST_XARRAY
|
config TEST_XARRAY
|
||||||
tristate "Test the XArray code at runtime"
|
tristate "Test the XArray code at runtime"
|
||||||
|
|
||||||
|
config TEST_MAPLE_TREE
|
||||||
|
select DEBUG_MAPLE_TREE
|
||||||
|
tristate "Test the Maple Tree code at runtime"
|
||||||
|
|
||||||
config TEST_RHASHTABLE
|
config TEST_RHASHTABLE
|
||||||
tristate "Perform selftest on resizable hash table"
|
tristate "Perform selftest on resizable hash table"
|
||||||
help
|
help
|
||||||
|
@@ -12,6 +12,7 @@ config KMSAN
        bool "KMSAN: detector of uninitialized values use"
        depends on HAVE_ARCH_KMSAN && HAVE_KMSAN_COMPILER
        depends on SLUB && DEBUG_KERNEL && !KASAN && !KCSAN
+       depends on !PREEMPT_RT
        select STACKDEPOT
        select STACKDEPOT_ALWAYS_INIT
        help
@@ -85,6 +85,7 @@ obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
 obj-$(CONFIG_TEST_STRSCPY) += test_strscpy.o
 obj-$(CONFIG_TEST_UUID) += test_uuid.o
 obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
+obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o
 obj-$(CONFIG_TEST_PARMAN) += test_parman.o
 obj-$(CONFIG_TEST_KMOD) += test_kmod.o
 obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
@@ -183,10 +183,6 @@ static void ma_free_rcu(struct maple_node *node)
        call_rcu(&node->rcu, mt_free_rcu);
 }
 
-static unsigned int mt_height(const struct maple_tree *mt)
-{
-       return (mt->ma_flags & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET;
-}
-
 static void mas_set_height(struct ma_state *mas)
 {
|
|||||||
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
|
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
|
||||||
{
|
{
|
||||||
struct maple_alloc *node;
|
struct maple_alloc *node;
|
||||||
struct maple_alloc **nodep = &mas->alloc;
|
|
||||||
unsigned long allocated = mas_allocated(mas);
|
unsigned long allocated = mas_allocated(mas);
|
||||||
unsigned long success = allocated;
|
unsigned long success = allocated;
|
||||||
unsigned int requested = mas_alloc_req(mas);
|
unsigned int requested = mas_alloc_req(mas);
|
||||||
@ -1263,8 +1258,7 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
|
|||||||
node->node_count--;
|
node->node_count--;
|
||||||
|
|
||||||
success += count;
|
success += count;
|
||||||
nodep = &node->slot[0];
|
node = node->slot[0];
|
||||||
node = *nodep;
|
|
||||||
requested -= count;
|
requested -= count;
|
||||||
}
|
}
|
||||||
mas->alloc->total = success;
|
mas->alloc->total = success;
|
||||||
@ -1357,6 +1351,7 @@ static inline struct maple_enode *mas_start(struct ma_state *mas)
|
|||||||
root = mas_root(mas);
|
root = mas_root(mas);
|
||||||
/* Tree with nodes */
|
/* Tree with nodes */
|
||||||
if (likely(xa_is_node(root))) {
|
if (likely(xa_is_node(root))) {
|
||||||
|
mas->depth = 1;
|
||||||
mas->node = mte_safe_root(root);
|
mas->node = mte_safe_root(root);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
@ -3608,8 +3603,7 @@ static inline int mas_commit_b_node(struct ma_wr_state *wr_mas,
|
|||||||
node = mas_pop_node(wr_mas->mas);
|
node = mas_pop_node(wr_mas->mas);
|
||||||
node->parent = mas_mn(wr_mas->mas)->parent;
|
node->parent = mas_mn(wr_mas->mas)->parent;
|
||||||
wr_mas->mas->node = mt_mk_node(node, b_type);
|
wr_mas->mas->node = mt_mk_node(node, b_type);
|
||||||
mab_mas_cp(b_node, 0, b_end, wr_mas->mas, true);
|
mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
|
||||||
|
|
||||||
mas_replace(wr_mas->mas, false);
|
mas_replace(wr_mas->mas, false);
|
||||||
reuse_node:
|
reuse_node:
|
||||||
mas_update_gap(wr_mas->mas);
|
mas_update_gap(wr_mas->mas);
|
||||||
@ -3733,7 +3727,6 @@ static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
|
|||||||
|
|
||||||
static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
|
static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
|
||||||
{
|
{
|
||||||
wr_mas->mas->depth++;
|
|
||||||
wr_mas->type = mte_node_type(wr_mas->mas->node);
|
wr_mas->type = mte_node_type(wr_mas->mas->node);
|
||||||
mas_wr_node_walk(wr_mas);
|
mas_wr_node_walk(wr_mas);
|
||||||
wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
|
wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
|
||||||
@ -3745,6 +3738,7 @@ static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
|
|||||||
wr_mas->mas->min = wr_mas->r_min;
|
wr_mas->mas->min = wr_mas->r_min;
|
||||||
wr_mas->mas->node = wr_mas->content;
|
wr_mas->mas->node = wr_mas->content;
|
||||||
wr_mas->mas->offset = 0;
|
wr_mas->mas->offset = 0;
|
||||||
|
wr_mas->mas->depth++;
|
||||||
}
|
}
|
||||||
/*
|
/*
|
||||||
* mas_wr_walk() - Walk the tree for a write.
|
* mas_wr_walk() - Walk the tree for a write.
|
||||||
@ -4970,8 +4964,9 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
|
|||||||
{
|
{
|
||||||
enum maple_type type = mte_node_type(mas->node);
|
enum maple_type type = mte_node_type(mas->node);
|
||||||
unsigned long pivot, min, gap = 0;
|
unsigned long pivot, min, gap = 0;
|
||||||
unsigned char count, offset;
|
unsigned char offset;
|
||||||
unsigned long *gaps = NULL, *pivots = ma_pivots(mas_mn(mas), type);
|
unsigned long *gaps;
|
||||||
|
unsigned long *pivots = ma_pivots(mas_mn(mas), type);
|
||||||
void __rcu **slots = ma_slots(mas_mn(mas), type);
|
void __rcu **slots = ma_slots(mas_mn(mas), type);
|
||||||
bool found = false;
|
bool found = false;
|
||||||
|
|
||||||
@@ -4982,9 +4977,8 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
 
        gaps = ma_gaps(mte_to_node(mas->node), type);
        offset = mas->offset;
-       count = mt_slots[type];
        min = mas_safe_min(mas, pivots, offset);
-       for (; offset < count; offset++) {
+       for (; offset < mt_slots[type]; offset++) {
                pivot = mas_safe_pivot(mas, pivots, offset, type);
                if (offset && !pivot)
                        break;
|
|||||||
mas->min = min;
|
mas->min = min;
|
||||||
mas->max = pivot;
|
mas->max = pivot;
|
||||||
offset = 0;
|
offset = 0;
|
||||||
type = mte_node_type(mas->node);
|
|
||||||
count = mt_slots[type];
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -5065,6 +5057,7 @@ retry:
|
|||||||
|
|
||||||
return entry;
|
return entry;
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(mas_walk);
|
||||||
|
|
||||||
static inline bool mas_rewind_node(struct ma_state *mas)
|
static inline bool mas_rewind_node(struct ma_state *mas)
|
||||||
{
|
{
|
||||||
@ -5276,6 +5269,7 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
|
|||||||
mas->last = mas->index + size - 1;
|
mas->last = mas->index + size - 1;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(mas_empty_area);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* mas_empty_area_rev() - Get the highest address within the range that is
|
* mas_empty_area_rev() - Get the highest address within the range that is
|
||||||
@ -5339,6 +5333,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
|
|||||||
mas->index = mas->last - size + 1;
|
mas->index = mas->last - size + 1;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(mas_empty_area_rev);
|
||||||
|
|
||||||
static inline int mas_alloc(struct ma_state *mas, void *entry,
|
static inline int mas_alloc(struct ma_state *mas, void *entry,
|
||||||
unsigned long size, unsigned long *index)
|
unsigned long size, unsigned long *index)
|
||||||
@ -5660,6 +5655,7 @@ void *mas_store(struct ma_state *mas, void *entry)
|
|||||||
mas_wr_store_entry(&wr_mas);
|
mas_wr_store_entry(&wr_mas);
|
||||||
return wr_mas.content;
|
return wr_mas.content;
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(mas_store);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* mas_store_gfp() - Store a value into the tree.
|
* mas_store_gfp() - Store a value into the tree.
|
||||||
@ -5686,6 +5682,7 @@ retry:
|
|||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(mas_store_gfp);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* mas_store_prealloc() - Store a value into the tree using memory
|
* mas_store_prealloc() - Store a value into the tree using memory
|
||||||
@ -5703,6 +5700,7 @@ void mas_store_prealloc(struct ma_state *mas, void *entry)
|
|||||||
BUG_ON(mas_is_err(mas));
|
BUG_ON(mas_is_err(mas));
|
||||||
mas_destroy(mas);
|
mas_destroy(mas);
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(mas_store_prealloc);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* mas_preallocate() - Preallocate enough nodes for a store operation
|
* mas_preallocate() - Preallocate enough nodes for a store operation
|
||||||
@ -5772,6 +5770,7 @@ void mas_destroy(struct ma_state *mas)
|
|||||||
}
|
}
|
||||||
mas->alloc = NULL;
|
mas->alloc = NULL;
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(mas_destroy);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* mas_expected_entries() - Set the expected number of entries that will be inserted.
|
* mas_expected_entries() - Set the expected number of entries that will be inserted.
|
||||||
@ -5833,6 +5832,7 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
|
|||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(mas_expected_entries);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* mas_next() - Get the next entry.
|
* mas_next() - Get the next entry.
|
||||||
@ -6013,6 +6013,7 @@ void *mas_find(struct ma_state *mas, unsigned long max)
|
|||||||
/* Retries on dead nodes handled by mas_next_entry */
|
/* Retries on dead nodes handled by mas_next_entry */
|
||||||
return mas_next_entry(mas, max);
|
return mas_next_entry(mas, max);
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(mas_find);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* mas_find_rev: On the first call, find the first non-null entry at or below
|
* mas_find_rev: On the first call, find the first non-null entry at or below
|
||||||
@ -6059,7 +6060,7 @@ void *mas_find_rev(struct ma_state *mas, unsigned long min)
|
|||||||
/* Retries on dead nodes handled by mas_next_entry */
|
/* Retries on dead nodes handled by mas_next_entry */
|
||||||
return mas_prev_entry(mas, min);
|
return mas_prev_entry(mas, min);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(mas_find);
|
EXPORT_SYMBOL_GPL(mas_find_rev);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* mas_erase() - Find the range in which index resides and erase the entire
|
* mas_erase() - Find the range in which index resides and erase the entire
|
||||||
@ -6541,8 +6542,27 @@ static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
|
|||||||
mas_rewalk(mas, index);
|
mas_rewalk(mas, index);
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
#endif /* not defined __KERNEL__ */
|
|
||||||
|
|
||||||
|
void mt_cache_shrink(void)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
/*
|
||||||
|
* mt_cache_shrink() - For testing, don't use this.
|
||||||
|
*
|
||||||
|
* Certain testcases can trigger an OOM when combined with other memory
|
||||||
|
* debugging configuration options. This function is used to reduce the
|
||||||
|
* possibility of an out of memory even due to kmem_cache objects remaining
|
||||||
|
* around for longer than usual.
|
||||||
|
*/
|
||||||
|
void mt_cache_shrink(void)
|
||||||
|
{
|
||||||
|
kmem_cache_shrink(maple_node_cache);
|
||||||
|
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(mt_cache_shrink);
|
||||||
|
|
||||||
|
#endif /* not defined __KERNEL__ */
|
||||||
/*
|
/*
|
||||||
* mas_get_slot() - Get the entry in the maple state node stored at @offset.
|
* mas_get_slot() - Get the entry in the maple state node stored at @offset.
|
||||||
* @mas: The maple state
|
* @mas: The maple state
|
||||||
@@ -6816,6 +6836,7 @@ void mt_dump(const struct maple_tree *mt)
        else if (entry)
                mt_dump_node(mt, entry, 0, mt_max[mte_node_type(entry)], 0);
 }
+EXPORT_SYMBOL_GPL(mt_dump);
 
 /*
  * Calculate the maximum gap in a node and check if that's what is reported in
|
|||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
|
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(mt_validate);
|
||||||
|
|
||||||
#endif /* CONFIG_DEBUG_MAPLE_TREE */
|
#endif /* CONFIG_DEBUG_MAPLE_TREE */
|
||||||
|
lib/test_maple_tree.c: 35930 lines changed (diff suppressed because it is too large)
@@ -890,6 +890,7 @@ out:
 static int dbgfs_rm_context(char *name)
 {
        struct dentry *root, *dir, **new_dirs;
+       struct inode *inode;
        struct damon_ctx **new_ctxs;
        int i, j;
        int ret = 0;
|
|||||||
if (!dir)
|
if (!dir)
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
|
|
||||||
|
inode = d_inode(dir);
|
||||||
|
if (!S_ISDIR(inode->i_mode)) {
|
||||||
|
ret = -EINVAL;
|
||||||
|
goto out_dput;
|
||||||
|
}
|
||||||
|
|
||||||
new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs),
|
new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs),
|
||||||
GFP_KERNEL);
|
GFP_KERNEL);
|
||||||
if (!new_dirs) {
|
if (!new_dirs) {
|
||||||
|
@ -2206,9 +2206,12 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
|
|||||||
entry = pte_wrprotect(entry);
|
entry = pte_wrprotect(entry);
|
||||||
if (!young)
|
if (!young)
|
||||||
entry = pte_mkold(entry);
|
entry = pte_mkold(entry);
|
||||||
/* NOTE: this may set soft-dirty too on some archs */
|
/*
|
||||||
if (dirty)
|
* NOTE: we don't do pte_mkdirty when dirty==true
|
||||||
entry = pte_mkdirty(entry);
|
* because it breaks sparc64 which can sigsegv
|
||||||
|
* random process. Need to revisit when we figure
|
||||||
|
* out what is special with sparc64.
|
||||||
|
*/
|
||||||
if (soft_dirty)
|
if (soft_dirty)
|
||||||
entry = pte_mksoft_dirty(entry);
|
entry = pte_mksoft_dirty(entry);
|
||||||
if (uffd_wp)
|
if (uffd_wp)
|
||||||
|
@@ -6111,6 +6111,10 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 
        ptl = huge_pte_lock(h, dst_mm, dst_pte);
 
+       ret = -EIO;
+       if (PageHWPoison(page))
+               goto out_release_unlock;
+
        /*
         * We allow to overwrite a pte marker: consider when both MISSING|WP
         * registered, we firstly wr-protect a none pte which has no page cache
@@ -11,6 +11,7 @@
 #define pr_fmt(fmt)    "HugeTLB: " fmt
 
 #include <linux/pgtable.h>
+#include <linux/moduleparam.h>
 #include <linux/bootmem_info.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
@@ -124,6 +124,8 @@ static __always_inline bool kmsan_in_runtime(void)
 {
        if ((hardirq_count() >> HARDIRQ_SHIFT) > 1)
                return true;
+       if (in_nmi())
+               return true;
        return kmsan_get_context()->kmsan_in_runtime;
 }
 
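Treating NMI context as "already in the runtime" stops the instrumentation from re-entering itself from an NMI that fires mid-instrumentation. A toy reentrancy guard in that spirit; the flags and handler are invented for illustration.

    #include <stdio.h>

    static int in_runtime;
    static int in_nmi;

    static void instrument(const char *who)
    {
            if (in_runtime || in_nmi) {     /* the added kind of check */
                    printf("%s: skipped (unsafe context)\n", who);
                    return;
            }
            in_runtime = 1;
            printf("%s: instrumented\n", who);
            in_runtime = 0;
    }

    int main(void)
    {
            instrument("normal path");
            in_nmi = 1;                     /* pretend an NMI preempted us */
            instrument("nmi path");
            return 0;
    }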
@@ -1080,6 +1080,7 @@ static int me_huge_page(struct page_state *ps, struct page *p)
        int res;
        struct page *hpage = compound_head(p);
        struct address_space *mapping;
+       bool extra_pins = false;
 
        if (!PageHuge(hpage))
                return MF_DELAYED;
|
|||||||
mapping = page_mapping(hpage);
|
mapping = page_mapping(hpage);
|
||||||
if (mapping) {
|
if (mapping) {
|
||||||
res = truncate_error_page(hpage, page_to_pfn(p), mapping);
|
res = truncate_error_page(hpage, page_to_pfn(p), mapping);
|
||||||
|
/* The page is kept in page cache. */
|
||||||
|
extra_pins = true;
|
||||||
unlock_page(hpage);
|
unlock_page(hpage);
|
||||||
} else {
|
} else {
|
||||||
unlock_page(hpage);
|
unlock_page(hpage);
|
||||||
@@ -1104,7 +1107,7 @@ static int me_huge_page(struct page_state *ps, struct page *p)
                }
        }
 
-       if (has_extra_refcount(ps, p, false))
+       if (has_extra_refcount(ps, p, extra_pins))
                res = MF_FAILED;
 
        return res;
@@ -335,6 +335,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
                        WARN(1, "File system DAX not supported\n");
                        return ERR_PTR(-EINVAL);
                }
+               params.pgprot = pgprot_decrypted(params.pgprot);
                break;
        case MEMORY_DEVICE_GENERIC:
                break;
@@ -2674,6 +2674,8 @@ cannot_expand:
                        error = -EINVAL;
                        if (file)
                                goto close_and_free_vma;
+                       else if (vma->vm_file)
+                               goto unmap_and_free_vma;
                        else
                                goto free_vma;
                }
|
|||||||
error = -ENOMEM;
|
error = -ENOMEM;
|
||||||
if (file)
|
if (file)
|
||||||
goto close_and_free_vma;
|
goto close_and_free_vma;
|
||||||
|
else if (vma->vm_file)
|
||||||
|
goto unmap_and_free_vma;
|
||||||
else
|
else
|
||||||
goto free_vma;
|
goto free_vma;
|
||||||
}
|
}
|
||||||
@ -2751,7 +2755,7 @@ unmap_and_free_vma:
|
|||||||
|
|
||||||
/* Undo any partial mapping done by a device driver. */
|
/* Undo any partial mapping done by a device driver. */
|
||||||
unmap_region(mm, mas.tree, vma, prev, next, vma->vm_start, vma->vm_end);
|
unmap_region(mm, mas.tree, vma, prev, next, vma->vm_start, vma->vm_end);
|
||||||
if (vm_flags & VM_SHARED)
|
if (file && (vm_flags & VM_SHARED))
|
||||||
mapping_unmap_writable(file->f_mapping);
|
mapping_unmap_writable(file->f_mapping);
|
||||||
free_vma:
|
free_vma:
|
||||||
vm_area_free(vma);
|
vm_area_free(vma);
|
||||||
|
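The new unmap_and_free_vma rung matters when vma->vm_file was already installed but the local `file` branch no longer fires; previously those error paths jumped straight to free_vma and the partial mapping was never undone. A skeletal version of the goto cleanup ladder, with the resources reduced to a malloc for the demo.

    #include <stdio.h>
    #include <stdlib.h>

    static int setup(int fail_step, int vma_has_file)
    {
            void *vma = malloc(16);
            if (!vma)
                    return -1;
            if (fail_step) {
                    if (vma_has_file)
                            goto unmap_and_free_vma;    /* new intermediate rung */
                    goto free_vma;
            }
            free(vma);
            return 0;

    unmap_and_free_vma:
            printf("undoing partial mapping\n");        /* previously skipped: leak */
    free_vma:
            free(vma);
            return -1;
    }

    int main(void)
    {
            setup(1, 1);
            return 0;
    }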
@@ -64,7 +64,7 @@ int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
        pte_t _dst_pte, *dst_pte;
        bool writable = dst_vma->vm_flags & VM_WRITE;
        bool vm_shared = dst_vma->vm_flags & VM_SHARED;
-       bool page_in_cache = page->mapping;
+       bool page_in_cache = page_mapping(page);
        spinlock_t *ptl;
        struct inode *inode;
        pgoff_t offset, max_off;
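Why the raw `page->mapping` test is wrong here: for anonymous pages the field doubles as a tagged pointer (PAGE_MAPPING_ANON in the low bits), so it is non-NULL even though the page is not in the page cache; page_mapping() decodes the tag. A simplified model of the misclassification:

    #include <stdio.h>
    #include <stdint.h>

    #define MAPPING_ANON 0x1UL      /* low-bit tag, like PAGE_MAPPING_ANON */

    static void *page_mapping(uintptr_t mapping)
    {
            if (mapping & MAPPING_ANON)
                    return NULL;    /* anon page: no page-cache mapping */
            return (void *)mapping;
    }

    int main(void)
    {
            uintptr_t file_backed = 0x1000;                 /* aligned pointer */
            uintptr_t anon = 0x2000 | MAPPING_ANON;         /* tagged pointer  */

            printf("file-backed: raw=%d  page_mapping=%d\n",
                   file_backed != 0, page_mapping(file_backed) != NULL);
            printf("anonymous  : raw=%d  page_mapping=%d\n",
                   anon != 0, page_mapping(anon) != NULL);
            return 0;
    }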
tools/testing/radix-tree/.gitignore:

@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
+generated/bit-length.h
 generated/map-shift.h
 idr.c
 idr-test
@@ -18,9 +18,14 @@ endif
 ifeq ($(BUILD), 32)
        CFLAGS += -m32
        LDFLAGS += -m32
+       LONG_BIT := 32
 endif
 
-targets: generated/map-shift.h $(TARGETS)
+ifndef LONG_BIT
+LONG_BIT := $(shell getconf LONG_BIT)
+endif
+
+targets: generated/map-shift.h generated/bit-length.h $(TARGETS)
 
 main: $(OFILES)
 
|
|||||||
multiorder: multiorder.o $(CORE_OFILES)
|
multiorder: multiorder.o $(CORE_OFILES)
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
$(RM) $(TARGETS) *.o radix-tree.c idr.c generated/map-shift.h
|
$(RM) $(TARGETS) *.o radix-tree.c idr.c generated/map-shift.h generated/bit-length.h
|
||||||
|
|
||||||
vpath %.c ../../lib
|
vpath %.c ../../lib
|
||||||
|
|
||||||
$(OFILES): Makefile *.h */*.h generated/map-shift.h \
|
$(OFILES): Makefile *.h */*.h generated/map-shift.h generated/bit-length.h \
|
||||||
../../include/linux/*.h \
|
../../include/linux/*.h \
|
||||||
../../include/asm/*.h \
|
../../include/asm/*.h \
|
||||||
../../../include/linux/xarray.h \
|
../../../include/linux/xarray.h \
|
||||||
@ -61,3 +66,11 @@ generated/map-shift.h:
|
|||||||
echo "#define XA_CHUNK_SHIFT $(SHIFT)" > \
|
echo "#define XA_CHUNK_SHIFT $(SHIFT)" > \
|
||||||
generated/map-shift.h; \
|
generated/map-shift.h; \
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
generated/bit-length.h: FORCE
|
||||||
|
@if ! grep -qws CONFIG_$(LONG_BIT)BIT generated/bit-length.h; then \
|
||||||
|
echo "Generating $@"; \
|
||||||
|
echo "#define CONFIG_$(LONG_BIT)BIT 1" > $@; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
FORCE: ;
|
||||||
|
@@ -1,2 +1,2 @@
+#include "bit-length.h"
 #define CONFIG_XARRAY_MULTI 1
-#define CONFIG_64BIT 1
|
|||||||
pthread_mutex_unlock(&cachep->lock);
|
pthread_mutex_unlock(&cachep->lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void kmem_cache_shrink(struct kmem_cache *cachep)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
|
int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
|
||||||
void **p)
|
void **p)
|
||||||
{
|
{
|
||||||
|
(one further file's diff suppressed because it is too large)