Mirror of https://github.com/torvalds/linux.git
Merge mm-hotfixes-stable into mm-stable to pick up depended-upon changes.
commit 5ef8f1b2b4

.mailmap: 6 lines changed
@@ -87,6 +87,7 @@ Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@unisoc.com>
Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang7@gmail.com>
Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
Bartosz Golaszewski <brgl@bgdev.pl> <bgolaszewski@baylibre.com>
Ben Dooks <ben-linux@fluff.org> <ben.dooks@simtec.co.uk>
Ben Dooks <ben-linux@fluff.org> <ben.dooks@sifive.com>
Ben Gardner <bgardner@wabtec.com>
@@ -449,9 +450,10 @@ Oleksandr Natalenko <oleksandr@natalenko.name> <oleksandr@redhat.com>
Oleksij Rempel <linux@rempel-privat.de> <bug-track@fisher-privat.net>
Oleksij Rempel <linux@rempel-privat.de> <external.Oleksij.Rempel@de.bosch.com>
Oleksij Rempel <linux@rempel-privat.de> <fixed-term.Oleksij.Rempel@de.bosch.com>
Oleksij Rempel <linux@rempel-privat.de> <o.rempel@pengutronix.de>
Oleksij Rempel <linux@rempel-privat.de> <ore@pengutronix.de>
Oleksij Rempel <o.rempel@pengutronix.de>
Oleksij Rempel <o.rempel@pengutronix.de> <ore@pengutronix.de>
Oliver Upton <oliver.upton@linux.dev> <oupton@google.com>
Ondřej Jirman <megi@xff.cz> <megous@megous.com>
Oza Pawandeep <quic_poza@quicinc.com> <poza@codeaurora.org>
Pali Rohár <pali@kernel.org> <pali.rohar@gmail.com>
Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
@@ -6748,7 +6748,7 @@ F: drivers/gpu/drm/panel/panel-sitronix-st7701.c
DRM DRIVER FOR SITRONIX ST7703 PANELS
M:	Guido Günther <agx@sigxcpu.org>
R:	Purism Kernel Team <kernel@puri.sm>
R:	Ondrej Jirman <megous@megous.com>
R:	Ondrej Jirman <megi@xff.cz>
S:	Maintained
F:	Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.yaml
F:	drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -72,7 +72,7 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
                }
                pagefault_out_of_memory();
                return;
        } else if (fault & VM_FAULT_SIGBUS) {
        } else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
                /* Kernel mode? Handle exceptions or die */
                if (!user_mode(regs)) {
                        no_context(regs, addr);
@@ -183,15 +183,22 @@ void set_huge_pte_at(struct mm_struct *mm,
                pte_t pte,
                unsigned long sz)
{
        unsigned long hugepage_shift;
        int i, pte_num;

        if (!pte_napot(pte)) {
                set_pte_at(mm, addr, ptep, pte);
                return;
        }
        if (sz >= PGDIR_SIZE)
                hugepage_shift = PGDIR_SHIFT;
        else if (sz >= P4D_SIZE)
                hugepage_shift = P4D_SHIFT;
        else if (sz >= PUD_SIZE)
                hugepage_shift = PUD_SHIFT;
        else if (sz >= PMD_SIZE)
                hugepage_shift = PMD_SHIFT;
        else
                hugepage_shift = PAGE_SHIFT;

        pte_num = napot_pte_num(napot_cont_order(pte));
        for (i = 0; i < pte_num; i++, ptep++, addr += PAGE_SIZE)
        pte_num = sz >> hugepage_shift;
        for (i = 0; i < pte_num; i++, ptep++, addr += (1 << hugepage_shift))
                set_pte_at(mm, addr, ptep, pte);
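Note: the hunk above stops sizing the loop from the PTE's NAPOT contiguous order and instead derives the number of base PTEs and the per-entry stride from the passed-in size. A standalone sketch of that arithmetic, assuming 4K base pages and a PMD_SHIFT of 21; the constants and function name are illustrative, not taken from the kernel headers:

#include <stdio.h>

#define PAGE_SHIFT 12UL /* assumed 4K base pages */
#define PMD_SHIFT  21UL /* assumed value, for illustration only */

static unsigned long ptes_for_size(unsigned long sz)
{
        unsigned long shift = (sz >= (1UL << PMD_SHIFT)) ? PMD_SHIFT : PAGE_SHIFT;

        /* mirrors pte_num = sz >> hugepage_shift in the hunk above */
        return sz >> shift;
}

int main(void)
{
        /* 64K NAPOT mapping: 16 base PTEs, each advanced by 4K */
        printf("64K -> %lu entries\n", ptes_for_size(0x10000));
        /* 2M mapping: a single PMD-level entry */
        printf("2M  -> %lu entries\n", ptes_for_size(0x200000));
        return 0;
}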
@@ -60,6 +60,7 @@ struct resv_map {
        long adds_in_progress;
        struct list_head region_cache;
        long region_cache_count;
        struct rw_semaphore rw_sema;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On private mappings, the counter to uncharge reservations is stored
@@ -138,7 +139,7 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
void unmap_hugepage_range(struct vm_area_struct *,
                        unsigned long, unsigned long, struct page *,
                        zap_flags_t);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
void __unmap_hugepage_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        unsigned long start, unsigned long end,
                        struct page *ref_page, zap_flags_t zap_flags);
@@ -245,6 +246,25 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                        unsigned long *start, unsigned long *end);

extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
                        unsigned long *begin, unsigned long *end);
extern void __hugetlb_zap_end(struct vm_area_struct *vma,
                        struct zap_details *details);

static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
                        unsigned long *start, unsigned long *end)
{
        if (is_vm_hugetlb_page(vma))
                __hugetlb_zap_begin(vma, start, end);
}

static inline void hugetlb_zap_end(struct vm_area_struct *vma,
                        struct zap_details *details)
{
        if (is_vm_hugetlb_page(vma))
                __hugetlb_zap_end(vma, details);
}

void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
@@ -296,6 +316,18 @@ static inline void adjust_range_if_pmd_sharing_possible(
{
}

static inline void hugetlb_zap_begin(
                        struct vm_area_struct *vma,
                        unsigned long *start, unsigned long *end)
{
}

static inline void hugetlb_zap_end(
                        struct vm_area_struct *vma,
                        struct zap_details *details)
{
}

static inline struct page *hugetlb_follow_page_mask(
        struct vm_area_struct *vma, unsigned long address, unsigned int flags,
        unsigned int *page_mask)
@@ -441,7 +473,7 @@ static inline long hugetlb_change_protection(
        return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page,
                        zap_flags_t zap_flags)
@@ -1245,6 +1277,11 @@ static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
        return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
}

static inline bool __vma_private_lock(struct vm_area_struct *vma)
{
        return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data;
}

/*
 * Safe version of huge_pte_offset() to check the locks. See comments
 * above huge_pte_offset().
@@ -466,10 +466,10 @@ static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#endif /* LINUX_KASAN_H */
@@ -5627,7 +5627,7 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
        /* Internal nodes */
        nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
        /* Add working room for split (2 nodes) + new parents */
        mas_node_count(mas, nr_nodes + 3);
        mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);

        /* Detect if allocations run out */
        mas->mas_flags |= MA_STATE_PREALLOC;
@@ -9,6 +9,7 @@

#include <linux/maple_tree.h>
#include <linux/module.h>
#include <linux/rwsem.h>

#define MTREE_ALLOC_MAX 0x2000000000000Ul
#define CONFIG_MAPLE_SEARCH
@@ -1841,17 +1842,21 @@ static noinline void __init check_forking(struct maple_tree *mt)
        void *val;
        MA_STATE(mas, mt, 0, 0);
        MA_STATE(newmas, mt, 0, 0);
        struct rw_semaphore newmt_lock;

        init_rwsem(&newmt_lock);

        for (i = 0; i <= nr_entries; i++)
                mtree_store_range(mt, i*10, i*10 + 5,
                                  xa_mk_value(i), GFP_KERNEL);

        mt_set_non_kernel(99999);
        mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
        mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
        mt_set_external_lock(&newmt, &newmt_lock);
        newmas.tree = &newmt;
        mas_reset(&newmas);
        mas_reset(&mas);
        mas_lock(&newmas);
        down_write(&newmt_lock);
        mas.index = 0;
        mas.last = 0;
        if (mas_expected_entries(&newmas, nr_entries)) {
@@ -1866,10 +1871,10 @@ static noinline void __init check_forking(struct maple_tree *mt)
        }
        rcu_read_unlock();
        mas_destroy(&newmas);
        mas_unlock(&newmas);
        mt_validate(&newmt);
        mt_set_non_kernel(0);
        mtree_destroy(&newmt);
        __mt_destroy(&newmt);
        up_write(&newmt_lock);
}

static noinline void __init check_iteration(struct maple_tree *mt)
@@ -1980,6 +1985,10 @@ static noinline void __init bench_forking(struct maple_tree *mt)
        void *val;
        MA_STATE(mas, mt, 0, 0);
        MA_STATE(newmas, mt, 0, 0);
        struct rw_semaphore newmt_lock;

        init_rwsem(&newmt_lock);
        mt_set_external_lock(&newmt, &newmt_lock);

        for (i = 0; i <= nr_entries; i++)
                mtree_store_range(mt, i*10, i*10 + 5,
@@ -1994,7 +2003,7 @@ static noinline void __init bench_forking(struct maple_tree *mt)
                mas.index = 0;
                mas.last = 0;
                rcu_read_lock();
                mas_lock(&newmas);
                down_write(&newmt_lock);
                if (mas_expected_entries(&newmas, nr_entries)) {
                        printk("OOM!");
                        BUG_ON(1);
@@ -2005,11 +2014,11 @@ static noinline void __init bench_forking(struct maple_tree *mt)
                        mas_store(&newmas, val);
                }
                mas_destroy(&newmas);
                mas_unlock(&newmas);
                rcu_read_unlock();
                mt_validate(&newmt);
                mt_set_non_kernel(0);
                mtree_destroy(&newmt);
                __mt_destroy(&newmt);
                up_write(&newmt_lock);
        }
}
#endif
@@ -2616,6 +2625,10 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
        void *tmp;
        MA_STATE(mas, mt, 0, 0);
        MA_STATE(newmas, &newmt, 0, 0);
        struct rw_semaphore newmt_lock;

        init_rwsem(&newmt_lock);
        mt_set_external_lock(&newmt, &newmt_lock);

        if (!zero_start)
                i = 1;
@@ -2625,9 +2638,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
                mtree_store_range(mt, i*10, (i+1)*10 - gap,
                                  xa_mk_value(i), GFP_KERNEL);

        mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
        mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
        mt_set_non_kernel(99999);
        mas_lock(&newmas);
        down_write(&newmt_lock);
        ret = mas_expected_entries(&newmas, nr_entries);
        mt_set_non_kernel(0);
        MT_BUG_ON(mt, ret != 0);
@@ -2640,9 +2653,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
        }
        rcu_read_unlock();
        mas_destroy(&newmas);
        mas_unlock(&newmas);

        mtree_destroy(&newmt);
        __mt_destroy(&newmt);
        up_write(&newmt_lock);
}

/* Duplicate many sizes of trees. Mainly to test expected entry values */
@@ -1208,6 +1208,8 @@ static int damon_sysfs_set_targets(struct damon_ctx *ctx,
        return 0;
}

static bool damon_sysfs_schemes_regions_updating;

static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
{
        struct damon_target *t, *next;
@@ -1219,8 +1221,10 @@ static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
        cmd = damon_sysfs_cmd_request.cmd;
        if (kdamond && ctx == kdamond->damon_ctx &&
            (cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS ||
             cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES)) {
             cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES) &&
            damon_sysfs_schemes_regions_updating) {
                damon_sysfs_schemes_update_regions_stop(ctx);
                damon_sysfs_schemes_regions_updating = false;
                mutex_unlock(&damon_sysfs_lock);
        }

@@ -1340,7 +1344,6 @@ static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
static int damon_sysfs_cmd_request_callback(struct damon_ctx *c)
{
        struct damon_sysfs_kdamond *kdamond;
        static bool damon_sysfs_schemes_regions_updating;
        bool total_bytes_only = false;
        int err = 0;

mm/hugetlb.c: 84 lines changed
@@ -97,6 +97,7 @@ static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
                unsigned long start, unsigned long end);
static struct resv_map *vma_resv_map(struct vm_area_struct *vma);

static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
@@ -267,6 +268,10 @@ void hugetlb_vma_lock_read(struct vm_area_struct *vma)
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

                down_read(&vma_lock->rw_sema);
        } else if (__vma_private_lock(vma)) {
                struct resv_map *resv_map = vma_resv_map(vma);

                down_read(&resv_map->rw_sema);
        }
}

@@ -276,6 +281,10 @@ void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

                up_read(&vma_lock->rw_sema);
        } else if (__vma_private_lock(vma)) {
                struct resv_map *resv_map = vma_resv_map(vma);

                up_read(&resv_map->rw_sema);
        }
}

@@ -285,6 +294,10 @@ void hugetlb_vma_lock_write(struct vm_area_struct *vma)
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

                down_write(&vma_lock->rw_sema);
        } else if (__vma_private_lock(vma)) {
                struct resv_map *resv_map = vma_resv_map(vma);

                down_write(&resv_map->rw_sema);
        }
}

@@ -294,17 +307,27 @@ void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

                up_write(&vma_lock->rw_sema);
        } else if (__vma_private_lock(vma)) {
                struct resv_map *resv_map = vma_resv_map(vma);

                up_write(&resv_map->rw_sema);
        }
}

int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
        struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

        if (!__vma_shareable_lock(vma))
                return 1;
        if (__vma_shareable_lock(vma)) {
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

        return down_write_trylock(&vma_lock->rw_sema);
                return down_write_trylock(&vma_lock->rw_sema);
        } else if (__vma_private_lock(vma)) {
                struct resv_map *resv_map = vma_resv_map(vma);

                return down_write_trylock(&resv_map->rw_sema);
        }

        return 1;
}

void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
@@ -313,6 +336,10 @@ void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

                lockdep_assert_held(&vma_lock->rw_sema);
        } else if (__vma_private_lock(vma)) {
                struct resv_map *resv_map = vma_resv_map(vma);

                lockdep_assert_held(&resv_map->rw_sema);
        }
}

@@ -345,6 +372,11 @@ static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

                __hugetlb_vma_unlock_write_put(vma_lock);
        } else if (__vma_private_lock(vma)) {
                struct resv_map *resv_map = vma_resv_map(vma);

                /* no free for anon vmas, but still need to unlock */
                up_write(&resv_map->rw_sema);
        }
}

@@ -1061,6 +1093,7 @@ struct resv_map *resv_map_alloc(void)
        kref_init(&resv_map->refs);
        spin_lock_init(&resv_map->lock);
        INIT_LIST_HEAD(&resv_map->regions);
        init_rwsem(&resv_map->rw_sema);

        resv_map->adds_in_progress = 0;
        /*
@@ -1131,8 +1164,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

        set_vma_private_data(vma, (get_vma_private_data(vma) &
                                HPAGE_RESV_MASK) | (unsigned long)map);
        set_vma_private_data(vma, (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
@@ -5315,9 +5347,9 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
        return len + old_addr - old_end;
}

static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end,
                                   struct page *ref_page, zap_flags_t zap_flags)
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                            unsigned long start, unsigned long end,
                            struct page *ref_page, zap_flags_t zap_flags)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
@@ -5446,16 +5478,25 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
        tlb_flush_mmu_tlbonly(tlb);
}

void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                          struct vm_area_struct *vma, unsigned long start,
                          unsigned long end, struct page *ref_page,
                          zap_flags_t zap_flags)
void __hugetlb_zap_begin(struct vm_area_struct *vma,
                         unsigned long *start, unsigned long *end)
{
        hugetlb_vma_lock_write(vma);
        i_mmap_lock_write(vma->vm_file->f_mapping);
        if (!vma->vm_file)      /* hugetlbfs_file_mmap error */
                return;

        /* mmu notification performed in caller */
        __unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
        adjust_range_if_pmd_sharing_possible(vma, start, end);
        hugetlb_vma_lock_write(vma);
        if (vma->vm_file)
                i_mmap_lock_write(vma->vm_file->f_mapping);
}

void __hugetlb_zap_end(struct vm_area_struct *vma,
                       struct zap_details *details)
{
        zap_flags_t zap_flags = details ? details->zap_flags : 0;

        if (!vma->vm_file)      /* hugetlbfs_file_mmap error */
                return;

        if (zap_flags & ZAP_FLAG_UNMAP) {       /* final unmap */
                /*
@@ -5468,11 +5509,12 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                 * someone else.
                 */
                __hugetlb_vma_unlock_write_free(vma);
                i_mmap_unlock_write(vma->vm_file->f_mapping);
        } else {
                i_mmap_unlock_write(vma->vm_file->f_mapping);
                hugetlb_vma_unlock_write(vma);
        }

        if (vma->vm_file)
                i_mmap_unlock_write(vma->vm_file->f_mapping);
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
@@ -6853,8 +6895,10 @@ out_err:
         */
        if (chg >= 0 && add < 0)
                region_abort(resv_map, from, to, regions_needed);
        if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
        if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
                kref_put(&resv_map->refs, resv_map_release);
                set_vma_resv_map(vma, NULL);
        }
        return false;
}
@@ -621,7 +621,7 @@ void kasan_report_async(void)
}
#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_INLINE
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
 * With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high
 * canonical half of the address space) cause out-of-bounds shadow memory reads
mm/memory.c: 13 lines changed
@@ -1691,7 +1691,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
                if (vma->vm_file) {
                        zap_flags_t zap_flags = details ?
                                details->zap_flags : 0;
                        __unmap_hugepage_range_final(tlb, vma, start, end,
                        __unmap_hugepage_range(tlb, vma, start, end,
                                                  NULL, zap_flags);
                }
        } else
@@ -1736,8 +1736,12 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
                                start_addr, end_addr);
        mmu_notifier_invalidate_range_start(&range);
        do {
                unmap_single_vma(tlb, vma, start_addr, end_addr, &details,
                unsigned long start = start_addr;
                unsigned long end = end_addr;
                hugetlb_zap_begin(vma, &start, &end);
                unmap_single_vma(tlb, vma, start, end, &details,
                                 mm_wr_locked);
                hugetlb_zap_end(vma, &details);
        } while ((vma = mas_find(mas, tree_end - 1)) != NULL);
        mmu_notifier_invalidate_range_end(&range);
}
@@ -1761,9 +1765,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
        lru_add_drain();
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
                                address, end);
        if (is_vm_hugetlb_page(vma))
                adjust_range_if_pmd_sharing_possible(vma, &range.start,
                                                     &range.end);
        hugetlb_zap_begin(vma, &range.start, &range.end);
        tlb_gather_mmu(&tlb, vma->vm_mm);
        update_hiwater_rss(vma->vm_mm);
        mmu_notifier_invalidate_range_start(&range);
@@ -1774,6 +1776,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
        unmap_single_vma(&tlb, vma, address, end, details, false);
        mmu_notifier_invalidate_range_end(&range);
        tlb_finish_mmu(&tlb);
        hugetlb_zap_end(vma, details);
}

/**
@@ -1543,8 +1543,10 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
                 * the home node for vmas we already updated before.
                 */
                old = vma_policy(vma);
                if (!old)
                if (!old) {
                        prev = vma;
                        continue;
                }
                if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
                        err = -EOPNOTSUPP;
                        break;
mm/migrate.c: 14 lines changed
@@ -2155,6 +2155,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
                         const int __user *nodes,
                         int __user *status, int flags)
{
        compat_uptr_t __user *compat_pages = (void __user *)pages;
        int current_node = NUMA_NO_NODE;
        LIST_HEAD(pagelist);
        int start, i;
@@ -2167,8 +2168,17 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
                int node;

                err = -EFAULT;
                if (get_user(p, pages + i))
                        goto out_flush;
                if (in_compat_syscall()) {
                        compat_uptr_t cp;

                        if (get_user(cp, compat_pages + i))
                                goto out_flush;

                        p = compat_ptr(cp);
                } else {
                        if (get_user(p, pages + i))
                                goto out_flush;
                }
                if (get_user(node, nodes + i))
                        goto out_flush;

mm/mmap.c: 46 lines changed
@@ -583,11 +583,12 @@ again:
 * dup_anon_vma() - Helper function to duplicate anon_vma
 * @dst: The destination VMA
 * @src: The source VMA
 * @dup: Pointer to the destination VMA when successful.
 *
 * Returns: 0 on success.
 */
static inline int dup_anon_vma(struct vm_area_struct *dst,
                struct vm_area_struct *src)
                struct vm_area_struct *src, struct vm_area_struct **dup)
{
        /*
         * Easily overlooked: when mprotect shifts the boundary, make sure the
@@ -595,9 +596,15 @@ static inline int dup_anon_vma(struct vm_area_struct *dst,
         * anon pages imported.
         */
        if (src->anon_vma && !dst->anon_vma) {
                int ret;

                vma_assert_write_locked(dst);
                dst->anon_vma = src->anon_vma;
                return anon_vma_clone(dst, src);
                ret = anon_vma_clone(dst, src);
                if (ret)
                        return ret;

                *dup = dst;
        }

        return 0;
@@ -624,6 +631,7 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
               unsigned long start, unsigned long end, pgoff_t pgoff,
               struct vm_area_struct *next)
{
        struct vm_area_struct *anon_dup = NULL;
        bool remove_next = false;
        struct vma_prepare vp;

@@ -633,7 +641,7 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,

                remove_next = true;
                vma_start_write(next);
                ret = dup_anon_vma(vma, next);
                ret = dup_anon_vma(vma, next, &anon_dup);
                if (ret)
                        return ret;
        }
@@ -661,6 +669,8 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
        return 0;

nomem:
        if (anon_dup)
                unlink_anon_vmas(anon_dup);
        return -ENOMEM;
}

@@ -860,6 +870,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
{
        struct vm_area_struct *curr, *next, *res;
        struct vm_area_struct *vma, *adjust, *remove, *remove2;
        struct vm_area_struct *anon_dup = NULL;
        struct vma_prepare vp;
        pgoff_t vma_pgoff;
        int err = 0;
@@ -927,18 +938,18 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
                vma_start_write(next);
                remove = next;                  /* case 1 */
                vma_end = next->vm_end;
                err = dup_anon_vma(prev, next);
                err = dup_anon_vma(prev, next, &anon_dup);
                if (curr) {                     /* case 6 */
                        vma_start_write(curr);
                        remove = curr;
                        remove2 = next;
                        if (!next->anon_vma)
                                err = dup_anon_vma(prev, curr);
                                err = dup_anon_vma(prev, curr, &anon_dup);
                }
        } else if (merge_prev) {                /* case 2 */
                if (curr) {
                        vma_start_write(curr);
                        err = dup_anon_vma(prev, curr);
                        err = dup_anon_vma(prev, curr, &anon_dup);
                        if (end == curr->vm_end) {      /* case 7 */
                                remove = curr;
                        } else {                        /* case 5 */
@@ -954,7 +965,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
                        vma_end = addr;
                        adjust = next;
                        adj_start = -(prev->vm_end - addr);
                        err = dup_anon_vma(next, prev);
                        err = dup_anon_vma(next, prev, &anon_dup);
                } else {
                        /*
                         * Note that cases 3 and 8 are the ONLY ones where prev
@@ -968,14 +979,14 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
                                vma_pgoff = curr->vm_pgoff;
                                vma_start_write(curr);
                                remove = curr;
                                err = dup_anon_vma(next, curr);
                                err = dup_anon_vma(next, curr, &anon_dup);
                        }
                }
        }

        /* Error in anon_vma clone. */
        if (err)
                return NULL;
                goto anon_vma_fail;

        if (vma_start < vma->vm_start || vma_end > vma->vm_end)
                vma_expanded = true;
@@ -988,7 +999,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
        }

        if (vma_iter_prealloc(vmi, vma))
                return NULL;
                goto prealloc_fail;

        init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
        VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
@@ -1016,6 +1027,15 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
        vma_complete(&vp, vmi, mm);
        khugepaged_enter_vma(res, vm_flags);
        return res;

prealloc_fail:
        if (anon_dup)
                unlink_anon_vmas(anon_dup);

anon_vma_fail:
        vma_iter_set(vmi, addr);
        vma_iter_load(vmi);
        return NULL;
}

/*
@@ -3141,13 +3161,13 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
        if (!len)
                return 0;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        /* Until we need other flags, refuse anything except VM_EXEC. */
        if ((flags & (~VM_EXEC)) != 0)
                return -EINVAL;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        ret = check_brk_limits(addr, len);
        if (ret)
                goto limits_failed;
@@ -6475,6 +6475,7 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
                        next_page = page;
                        current_buddy = page + size;
                }
                page = next_page;

                if (set_page_guard(zone, current_buddy, high, migratetype))
                        continue;
@@ -6482,7 +6483,6 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
                if (current_buddy != target) {
                        add_to_free_list(current_buddy, zone, high, migratetype);
                        set_buddy_order(current_buddy, high);
                        page = next_page;
                }
        }
}
@@ -1383,8 +1383,8 @@ reject:

shrink:
        pool = zswap_pool_last_get();
        if (pool)
                queue_work(shrink_wq, &pool->shrink_work);
        if (pool && !queue_work(shrink_wq, &pool->shrink_work))
                zswap_pool_put(pool);
        goto reject;
}

tools/include/linux/rwsem.h: 40 lines (new file)
@@ -0,0 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _TOOLS__RWSEM_H
#define _TOOLS__RWSEM_H

#include <pthread.h>

struct rw_semaphore {
        pthread_rwlock_t lock;
};

static inline int init_rwsem(struct rw_semaphore *sem)
{
        return pthread_rwlock_init(&sem->lock, NULL);
}

static inline int exit_rwsem(struct rw_semaphore *sem)
{
        return pthread_rwlock_destroy(&sem->lock);
}

static inline int down_read(struct rw_semaphore *sem)
{
        return pthread_rwlock_rdlock(&sem->lock);
}

static inline int up_read(struct rw_semaphore *sem)
{
        return pthread_rwlock_unlock(&sem->lock);
}

static inline int down_write(struct rw_semaphore *sem)
{
        return pthread_rwlock_wrlock(&sem->lock);
}

static inline int up_write(struct rw_semaphore *sem)
{
        return pthread_rwlock_unlock(&sem->lock);
}
#endif /* _TOOLS_RWSEM_H */
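Note: the new header maps the kernel's rw_semaphore calls onto a pthread rwlock so the userspace maple tree test can take the tree's external lock. A standalone sketch of how the shim might be exercised; the writer thread and shared counter are illustrative and not part of this patch (build with the tools/include path and -lpthread):

#include <linux/rwsem.h>        /* the tools/include shim shown above */
#include <pthread.h>
#include <stdio.h>

static struct rw_semaphore sem;
static int shared;

static void *writer(void *arg)
{
        down_write(&sem);       /* exclusive: pthread_rwlock_wrlock() */
        shared++;
        up_write(&sem);
        return NULL;
}

int main(void)
{
        pthread_t t;

        init_rwsem(&sem);
        pthread_create(&t, NULL, writer, NULL);
        pthread_join(t, NULL);

        down_read(&sem);        /* shared: pthread_rwlock_rdlock() */
        printf("shared = %d\n", shared);
        up_read(&sem);

        return exit_rwsem(&sem);
}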
@@ -196,7 +196,12 @@ int main(int argc, char *argv[])
                    CLONE3_ARGS_NO_TEST);

        /* Do a clone3() in a new time namespace */
        test_clone3(CLONE_NEWTIME, 0, 0, CLONE3_ARGS_NO_TEST);
        if (access("/proc/self/ns/time", F_OK) == 0) {
                test_clone3(CLONE_NEWTIME, 0, 0, CLONE3_ARGS_NO_TEST);
        } else {
                ksft_print_msg("Time namespaces are not supported\n");
                ksft_test_result_skip("Skipping clone3() with CLONE_NEWTIME\n");
        }

        /* Do a clone3() with exit signal (SIGCHLD) in flags */
        test_clone3(SIGCHLD, 0, -EINVAL, CLONE3_ARGS_NO_TEST);
@@ -7,6 +7,7 @@
 */
#define _GNU_SOURCE
#include <sys/mman.h>
#include <linux/mman.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>