Merge tag 'mm-hotfixes-stable-2024-10-09-15-46' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "12 hotfixes, 5 of which are c:stable. All singletons, about half of
  which are MM"

* tag 'mm-hotfixes-stable-2024-10-09-15-46' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm: zswap: delete comments for "value" member of 'struct zswap_entry'.
  CREDITS: sort alphabetically by name
  secretmem: disable memfd_secret() if arch cannot set direct map
  .mailmap: update Fangrui's email
  mm/huge_memory: check pmd_special() only after pmd_present()
  resource, kunit: fix user-after-free in resource_test_region_intersects()
  fs/proc/kcore.c: allow translation of physical memory addresses
  selftests/mm: fix incorrect buffer->mirror size in hmm2 double_map test
  device-dax: correct pgoff align in dax_set_mapping()
  kthread: unpark only parked kthread
  Revert "mm: introduce PF_MEMALLOC_NORECLAIM, PF_MEMALLOC_NOWARN"
  bcachefs: do not use PF_MEMALLOC_NORECLAIM
commit d3d1556696
Linus Torvalds, 2024-10-09 16:01:40 -07:00
18 changed files with 117 additions and 73 deletions


@@ -203,6 +203,7 @@ Ezequiel Garcia <ezequiel@vanguardiasur.com.ar> <ezequiel@collabora.com>
 Faith Ekstrand <faith.ekstrand@collabora.com> <jason@jlekstrand.net>
 Faith Ekstrand <faith.ekstrand@collabora.com> <jason.ekstrand@intel.com>
 Faith Ekstrand <faith.ekstrand@collabora.com> <jason.ekstrand@collabora.com>
+Fangrui Song <i@maskray.me> <maskray@google.com>
 Felipe W Damasio <felipewd@terra.com.br>
 Felix Kuhling <fxkuehl@gmx.de>
 Felix Moeller <felix@derklecks.de>

CREDITS

@@ -1358,10 +1358,6 @@ D: Major kbuild rework during the 2.5 cycle
 D: ISDN Maintainer
 S: USA
 
-N: Gerrit Renker
-E: gerrit@erg.abdn.ac.uk
-D: DCCP protocol support.
-
 N: Philip Gladstone
 E: philip@gladstonefamily.net
 D: Kernel / timekeeping stuff
@@ -1677,11 +1673,6 @@ W: http://www.carumba.com/
 D: bug toaster (A1 sauce makes all the difference)
 D: Random linux hacker
 
-N: James Hogan
-E: jhogan@kernel.org
-D: Metag architecture maintainer
-D: TZ1090 SoC maintainer
-
 N: Tim Hockin
 E: thockin@hockin.org
 W: http://www.hockin.org/~thockin
@@ -1697,6 +1688,11 @@ D: hwmon subsystem maintainer
 D: i2c-sis96x and i2c-stub SMBus drivers
 S: USA
 
+N: James Hogan
+E: jhogan@kernel.org
+D: Metag architecture maintainer
+D: TZ1090 SoC maintainer
+
 N: Dirk Hohndel
 E: hohndel@suse.de
 D: The XFree86[tm] Project
@@ -1872,6 +1868,10 @@ S: K osmidomkum 723
 S: 160 00 Praha 6
 S: Czech Republic
 
+N: Seth Jennings
+E: sjenning@redhat.com
+D: Creation and maintenance of zswap
+
 N: Jeremy Kerr
 D: Maintainer of SPU File System
@@ -2188,19 +2188,6 @@ N: Mike Kravetz
 E: mike.kravetz@oracle.com
 D: Maintenance and development of the hugetlb subsystem
 
-N: Seth Jennings
-E: sjenning@redhat.com
-D: Creation and maintenance of zswap
-
-N: Dan Streetman
-E: ddstreet@ieee.org
-D: Maintenance and development of zswap
-D: Creation and maintenance of the zpool API
-
-N: Vitaly Wool
-E: vitaly.wool@konsulko.com
-D: Maintenance and development of zswap
-
 N: Andreas S. Krebs
 E: akrebs@altavista.net
 D: CYPRESS CY82C693 chipset IDE, Digital's PC-Alpha 164SX boards
@@ -3191,6 +3178,11 @@ N: Ken Pizzini
 E: ken@halcyon.com
 D: CDROM driver "sonycd535" (Sony CDU-535/531)
 
+N: Mathieu Poirier
+E: mathieu.poirier@linaro.org
+D: CoreSight kernel subsystem, Maintainer 2014-2022
+D: Perf tool support for CoreSight
+
 N: Stelian Pop
 E: stelian@popies.net
 P: 1024D/EDBB6147 7B36 0E07 04BC 11DC A7A0 D3F7 7185 9E7A EDBB 6147
@@ -3300,6 +3292,10 @@ S: Schlossbergring 9
 S: 79098 Freiburg
 S: Germany
 
+N: Gerrit Renker
+E: gerrit@erg.abdn.ac.uk
+D: DCCP protocol support.
+
 N: Thomas Renninger
 E: trenn@suse.de
 D: cpupowerutils
@@ -3576,11 +3572,6 @@ D: several improvements to system programs
 S: Oldenburg
 S: Germany
 
-N: Mathieu Poirier
-E: mathieu.poirier@linaro.org
-D: CoreSight kernel subsystem, Maintainer 2014-2022
-D: Perf tool support for CoreSight
-
 N: Robert Schwebel
 E: robert@schwebel.de
 W: https://www.schwebel.de
@@ -3771,6 +3762,11 @@ S: Chr. Winthersvej 1 B, st.th.
 S: DK-1860 Frederiksberg C
 S: Denmark
 
+N: Dan Streetman
+E: ddstreet@ieee.org
+D: Maintenance and development of zswap
+D: Creation and maintenance of the zpool API
+
 N: Drew Sullivan
 E: drew@ss.org
 W: http://www.ss.org/
@@ -4286,6 +4282,10 @@ S: Pipers Way
 S: Swindon. SN3 1RJ
 S: England
 
+N: Vitaly Wool
+E: vitaly.wool@konsulko.com
+D: Maintenance and development of zswap
+
 N: Chris Wright
 E: chrisw@sous-sol.org
 D: hacking on LSM framework and security modules.


@@ -16,8 +16,10 @@
 #include <asm/pci_io.h>
 
 #define xlate_dev_mem_ptr xlate_dev_mem_ptr
+#define kc_xlate_dev_mem_ptr xlate_dev_mem_ptr
 void *xlate_dev_mem_ptr(phys_addr_t phys);
 #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
+#define kc_unxlate_dev_mem_ptr unxlate_dev_mem_ptr
 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
 
 #define IO_SPACE_LIMIT 0


@@ -86,7 +86,7 @@ static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
         nr_pages = 1;
 
     pgoff = linear_page_index(vmf->vma,
-            ALIGN(vmf->address, fault_size));
+            ALIGN_DOWN(vmf->address, fault_size));
 
     for (i = 0; i < nr_pages; i++) {
         struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
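Side note (illustration, not part of the diff): ALIGN() rounds up to the next fault_size boundary while ALIGN_DOWN() rounds down to the start of the faulting area, which is why the old pgoff pointed one huge page too far. A small userspace sketch, using simplified re-implementations of the two macros for power-of-two alignments:

#include <stdio.h>

/* Simplified re-implementations of the kernel's ALIGN()/ALIGN_DOWN()
 * helpers for power-of-two alignments (illustration only). */
#define ALIGN_DOWN(x, a)  ((x) & ~((a) - 1))
#define ALIGN(x, a)       ALIGN_DOWN((x) + (a) - 1, (a))

int main(void)
{
    unsigned long fault_size = 0x200000;    /* 2 MiB PMD fault */
    unsigned long address    = 0x40301000;  /* somewhere inside that PMD */

    /* Old code rounded up, pointing at the next PMD boundary;
     * the fix rounds down to the start of the faulting PMD. */
    printf("ALIGN      = %#lx\n", ALIGN(address, fault_size));       /* 0x40400000 */
    printf("ALIGN_DOWN = %#lx\n", ALIGN_DOWN(address, fault_size));  /* 0x40200000 */
    return 0;
}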


@@ -300,10 +300,10 @@ static struct inode *bch2_alloc_inode(struct super_block *sb)
     BUG();
 }
 
-static struct bch_inode_info *__bch2_new_inode(struct bch_fs *c)
+static struct bch_inode_info *__bch2_new_inode(struct bch_fs *c, gfp_t gfp)
 {
     struct bch_inode_info *inode = alloc_inode_sb(c->vfs_sb,
-                        bch2_inode_cache, GFP_NOFS);
+                        bch2_inode_cache, gfp);
 
     if (!inode)
         return NULL;
@@ -315,7 +315,7 @@ static struct bch_inode_info *__bch2_new_inode(struct bch_fs *c)
     mutex_init(&inode->ei_quota_lock);
     memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));
 
-    if (unlikely(inode_init_always(c->vfs_sb, &inode->v))) {
+    if (unlikely(inode_init_always_gfp(c->vfs_sb, &inode->v, gfp))) {
         kmem_cache_free(bch2_inode_cache, inode);
         return NULL;
     }
@@ -328,12 +328,10 @@ static struct bch_inode_info *__bch2_new_inode(struct bch_fs *c)
  */
 static struct bch_inode_info *bch2_new_inode(struct btree_trans *trans)
 {
-    struct bch_inode_info *inode =
-        memalloc_flags_do(PF_MEMALLOC_NORECLAIM|PF_MEMALLOC_NOWARN,
-                  __bch2_new_inode(trans->c));
+    struct bch_inode_info *inode = __bch2_new_inode(trans->c, GFP_NOWAIT);
 
     if (unlikely(!inode)) {
-        int ret = drop_locks_do(trans, (inode = __bch2_new_inode(trans->c)) ? 0 : -ENOMEM);
+        int ret = drop_locks_do(trans, (inode = __bch2_new_inode(trans->c, GFP_NOFS)) ? 0 : -ENOMEM);
         if (ret && inode) {
             __destroy_inode(&inode->v);
             kmem_cache_free(bch2_inode_cache, inode);
@@ -407,7 +405,7 @@ __bch2_create(struct mnt_idmap *idmap,
     if (ret)
         return ERR_PTR(ret);
 #endif
-    inode = __bch2_new_inode(c);
+    inode = __bch2_new_inode(c, GFP_NOFS);
     if (unlikely(!inode)) {
         inode = ERR_PTR(-ENOMEM);
         goto err;
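Side note (illustrative sketch, not from the commit): the replacement pattern is a plain two-step allocation, where a non-blocking GFP_NOWAIT attempt is made while locks are held and, only if that fails, the locks are dropped and the allocation retried with a normal GFP_NOFS mask. All names below (my_trans, my_obj, my_cache, my_trans_unlock) are hypothetical stand-ins:

#include <linux/slab.h>

struct my_trans;
struct my_obj;
extern struct kmem_cache *my_cache;
void my_trans_unlock(struct my_trans *trans);

/* Sketch of the "try GFP_NOWAIT under locks, drop locks and retry" idiom. */
static struct my_obj *my_obj_alloc(struct my_trans *trans)
{
    /* Fast path: never enter direct reclaim while locks are held. */
    struct my_obj *obj = kmem_cache_zalloc(my_cache, GFP_NOWAIT);

    if (!obj) {
        /* Slow path: make blocking safe first, then allow reclaim. */
        my_trans_unlock(trans);
        obj = kmem_cache_zalloc(my_cache, GFP_NOFS);
    }
    return obj;
}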


@@ -146,14 +146,16 @@ static int no_open(struct inode *inode, struct file *file)
 }
 
 /**
- * inode_init_always - perform inode structure initialisation
+ * inode_init_always_gfp - perform inode structure initialisation
  * @sb: superblock inode belongs to
  * @inode: inode to initialise
+ * @gfp: allocation flags
  *
  * These are initializations that need to be done on every inode
  * allocation as the fields are not initialised by slab allocation.
+ * If there are additional allocations required @gfp is used.
  */
-int inode_init_always(struct super_block *sb, struct inode *inode)
+int inode_init_always_gfp(struct super_block *sb, struct inode *inode, gfp_t gfp)
 {
     static const struct inode_operations empty_iops;
     static const struct file_operations no_open_fops = {.open = no_open};
@@ -230,14 +232,14 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
 #endif
     inode->i_flctx = NULL;
 
-    if (unlikely(security_inode_alloc(inode)))
+    if (unlikely(security_inode_alloc(inode, gfp)))
         return -ENOMEM;
 
     this_cpu_inc(nr_inodes);
 
     return 0;
 }
-EXPORT_SYMBOL(inode_init_always);
+EXPORT_SYMBOL(inode_init_always_gfp);
 
 void free_inode_nonrcu(struct inode *inode)
 {


@@ -50,6 +50,20 @@ static struct proc_dir_entry *proc_root_kcore;
 #define kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
 #endif
 
+#ifndef kc_xlate_dev_mem_ptr
+#define kc_xlate_dev_mem_ptr kc_xlate_dev_mem_ptr
+static inline void *kc_xlate_dev_mem_ptr(phys_addr_t phys)
+{
+    return __va(phys);
+}
+#endif
+#ifndef kc_unxlate_dev_mem_ptr
+#define kc_unxlate_dev_mem_ptr kc_unxlate_dev_mem_ptr
+static inline void kc_unxlate_dev_mem_ptr(phys_addr_t phys, void *virt)
+{
+}
+#endif
+
 static LIST_HEAD(kclist_head);
 static DECLARE_RWSEM(kclist_lock);
 static int kcore_need_update = 1;
@@ -471,6 +485,8 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
     while (buflen) {
         struct page *page;
         unsigned long pfn;
+        phys_addr_t phys;
+        void *__start;
 
         /*
          * If this is the first iteration or the address is not within
@@ -537,7 +553,8 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
             }
             break;
         case KCORE_RAM:
-            pfn = __pa(start) >> PAGE_SHIFT;
+            phys = __pa(start);
+            pfn = phys >> PAGE_SHIFT;
             page = pfn_to_online_page(pfn);
 
             /*
@@ -557,13 +574,28 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
             fallthrough;
         case KCORE_VMEMMAP:
         case KCORE_TEXT:
+            if (m->type == KCORE_RAM) {
+                __start = kc_xlate_dev_mem_ptr(phys);
+                if (!__start) {
+                    ret = -ENOMEM;
+                    if (iov_iter_zero(tsz, iter) != tsz)
+                        ret = -EFAULT;
+                    goto out;
+                }
+            } else {
+                __start = (void *)start;
+            }
+
             /*
              * Sadly we must use a bounce buffer here to be able to
              * make use of copy_from_kernel_nofault(), as these
              * memory regions might not always be mapped on all
              * architectures.
              */
-            if (copy_from_kernel_nofault(buf, (void *)start, tsz)) {
+            ret = copy_from_kernel_nofault(buf, __start, tsz);
+            if (m->type == KCORE_RAM)
+                kc_unxlate_dev_mem_ptr(phys, __start);
+            if (ret) {
                 if (iov_iter_zero(tsz, iter) != tsz) {
                     ret = -EFAULT;
                     goto out;


@@ -3082,7 +3082,12 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int whence);
 extern loff_t vfs_llseek(struct file *file, loff_t offset, int whence);
 
-extern int inode_init_always(struct super_block *, struct inode *);
+extern int inode_init_always_gfp(struct super_block *, struct inode *, gfp_t);
+static inline int inode_init_always(struct super_block *sb, struct inode *inode)
+{
+    return inode_init_always_gfp(sb, inode, GFP_NOFS);
+}
+
 extern void inode_init_once(struct inode *);
 extern void address_space_init_once(struct address_space *mapping);
 extern struct inode * igrab(struct inode *);


@@ -1681,8 +1681,8 @@ extern struct pid *cad_pid;
                          * I am cleaning dirty pages from some other bdi. */
 #define PF_KTHREAD 0x00200000 /* I am a kernel thread */
 #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
-#define PF_MEMALLOC_NORECLAIM 0x00800000 /* All allocation requests will clear __GFP_DIRECT_RECLAIM */
-#define PF_MEMALLOC_NOWARN 0x01000000 /* All allocation requests will inherit __GFP_NOWARN */
+#define PF__HOLE__00800000 0x00800000
+#define PF__HOLE__01000000 0x01000000
 #define PF__HOLE__02000000 0x02000000
 #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
 #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */


@@ -251,25 +251,16 @@ static inline gfp_t current_gfp_context(gfp_t flags)
 {
     unsigned int pflags = READ_ONCE(current->flags);
 
-    if (unlikely(pflags & (PF_MEMALLOC_NOIO |
-                   PF_MEMALLOC_NOFS |
-                   PF_MEMALLOC_NORECLAIM |
-                   PF_MEMALLOC_NOWARN |
-                   PF_MEMALLOC_PIN))) {
+    if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
         /*
-         * Stronger flags before weaker flags:
-         * NORECLAIM implies NOIO, which in turn implies NOFS
+         * NOIO implies both NOIO and NOFS and it is a weaker context
          * so always make sure it makes precedence
          */
-        if (pflags & PF_MEMALLOC_NORECLAIM)
-            flags &= ~__GFP_DIRECT_RECLAIM;
-        else if (pflags & PF_MEMALLOC_NOIO)
+        if (pflags & PF_MEMALLOC_NOIO)
             flags &= ~(__GFP_IO | __GFP_FS);
         else if (pflags & PF_MEMALLOC_NOFS)
             flags &= ~__GFP_FS;
 
-        if (pflags & PF_MEMALLOC_NOWARN)
-            flags |= __GFP_NOWARN;
-
         if (pflags & PF_MEMALLOC_PIN)
             flags &= ~__GFP_MOVABLE;
     }
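Side note (illustration, not part of the diff): after the revert, callers that must constrain nested allocations keep using the long-standing scoped APIs, e.g. memalloc_nofs_save()/memalloc_nofs_restore(), which set PF_MEMALLOC_NOFS so that current_gfp_context() strips __GFP_FS from any allocation made inside the section:

#include <linux/sched/mm.h>
#include <linux/slab.h>

/* Sketch: inside the NOFS scope a nested GFP_KERNEL allocation behaves
 * like GFP_NOFS, so it cannot recurse back into the filesystem. */
static void *alloc_in_fs_critical_section(size_t size)
{
    unsigned int nofs_flags = memalloc_nofs_save();
    void *p = kmalloc(size, GFP_KERNEL);    /* effectively GFP_NOFS here */

    memalloc_nofs_restore(nofs_flags);
    return p;
}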


@@ -348,7 +348,7 @@ int security_dentry_create_files_as(struct dentry *dentry, int mode,
                     struct cred *new);
 int security_path_notify(const struct path *path, u64 mask,
             unsigned int obj_type);
-int security_inode_alloc(struct inode *inode);
+int security_inode_alloc(struct inode *inode, gfp_t gfp);
 void security_inode_free(struct inode *inode);
 int security_inode_init_security(struct inode *inode, struct inode *dir,
                  const struct qstr *qstr,
@@ -789,7 +789,7 @@ static inline int security_path_notify(const struct path *path, u64 mask,
     return 0;
 }
 
-static inline int security_inode_alloc(struct inode *inode)
+static inline int security_inode_alloc(struct inode *inode, gfp_t gfp)
 {
     return 0;
 }


@@ -623,6 +623,8 @@ void kthread_unpark(struct task_struct *k)
 {
     struct kthread *kthread = to_kthread(k);
 
+    if (!test_bit(KTHREAD_SHOULD_PARK, &kthread->flags))
+        return;
     /*
      * Newly created kthread was parked when the CPU was offline.
      * The binding was lost and we need to set it again.
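Side note (illustration, not from the commit): with the early return, kthread_unpark() on a task that was never parked is simply a no-op. A hypothetical caller (my_thread_fn is a stand-in):

#include <linux/err.h>
#include <linux/kthread.h>

int my_thread_fn(void *data);   /* hypothetical worker function */

static int start_worker(void *data)
{
    struct task_struct *tsk = kthread_run(my_thread_fn, data, "my-worker");

    if (IS_ERR(tsk))
        return PTR_ERR(tsk);

    kthread_unpark(tsk);    /* never parked: now just returns */
    return 0;
}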


@@ -169,6 +169,8 @@ static void resource_test_intersection(struct kunit *test)
 #define RES_TEST_RAM3_SIZE SZ_1M
 #define RES_TEST_TOTAL_SIZE ((RES_TEST_WIN1_OFFSET + RES_TEST_WIN1_SIZE))
 
+KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);
+
 static void remove_free_resource(void *ctx)
 {
     struct resource *res = (struct resource *)ctx;
@@ -177,6 +179,14 @@ static void remove_free_resource(void *ctx)
     kfree(res);
 }
 
+static void resource_test_add_action_or_abort(
+    struct kunit *test, void (*action)(void *), void *ctx)
+{
+    KUNIT_ASSERT_EQ_MSG(test, 0,
+                kunit_add_action_or_reset(test, action, ctx),
+                "Fail to add action");
+}
+
 static void resource_test_request_region(struct kunit *test, struct resource *parent,
                     resource_size_t start, resource_size_t size,
                     const char *name, unsigned long flags)
@@ -185,7 +195,7 @@ static void resource_test_request_region(struct kunit *test, struct resource *parent,
     res = __request_region(parent, start, size, name, flags);
     KUNIT_ASSERT_NOT_NULL(test, res);
 
-    kunit_add_action_or_reset(test, remove_free_resource, res);
+    resource_test_add_action_or_abort(test, remove_free_resource, res);
 }
 
 static void resource_test_insert_resource(struct kunit *test, struct resource *parent,
@@ -202,11 +212,11 @@ static void resource_test_insert_resource(struct kunit *test, struct resource *parent,
     res->end = start + size - 1;
     res->flags = flags;
 
     if (insert_resource(parent, res)) {
-        kfree(res);
+        resource_test_add_action_or_abort(test, kfree_wrapper, res);
         KUNIT_FAIL_AND_ABORT(test, "Fail to insert resource %pR\n", res);
     }
 
-    kunit_add_action_or_reset(test, remove_free_resource, res);
+    resource_test_add_action_or_abort(test, remove_free_resource, res);
 }
 
 static void resource_test_region_intersects(struct kunit *test)
@@ -220,7 +230,7 @@ static void resource_test_region_intersects(struct kunit *test)
                     "test resources");
     KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
     start = parent->start;
-    kunit_add_action_or_reset(test, remove_free_resource, parent);
+    resource_test_add_action_or_abort(test, remove_free_resource, parent);
 
     resource_test_request_region(test, parent, start + RES_TEST_RAM0_OFFSET,
                 RES_TEST_RAM0_SIZE, "Test System RAM 0", flags);
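Side note (illustrative, not part of the diff): kunit_add_action_or_reset() can itself fail, in which case it runs the action immediately and returns an error; the new helper turns every registration into an assertion. The same idiom in generic form:

#include <kunit/resource.h>
#include <kunit/test.h>
#include <linux/slab.h>

KUNIT_DEFINE_ACTION_WRAPPER(kfree_action, kfree, const void *);

/* Sketch: register a deferred kfree() and abort the test if the
 * registration fails (the action has already run in that case). */
static void example_register_cleanup(struct kunit *test, void *obj)
{
    KUNIT_ASSERT_EQ_MSG(test, 0,
                kunit_add_action_or_reset(test, kfree_action, obj),
                "Fail to add action");
}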


@@ -1586,7 +1586,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
     int ret = -ENOMEM;
 
     pmd = pmdp_get_lockless(src_pmd);
-    if (unlikely(pmd_special(pmd))) {
+    if (unlikely(pmd_present(pmd) && pmd_special(pmd))) {
         dst_ptl = pmd_lock(dst_mm, dst_pmd);
         src_ptl = pmd_lockptr(src_mm, src_pmd);
         spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);


@@ -238,7 +238,7 @@ SYSCALL_DEFINE1(memfd_secret, unsigned int, flags)
     /* make sure local flags do not confict with global fcntl.h */
     BUILD_BUG_ON(SECRETMEM_FLAGS_MASK & O_CLOEXEC);
 
-    if (!secretmem_enable)
+    if (!secretmem_enable || !can_set_direct_map())
         return -ENOSYS;
 
     if (flags & ~(SECRETMEM_FLAGS_MASK | O_CLOEXEC))
@@ -280,7 +280,7 @@ static struct file_system_type secretmem_fs = {
 
 static int __init secretmem_init(void)
 {
-    if (!secretmem_enable)
+    if (!secretmem_enable || !can_set_direct_map())
         return 0;
 
     secretmem_mnt = kern_mount(&secretmem_fs);
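Side note (illustrative, not part of the diff): from userspace the change surfaces as ENOSYS on kernels whose architecture cannot modify the direct map, rather than handing back memory that is not actually removed from it. A minimal probe, assuming the libc headers define SYS_memfd_secret:

#include <errno.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    long fd = syscall(SYS_memfd_secret, 0);

    if (fd < 0 && errno == ENOSYS) {
        puts("memfd_secret() not supported on this kernel");
        return 1;
    }
    printf("secret memfd: %ld\n", fd);
    return 0;
}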


@@ -190,7 +190,6 @@ static struct shrinker *zswap_shrinker;
  * section for context.
  * pool - the zswap_pool the entry's data is in
  * handle - zpool allocation handle that stores the compressed page data
- * value - value of the same-value filled pages which have same content
  * objcg - the obj_cgroup that the compressed memory is charged to
  * lru - handle to the pool's lru used to evict pages.
  */


@@ -740,19 +740,20 @@ static int lsm_file_alloc(struct file *file)
 /**
  * lsm_inode_alloc - allocate a composite inode blob
  * @inode: the inode that needs a blob
+ * @gfp: allocation flags
  *
  * Allocate the inode blob for all the modules
  *
  * Returns 0, or -ENOMEM if memory can't be allocated.
  */
-static int lsm_inode_alloc(struct inode *inode)
+static int lsm_inode_alloc(struct inode *inode, gfp_t gfp)
 {
     if (!lsm_inode_cache) {
         inode->i_security = NULL;
         return 0;
     }
 
-    inode->i_security = kmem_cache_zalloc(lsm_inode_cache, GFP_NOFS);
+    inode->i_security = kmem_cache_zalloc(lsm_inode_cache, gfp);
     if (inode->i_security == NULL)
         return -ENOMEM;
     return 0;
@@ -1678,6 +1679,7 @@ int security_path_notify(const struct path *path, u64 mask,
 /**
  * security_inode_alloc() - Allocate an inode LSM blob
  * @inode: the inode
+ * @gfp: allocation flags
  *
  * Allocate and attach a security structure to @inode->i_security. The
  * i_security field is initialized to NULL when the inode structure is
@@ -1685,9 +1687,9 @@ int security_path_notify(const struct path *path, u64 mask,
  *
  * Return: Return 0 if operation was successful.
  */
-int security_inode_alloc(struct inode *inode)
+int security_inode_alloc(struct inode *inode, gfp_t gfp)
 {
-    int rc = lsm_inode_alloc(inode);
+    int rc = lsm_inode_alloc(inode, gfp);
 
     if (unlikely(rc))
         return rc;


@@ -1657,7 +1657,7 @@ TEST_F(hmm2, double_map)
     buffer->fd = -1;
     buffer->size = size;
-    buffer->mirror = malloc(npages);
+    buffer->mirror = malloc(size);
     ASSERT_NE(buffer->mirror, NULL);
 
     /* Reserve a range of addresses. */
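Side note (worked example, not part of the diff): npages is a page count, not a byte count, so the old malloc(npages) reserved only a handful of bytes while the mirror is later used for npages * page_size bytes of data:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    unsigned long page_size = 4096;        /* typical value, for illustration */
    unsigned long npages    = 6;
    unsigned long size      = npages * page_size;

    char *old_mirror = malloc(npages);     /* 6 bytes -- undersized (old code) */
    char *new_mirror = malloc(size);       /* 24576 bytes -- what the test needs */

    printf("old: %lu bytes, fixed: %lu bytes\n", npages, size);
    free(old_mirror);
    free(new_mirror);
    return 0;
}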