Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "15 patches.

  Subsystems affected by this patch series: ipc, hexagon, mm (swap,
  slab-generic, kmemleak, hugetlb, kasan, damon, and highmem), and proc"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  proc/vmcore: fix clearing user buffer by properly using clear_user()
  kmap_local: don't assume kmap PTEs are linear arrays in memory
  mm/damon/dbgfs: fix missed use of damon_dbgfs_lock
  mm/damon/dbgfs: use '__GFP_NOWARN' for user-specified size buffer allocation
  kasan: test: silence intentional read overflow warnings
  hugetlb, userfaultfd: fix reservation restore on userfaultfd error
  hugetlb: fix hugetlb cgroup refcounting during mremap
  mm: kmemleak: slob: respect SLAB_NOLEAKTRACE flag
  hexagon: ignore vmlinux.lds
  hexagon: clean up timer-regs.h
  hexagon: export raw I/O routines for modules
  mm: emit the "free" trace report before freeing memory in kmem_cache_free()
  shm: extend forced shm destroy to support objects from several IPC nses
  ipc: WARN if trying to remove ipc object which is absent
  mm/swap.c:put_pages_list(): reinitialise the page list
commit 923dcc5eb0
arch/arm/Kconfig
@@ -1463,6 +1463,7 @@ config HIGHMEM
 	bool "High Memory Support"
 	depends on MMU
 	select KMAP_LOCAL
+	select KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
 	help
 	  The address space of ARM processors is only 4 Gigabytes large
 	  and it has to accommodate user address space, kernel address
arch/hexagon/include/asm/timer-regs.h (deleted)
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Timer support for Hexagon
- *
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _ASM_TIMER_REGS_H
-#define _ASM_TIMER_REGS_H
-
-/* This stuff should go into a platform specific file */
-#define TCX0_CLK_RATE		19200
-#define TIMER_ENABLE		0
-#define TIMER_CLR_ON_MATCH	1
-
-/*
- * 8x50 HDD Specs 5-8. Simulator co-sim not fixed until
- * release 1.1, and then it's "adjustable" and probably not defaulted.
- */
-#define RTOS_TIMER_INT		3
-#ifdef CONFIG_HEXAGON_COMET
-#define RTOS_TIMER_REGS_ADDR	0xAB000000UL
-#endif
-#define SLEEP_CLK_RATE		32000
-
-#endif
arch/hexagon/include/asm/timex.h
@@ -7,11 +7,10 @@
 #define _ASM_TIMEX_H
 
 #include <asm-generic/timex.h>
-#include <asm/timer-regs.h>
 #include <asm/hexagon_vm.h>
 
 /* Using TCX0 as our clock. CLOCK_TICK_RATE scheduled to be removed. */
-#define CLOCK_TICK_RATE	TCX0_CLK_RATE
+#define CLOCK_TICK_RATE	19200
 
 #define ARCH_HAS_READ_CURRENT_TIMER
 
arch/hexagon/kernel/.gitignore (new file)
@@ -0,0 +1 @@
+vmlinux.lds
arch/hexagon/kernel/time.c
@@ -17,9 +17,10 @@
 #include <linux/of_irq.h>
 #include <linux/module.h>
 
-#include <asm/timer-regs.h>
 #include <asm/hexagon_vm.h>
 
+#define TIMER_ENABLE		BIT(0)
+
 /*
  * For the clocksource we need:
  *  pcycle frequency (600MHz)
@@ -33,6 +34,13 @@ cycles_t pcycle_freq_mhz;
 cycles_t thread_freq_mhz;
 cycles_t sleep_clk_freq;
 
+/*
+ * 8x50 HDD Specs 5-8. Simulator co-sim not fixed until
+ * release 1.1, and then it's "adjustable" and probably not defaulted.
+ */
+#define RTOS_TIMER_INT		3
+#define RTOS_TIMER_REGS_ADDR	0xAB000000UL
+
 static struct resource rtos_timer_resources[] = {
 	{
 		.start = RTOS_TIMER_REGS_ADDR,
@@ -80,7 +88,7 @@ static int set_next_event(unsigned long delta, struct clock_event_device *evt)
 	iowrite32(0, &rtos_timer->clear);
 
 	iowrite32(delta, &rtos_timer->match);
-	iowrite32(1 << TIMER_ENABLE, &rtos_timer->enable);
+	iowrite32(TIMER_ENABLE, &rtos_timer->enable);
 	return 0;
 }
 
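One natural reviewer question on the last hunk: does iowrite32(TIMER_ENABLE, ...) still write the same register value as iowrite32(1 << TIMER_ENABLE, ...)? It does: the deleted header defined TIMER_ENABLE as bit position 0, while the new local define is the mask BIT(0), and both evaluate to 0x1. A standalone check, with BIT() re-defined locally since linux/bits.h is kernel-only:

    #include <stdio.h>

    #define BIT(n)              (1UL << (n))
    #define OLD_TIMER_ENABLE    0       /* bit position, from the deleted header */
    #define NEW_TIMER_ENABLE    BIT(0)  /* bit mask, the new local define */

    int main(void)
    {
        /* both styles produce the same enable bit: 0x1 */
        printf("old=%#lx new=%#lx\n",
               1UL << OLD_TIMER_ENABLE, NEW_TIMER_ENABLE);
        return 0;
    }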
arch/hexagon/lib/io.c
@@ -27,6 +27,7 @@ void __raw_readsw(const void __iomem *addr, void *data, int len)
 		*dst++ = *src;
 
 }
+EXPORT_SYMBOL(__raw_readsw);
 
 /*
  * __raw_writesw - read words a short at a time
@@ -47,6 +48,7 @@ void __raw_writesw(void __iomem *addr, const void *data, int len)
 
 
 }
+EXPORT_SYMBOL(__raw_writesw);
 
 /*  Pretty sure len is pre-adjusted for the length of the access already */
 void __raw_readsl(const void __iomem *addr, void *data, int len)
@@ -62,6 +64,7 @@ void __raw_readsl(const void __iomem *addr, void *data, int len)
 
 
 }
+EXPORT_SYMBOL(__raw_readsl);
 
 void __raw_writesl(void __iomem *addr, const void *data, int len)
 {
@@ -76,3 +79,4 @@ void __raw_writesl(void __iomem *addr, const void *data, int len)
 
 
 }
+EXPORT_SYMBOL(__raw_writesl);
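Why the exports matter: built-in code can call any kernel function, but a loadable module can only link against symbols explicitly exported with EXPORT_SYMBOL(). A minimal sketch of a module that would previously fail to load with "Unknown symbol __raw_readsw" (the MMIO address and the module shell are hypothetical, for illustration only):

    #include <linux/module.h>
    #include <linux/io.h>

    static int __init rawio_demo_init(void)
    {
        u16 buf[4];
        /* hypothetical device address, illustration only */
        void __iomem *regs = ioremap(0xAB000000UL, 0x1000);

        if (!regs)
            return -ENOMEM;
        /* resolves only because of EXPORT_SYMBOL(__raw_readsw) above */
        __raw_readsw(regs, buf, ARRAY_SIZE(buf));
        iounmap(regs);
        pr_info("rawio_demo: first word %u\n", buf[0]);
        return 0;
    }

    static void __exit rawio_demo_exit(void)
    {
    }

    module_init(rawio_demo_init);
    module_exit(rawio_demo_exit);
    MODULE_LICENSE("GPL");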
fs/proc/vmcore.c
@@ -154,9 +154,13 @@ ssize_t read_from_oldmem(char *buf, size_t count,
 			nr_bytes = count;
 
 		/* If pfn is not ram, return zeros for sparse dump files */
-		if (!pfn_is_ram(pfn))
-			memset(buf, 0, nr_bytes);
-		else {
+		if (!pfn_is_ram(pfn)) {
+			tmp = 0;
+			if (!userbuf)
+				memset(buf, 0, nr_bytes);
+			else if (clear_user(buf, nr_bytes))
+				tmp = -EFAULT;
+		} else {
 			if (encrypted)
 				tmp = copy_oldmem_page_encrypted(pfn, buf,
 								 nr_bytes,
@@ -165,12 +169,12 @@ ssize_t read_from_oldmem(char *buf, size_t count,
 			else
 				tmp = copy_oldmem_page(pfn, buf, nr_bytes,
 						       offset, userbuf);
-
-			if (tmp < 0) {
-				up_read(&vmcore_cb_rwsem);
-				return tmp;
-			}
 		}
+		if (tmp < 0) {
+			up_read(&vmcore_cb_rwsem);
+			return tmp;
+		}
+
 		*ppos += nr_bytes;
 		count -= nr_bytes;
 		buf += nr_bytes;
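The bug being fixed: when userbuf is set, buf is a user-space pointer, and zeroing it with memset() is an unchecked kernel write to user memory. A minimal sketch of the pattern the fix applies (the helper name is hypothetical; clear_user() is the real API and returns the number of bytes it could not clear):

    #include <linux/string.h>
    #include <linux/uaccess.h>

    /* Hypothetical helper: zero a destination that may live in user space. */
    static int zero_dest(char *buf, size_t nr_bytes, bool userbuf)
    {
        if (!userbuf) {
            memset(buf, 0, nr_bytes);   /* kernel buffer: direct write is fine */
            return 0;
        }
        /* user buffer: must go through the checked user-access path */
        if (clear_user((void __user *)buf, nr_bytes))
            return -EFAULT;
        return 0;
    }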
include/linux/hugetlb_cgroup.h
@@ -128,6 +128,13 @@ static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
 		css_get(resv_map->css);
 }
 
+static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
+						struct resv_map *resv_map)
+{
+	if (resv_map->css)
+		css_put(resv_map->css);
+}
+
 extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
 					struct hugetlb_cgroup **ptr);
 extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
@@ -211,6 +218,11 @@ static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
 {
 }
 
+static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
+					struct resv_map *resv_map)
+{
+}
+
 static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
 					       struct hugetlb_cgroup **ptr)
 {
include/linux/ipc_namespace.h
@@ -131,6 +131,16 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
 	return ns;
 }
 
+static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
+{
+	if (ns) {
+		if (refcount_inc_not_zero(&ns->ns.count))
+			return ns;
+	}
+
+	return NULL;
+}
+
 extern void put_ipc_ns(struct ipc_namespace *ns);
 #else
 static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
@@ -147,6 +157,11 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
 	return ns;
 }
 
+static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
+{
+	return ns;
+}
+
 static inline void put_ipc_ns(struct ipc_namespace *ns)
 {
 }
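Unlike get_ipc_ns(), which unconditionally bumps the count, the new helper only succeeds while the namespace refcount is still non-zero, so a caller can never resurrect a namespace that is already on its way to free_ipc_work(). A sketch of the intended caller pattern (the surrounding function is hypothetical):

    #include <linux/ipc_namespace.h>

    static bool do_work_in_ns(struct ipc_namespace *ns)
    {
        ns = get_ipc_ns_not_zero(ns);
        if (!ns)
            return false;   /* refcount already hit zero: namespace is dying */

        /* ... ns is pinned here and cannot be freed ... */

        put_ipc_ns(ns);     /* drop the reference taken above */
        return true;
    }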
include/linux/sched/task.h
@@ -158,7 +158,7 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4(). Also used in procfs. Also
  * pins the final release of task.io_context. Also protects ->cpuset and
- * ->cgroup.subsys[]. And ->vfork_done.
+ * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
  *
  * Nests both inside and outside of read_lock(&tasklist_lock).
  * It must not be nested with write_lock_irq(&tasklist_lock),
ipc/shm.c
@@ -62,9 +62,18 @@ struct shmid_kernel /* private to the kernel */
 	struct pid		*shm_lprid;
 	struct ucounts		*mlock_ucounts;
 
-	/* The task created the shm object. NULL if the task is dead. */
+	/*
+	 * The task created the shm object, for
+	 * task_lock(shp->shm_creator)
+	 */
 	struct task_struct	*shm_creator;
-	struct list_head	shm_clist;	/* list by creator */
+
+	/*
+	 * List by creator. task_lock(->shm_creator) required for read/write.
+	 * If list_empty(), then the creator is dead already.
+	 */
+	struct list_head	shm_clist;
+	struct ipc_namespace	*ns;
 } __randomize_layout;
 
 /* shm_mode upper byte flags */
@@ -115,6 +124,7 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 	struct shmid_kernel *shp;
 
 	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
+	WARN_ON(ns != shp->ns);
 
 	if (shp->shm_nattch) {
 		shp->shm_perm.mode |= SHM_DEST;
@@ -225,10 +235,43 @@ static void shm_rcu_free(struct rcu_head *head)
 	kfree(shp);
 }
 
-static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
+/*
+ * It has to be called with shp locked.
+ * It must be called before ipc_rmid()
+ */
+static inline void shm_clist_rm(struct shmid_kernel *shp)
 {
-	list_del(&s->shm_clist);
-	ipc_rmid(&shm_ids(ns), &s->shm_perm);
+	struct task_struct *creator;
+
+	/* ensure that shm_creator does not disappear */
+	rcu_read_lock();
+
+	/*
+	 * A concurrent exit_shm may do a list_del_init() as well.
+	 * Just do nothing if exit_shm already did the work
+	 */
+	if (!list_empty(&shp->shm_clist)) {
+		/*
+		 * shp->shm_creator is guaranteed to be valid *only*
+		 * if shp->shm_clist is not empty.
+		 */
+		creator = shp->shm_creator;
+
+		task_lock(creator);
+		/*
+		 * list_del_init() is a nop if the entry was already removed
+		 * from the list.
+		 */
+		list_del_init(&shp->shm_clist);
+		task_unlock(creator);
+	}
+	rcu_read_unlock();
+}
+
+static inline void shm_rmid(struct shmid_kernel *s)
+{
+	shm_clist_rm(s);
+	ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
 }
 
 
@@ -283,7 +326,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 	shm_file = shp->shm_file;
 	shp->shm_file = NULL;
 	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	shm_rmid(ns, shp);
+	shm_rmid(shp);
 	shm_unlock(shp);
 	if (!is_file_hugepages(shm_file))
 		shmem_lock(shm_file, 0, shp->mlock_ucounts);
@@ -303,10 +346,10 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
  *
  * 2) sysctl kernel.shm_rmid_forced is set to 1.
  */
-static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
+static bool shm_may_destroy(struct shmid_kernel *shp)
 {
 	return (shp->shm_nattch == 0) &&
-	       (ns->shm_rmid_forced ||
+	       (shp->ns->shm_rmid_forced ||
 		(shp->shm_perm.mode & SHM_DEST));
 }
 
@@ -337,7 +380,7 @@ static void shm_close(struct vm_area_struct *vma)
 	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
 	shp->shm_dtim = ktime_get_real_seconds();
 	shp->shm_nattch--;
-	if (shm_may_destroy(ns, shp))
+	if (shm_may_destroy(shp))
 		shm_destroy(ns, shp);
 	else
 		shm_unlock(shp);
@@ -358,10 +401,10 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
 	 *
 	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
-	if (shp->shm_creator != NULL)
+	if (!list_empty(&shp->shm_clist))
 		return 0;
 
-	if (shm_may_destroy(ns, shp)) {
+	if (shm_may_destroy(shp)) {
 		shm_lock_by_ptr(shp);
 		shm_destroy(ns, shp);
 	}
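The shm_clist_rm() logic above leans on a property of the kernel's list primitives: list_del_init() leaves the entry linked to itself, so list_empty() on the entry doubles as a "still on a list?" test and a second removal is harmless. A minimal sketch of the idiom (helper name hypothetical; in shm.c the callers serialize via task_lock()):

    #include <linux/list.h>

    /* Hypothetical helper: detach an entry at most once. */
    static void detach_once(struct list_head *entry)
    {
        if (!list_empty(entry))     /* self-linked after list_del_init() */
            list_del_init(entry);   /* idempotent removal */
    }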
@@ -379,48 +422,97 @@ void shm_destroy_orphaned(struct ipc_namespace *ns)
 /* Locking assumes this will only be called with task == current */
 void exit_shm(struct task_struct *task)
 {
-	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
-	struct shmid_kernel *shp, *n;
+	for (;;) {
+		struct shmid_kernel *shp;
+		struct ipc_namespace *ns;
 
-	if (list_empty(&task->sysvshm.shm_clist))
-		return;
+		task_lock(task);
+
+		if (list_empty(&task->sysvshm.shm_clist)) {
+			task_unlock(task);
+			break;
+		}
+
+		shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
+				shm_clist);
 
-	/*
-	 * If kernel.shm_rmid_forced is not set then only keep track of
-	 * which shmids are orphaned, so that a later set of the sysctl
-	 * can clean them up.
-	 */
-	if (!ns->shm_rmid_forced) {
-		down_read(&shm_ids(ns).rwsem);
-		list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
-			shp->shm_creator = NULL;
-		/*
-		 * Only under read lock but we are only called on current
-		 * so no entry on the list will be shared.
-		 */
-		list_del(&task->sysvshm.shm_clist);
-		up_read(&shm_ids(ns).rwsem);
-		return;
-	}
+		/*
+		 * 1) Get pointer to the ipc namespace. It is worth to say
+		 * that this pointer is guaranteed to be valid because
+		 * shp lifetime is always shorter than namespace lifetime
+		 * in which shp lives.
+		 * We taken task_lock it means that shp won't be freed.
+		 */
+		ns = shp->ns;
 
-	/*
-	 * Destroy all already created segments, that were not yet mapped,
-	 * and mark any mapped as orphan to cover the sysctl toggling.
-	 * Destroy is skipped if shm_may_destroy() returns false.
-	 */
-	down_write(&shm_ids(ns).rwsem);
-	list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
-		shp->shm_creator = NULL;
+		/*
+		 * 2) If kernel.shm_rmid_forced is not set then only keep track of
+		 * which shmids are orphaned, so that a later set of the sysctl
+		 * can clean them up.
+		 */
+		if (!ns->shm_rmid_forced)
+			goto unlink_continue;
 
-		if (shm_may_destroy(ns, shp)) {
-			shm_lock_by_ptr(shp);
-			shm_destroy(ns, shp);
-		}
-	}
+		/*
+		 * 3) get a reference to the namespace.
+		 *    The refcount could be already 0. If it is 0, then
+		 *    the shm objects will be free by free_ipc_work().
+		 */
+		ns = get_ipc_ns_not_zero(ns);
+		if (!ns) {
+unlink_continue:
+			list_del_init(&shp->shm_clist);
+			task_unlock(task);
+			continue;
+		}
 
-	/* Remove the list head from any segments still attached. */
-	list_del(&task->sysvshm.shm_clist);
-	up_write(&shm_ids(ns).rwsem);
+		/*
+		 * 4) get a reference to shp.
+		 *    This cannot fail: shm_clist_rm() is called before
+		 *    ipc_rmid(), thus the refcount cannot be 0.
+		 */
+		WARN_ON(!ipc_rcu_getref(&shp->shm_perm));
+
+		/*
+		 * 5) unlink the shm segment from the list of segments
+		 *    created by current.
+		 *    This must be done last. After unlinking,
+		 *    only the refcounts obtained above prevent IPC_RMID
+		 *    from destroying the segment or the namespace.
+		 */
+		list_del_init(&shp->shm_clist);
+
+		task_unlock(task);
+
+		/*
+		 * 6) we have all references
+		 *    Thus lock & if needed destroy shp.
+		 */
+		down_write(&shm_ids(ns).rwsem);
+		shm_lock_by_ptr(shp);
+		/*
+		 * rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's
+		 * safe to call ipc_rcu_putref here
+		 */
+		ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
+
+		if (ipc_valid_object(&shp->shm_perm)) {
+			if (shm_may_destroy(shp))
+				shm_destroy(ns, shp);
+			else
+				shm_unlock(shp);
+		} else {
+			/*
+			 * Someone else deleted the shp from namespace
+			 * idr/kht while we have waited.
+			 * Just unlock and continue.
+			 */
+			shm_unlock(shp);
+		}
+
+		up_write(&shm_ids(ns).rwsem);
+		put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
+	}
 }
 
 static vm_fault_t shm_fault(struct vm_fault *vmf)
@@ -676,7 +768,11 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 	if (error < 0)
 		goto no_id;
 
+	shp->ns = ns;
+
+	task_lock(current);
 	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
+	task_unlock(current);
 
 	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
@@ -1567,7 +1663,8 @@ out_nattch:
 	down_write(&shm_ids(ns).rwsem);
 	shp = shm_lock(ns, shmid);
 	shp->shm_nattch--;
-	if (shm_may_destroy(ns, shp))
+
+	if (shm_may_destroy(shp))
 		shm_destroy(ns, shp);
 	else
 		shm_unlock(shp);
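For context on what shm_nattch and SHM_DEST track: from user space, IPC_RMID on an attached segment only marks it for destruction; the segment survives until the last detach, which is exactly the state exit_shm() must now resolve across namespaces. A small userspace demo (error handling trimmed for brevity):

    #include <stdio.h>
    #include <string.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main(void)
    {
        int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
        char *p = shmat(id, NULL, 0);

        strcpy(p, "segment still usable");
        shmctl(id, IPC_RMID, NULL); /* marks SHM_DEST; nattch is still 1 */
        printf("%s\n", p);          /* the existing attach keeps working */
        shmdt(p);                   /* last detach: segment is destroyed */
        return 0;
    }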
ipc/util.c
@@ -447,8 +447,8 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
 static void ipc_kht_remove(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
 {
 	if (ipcp->key != IPC_PRIVATE)
-		rhashtable_remove_fast(&ids->key_ht, &ipcp->khtnode,
-				       ipc_kht_params);
+		WARN_ON_ONCE(rhashtable_remove_fast(&ids->key_ht, &ipcp->khtnode,
+				       ipc_kht_params));
 }
 
 /**
@@ -498,7 +498,7 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
 {
 	int idx = ipcid_to_idx(ipcp->id);
 
-	idr_remove(&ids->ipcs_idr, idx);
+	WARN_ON_ONCE(idr_remove(&ids->ipcs_idr, idx) != ipcp);
 	ipc_kht_remove(ids, ipcp);
 	ids->in_use--;
 	ipcp->deleted = true;
lib/test_kasan.c
@@ -869,6 +869,7 @@ static void kasan_memchr(struct kunit *test)
 	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	OPTIMIZER_HIDE_VAR(size);
 	KUNIT_EXPECT_KASAN_FAIL(test,
 		kasan_ptr_result = memchr(ptr, '1', size + 1));
 
@@ -894,6 +895,7 @@ static void kasan_memcmp(struct kunit *test)
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 	memset(arr, 0, sizeof(arr));
 
+	OPTIMIZER_HIDE_VAR(size);
 	KUNIT_EXPECT_KASAN_FAIL(test,
 		kasan_int_result = memcmp(ptr, arr, size+1));
 	kfree(ptr);
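These tests read one byte past the buffer on purpose; a compiler that can see the overflow at compile time will warn, and may fold or fortify the call so KASAN never observes a bad access at run time. OPTIMIZER_HIDE_VAR() launders the length through an empty asm, forcing the compiler to treat it as unknown. A userspace sketch of the same trick (the macro body mirrors the common definition in include/linux/compiler.h, quoted from memory, so treat it as an assumption):

    #include <string.h>

    #define OPTIMIZER_HIDE_VAR(var) __asm__("" : "=r"(var) : "0"(var))

    char buf[16];

    const void *probe(size_t size)
    {
        OPTIMIZER_HIDE_VAR(size);   /* size is now opaque to the optimizer */
        /* the out-of-bounds read below can no longer be proven or elided */
        return memchr(buf, '1', size + 1);
    }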
mm/Kconfig
@@ -890,6 +890,9 @@ config MAPPING_DIRTY_HELPERS
 config KMAP_LOCAL
 	bool
 
+config KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
+	bool
+
 # struct io_mapping based helper. Selected by drivers that need them
 config IO_MAPPING
 	bool
mm/damon/dbgfs.c
@@ -32,7 +32,7 @@ static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos)
 	if (*ppos)
 		return ERR_PTR(-EINVAL);
 
-	kbuf = kmalloc(count + 1, GFP_KERNEL);
+	kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN);
 	if (!kbuf)
 		return ERR_PTR(-ENOMEM);
 
@@ -133,7 +133,7 @@ static ssize_t dbgfs_schemes_read(struct file *file, char __user *buf,
 	char *kbuf;
 	ssize_t len;
 
-	kbuf = kmalloc(count, GFP_KERNEL);
+	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
 	if (!kbuf)
 		return -ENOMEM;
 
@@ -452,7 +452,7 @@ static ssize_t dbgfs_init_regions_read(struct file *file, char __user *buf,
 	char *kbuf;
 	ssize_t len;
 
-	kbuf = kmalloc(count, GFP_KERNEL);
+	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
 	if (!kbuf)
 		return -ENOMEM;
 
@@ -578,7 +578,7 @@ static ssize_t dbgfs_kdamond_pid_read(struct file *file,
 	char *kbuf;
 	ssize_t len;
 
-	kbuf = kmalloc(count, GFP_KERNEL);
+	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
 	if (!kbuf)
 		return -ENOMEM;
 
@@ -877,12 +877,14 @@ static ssize_t dbgfs_monitor_on_write(struct file *file,
 		return -EINVAL;
 	}
 
+	mutex_lock(&damon_dbgfs_lock);
 	if (!strncmp(kbuf, "on", count)) {
 		int i;
 
 		for (i = 0; i < dbgfs_nr_ctxs; i++) {
 			if (damon_targets_empty(dbgfs_ctxs[i])) {
 				kfree(kbuf);
+				mutex_unlock(&damon_dbgfs_lock);
 				return -EINVAL;
 			}
 		}
@@ -892,6 +894,7 @@ static ssize_t dbgfs_monitor_on_write(struct file *file,
 	} else {
 		ret = -EINVAL;
 	}
+	mutex_unlock(&damon_dbgfs_lock);
 
 	if (!ret)
 		ret = count;
@@ -944,15 +947,16 @@ static int __init __damon_dbgfs_init(void)
 
 static int __init damon_dbgfs_init(void)
 {
-	int rc;
+	int rc = -ENOMEM;
 
+	mutex_lock(&damon_dbgfs_lock);
 	dbgfs_ctxs = kmalloc(sizeof(*dbgfs_ctxs), GFP_KERNEL);
 	if (!dbgfs_ctxs)
-		return -ENOMEM;
+		goto out;
 	dbgfs_ctxs[0] = dbgfs_new_ctx();
 	if (!dbgfs_ctxs[0]) {
 		kfree(dbgfs_ctxs);
-		return -ENOMEM;
+		goto out;
 	}
 	dbgfs_nr_ctxs = 1;
 
@@ -963,6 +967,8 @@ static int __init damon_dbgfs_init(void)
 		pr_err("%s: dbgfs init failed\n", __func__);
 	}
 
+out:
+	mutex_unlock(&damon_dbgfs_lock);
 	return rc;
 }
 
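On the __GFP_NOWARN hunks: count in these debugfs reads and writes comes straight from user space, so an absurdly large size is an expected failure rather than a kernel bug, and the allocation-failure splat is just noise. The flag suppresses the warning while kmalloc() still returns NULL. A minimal sketch of the pattern (helper name hypothetical):

    #include <linux/slab.h>

    /* Hypothetical helper: user_len is user-controlled and may be huge. */
    static void *alloc_user_sized(size_t user_len)
    {
        /* fail quietly: the caller turns NULL into -ENOMEM for user space */
        return kmalloc(user_len, GFP_KERNEL | __GFP_NOWARN);
    }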
mm/highmem.c
@@ -503,16 +503,22 @@ static inline int kmap_local_calc_idx(int idx)
 
 static pte_t *__kmap_pte;
 
-static pte_t *kmap_get_pte(void)
+static pte_t *kmap_get_pte(unsigned long vaddr, int idx)
 {
+	if (IS_ENABLED(CONFIG_KMAP_LOCAL_NON_LINEAR_PTE_ARRAY))
+		/*
+		 * Set by the arch if __kmap_pte[-idx] does not produce
+		 * the correct entry.
+		 */
+		return virt_to_kpte(vaddr);
 	if (!__kmap_pte)
 		__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
-	return __kmap_pte;
+	return &__kmap_pte[-idx];
 }
 
 void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
-	pte_t pteval, *kmap_pte = kmap_get_pte();
+	pte_t pteval, *kmap_pte;
 	unsigned long vaddr;
 	int idx;
 
@@ -524,9 +530,10 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
 	preempt_disable();
 	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	BUG_ON(!pte_none(*(kmap_pte - idx)));
+	kmap_pte = kmap_get_pte(vaddr, idx);
+	BUG_ON(!pte_none(*kmap_pte));
 	pteval = pfn_pte(pfn, prot);
-	arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte - idx, pteval);
+	arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte, pteval);
 	arch_kmap_local_post_map(vaddr, pteval);
 	current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
 	preempt_enable();
@@ -559,7 +566,7 @@ EXPORT_SYMBOL(__kmap_local_page_prot);
 void kunmap_local_indexed(void *vaddr)
 {
 	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
-	pte_t *kmap_pte = kmap_get_pte();
+	pte_t *kmap_pte;
 	int idx;
 
 	if (addr < __fix_to_virt(FIX_KMAP_END) ||
@@ -584,8 +591,9 @@ void kunmap_local_indexed(void *vaddr)
 	idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
 	WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 
+	kmap_pte = kmap_get_pte(addr, idx);
 	arch_kmap_local_pre_unmap(addr);
-	pte_clear(&init_mm, addr, kmap_pte - idx);
+	pte_clear(&init_mm, addr, kmap_pte);
 	arch_kmap_local_post_unmap(addr);
 	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
 	kmap_local_idx_pop();
@@ -607,7 +615,7 @@ EXPORT_SYMBOL(kunmap_local_indexed);
 void __kmap_local_sched_out(void)
 {
 	struct task_struct *tsk = current;
-	pte_t *kmap_pte = kmap_get_pte();
+	pte_t *kmap_pte;
 	int i;
 
 	/* Clear kmaps */
@@ -634,8 +642,9 @@ void __kmap_local_sched_out(void)
 		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
 
 		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+		kmap_pte = kmap_get_pte(addr, idx);
 		arch_kmap_local_pre_unmap(addr);
-		pte_clear(&init_mm, addr, kmap_pte - idx);
+		pte_clear(&init_mm, addr, kmap_pte);
 		arch_kmap_local_post_unmap(addr);
 	}
 }
@@ -643,7 +652,7 @@ void __kmap_local_sched_out(void)
 void __kmap_local_sched_in(void)
 {
 	struct task_struct *tsk = current;
-	pte_t *kmap_pte = kmap_get_pte();
+	pte_t *kmap_pte;
 	int i;
 
 	/* Restore kmaps */
@@ -663,7 +672,8 @@ void __kmap_local_sched_in(void)
 		/* See comment in __kmap_local_sched_out() */
 		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
 		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-		set_pte_at(&init_mm, addr, kmap_pte - idx, pteval);
+		kmap_pte = kmap_get_pte(addr, idx);
+		set_pte_at(&init_mm, addr, kmap_pte, pteval);
 		arch_kmap_local_post_map(addr, pteval);
 	}
 }
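The old code cached one PTE pointer and indexed backwards from it, which is only correct when all kmap-local fixmap PTEs sit in one physically contiguous array; page-table layouts where they do not (the reason ARM selects the new Kconfig option above) silently yielded wrong entries. A side-by-side sketch of the two lookup strategies the rework switches between (function names hypothetical):

    #include <linux/pgtable.h>

    /* Only valid when the kmap PTEs form one linear array in memory. */
    static pte_t *lookup_linear(pte_t *base, int idx)
    {
        return &base[-idx];
    }

    /* Always valid: walk the page tables for this fixmap address. */
    static pte_t *lookup_by_walk(unsigned long vaddr)
    {
        return virt_to_kpte(vaddr);
    }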
mm/hugetlb.c
@@ -1037,8 +1037,10 @@ void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
 	 */
 	struct resv_map *reservations = vma_resv_map(vma);
 
-	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
 		kref_put(&reservations->refs, resv_map_release);
+	}
 
 	reset_vma_resv_huge_pages(vma);
 }
@@ -5734,13 +5736,14 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 	int ret = -ENOMEM;
 	struct page *page;
 	int writable;
-	bool new_pagecache_page = false;
+	bool page_in_pagecache = false;
 
 	if (is_continue) {
 		ret = -EFAULT;
 		page = find_lock_page(mapping, idx);
 		if (!page)
 			goto out;
+		page_in_pagecache = true;
 	} else if (!*pagep) {
 		/* If a page already exists, then it's UFFDIO_COPY for
 		 * a non-missing case. Return -EEXIST.
@@ -5828,7 +5831,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 		ret = huge_add_to_page_cache(page, mapping, idx);
 		if (ret)
 			goto out_release_nounlock;
-		new_pagecache_page = true;
+		page_in_pagecache = true;
 	}
 
 	ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
@@ -5892,7 +5895,7 @@ out_release_unlock:
 	if (vm_shared || is_continue)
 		unlock_page(page);
 out_release_nounlock:
-	if (!new_pagecache_page)
+	if (!page_in_pagecache)
 		restore_reserve_on_error(h, dst_vma, dst_addr, page);
 	put_page(page);
 	goto out;
mm/slab.c
@@ -3733,14 +3733,13 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	if (!cachep)
 		return;
 
+	trace_kmem_cache_free(_RET_IP_, objp, cachep->name);
 	local_irq_save(flags);
 	debug_check_no_locks_freed(objp, cachep->object_size);
 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(objp, cachep->object_size);
 	__cache_free(cachep, objp, _RET_IP_);
 	local_irq_restore(flags);
-
-	trace_kmem_cache_free(_RET_IP_, objp, cachep->name);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
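The same reordering is applied to SLOB and SLUB below: emit the trace while the object is certainly still alive, then free it, because a trace consumer that inspects object or cache state after the free races with reuse of that memory. A userspace analogy of the rule (illustrative only, not the kernel path):

    #include <stdio.h>
    #include <stdlib.h>

    static void traced_free(void *p, size_t size)
    {
        /* 1: record the event while p is still a valid allocation */
        printf("free %p (%zu bytes)\n", p, size);
        /* 2: only then release it; any logging after free(p) would
         *    have to treat p as a dangling pointer */
        free(p);
    }

    int main(void)
    {
        traced_free(malloc(32), 32);
        return 0;
    }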
mm/slab.h
@@ -147,7 +147,7 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
 			  SLAB_TEMPORARY | SLAB_ACCOUNT)
 #else
-#define SLAB_CACHE_FLAGS (0)
+#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
 #endif
 
 /* Common flags available with current configuration */
mm/slob.c
@@ -666,6 +666,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 void kmem_cache_free(struct kmem_cache *c, void *b)
 {
 	kmemleak_free_recursive(b, c->flags);
+	trace_kmem_cache_free(_RET_IP_, b, c->name);
 	if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
 		struct slob_rcu *slob_rcu;
 		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
@@ -674,8 +675,6 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 	} else {
 		__kmem_cache_free(b, c->size);
 	}
-
-	trace_kmem_cache_free(_RET_IP_, b, c->name);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
mm/slub.c
@@ -3526,8 +3526,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	s = cache_from_obj(s, x);
 	if (!s)
 		return;
-	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
 	trace_kmem_cache_free(_RET_IP_, x, s->name);
+	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 