Merge branch 'akpm' (patches from Andrew)

Merge more updates from Andrew Morton:
 "Various misc subsystems, before getting into the post-linux-next
  material.

  41 patches.

  Subsystems affected by this patch series: procfs, misc, core-kernel,
  lib, checkpatch, init, pipe, minix, fat, cgroups, kexec, kdump,
  taskstats, panic, kcov, resource, and ubsan"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (41 commits)
  Revert "ubsan, kcsan: Don't combine sanitizer with kcov on clang"
  kernel/resource: fix kfree() of bootmem memory again
  kcov: properly handle subsequent mmap calls
  kcov: split ioctl handling into locked and unlocked parts
  panic: move panic_print before kmsg dumpers
  panic: add option to dump all CPUs backtraces in panic_print
  docs: sysctl/kernel: add missing bit to panic_print
  taskstats: remove unneeded dead assignment
  kasan: no need to unset panic_on_warn in end_report()
  ubsan: no need to unset panic_on_warn in ubsan_epilogue()
  panic: unset panic_on_warn inside panic()
  docs: kdump: add scp example to write out the dump file
  docs: kdump: update description about sysfs file system support
  arm64: mm: use IS_ENABLED(CONFIG_KEXEC_CORE) instead of #ifdef
  x86/setup: use IS_ENABLED(CONFIG_KEXEC_CORE) instead of #ifdef
  riscv: mm: init: use IS_ENABLED(CONFIG_KEXEC_CORE) instead of #ifdef
  kexec: make crashk_res, crashk_low_res and crash_notes symbols always visible
  cgroup: use irqsave in cgroup_rstat_flush_locked().
  fat: use pointer to simple type in put_user()
  minix: fix bug when opening a file with O_DIRECT
  ...
Linus Torvalds, 2022-03-24 14:14:07 -07:00
34 changed files with 311 additions and 303 deletions


@@ -153,8 +153,17 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
 		raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
 						       cpu);
 		struct cgroup *pos = NULL;
+		unsigned long flags;
 
-		raw_spin_lock(cpu_lock);
+		/*
+		 * The _irqsave() is needed because cgroup_rstat_lock is
+		 * spinlock_t which is a sleeping lock on PREEMPT_RT. Acquiring
+		 * this lock with the _irq() suffix only disables interrupts on
+		 * a non-PREEMPT_RT kernel. The raw_spinlock_t below disables
+		 * interrupts on both configurations. The _irqsave() ensures
+		 * that interrupts are always disabled and later restored.
+		 */
+		raw_spin_lock_irqsave(cpu_lock, flags);
 		while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
 			struct cgroup_subsys_state *css;
 
@@ -166,7 +175,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
 				css->ss->css_rstat_flush(css, cpu);
 			rcu_read_unlock();
 		}
-		raw_spin_unlock(cpu_lock);
+		raw_spin_unlock_irqrestore(cpu_lock, flags);
 
 		/* if @may_sleep, play nice and yield if necessary */
 		if (may_sleep && (need_resched() ||
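
The comment added above is the whole story: on PREEMPT_RT, cgroup_rstat_lock (a spinlock_t) is a sleeping lock that does not disable interrupts, so the nested raw_spinlock_t has to save and restore the interrupt state itself rather than assume anything about the caller. A minimal kernel-style sketch of that nesting pattern, illustrative only and not part of this diff (the lock names here are made up):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer_lock);	/* spinlock_t: sleeps on PREEMPT_RT */
static DEFINE_RAW_SPINLOCK(inner_lock);	/* raw_spinlock_t: always spins */

static void flush_example(void)
{
	unsigned long flags;

	/* On PREEMPT_RT this does NOT disable hard interrupts. */
	spin_lock_irq(&outer_lock);

	/*
	 * The inner raw lock therefore cannot rely on interrupts already
	 * being off; _irqsave() disables them and restores the prior state.
	 */
	raw_spin_lock_irqsave(&inner_lock, flags);
	/* ... touch the per-CPU state ... */
	raw_spin_unlock_irqrestore(&inner_lock, flags);

	spin_unlock_irq(&outer_lock);
}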


@@ -459,37 +459,28 @@ void kcov_task_exit(struct task_struct *t)
 
 static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
 {
 	int res = 0;
-	void *area;
 	struct kcov *kcov = vma->vm_file->private_data;
 	unsigned long size, off;
 	struct page *page;
 	unsigned long flags;
 
-	area = vmalloc_user(vma->vm_end - vma->vm_start);
-	if (!area)
-		return -ENOMEM;
-
 	spin_lock_irqsave(&kcov->lock, flags);
 	size = kcov->size * sizeof(unsigned long);
-	if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
+	if (kcov->area == NULL || vma->vm_pgoff != 0 ||
 	    vma->vm_end - vma->vm_start != size) {
 		res = -EINVAL;
 		goto exit;
 	}
-	if (!kcov->area) {
-		kcov->area = area;
-		vma->vm_flags |= VM_DONTEXPAND;
-		spin_unlock_irqrestore(&kcov->lock, flags);
-		for (off = 0; off < size; off += PAGE_SIZE) {
-			page = vmalloc_to_page(kcov->area + off);
-			if (vm_insert_page(vma, vma->vm_start + off, page))
-				WARN_ONCE(1, "vm_insert_page() failed");
-		}
-		return 0;
-	}
+	spin_unlock_irqrestore(&kcov->lock, flags);
+	vma->vm_flags |= VM_DONTEXPAND;
+	for (off = 0; off < size; off += PAGE_SIZE) {
+		page = vmalloc_to_page(kcov->area + off);
+		if (vm_insert_page(vma, vma->vm_start + off, page))
+			WARN_ONCE(1, "vm_insert_page() failed");
+	}
+	return 0;
 exit:
 	spin_unlock_irqrestore(&kcov->lock, flags);
-	vfree(area);
 	return res;
 }
@@ -564,31 +555,12 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
 			     unsigned long arg)
 {
 	struct task_struct *t;
-	unsigned long size, unused;
+	unsigned long flags, unused;
 	int mode, i;
 	struct kcov_remote_arg *remote_arg;
 	struct kcov_remote *remote;
-	unsigned long flags;
 
 	switch (cmd) {
-	case KCOV_INIT_TRACE:
-		/*
-		 * Enable kcov in trace mode and setup buffer size.
-		 * Must happen before anything else.
-		 */
-		if (kcov->mode != KCOV_MODE_DISABLED)
-			return -EBUSY;
-		/*
-		 * Size must be at least 2 to hold current position and one PC.
-		 * Later we allocate size * sizeof(unsigned long) memory,
-		 * that must not overflow.
-		 */
-		size = arg;
-		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
-			return -EINVAL;
-		kcov->size = size;
-		kcov->mode = KCOV_MODE_INIT;
-		return 0;
 	case KCOV_ENABLE:
 		/*
 		 * Enable coverage for the current task.
@@ -692,9 +664,37 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 	struct kcov_remote_arg *remote_arg = NULL;
 	unsigned int remote_num_handles;
 	unsigned long remote_arg_size;
-	unsigned long flags;
+	unsigned long size, flags;
+	void *area;
 
-	if (cmd == KCOV_REMOTE_ENABLE) {
+	kcov = filep->private_data;
+	switch (cmd) {
+	case KCOV_INIT_TRACE:
+		/*
+		 * Enable kcov in trace mode and setup buffer size.
+		 * Must happen before anything else.
+		 *
+		 * First check the size argument - it must be at least 2
+		 * to hold the current position and one PC.
+		 */
+		size = arg;
+		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
+			return -EINVAL;
+		area = vmalloc_user(size * sizeof(unsigned long));
+		if (area == NULL)
+			return -ENOMEM;
+		spin_lock_irqsave(&kcov->lock, flags);
+		if (kcov->mode != KCOV_MODE_DISABLED) {
+			spin_unlock_irqrestore(&kcov->lock, flags);
+			vfree(area);
+			return -EBUSY;
+		}
+		kcov->area = area;
+		kcov->size = size;
+		kcov->mode = KCOV_MODE_INIT;
+		spin_unlock_irqrestore(&kcov->lock, flags);
+		return 0;
+	case KCOV_REMOTE_ENABLE:
 		if (get_user(remote_num_handles, (unsigned __user *)(arg +
 				offsetof(struct kcov_remote_arg, num_handles))))
 			return -EFAULT;
@@ -710,16 +710,18 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 			return -EINVAL;
 		}
 		arg = (unsigned long)remote_arg;
-	}
-
-	kcov = filep->private_data;
-	spin_lock_irqsave(&kcov->lock, flags);
-	res = kcov_ioctl_locked(kcov, cmd, arg);
-	spin_unlock_irqrestore(&kcov->lock, flags);
-
-	kfree(remote_arg);
-
-	return res;
+		fallthrough;
+	default:
+		/*
+		 * All other commands can be normally executed under a spin lock, so we
+		 * obtain and release it here in order to simplify kcov_ioctl_locked().
+		 */
+		spin_lock_irqsave(&kcov->lock, flags);
+		res = kcov_ioctl_locked(kcov, cmd, arg);
+		spin_unlock_irqrestore(&kcov->lock, flags);
+		kfree(remote_arg);
+		return res;
+	}
 }
 
 static const struct file_operations kcov_fops = {
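
With the buffer allocation moved from mmap() into the KCOV_INIT_TRACE ioctl, the mmap handler only has to insert already-allocated pages, and repeated mmap calls behave consistently. The userspace sequence is unchanged; a minimal sketch of the usual flow, adapted from the kcov documentation (error handling trimmed, buffer size chosen arbitrarily):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#define KCOV_INIT_TRACE	_IOR('c', 1, unsigned long)
#define KCOV_ENABLE	_IO('c', 100)
#define KCOV_DISABLE	_IO('c', 101)
#define KCOV_TRACE_PC	0
#define COVER_SIZE	(64 << 10)	/* entries, not bytes */

int main(void)
{
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);

	/* KCOV_INIT_TRACE now also allocates the coverage buffer. */
	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);

	/* mmap() only maps the pages the ioctl above allocated. */
	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);

	read(-1, NULL, 0);	/* the syscall(s) to trace go here */

	unsigned long n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	for (unsigned long i = 0; i < n; i++)
		printf("0x%lx\n", cover[i + 1]);

	ioctl(fd, KCOV_DISABLE, 0);
	munmap(cover, COVER_SIZE * sizeof(unsigned long));
	close(fd);
	return 0;
}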


@@ -24,8 +24,7 @@
 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
 
 #define KERNEL_ATTR_RW(_name) \
-	static struct kobj_attribute _name##_attr = \
-		__ATTR(_name, 0644, _name##_show, _name##_store)
+	static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
 
 /* current uevent sequence number */
 static ssize_t uevent_seqnum_show(struct kobject *kobj,
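
For reference, __ATTR_RW() already expands to the open-coded form being deleted, so this is a pure cleanup with no functional change. Roughly, from <linux/sysfs.h> (paraphrased; check the header for the exact definition):

#define __ATTR(_name, _mode, _show, _store) {				\
	.attr = { .name = __stringify(_name),				\
		  .mode = VERIFY_OCTAL_PERMISSIONS(_mode) },		\
	.show	= _show,						\
	.store	= _store,						\
}

#define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store)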


@@ -66,6 +66,7 @@ EXPORT_SYMBOL_GPL(panic_timeout);
 #define PANIC_PRINT_LOCK_INFO		0x00000008
 #define PANIC_PRINT_FTRACE_INFO		0x00000010
 #define PANIC_PRINT_ALL_PRINTK_MSG	0x00000020
+#define PANIC_PRINT_ALL_CPU_BT		0x00000040
 unsigned long panic_print;
 
 ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
@@ -147,10 +148,16 @@ void nmi_panic(struct pt_regs *regs, const char *msg)
 }
 EXPORT_SYMBOL(nmi_panic);
 
-static void panic_print_sys_info(void)
+static void panic_print_sys_info(bool console_flush)
 {
-	if (panic_print & PANIC_PRINT_ALL_PRINTK_MSG)
-		console_flush_on_panic(CONSOLE_REPLAY_ALL);
+	if (console_flush) {
+		if (panic_print & PANIC_PRINT_ALL_PRINTK_MSG)
+			console_flush_on_panic(CONSOLE_REPLAY_ALL);
+		return;
+	}
+
+	if (panic_print & PANIC_PRINT_ALL_CPU_BT)
+		trigger_all_cpu_backtrace();
 
 	if (panic_print & PANIC_PRINT_TASK_INFO)
 		show_state();
@@ -185,6 +192,16 @@ void panic(const char *fmt, ...)
 	int old_cpu, this_cpu;
 	bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;
 
+	if (panic_on_warn) {
+		/*
+		 * This thread may hit another WARN() in the panic path.
+		 * Resetting this prevents additional WARN() from panicking the
+		 * system on this thread. Other threads are blocked by the
+		 * panic_mutex in panic().
+		 */
+		panic_on_warn = 0;
+	}
+
 	/*
 	 * Disable local interrupts. This will prevent panic_smp_self_stop
 	 * from deadlocking the first cpu that invokes the panic, since
@@ -272,6 +289,8 @@ void panic(const char *fmt, ...)
 	 */
 	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
 
+	panic_print_sys_info(false);
+
 	kmsg_dump(KMSG_DUMP_PANIC);
 
 	/*
@@ -302,7 +321,7 @@ void panic(const char *fmt, ...)
 	debug_locks_off();
 	console_flush_on_panic(CONSOLE_FLUSH_PENDING);
 
-	panic_print_sys_info();
+	panic_print_sys_info(true);
 
 	if (!panic_blink)
 		panic_blink = no_blink;
@@ -576,16 +595,8 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 	if (regs)
 		show_regs(regs);
 
-	if (panic_on_warn) {
-		/*
-		 * This thread may hit another WARN() in the panic path.
-		 * Resetting this prevents additional WARN() from panicking the
-		 * system on this thread. Other threads are blocked by the
-		 * panic_mutex in panic().
-		 */
-		panic_on_warn = 0;
+	if (panic_on_warn)
 		panic("panic_on_warn set ...\n");
-	}
 
 	if (!regs)
 		dump_stack();
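
The new PANIC_PRINT_ALL_CPU_BT bit (0x40) is selected like the existing ones, through the panic_print bitmask (the kernel.panic_print sysctl or the panic_print= boot parameter). A small illustrative helper, not part of this series, that enables it together with task info (0x01) by writing the decimal value 65 to the procfs knob:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* 0x40 (all-CPU backtraces, added above) | 0x01 (task info) = 65 */
	const char *val = "65";
	int fd = open("/proc/sys/kernel/panic_print", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/sys/kernel/panic_print");
		return 1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}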


@@ -56,14 +56,6 @@ struct resource_constraint {
 
 static DEFINE_RWLOCK(resource_lock);
 
-/*
- * For memory hotplug, there is no way to free resource entries allocated
- * by boot mem after the system is up. So for reusing the resource entry
- * we need to remember the resource.
- */
-static struct resource *bootmem_resource_free;
-static DEFINE_SPINLOCK(bootmem_resource_lock);
-
 static struct resource *next_resource(struct resource *p)
 {
 	if (p->child)
@@ -160,36 +152,19 @@ __initcall(ioresources_init);
 
 static void free_resource(struct resource *res)
 {
-	if (!res)
-		return;
-
-	if (!PageSlab(virt_to_head_page(res))) {
-		spin_lock(&bootmem_resource_lock);
-		res->sibling = bootmem_resource_free;
-		bootmem_resource_free = res;
-		spin_unlock(&bootmem_resource_lock);
-	} else {
+	/**
+	 * If the resource was allocated using memblock early during boot
+	 * we'll leak it here: we can only return full pages back to the
+	 * buddy and trying to be smart and reusing them eventually in
+	 * alloc_resource() overcomplicates resource handling.
+	 */
+	if (res && PageSlab(virt_to_head_page(res)))
 		kfree(res);
-	}
 }
 
 static struct resource *alloc_resource(gfp_t flags)
 {
-	struct resource *res = NULL;
-
-	spin_lock(&bootmem_resource_lock);
-	if (bootmem_resource_free) {
-		res = bootmem_resource_free;
-		bootmem_resource_free = res->sibling;
-	}
-	spin_unlock(&bootmem_resource_lock);
-
-	if (res)
-		memset(res, 0, sizeof(struct resource));
-	else
-		res = kzalloc(sizeof(struct resource), flags);
-
-	return res;
+	return kzalloc(sizeof(struct resource), flags);
 }
 
 /* Return the conflict entry if you can't request it */


@@ -113,13 +113,14 @@ static void send_cpu_listeners(struct sk_buff *skb,
 	struct listener *s, *tmp;
 	struct sk_buff *skb_next, *skb_cur = skb;
 	void *reply = genlmsg_data(genlhdr);
-	int rc, delcount = 0;
+	int delcount = 0;
 
 	genlmsg_end(skb, reply);
 
-	rc = 0;
 	down_read(&listeners->sem);
 	list_for_each_entry(s, &listeners->list, list) {
+		int rc;
+
 		skb_next = NULL;
 		if (!list_is_last(&s->list, &listeners->list)) {
 			skb_next = skb_clone(skb_cur, GFP_KERNEL);