/*
 * kmod - the kernel module loader
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/binfmts.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/ptrace.h>
#include <linux/async.h>
#include <linux/uaccess.h>

#include <trace/events/module.h>

/*
 * Assuming:
 *
 * threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
 *		       (u64) THREAD_SIZE * 8UL);
 *
 * Needing fewer than 50 threads would mean we're dealing with systems
 * smaller than 3200 pages. This assumes you are capable of having ~13M of
 * memory, and this would only be an upper limit, after which the OOM
 * killer would take effect. Systems like these are very unlikely if
 * modules are enabled.
 */
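/*
 * Worked example (illustrative only, not from the original source; assumes
 * a 4 KiB PAGE_SIZE and a 32 KiB THREAD_SIZE):
 *
 *	3200 pages * 4096 bytes = ~13 MB of RAM
 *	threads = 13107200 / (32768 * 8UL) = 50
 *
 * whereas even a modest 1 GiB machine (262144 pages) would get
 * threads = 1073741824 / 262144 = 4096, far above MAX_KMOD_CONCURRENT.
 */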
#define MAX_KMOD_CONCURRENT 50
static atomic_t kmod_concurrent_max = ATOMIC_INIT(MAX_KMOD_CONCURRENT);
static DECLARE_WAIT_QUEUE_HEAD(kmod_wq);

/*
 * This is a restriction on having *all* MAX_KMOD_CONCURRENT threads
 * running at the same time without returning. When this happens we
 * believe you've somehow ended up with a recursive module dependency
 * creating a loop.
 *
 * We have no option but to fail.
 *
 * Userspace should proactively try to detect and prevent these.
 */
#define MAX_KMOD_ALL_BUSY_TIMEOUT 5

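/*
 * Hypothetical illustration (not from the original source): if loading
 * module A pulls in module B, whose init in turn calls request_module("A"),
 * each nested request consumes a kmod_concurrent_max slot and never
 * returns.  Once all 50 slots are held this way, later requests wait at
 * most MAX_KMOD_ALL_BUSY_TIMEOUT seconds and then fail with -ETIME in
 * __request_module() below, rather than hanging forever.
 */
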
/*
 * modprobe_path is set via /proc/sys.
 */
char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
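/*
 * Note (not from the original source): this path is exposed to userspace as
 * /proc/sys/kernel/modprobe (sysctl kernel.modprobe).  An administrator can
 * point it at a different helper binary, or clear it (for example with
 * "echo > /proc/sys/kernel/modprobe") to disable module auto-loading, which
 * __request_module() below honours via its !modprobe_path[0] check.
 */
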
static void free_modprobe_argv(struct subprocess_info *info)
{
	kfree(info->argv[3]); /* check call_modprobe() */
	kfree(info->argv);
}

static int call_modprobe(char *module_name, int wait)
{
	struct subprocess_info *info;
	static char *envp[] = {
		"HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};

	char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
	if (!argv)
		goto out;

	module_name = kstrdup(module_name, GFP_KERNEL);
	if (!module_name)
		goto free_argv;

	argv[0] = modprobe_path;
	argv[1] = "-q";
	argv[2] = "--";
	argv[3] = module_name;	/* check free_modprobe_argv() */
	argv[4] = NULL;

	info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
					 NULL, free_modprobe_argv, NULL);
	if (!info)
		goto free_module_name;

	return call_usermodehelper_exec(info, wait | UMH_KILLABLE);

free_module_name:
	kfree(module_name);
free_argv:
	kfree(argv);
out:
	return -ENOMEM;
}
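
/*
 * In effect, call_modprobe() above asks the usermode-helper machinery to run
 * something like:
 *
 *	/sbin/modprobe -q -- <module_name>
 *
 * with the minimal HOME/TERM/PATH environment defined in envp, and (when
 * UMH_WAIT_PROC is requested) reports modprobe's exit status back to the
 * caller.
 */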
/**
 * __request_module - try to load a kernel module
 * @wait: wait (or not) for the operation to complete
 * @fmt: printf style format string for the name of the module
 * @...: arguments as specified in the format string
 *
 * Load a module using the user mode module loader. The function returns
 * zero on success or a negative errno code or positive exit code from
 * "modprobe" on failure. Note that a successful module load does not mean
 * the module did not then unload and exit on an error of its own. Callers
 * must check that the service they requested is now available, not blindly
 * invoke it.
 *
 * If module auto-loading support is disabled then this function
 * becomes a no-operation.
 */
int __request_module(bool wait, const char *fmt, ...)
{
	va_list args;
	char module_name[MODULE_NAME_LEN];
	int ret;

	/*
	 * We don't allow synchronous module loading from async. Module
	 * init may invoke async_synchronize_full() which will end up
	 * waiting for this task which already is waiting for the module
	 * loading to complete, leading to a deadlock.
	 */
	WARN_ON_ONCE(wait && current_is_async());

	if (!modprobe_path[0])
		return 0;

	va_start(args, fmt);
	ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
	va_end(args);
	if (ret >= MODULE_NAME_LEN)
		return -ENAMETOOLONG;

	ret = security_kernel_module_request(module_name);
	if (ret)
		return ret;

	if (atomic_dec_if_positive(&kmod_concurrent_max) < 0) {
		pr_warn_ratelimited("request_module: kmod_concurrent_max (%u) close to 0 (max_modprobes: %u), for module %s, throttling...",
				    atomic_read(&kmod_concurrent_max),
				    MAX_KMOD_CONCURRENT, module_name);
		ret = wait_event_killable_timeout(kmod_wq,
						  atomic_dec_if_positive(&kmod_concurrent_max) >= 0,
						  MAX_KMOD_ALL_BUSY_TIMEOUT * HZ);
		if (!ret) {
			pr_warn_ratelimited("request_module: modprobe %s cannot be processed, kmod busy with %d threads for more than %d seconds now",
					    module_name, MAX_KMOD_CONCURRENT, MAX_KMOD_ALL_BUSY_TIMEOUT);
			return -ETIME;
		} else if (ret == -ERESTARTSYS) {
			pr_warn_ratelimited("request_module: sigkill sent for modprobe %s, giving up", module_name);
			return ret;
		}
	}

	trace_module_request(module_name, wait, _RET_IP_);

	ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);

	atomic_inc(&kmod_concurrent_max);
	wake_up(&kmod_wq);

	return ret;
}
EXPORT_SYMBOL(__request_module);
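
/*
 * Illustrative caller (hypothetical, not part of this file): as the
 * __request_module() kerneldoc above notes, a caller should re-check that
 * the service it asked for actually appeared, rather than trusting the
 * return value alone.  Something like:
 *
 *	static struct foo_driver *foo_get_driver(const char *name)
 *	{
 *		struct foo_driver *drv = foo_find_driver(name);
 *
 *		if (!drv) {
 *			request_module("foo-%s", name);
 *			drv = foo_find_driver(name);
 *		}
 *		return drv;
 *	}
 *
 * where foo_find_driver() is a made-up lookup helper and request_module()
 * is the usual wrapper around __request_module(true, ...).
 */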