Merge branch 'linus' into irq/core
Reason: Pull in upstream fixes on which new patches depend.
@@ -163,7 +163,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 
 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 {
-        module_free(NULL, hdr);
+        module_memfree(hdr);
 }
 #endif /* CONFIG_BPF_JIT */
 
@@ -2023,7 +2023,7 @@ static int kdb_lsmod(int argc, const char **argv)
                 kdb_printf("%-20s%8u 0x%p ", mod->name,
                            mod->core_size, (void *)mod);
 #ifdef CONFIG_MODULE_UNLOAD
-                kdb_printf("%4ld ", module_refcount(mod));
+                kdb_printf("%4d ", module_refcount(mod));
 #endif
                 if (mod->state == MODULE_STATE_GOING)
                         kdb_printf(" (Unloading)");
@@ -127,7 +127,7 @@ static void *alloc_insn_page(void)
 
 static void free_insn_page(void *page)
 {
-        module_free(NULL, page);
+        module_memfree(page);
 }
 
 struct kprobe_insn_cache kprobe_insn_slots = {
@@ -772,9 +772,18 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
         return 0;
 }
 
-unsigned long module_refcount(struct module *mod)
+/**
+ * module_refcount - return the refcount or -1 if unloading
+ *
+ * @mod: the module we're checking
+ *
+ * Returns:
+ *        -1 if the module is in the process of unloading
+ *        otherwise the number of references in the kernel to the module
+ */
+int module_refcount(struct module *mod)
 {
-        return (unsigned long)atomic_read(&mod->refcnt) - MODULE_REF_BASE;
+        return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
 }
 EXPORT_SYMBOL(module_refcount);
 
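This signedness change is why the format strings in the surrounding hunks move from %lu/%4ld to %i/%4d: module_refcount() can now legitimately return -1. A minimal caller sketch of the new convention (the function below is hypothetical, not part of this diff):

    #include <linux/module.h>

    /* Hypothetical caller: with a signed return, "unloading" (-1) no
     * longer wraps to a huge unsigned value and can be tested directly. */
    static void report_module(struct module *mod)
    {
            int refs = module_refcount(mod);

            if (refs < 0)
                    pr_info("%s: unloading\n", mod->name);
            else
                    pr_info("%s: %d references\n", mod->name, refs);
    }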
@@ -856,7 +865,7 @@ static inline void print_unload_info(struct seq_file *m, struct module *mod)
         struct module_use *use;
         int printed_something = 0;
 
-        seq_printf(m, " %lu ", module_refcount(mod));
+        seq_printf(m, " %i ", module_refcount(mod));
 
         /*
          * Always include a trailing , so userspace can differentiate
@@ -908,7 +917,7 @@ EXPORT_SYMBOL_GPL(symbol_put_addr);
 static ssize_t show_refcnt(struct module_attribute *mattr,
                            struct module_kobject *mk, char *buffer)
 {
-        return sprintf(buffer, "%lu\n", module_refcount(mk->mod));
+        return sprintf(buffer, "%i\n", module_refcount(mk->mod));
 }
 
 static struct module_attribute modinfo_refcnt =
@@ -1795,7 +1804,7 @@ static void unset_module_core_ro_nx(struct module *mod) { }
 static void unset_module_init_ro_nx(struct module *mod) { }
 #endif
 
-void __weak module_free(struct module *mod, void *module_region)
+void __weak module_memfree(void *module_region)
 {
         vfree(module_region);
 }
@@ -1804,6 +1813,10 @@ void __weak module_arch_cleanup(struct module *mod)
 {
 }
 
+void __weak module_arch_freeing_init(struct module *mod)
+{
+}
+
 /* Free a module, remove from lists, etc. */
 static void free_module(struct module *mod)
 {
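Both module_memfree() and the new module_arch_freeing_init() hook are __weak defaults, so an architecture with its own module allocator can override them simply by defining non-weak versions. A sketch of such an override; the two arch helpers called here are purely hypothetical names:

    /* Hypothetical arch override -- replaces the __weak defaults above. */
    void module_memfree(void *module_region)
    {
            my_arch_unmap_exec(module_region);      /* hypothetical helper */
    }

    void module_arch_freeing_init(struct module *mod)
    {
            /* Tear down per-module init-section state (e.g. trampolines)
             * before the generic code frees the init region itself. */
            my_arch_free_init_plt(mod);             /* hypothetical helper */
    }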
@@ -1841,7 +1854,8 @@ static void free_module(struct module *mod)
 
         /* This may be NULL, but that's OK */
         unset_module_init_ro_nx(mod);
-        module_free(mod, mod->module_init);
+        module_arch_freeing_init(mod);
+        module_memfree(mod->module_init);
         kfree(mod->args);
         percpu_modfree(mod);
 
@@ -1850,7 +1864,7 @@ static void free_module(struct module *mod)
 
         /* Finally, free the core (containing the module structure) */
         unset_module_core_ro_nx(mod);
-        module_free(mod, mod->module_core);
+        module_memfree(mod->module_core);
 
 #ifdef CONFIG_MPU
         update_protections(current->mm);
@@ -2785,7 +2799,7 @@ static int move_module(struct module *mod, struct load_info *info)
          */
         kmemleak_ignore(ptr);
         if (!ptr) {
-                module_free(mod, mod->module_core);
+                module_memfree(mod->module_core);
                 return -ENOMEM;
         }
         memset(ptr, 0, mod->init_size);
@@ -2930,8 +2944,9 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
 static void module_deallocate(struct module *mod, struct load_info *info)
 {
         percpu_modfree(mod);
-        module_free(mod, mod->module_init);
-        module_free(mod, mod->module_core);
+        module_arch_freeing_init(mod);
+        module_memfree(mod->module_init);
+        module_memfree(mod->module_core);
 }
 
 int __weak module_finalize(const Elf_Ehdr *hdr,
@@ -2983,10 +2998,31 @@ static void do_mod_ctors(struct module *mod)
 #endif
 }
 
+/* For freeing module_init on success, in case kallsyms traversing */
+struct mod_initfree {
+        struct rcu_head rcu;
+        void *module_init;
+};
+
+static void do_free_init(struct rcu_head *head)
+{
+        struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
+        module_memfree(m->module_init);
+        kfree(m);
+}
+
 /* This is where the real work happens */
 static int do_init_module(struct module *mod)
 {
         int ret = 0;
+        struct mod_initfree *freeinit;
+
+        freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
+        if (!freeinit) {
+                ret = -ENOMEM;
+                goto fail;
+        }
+        freeinit->module_init = mod->module_init;
 
         /*
          * We want to find out whether @mod uses async during init. Clear
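The mod_initfree bookkeeping above exists because kallsyms can walk a module's init symbols with preemption disabled; freeing through call_rcu() guarantees all such walkers have finished first. The same deferred-free pattern in miniature, with a made-up payload type:

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    /* Illustrative only: stash the pointer next to an rcu_head, then let
     * call_rcu() run the free callback after every pre-existing RCU
     * read-side critical section has completed. */
    struct deferred_buf {
            struct rcu_head rcu;
            void *data;
    };

    static void deferred_buf_free(struct rcu_head *head)
    {
            struct deferred_buf *d = container_of(head, struct deferred_buf, rcu);

            kfree(d->data);
            kfree(d);
    }

    static void queue_free(struct deferred_buf *d)
    {
            call_rcu(&d->rcu, deferred_buf_free);
    }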
@@ -2999,18 +3035,7 @@ static int do_init_module(struct module *mod)
         if (mod->init != NULL)
                 ret = do_one_initcall(mod->init);
         if (ret < 0) {
-                /*
-                 * Init routine failed: abort. Try to protect us from
-                 * buggy refcounters.
-                 */
-                mod->state = MODULE_STATE_GOING;
-                synchronize_sched();
-                module_put(mod);
-                blocking_notifier_call_chain(&module_notify_list,
-                                             MODULE_STATE_GOING, mod);
-                free_module(mod);
-                wake_up_all(&module_wq);
-                return ret;
+                goto fail_free_freeinit;
         }
         if (ret > 0) {
                 pr_warn("%s: '%s'->init suspiciously returned %d, it should "
@@ -3055,15 +3080,35 @@ static int do_init_module(struct module *mod)
         mod->strtab = mod->core_strtab;
 #endif
         unset_module_init_ro_nx(mod);
-        module_free(mod, mod->module_init);
+        module_arch_freeing_init(mod);
         mod->module_init = NULL;
         mod->init_size = 0;
         mod->init_ro_size = 0;
         mod->init_text_size = 0;
+        /*
+         * We want to free module_init, but be aware that kallsyms may be
+         * walking this with preempt disabled. In all the failure paths,
+         * we call synchronize_rcu/synchronize_sched, but we don't want
+         * to slow down the success path, so use actual RCU here.
+         */
+        call_rcu(&freeinit->rcu, do_free_init);
         mutex_unlock(&module_mutex);
         wake_up_all(&module_wq);
 
         return 0;
+
+fail_free_freeinit:
+        kfree(freeinit);
+fail:
+        /* Try to protect us from buggy refcounters. */
+        mod->state = MODULE_STATE_GOING;
+        synchronize_sched();
+        module_put(mod);
+        blocking_notifier_call_chain(&module_notify_list,
+                                     MODULE_STATE_GOING, mod);
+        free_module(mod);
+        wake_up_all(&module_wq);
+        return ret;
 }
 
 static int may_init_module(void)
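The restructured failure handling replaces a duplicated inline error path with the usual kernel goto-unwind idiom: each later failure jumps to a label that frees only what was already set up. The idiom in miniature (hypothetical function, not from this diff):

    #include <linux/slab.h>

    static int setup_two(void **a, void **b)
    {
            *a = kmalloc(64, GFP_KERNEL);
            if (!*a)
                    goto fail;
            *b = kmalloc(64, GFP_KERNEL);
            if (!*b)
                    goto fail_free_a;
            return 0;

    fail_free_a:
            /* unwind in reverse order of acquisition */
            kfree(*a);
    fail:
            return -ENOMEM;
    }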
@@ -642,12 +642,15 @@ static __modinit int add_sysfs_param(struct module_kobject *mk,
         mk->mp->grp.attrs = new_attrs;
 
         /* Tack new one on the end. */
         memset(&mk->mp->attrs[mk->mp->num], 0, sizeof(mk->mp->attrs[0]));
         sysfs_attr_init(&mk->mp->attrs[mk->mp->num].mattr.attr);
         mk->mp->attrs[mk->mp->num].param = kp;
         mk->mp->attrs[mk->mp->num].mattr.show = param_attr_show;
-        mk->mp->attrs[mk->mp->num].mattr.store = param_attr_store;
+        /* Do not allow runtime DAC changes to make param writable. */
+        if ((kp->perm & (S_IWUSR | S_IWGRP | S_IWOTH)) != 0)
+                mk->mp->attrs[mk->mp->num].mattr.store = param_attr_store;
+        else
+                mk->mp->attrs[mk->mp->num].mattr.store = NULL;
         mk->mp->attrs[mk->mp->num].mattr.attr.name = (char *)name;
         mk->mp->attrs[mk->mp->num].mattr.attr.mode = kp->perm;
         mk->mp->num++;
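The effect for module authors: a parameter declared without any write bit in its permissions now gets no store hook at all, so relaxing the mode of its sysfs node at runtime (chmod) can never make it writable. A short example, assuming a module with a hypothetical debug_level parameter:

    #include <linux/moduleparam.h>

    /* Mode 0444 has no write bits, so add_sysfs_param() leaves
     * .store == NULL; a later chmod of
     * /sys/module/<name>/parameters/debug_level cannot enable writes. */
    static int debug_level;
    module_param(debug_level, int, 0444);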
@@ -2210,9 +2210,13 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                 up_write(&me->mm->mmap_sem);
                 break;
         case PR_MPX_ENABLE_MANAGEMENT:
+                if (arg2 || arg3 || arg4 || arg5)
+                        return -EINVAL;
                 error = MPX_ENABLE_MANAGEMENT(me);
                 break;
         case PR_MPX_DISABLE_MANAGEMENT:
+                if (arg2 || arg3 || arg4 || arg5)
+                        return -EINVAL;
                 error = MPX_DISABLE_MANAGEMENT(me);
                 break;
         default:
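From userspace, the unused arguments to these two prctl() options must now be zero; previously they were silently ignored. A hedged usage sketch (the fallback PR_MPX_ENABLE_MANAGEMENT value is assumed to match <linux/prctl.h>):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_MPX_ENABLE_MANAGEMENT
    #define PR_MPX_ENABLE_MANAGEMENT 43     /* assumed from <linux/prctl.h> */
    #endif

    int main(void)
    {
            /* All unused arguments must be zero, or the kernel now
             * returns -EINVAL instead of ignoring them. */
            if (prctl(PR_MPX_ENABLE_MANAGEMENT, 0, 0, 0, 0) != 0)
                    perror("PR_MPX_ENABLE_MANAGEMENT");
            return 0;
    }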
@@ -633,6 +633,13 @@ int ntp_validate_timex(struct timex *txc)
         if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
                 return -EPERM;
 
+        if (txc->modes & ADJ_FREQUENCY) {
+                if (LONG_MIN / PPM_SCALE > txc->freq)
+                        return -EINVAL;
+                if (LONG_MAX / PPM_SCALE < txc->freq)
+                        return -EINVAL;
+        }
+
         return 0;
 }
 
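The two divisions guard the later txc->freq * PPM_SCALE multiplication against signed overflow: the divisions themselves cannot overflow, so they bound freq safely before any multiply happens. The idiom in isolation (hypothetical helper; assumes a positive scale, as PPM_SCALE is):

    #include <limits.h>

    static int mul_fits_long(long freq, long scale)
    {
            /* scale must be > 0 for these comparisons to be correct */
            if (LONG_MIN / scale > freq)
                    return 0;
            if (LONG_MAX / scale < freq)
                    return 0;
            return 1;
    }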
@@ -196,6 +196,10 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
         if (tv) {
                 if (copy_from_user(&user_tv, tv, sizeof(*tv)))
                         return -EFAULT;
+
+                if (!timeval_valid(&user_tv))
+                        return -EINVAL;
+
                 new_ts.tv_sec = user_tv.tv_sec;
                 new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
         }
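timeval_valid() rejects values that would corrupt the timekeeping state once the microseconds are scaled to nanoseconds. Roughly what the existing helper checks, paraphrased as a standalone sketch (not the kernel's exact code):

    #include <stdbool.h>
    #include <sys/time.h>

    #define USEC_PER_SEC 1000000L

    static bool timeval_is_valid(const struct timeval *tv)
    {
            /* seconds non-negative, microseconds in [0, USEC_PER_SEC) */
            if (tv->tv_sec < 0)
                    return false;
            if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
                    return false;
            return true;
    }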
@@ -1841,17 +1841,11 @@ static void pool_mayday_timeout(unsigned long __pool)
  * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times. Does GFP_KERNEL allocations. Called only from
  * manager.
- *
- * Return:
- * %false if no action was taken and pool->lock stayed locked, %true
- * otherwise.
  */
-static bool maybe_create_worker(struct worker_pool *pool)
+static void maybe_create_worker(struct worker_pool *pool)
 __releases(&pool->lock)
 __acquires(&pool->lock)
 {
-        if (!need_to_create_worker(pool))
-                return false;
 restart:
         spin_unlock_irq(&pool->lock);
 
@@ -1877,7 +1871,6 @@ restart:
          */
         if (need_to_create_worker(pool))
                 goto restart;
-        return true;
 }
 
 /**
@@ -1897,16 +1890,14 @@ restart:
  * multiple times. Does GFP_KERNEL allocations.
  *
  * Return:
- * %false if the pool don't need management and the caller can safely start
- * processing works, %true indicates that the function released pool->lock
- * and reacquired it to perform some management function and that the
- * conditions that the caller verified while holding the lock before
- * calling the function might no longer be true.
+ * %false if the pool doesn't need management and the caller can safely
+ * start processing works, %true if management function was performed and
+ * the conditions that the caller verified before calling the function may
+ * no longer be true.
  */
 static bool manage_workers(struct worker *worker)
 {
         struct worker_pool *pool = worker->pool;
-        bool ret = false;
 
         /*
          * Anyone who successfully grabs manager_arb wins the arbitration
@@ -1919,12 +1910,12 @@ static bool manage_workers(struct worker *worker)
          * actual management, the pool may stall indefinitely.
          */
         if (!mutex_trylock(&pool->manager_arb))
-                return ret;
+                return false;
 
-        ret |= maybe_create_worker(pool);
+        maybe_create_worker(pool);
 
         mutex_unlock(&pool->manager_arb);
-        return ret;
+        return true;
 }
 
 /**
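With the simplified contract, %true from manage_workers() only means "management ran; your earlier checks may be stale", so the caller loops back and re-verifies everything. A caller-side sketch, simplified from the worker_thread() pattern (illustrative, not an exact excerpt):

    static void worker_loop_sketch(struct worker *worker)
    {
            struct worker_pool *pool = worker->pool;

    recheck:
            /* manage_workers() may drop and retake pool->lock, so any
             * condition checked before it must be checked again. */
            if (unlikely(!may_start_working(pool)) && manage_workers(worker))
                    goto recheck;

            /* ... preconditions hold; start processing work items ... */
    }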