commit 3bb5d2ee39

percpu areas are already allocated during boot for each possible cpu.
percpu idle threads can be considered as an extension of the percpu areas,
and allocate them for each possible cpu during boot.

This will eliminate the need for workqueue based idle thread allocation.
In future we can move the idle thread area into the percpu area too.

[ tglx: Moved the loop into smpboot.c and added an error check when
  the init code failed to allocate an idle thread for a cpu which
  should be onlined ]

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: venki@google.com
Link: http://lkml.kernel.org/r/1334966930.28674.245.camel@sbsiddha-desk.sc.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
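
For context, a minimal sketch of the boot-time allocation this commit relies on.
The loop itself lives in kernel/smpboot.c rather than in this file; the names
below follow the description above (idle_thread_get(), a per-cpu idle thread
slot), but the details are illustrative, not a verbatim copy of smpboot.c:

/* Sketch only -- simplified error handling; the real code is in kernel/smpboot.c */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);

struct task_struct *idle_thread_get(unsigned int cpu)
{
        struct task_struct *tsk = per_cpu(idle_threads, cpu);

        /* The boot-time loop below should have filled this slot already. */
        if (!tsk)
                return ERR_PTR(-ENOMEM);
        return tsk;
}

void __init idle_threads_init(void)
{
        unsigned int cpu, boot_cpu = smp_processor_id();

        /* One idle thread per possible cpu, forked once during boot. */
        for_each_possible_cpu(cpu) {
                if (cpu != boot_cpu)
                        per_cpu(idle_threads, cpu) = fork_idle(cpu);
        }
}
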
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two API's must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        struct mutex lock; /* Synchronizes accesses to refcount, */
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        int refcount;
} cpu_hotplug = {
        .active_writer = NULL,
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
        .refcount = 0,
};

void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        cpu_hotplug.refcount++;
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
                wake_up_process(cpu_hotplug.active_writer);
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
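
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * hotplug read-side section wraps get_online_cpus()/put_online_cpus()
 * around any walk of cpu_online_mask whose stability it depends on.
 * The helper and its callback parameter are hypothetical.
 */
static void __maybe_unused example_online_cpu_walk(void (*fn)(unsigned int))
{
        unsigned int cpu;

        get_online_cpus();              /* holds off cpu_up()/cpu_down() */
        for_each_online_cpu(cpu)
                fn(cpu);
        put_online_cpus();              /* lets hotplug writers proceed */
}
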
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() not an api which is called all that often.
 *
 */
static void cpu_hotplug_begin(void)
{
        cpu_hotplug.active_writer = current;

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                if (likely(!cpu_hotplug.refcount))
                        break;
                __set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
}

static void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
}

#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif  /* #else #if CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
        int ret;
        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
                        int *nr_calls)
{
        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
                                        nr_calls);

        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
        return __cpu_notify(val, v, -1, NULL);
}
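
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * subsystem that must react to CPUs coming and going registers a
 * notifier_block on the chain above via register_cpu_notifier().  The
 * callback and the actions taken here are hypothetical.
 */
static int example_cpu_callback(struct notifier_block *nfb,
                                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                pr_debug("example: cpu %u came online\n", cpu);
                break;
        case CPU_DEAD:
                pr_debug("example: cpu %u went offline\n", cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier __maybe_unused = {
        .notifier_call = example_cpu_callback,
};
/* register_cpu_notifier(&example_cpu_notifier) would hook it up. */
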
#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
        BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

static inline void check_for_tasks(int cpu)
{
        struct task_struct *p;

        write_lock_irq(&tasklist_lock);
        for_each_process(p) {
                if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
                    (p->utime || p->stime))
                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                                "(state = %ld, flags = %x)\n",
                                p->comm, task_pid_nr(p), cpu,
                                p->state, p->flags);
        }
        write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
        unsigned long mod;
        void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        cpu_notify(CPU_DYING | param->mod, param->hcpu);
        return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int err, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .mod = mod,
                .hcpu = hcpu,
        };

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        cpu_hotplug_begin();

        err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
                printk("%s: attempt to take down CPU %u failed\n",
                                __func__, cpu);
                goto out_release;
        }

        err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);

                goto out_release;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The migration_call() CPU_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        while (!idle_cpu(cpu))
                cpu_relax();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone.  Too late to complain. */
        cpu_notify_nofail(CPU_DEAD | mod, hcpu);

        check_for_tasks(cpu);

out_release:
        cpu_hotplug_done();
        if (!err)
                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
        return err;
}

int __ref cpu_down(unsigned int cpu)
{
        int err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct task_struct *idle;

        if (cpu_online(cpu) || !cpu_present(cpu))
                return -EINVAL;

        cpu_hotplug_begin();

        idle = idle_thread_get(cpu);
        if (IS_ERR(idle)) {
                ret = PTR_ERR(idle);
                goto out;
        }

        ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
                                __func__, cpu);
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu, idle);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        /* Now call notifier in preparation. */
        cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
        cpu_hotplug_done();

        return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
        int err = 0;

#ifdef CONFIG_MEMORY_HOTPLUG
        int nid;
        pg_data_t *pgdat;
#endif

        if (!cpu_possible(cpu)) {
                printk(KERN_ERR "can't online cpu %d because it is not "
                        "configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
                printk(KERN_ERR "please check additional_cpus= boot "
                                "parameter\n");
#endif
                return -EINVAL;
        }

#ifdef CONFIG_MEMORY_HOTPLUG
        nid = cpu_to_node(cpu);
        if (!node_online(nid)) {
                err = mem_online_node(nid);
                if (err)
                        return err;
        }

        pgdat = NODE_DATA(nid);
        if (!pgdat) {
                printk(KERN_ERR
                        "Can't online cpu %d due to NULL pgdat\n", cpu);
                return -ENOMEM;
        }

        if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
                mutex_lock(&zonelists_mutex);
                build_all_zonelists(NULL);
                mutex_unlock(&zonelists_mutex);
        }
#endif

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

void __weak arch_disable_nonboot_cpus_begin(void)
{
}

void __weak arch_disable_nonboot_cpus_end(void)
{
}

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time
         */
        cpumask_clear(frozen_cpus);
        arch_disable_nonboot_cpus_begin();

        printk("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                error = _cpu_down(cpu, 1);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        printk(KERN_ERR "Error taking CPU%d down: %d\n",
                                cpu, error);
                        break;
                }
        }

        arch_disable_nonboot_cpus_end();

        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                printk(KERN_ERR "Non-boot CPUs are not disabled\n");
        }
        cpu_maps_update_done();
        return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        if (cpumask_empty(frozen_cpus))
                goto out;

        printk(KERN_INFO "Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                error = _cpu_up(cpu, 1);
                if (!error) {
                        printk(KERN_INFO "CPU%d is up\n", cpu);
                        continue;
                }
                printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
 * hotplug when tasks are about to be frozen. Also, don't allow the freezer
 * to continue until any currently running CPU hotplug operation gets
 * completed.
 * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
 * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
 * CPU hotplug path and released only after it is complete. Thus, we
 * (and hence the freezer) will block here until any currently running CPU
 * hotplug operation gets completed.
 */
void cpu_hotplug_disable_before_freeze(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 1;
        cpu_maps_update_done();
}

/*
 * When tasks have been thawed, re-enable regular CPU hotplug (which had been
 * disabled while beginning to freeze tasks).
 */
void cpu_hotplug_enable_after_thaw(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        cpu_maps_update_done();
}

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
                        unsigned long action, void *ptr)
{
        switch (action) {

        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                cpu_hotplug_disable_before_freeze();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                cpu_hotplug_enable_after_thaw();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
        pm_notifier(cpu_hotplug_pm_callback, 0);
        return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
        unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
        if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
        cpu_notify(val, (void *)(long)cpu);
}
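
/*
 * Illustrative only: the arch secondary-startup path that calls this is
 * expected to look roughly like the following, with the exact names
 * varying per architecture:
 *
 *      ...low-level init, switch to the idle thread's stack...
 *      notify_cpu_starting(cpu);
 *      set_cpu_online(cpu, true);
 *      local_irq_enable();
 *      cpu_idle();
 */
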
#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
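
/*
 * Illustrative only (the real helper lives in include/linux/cpumask.h):
 * cpumask_of(cpu) is expected to resolve to roughly the following, picking
 * the row whose pattern is 1UL << (cpu % BITS_PER_LONG) and then backing
 * the pointer up so that the set bit lands in the right word:
 *
 *      static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
 *      {
 *              const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *              p -= cpu / BITS_PER_LONG;
 *              return to_cpumask(p);
 *      }
 */
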

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
        = CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
        if (possible)
                cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
        if (present)
                cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
        if (online)
                cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
        if (active)
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_online_bits), src);
}