s390/topology: add detection of dedicated vs shared CPUs

The topology information returned by STSI 15.x.x contains a flag that
indicates whether the CPUs of a topology-list are dedicated or shared.
Make this information available if the machine provides topology
information.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
commit 1887aa07b6 (parent 8179c7ba10)
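For context, the per-CPU state introduced here is a CIF bit, so s390 code
running on a given CPU can query it with the existing cpu-flag helpers. A
minimal sketch, not part of this commit (the helper name is made up for
illustration):

	/* Must run on the CPU in question: test_cpu_flag() reads the
	 * flags word of the local CPU.
	 */
	#include <asm/processor.h>

	static bool this_cpu_is_dedicated(void)
	{
		return test_cpu_flag(CIF_DEDICATED_CPU);
	}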
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -21,6 +21,7 @@
 #define CIF_IGNORE_IRQ		5	/* ignore interrupt (for udelay) */
 #define CIF_ENABLED_WAIT	6	/* in enabled wait state */
 #define CIF_MCCK_GUEST		7	/* machine check happening in guest */
+#define CIF_DEDICATED_CPU	8	/* this CPU is dedicated */
 
 #define _CIF_MCCK_PENDING	_BITUL(CIF_MCCK_PENDING)
 #define _CIF_ASCE_PRIMARY	_BITUL(CIF_ASCE_PRIMARY)
@@ -30,6 +31,7 @@
 #define _CIF_IGNORE_IRQ		_BITUL(CIF_IGNORE_IRQ)
 #define _CIF_ENABLED_WAIT	_BITUL(CIF_ENABLED_WAIT)
 #define _CIF_MCCK_GUEST		_BITUL(CIF_MCCK_GUEST)
+#define _CIF_DEDICATED_CPU	_BITUL(CIF_DEDICATED_CPU)
 
 #ifndef __ASSEMBLY__
 
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -156,7 +156,8 @@ static inline unsigned char topology_mnest_limit(void)
 struct topology_core {
 	unsigned char nl;
 	unsigned char reserved0[3];
-	unsigned char :6;
+	unsigned char :5;
+	unsigned char d:1;
 	unsigned char pp:2;
 	unsigned char reserved1;
 	unsigned short origin;
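The change above splits the former 6-bit reserved field of the STSI 15.1.x
core entry into 5 reserved bits plus the new "d" (dedicated) bit, leaving the
2-bit "pp" polarization field in place. Illustration only, not from the patch
(helper name invented, and it assumes the MSB-first bitfield layout used on
big-endian s390):

	/* With MSB-first layout the flag byte is: rrrrr d pp,
	 * so "d" is bit 2 of the raw byte, directly above "pp".
	 */
	static inline int tle_core_dedicated(unsigned char flags)
	{
		return (flags >> 2) & 1;
	}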
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -16,6 +16,7 @@ struct cpu_topology_s390 {
 	unsigned short book_id;
 	unsigned short drawer_id;
 	unsigned short node_id;
+	unsigned short dedicated : 1;
 	cpumask_t thread_mask;
 	cpumask_t core_mask;
 	cpumask_t book_mask;
@@ -34,6 +35,7 @@ extern cpumask_t cpus_with_topology;
 #define topology_book_cpumask(cpu)	(&cpu_topology[cpu].book_mask)
 #define topology_drawer_id(cpu)		(cpu_topology[cpu].drawer_id)
 #define topology_drawer_cpumask(cpu)	(&cpu_topology[cpu].drawer_mask)
+#define topology_cpu_dedicated(cpu)	(cpu_topology[cpu].dedicated)
 
 #define mc_capable() 1
 
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -800,6 +800,8 @@ void __init smp_detect_cpus(void)
  */
 static void smp_start_secondary(void *cpuvoid)
 {
+	int cpu = smp_processor_id();
+
 	S390_lowcore.last_update_clock = get_tod_clock();
 	S390_lowcore.restart_stack = (unsigned long) restart_stack;
 	S390_lowcore.restart_fn = (unsigned long) do_restart;
@@ -813,8 +815,12 @@ static void smp_start_secondary(void *cpuvoid)
 	init_cpu_timer();
 	vtime_init();
 	pfault_init();
-	notify_cpu_starting(smp_processor_id());
-	set_cpu_online(smp_processor_id(), true);
+	notify_cpu_starting(cpu);
+	if (topology_cpu_dedicated(cpu))
+		set_cpu_flag(CIF_DEDICATED_CPU);
+	else
+		clear_cpu_flag(CIF_DEDICATED_CPU);
+	set_cpu_online(cpu, true);
 	inc_irq_stat(CPU_RST);
 	local_irq_enable();
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -133,6 +133,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
 			topo->socket_id = socket->id;
 			topo->core_id = rcore;
 			topo->thread_id = lcpu + i;
+			topo->dedicated = tl_core->d;
 			cpumask_set_cpu(lcpu + i, &drawer->mask);
 			cpumask_set_cpu(lcpu + i, &book->mask);
 			cpumask_set_cpu(lcpu + i, &socket->mask);
@@ -273,6 +274,14 @@ void store_topology(struct sysinfo_15_1_x *info)
 	stsi(info, 15, 1, topology_mnest_limit());
 }
 
+static void __arch_update_dedicated_flag(void *arg)
+{
+	if (topology_cpu_dedicated(smp_processor_id()))
+		set_cpu_flag(CIF_DEDICATED_CPU);
+	else
+		clear_cpu_flag(CIF_DEDICATED_CPU);
+}
+
 static int __arch_update_cpu_topology(void)
 {
 	struct sysinfo_15_1_x *info = tl_info;
@@ -298,6 +307,7 @@ int arch_update_cpu_topology(void)
 	int cpu, rc;
 
 	rc = __arch_update_cpu_topology();
+	on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
 	for_each_online_cpu(cpu) {
 		dev = get_cpu_device(cpu);
 		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
@@ -435,9 +445,39 @@ static struct attribute_group topology_cpu_attr_group = {
 	.attrs = topology_cpu_attrs,
 };
 
+static ssize_t cpu_dedicated_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	int cpu = dev->id;
+	ssize_t count;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	count = sprintf(buf, "%d\n", topology_cpu_dedicated(cpu));
+	mutex_unlock(&smp_cpu_state_mutex);
+	return count;
+}
+static DEVICE_ATTR(dedicated, 0444, cpu_dedicated_show, NULL);
+
+static struct attribute *topology_extra_cpu_attrs[] = {
+	&dev_attr_dedicated.attr,
+	NULL,
+};
+
+static struct attribute_group topology_extra_cpu_attr_group = {
+	.attrs = topology_extra_cpu_attrs,
+};
+
 int topology_cpu_init(struct cpu *cpu)
 {
-	return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
+	int rc;
+
+	rc = sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
+	if (rc || !MACHINE_HAS_TOPOLOGY)
+		return rc;
+	rc = sysfs_create_group(&cpu->dev.kobj, &topology_extra_cpu_attr_group);
+	if (rc)
+		sysfs_remove_group(&cpu->dev.kobj, &topology_cpu_attr_group);
+	return rc;
 }
 
 static const struct cpumask *cpu_thread_mask(int cpu)
@@ -509,6 +549,7 @@ void __init topology_init_early(void)
 		alloc_masks(info, &drawer_info, 3);
 out:
 	__arch_update_cpu_topology();
+	__arch_update_dedicated_flag(NULL);
 }
 
 static inline int topology_get_mode(int enabled)