Merge tag 'x86-core-2023-10-29-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 core updates from Thomas Gleixner:

 - Limit the hardcoded topology quirk for Hygon CPUs to those which have
   a model ID less than 4. The newer models have the topology CPUID leaf
   0xB correctly implemented and are not affected.

 - Make SMT control more robust against enumeration failures

   SMT control was added to allow controlling SMT at boottime or
   runtime. The primary purpose was to provide a simple mechanism to
   disable SMT in the light of speculation attack vectors.

   It turned out that the code is sensitive to enumeration failures and
   worked only by chance for XEN/PV. XEN/PV has no real APIC
   enumeration, which means the primary thread mask is not set up
   correctly. By chance a XEN/PV boot ends up with
   smp_num_siblings == 2, which makes the hotplug control stay at its
   default value "enabled". So the mask is never evaluated.

   The ongoing rework of the topology evaluation caused XEN/PV to end
   up with smp_num_siblings == 1, which sets the SMT control to "not
   supported", and the empty primary thread mask causes the hotplug
   core to deny the bringup of the APs.

   Make the decision logic more robust and take 'not supported' and
   'not implemented' into account for the decision whether a CPU should
   be booted or not.

 - Fake primary thread mask for XEN/PV

   Pretend that all XEN/PV vCPUs are primary threads, which makes the
   usage of the primary thread mask valid on XEN/PV. That is consistent
   because all of the topology information on XEN/PV is fake or even
   non-existent.

 - Encapsulate topology information in cpuinfo_x86

   Move the randomly scattered topology data into a separate data
   structure for readability and as a preparatory step for the topology
   evaluation overhaul (see the sketch after the commit list below).

 - Consolidate APIC ID data type to u32

   It's fixed-width hardware data, not randomly u16, int, unsigned long
   or whatever developers decided to use.

 - Cure the abuse of cpuinfo for persisting logical IDs

   Per-CPU cpuinfo is used to persist the logical package and die IDs.
   That's really not the right place, simply because cpuinfo is subject
   to being reinitialized when a CPU goes through an offline/online
   cycle. Use separate per-CPU data for the persisting to enable the
   further topology management rework. It will be removed once the new
   topology management is in place.

 - Provide a debug interface for inspecting topology information

   Useful in general and extremely helpful for validating the topology
   management rework in terms of correctness or "bug" compatibility.

* tag 'x86-core-2023-10-29-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  x86/apic, x86/hyperv: Use u32 in hv_snp_boot_ap() too
  x86/cpu: Provide debug interface
  x86/cpu/topology: Cure the abuse of cpuinfo for persisting logical ids
  x86/apic: Use u32 for wakeup_secondary_cpu[_64]()
  x86/apic: Use u32 for [gs]et_apic_id()
  x86/apic: Use u32 for phys_pkg_id()
  x86/apic: Use u32 for cpu_present_to_apicid()
  x86/apic: Use u32 for check_apicid_used()
  x86/apic: Use u32 for APIC IDs in global data
  x86/apic: Use BAD_APICID consistently
  x86/cpu: Move cpu_l[l2]c_id into topology info
  x86/cpu: Move logical package and die IDs into topology info
  x86/cpu: Remove pointless evaluation of x86_coreid_bits
  x86/cpu: Move cu_id into topology info
  x86/cpu: Move cpu_core_id into topology info
  hwmon: (fam15h_power) Use topology_core_id()
  scsi: lpfc: Use topology_core_id()
  x86/cpu: Move cpu_die_id into topology info
  x86/cpu: Move phys_proc_id into topology info
  x86/cpu: Encapsulate topology information in cpuinfo_x86
  ...
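For orientation, here is a minimal, self-contained userspace sketch of what the encapsulation means for call sites. This is not kernel code; the field subset and the sample values are illustrative only. Topology IDs that used to live as loose u16 members of cpuinfo_x86 now sit in one nested struct, all as u32:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t u32;

    /* Mirrors the new struct cpuinfo_topology from the diff (subset). */
    struct cpuinfo_topology {
        u32 apicid;          /* real APIC ID read from the local APIC */
        u32 initial_apicid;  /* initial APIC ID provided by CPUID */
        u32 pkg_id;          /* physical package ID */
        u32 die_id;          /* physical die ID */
        u32 core_id;         /* core ID relative to the package */
        u32 llc_id;          /* last level cache ID */
        u32 l2c_id;          /* L2 cache ID */
    };

    struct cpuinfo_x86 {
        /* family, model, feature flags etc. elided */
        struct cpuinfo_topology topo;
    };

    int main(void)
    {
        /* Illustrative values only. */
        struct cpuinfo_x86 c = {
            .topo = { .apicid = 0x21, .pkg_id = 1, .die_id = 0, .core_id = 0x11 },
        };

        /*
         * Old call sites read c.phys_proc_id, c.cpu_die_id, c.cpu_core_id;
         * new call sites go through the nested struct:
         */
        printf("pkg %u die %u core %u\n",
               c.topo.pkg_id, c.topo.die_id, c.topo.core_id);
        return 0;
    }

In-kernel consumers keep using the topology_*() accessor macros (topology_core_id(), topology_die_id(), ...); the diff below simply repoints them at the new topo fields, so code only changes if it poked the raw cpuinfo members directly.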
This commit is contained in: commit eb55307e67
@@ -55,19 +55,19 @@ Package-related topology information in the kernel:
 
     The number of dies in a package. This information is retrieved via CPUID.
 
-  - cpuinfo_x86.cpu_die_id:
+  - cpuinfo_x86.topo.die_id:
 
     The physical ID of the die. This information is retrieved via CPUID.
 
-  - cpuinfo_x86.phys_proc_id:
+  - cpuinfo_x86.topo.pkg_id:
 
     The physical ID of the package. This information is retrieved via CPUID
     and deduced from the APIC IDs of the cores in the package.
 
     Modern systems use this value for the socket. There may be multiple
-    packages within a socket. This value may differ from cpu_die_id.
+    packages within a socket. This value may differ from topo.die_id.
 
-  - cpuinfo_x86.logical_proc_id:
+  - cpuinfo_x86.topo.logical_pkg_id:
 
     The logical ID of the package. As we do not trust BIOSes to enumerate the
     packages in a consistent way, we introduced the concept of logical package
@@ -79,9 +79,7 @@ Package-related topology information in the kernel:
 
     The maximum possible number of packages in the system. Helpful for per
     package facilities to preallocate per package information.
 
-  - cpu_llc_id:
-
-    A per-CPU variable containing:
+  - cpuinfo_x86.topo.llc_id:
 
       - On Intel, the first APIC ID of the list of CPUs sharing the Last Level
        Cache
@@ -772,7 +772,7 @@ void amd_uncore_l3_ctx_scan(struct amd_uncore *uncore, unsigned int cpu)
 	info.split.aux_data = 0;
 	info.split.num_pmcs = NUM_COUNTERS_L2;
 	info.split.gid = 0;
-	info.split.cid = get_llc_id(cpu);
+	info.split.cid = per_cpu_llc_id(cpu);
 
 	if (boot_cpu_data.x86 >= 0x17)
 		info.split.num_pmcs = NUM_COUNTERS_L3;
@@ -74,7 +74,7 @@ int uncore_device_to_die(struct pci_dev *dev)
 		struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 		if (c->initialized && cpu_to_node(cpu) == node)
-			return c->logical_die_id;
+			return c->topo.logical_die_id;
 	}
 
 	return -1;
@@ -196,7 +196,7 @@ static int hv_vtl_apicid_to_vp_id(u32 apic_id)
 	return ret;
 }
 
-static int hv_vtl_wakeup_secondary_cpu(int apicid, unsigned long start_eip)
+static int hv_vtl_wakeup_secondary_cpu(u32 apicid, unsigned long start_eip)
 {
 	int vp_id;
@@ -288,7 +288,7 @@ static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
 	free_page((unsigned long)vmsa);
 }
 
-int hv_snp_boot_ap(int cpu, unsigned long start_ip)
+int hv_snp_boot_ap(u32 cpu, unsigned long start_ip)
 {
 	struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
 		__get_free_page(GFP_KERNEL | __GFP_ZERO);
@@ -54,7 +54,7 @@ extern int local_apic_timer_c2_ok;
 extern bool apic_is_disabled;
 extern unsigned int lapic_timer_period;
 
-extern int cpuid_to_apicid[];
+extern u32 cpuid_to_apicid[];
 
 extern enum apic_intr_mode_id apic_intr_mode;
 enum apic_intr_mode_id {
@@ -292,19 +292,19 @@ struct apic {
 	int	(*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
 	bool	(*apic_id_registered)(void);
 
-	bool	(*check_apicid_used)(physid_mask_t *map, int apicid);
+	bool	(*check_apicid_used)(physid_mask_t *map, u32 apicid);
 	void	(*init_apic_ldr)(void);
 	void	(*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
-	int	(*cpu_present_to_apicid)(int mps_cpu);
-	int	(*phys_pkg_id)(int cpuid_apic, int index_msb);
+	u32	(*cpu_present_to_apicid)(int mps_cpu);
+	u32	(*phys_pkg_id)(u32 cpuid_apic, int index_msb);
 
-	u32	(*get_apic_id)(unsigned long x);
-	u32	(*set_apic_id)(unsigned int id);
+	u32	(*get_apic_id)(u32 id);
+	u32	(*set_apic_id)(u32 apicid);
 
 	/* wakeup_secondary_cpu */
-	int	(*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);
+	int	(*wakeup_secondary_cpu)(u32 apicid, unsigned long start_eip);
 	/* wakeup secondary CPU using 64-bit wakeup point */
-	int	(*wakeup_secondary_cpu_64)(int apicid, unsigned long start_eip);
+	int	(*wakeup_secondary_cpu_64)(u32 apicid, unsigned long start_eip);
 
 	char	*name;
 };
@@ -322,8 +322,8 @@ struct apic_override {
 	void	(*send_IPI_self)(int vector);
 	u64	(*icr_read)(void);
 	void	(*icr_write)(u32 low, u32 high);
-	int	(*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);
-	int	(*wakeup_secondary_cpu_64)(int apicid, unsigned long start_eip);
+	int	(*wakeup_secondary_cpu)(u32 apicid, unsigned long start_eip);
+	int	(*wakeup_secondary_cpu_64)(u32 apicid, unsigned long start_eip);
 };
 
 /*
@@ -493,16 +493,6 @@ static inline bool lapic_vector_set_in_irr(unsigned int vector)
 	return !!(irr & (1U << (vector % 32)));
 }
 
-static inline unsigned default_get_apic_id(unsigned long x)
-{
-	unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
-
-	if (APIC_XAPIC(ver) || boot_cpu_has(X86_FEATURE_EXTD_APICID))
-		return (x >> 24) & 0xFF;
-	else
-		return (x >> 24) & 0x0F;
-}
-
 /*
  * Warm reset vector position:
  */
@@ -517,9 +507,9 @@ extern void generic_bigsmp_probe(void);
 
 extern struct apic apic_noop;
 
-static inline unsigned int read_apic_id(void)
+static inline u32 read_apic_id(void)
 {
-	unsigned int reg = apic_read(APIC_ID);
+	u32 reg = apic_read(APIC_ID);
 
 	return apic->get_apic_id(reg);
 }
@@ -538,13 +528,12 @@ extern int default_apic_id_valid(u32 apicid);
 extern u32 apic_default_calc_apicid(unsigned int cpu);
 extern u32 apic_flat_calc_apicid(unsigned int cpu);
 
-extern bool default_check_apicid_used(physid_mask_t *map, int apicid);
 extern void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap);
-extern int default_cpu_present_to_apicid(int mps_cpu);
+extern u32 default_cpu_present_to_apicid(int mps_cpu);
 
 #else /* CONFIG_X86_LOCAL_APIC */
 
-static inline unsigned int read_apic_id(void) { return 0; }
+static inline u32 read_apic_id(void) { return 0; }
 
 #endif /* !CONFIG_X86_LOCAL_APIC */
@@ -7,9 +7,6 @@ extern unsigned int memory_caching_control;
 #define CACHE_MTRR 0x01
 #define CACHE_PAT  0x02
 
-void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu);
-void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);
-
 void cache_disable(void);
 void cache_enable(void);
 void set_cache_aps_delayed_init(bool val);
@@ -37,7 +37,7 @@ extern int mp_bus_id_to_type[MAX_MP_BUSSES];
 
 extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
 
-extern unsigned int boot_cpu_physical_apicid;
+extern u32 boot_cpu_physical_apicid;
 extern u8 boot_cpu_apic_version;
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -276,11 +276,11 @@ int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 bool hv_ghcb_negotiate_protocol(void);
 void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason);
-int hv_snp_boot_ap(int cpu, unsigned long start_ip);
+int hv_snp_boot_ap(u32 cpu, unsigned long start_ip);
 #else
 static inline bool hv_ghcb_negotiate_protocol(void) { return false; }
 static inline void hv_ghcb_terminate(unsigned int set, unsigned int reason) {}
-static inline int hv_snp_boot_ap(int cpu, unsigned long start_ip) { return 0; }
+static inline int hv_snp_boot_ap(u32 cpu, unsigned long start_ip) { return 0; }
 #endif
 
 #if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
@@ -75,11 +75,36 @@ extern u16 __read_mostly tlb_lld_4m[NR_INFO];
 extern u16 __read_mostly tlb_lld_1g[NR_INFO];
 
 /*
  * CPU type and hardware bug flags. Kept separately for each CPU.
- * Members of this structure are referenced in head_32.S, so think twice
- * before touching them. [mj]
  */
 
+struct cpuinfo_topology {
+	// Real APIC ID read from the local APIC
+	u32			apicid;
+	// The initial APIC ID provided by CPUID
+	u32			initial_apicid;
+
+	// Physical package ID
+	u32			pkg_id;
+
+	// Physical die ID on AMD, Relative on Intel
+	u32			die_id;
+
+	// Compute unit ID - AMD specific
+	u32			cu_id;
+
+	// Core ID relative to the package
+	u32			core_id;
+
+	// Logical ID mappings
+	u32			logical_pkg_id;
+	u32			logical_die_id;
+
+	// Cache level topology IDs
+	u32			llc_id;
+	u32			l2c_id;
+};
+
 struct cpuinfo_x86 {
 	__u8			x86;		/* CPU family */
 	__u8			x86_vendor;	/* CPU vendor */
@@ -96,7 +121,6 @@ struct cpuinfo_x86 {
 	__u8			x86_phys_bits;
 	/* CPUID returned core id bits: */
 	__u8			x86_coreid_bits;
-	__u8			cu_id;
 	/* Max extended CPUID function supported: */
 	__u32			extended_cpuid_level;
 	/* Maximum supported CPUID level, -1=no CPUID: */
@@ -112,6 +136,7 @@ struct cpuinfo_x86 {
 	};
 	char			x86_vendor_id[16];
 	char			x86_model_id[64];
+	struct cpuinfo_topology	topo;
 	/* in KB - valid for CPUS which support this call: */
 	unsigned int		x86_cache_size;
 	int			x86_cache_alignment;	/* In bytes */
@@ -125,19 +150,9 @@ struct cpuinfo_x86 {
 	u64			ppin;
 	/* cpuid returned max cores value: */
 	u16			x86_max_cores;
-	u16			apicid;
-	u16			initial_apicid;
 	u16			x86_clflush_size;
 	/* number of cores as seen by the OS: */
 	u16			booted_cores;
-	/* Physical processor id: */
-	u16			phys_proc_id;
-	/* Logical processor id: */
-	u16			logical_proc_id;
-	/* Core id: */
-	u16			cpu_core_id;
-	u16			cpu_die_id;
-	u16			logical_die_id;
 	/* Index into per_cpu list: */
 	u16			cpu_index;
 	/* Is SMT active on this core? */
@@ -678,7 +693,15 @@ extern int set_tsc_mode(unsigned int val);
 
 DECLARE_PER_CPU(u64, msr_misc_features_shadow);
 
-extern u16 get_llc_id(unsigned int cpu);
+static inline u32 per_cpu_llc_id(unsigned int cpu)
+{
+	return per_cpu(cpu_info.topo.llc_id, cpu);
+}
+
+static inline u32 per_cpu_l2c_id(unsigned int cpu)
+{
+	return per_cpu(cpu_info.topo.l2c_id, cpu);
+}
 
 #ifdef CONFIG_CPU_SUP_AMD
 extern u32 amd_get_nodes_per_socket(void);
@@ -17,10 +17,8 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
 /* cpus sharing the last level cache: */
 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);
-DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
-DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_l2c_id);
 
-DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_apicid);
 DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid);
 
 struct task_struct;
@@ -105,17 +105,17 @@ static inline void setup_node_to_cpumask_map(void) { }
 extern const struct cpumask *cpu_coregroup_mask(int cpu);
 extern const struct cpumask *cpu_clustergroup_mask(int cpu);
 
-#define topology_logical_package_id(cpu)	(cpu_data(cpu).logical_proc_id)
-#define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
-#define topology_logical_die_id(cpu)		(cpu_data(cpu).logical_die_id)
-#define topology_die_id(cpu)			(cpu_data(cpu).cpu_die_id)
-#define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
+#define topology_logical_package_id(cpu)	(cpu_data(cpu).topo.logical_pkg_id)
+#define topology_physical_package_id(cpu)	(cpu_data(cpu).topo.pkg_id)
+#define topology_logical_die_id(cpu)		(cpu_data(cpu).topo.logical_die_id)
+#define topology_die_id(cpu)			(cpu_data(cpu).topo.die_id)
+#define topology_core_id(cpu)			(cpu_data(cpu).topo.core_id)
 #define topology_ppin(cpu)			(cpu_data(cpu).ppin)
 
 extern unsigned int __max_die_per_package;
 
 #ifdef CONFIG_SMP
-#define topology_cluster_id(cpu)		(per_cpu(cpu_l2c_id, cpu))
+#define topology_cluster_id(cpu)		(cpu_data(cpu).topo.l2c_id)
 #define topology_die_cpumask(cpu)		(per_cpu(cpu_die_map, cpu))
 #define topology_cluster_cpumask(cpu)		(cpu_clustergroup_mask(cpu))
 #define topology_core_cpumask(cpu)		(per_cpu(cpu_core_map, cpu))
@@ -177,7 +177,7 @@ struct x86_init_ops {
  * struct x86_cpuinit_ops - platform specific cpu hotplug setups
  * @setup_percpu_clockev:	set up the per cpu clock event device
  * @early_percpu_clock_init:	early init of the per cpu clock event device
- * @fixup_cpu_id:		fixup function for cpuinfo_x86::phys_proc_id
+ * @fixup_cpu_id:		fixup function for cpuinfo_x86::topo.pkg_id
 * @parallel_bringup:		Parallel bringup control
 */
 struct x86_cpuinit_ops {
@@ -362,7 +362,7 @@ acpi_parse_lapic_nmi(union acpi_subtable_headers *header, const unsigned long end)
 }
 
 #ifdef CONFIG_X86_64
-static int acpi_wakeup_cpu(int apicid, unsigned long start_ip)
+static int acpi_wakeup_cpu(u32 apicid, unsigned long start_ip)
 {
 	/*
 	 * Remap mailbox memory only for the first call to acpi_wakeup_cpu().
@@ -859,7 +859,7 @@ int acpi_unmap_cpu(int cpu)
 	set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE);
 #endif
 
-	per_cpu(x86_cpu_to_apicid, cpu) = -1;
+	per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
 	set_cpu_present(cpu, false);
 	num_processors--;
@@ -394,7 +394,7 @@ int amd_get_subcaches(int cpu)
 
 	pci_read_config_dword(link, 0x1d4, &mask);
 
-	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
+	return (mask >> (4 * cpu_data(cpu).topo.core_id)) & 0xf;
 }
 
 int amd_set_subcaches(int cpu, unsigned long mask)
@@ -420,7 +420,7 @@ int amd_set_subcaches(int cpu, unsigned long mask)
 		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
 	}
 
-	cuid = cpu_data(cpu).cpu_core_id;
+	cuid = cpu_data(cpu).topo.core_id;
 	mask <<= 4 * cuid;
 	mask |= (0xf ^ (1 << cuid)) << 26;
@@ -36,6 +36,8 @@
 #include <linux/smp.h>
 #include <linux/mm.h>
 
+#include <xen/xen.h>
+
 #include <asm/trace/irq_vectors.h>
 #include <asm/irq_remapping.h>
 #include <asm/pc-conf-reg.h>
@@ -70,7 +72,7 @@ unsigned int num_processors;
 unsigned disabled_cpus;
 
 /* Processor that is doing the boot up */
-unsigned int boot_cpu_physical_apicid __ro_after_init = -1U;
+u32 boot_cpu_physical_apicid __ro_after_init = BAD_APICID;
 EXPORT_SYMBOL_GPL(boot_cpu_physical_apicid);
 
 u8 boot_cpu_apic_version __ro_after_init;
@@ -85,7 +87,7 @@ physid_mask_t phys_cpu_present_map;
 * disable_cpu_apicid=<int>, mostly used for the kdump 2nd kernel to
 * avoid undefined behaviour caused by sending INIT from AP to BSP.
 */
-static unsigned int disabled_cpu_apicid __ro_after_init = BAD_APICID;
+static u32 disabled_cpu_apicid __ro_after_init = BAD_APICID;
 
 /*
 * This variable controls which CPUs receive external NMIs. By default,
@@ -109,7 +111,7 @@ static inline bool apic_accessible(void)
 /*
 * Map cpu index to physical APIC ID
 */
-DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
+DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_apicid, BAD_APICID);
 DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid, U32_MAX);
 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_acpiid);
@@ -1763,7 +1765,7 @@ static void __x2apic_enable(void)
 static int __init setup_nox2apic(char *str)
 {
 	if (x2apic_enabled()) {
-		int apicid = native_apic_msr_read(APIC_ID);
+		u32 apicid = native_apic_msr_read(APIC_ID);
 
 		if (apicid >= 255) {
 			pr_warn("Apicid: %08x, cannot enforce nox2apic\n",
@@ -2316,13 +2318,11 @@ static int nr_logical_cpuids = 1;
 /*
 * Used to store mapping between logical CPU IDs and APIC IDs.
 */
-int cpuid_to_apicid[] = {
-	[0 ... NR_CPUS - 1] = -1,
-};
+u32 cpuid_to_apicid[] = { [0 ... NR_CPUS - 1] = BAD_APICID, };
 
 bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
 {
-	return phys_id == cpuid_to_apicid[cpu];
+	return phys_id == (u64)cpuid_to_apicid[cpu];
 }
 
 #ifdef CONFIG_SMP
@@ -2344,6 +2344,15 @@ static int __init smp_init_primary_thread_mask(void)
 {
 	unsigned int cpu;
 
+	/*
+	 * XEN/PV provides either none or useless topology information.
+	 * Pretend that all vCPUs are primary threads.
+	 */
+	if (xen_pv_domain()) {
+		cpumask_copy(&__cpu_primary_thread_mask, cpu_possible_mask);
+		return 0;
+	}
+
 	for (cpu = 0; cpu < nr_logical_cpuids; cpu++)
 		cpu_mark_primary_thread(cpu, cpuid_to_apicid[cpu]);
 	return 0;
@@ -2382,7 +2391,7 @@ static int allocate_logical_cpuid(int apicid)
 	return nr_logical_cpuids++;
 }
 
-static void cpu_update_apic(int cpu, int apicid)
+static void cpu_update_apic(int cpu, u32 apicid)
 {
 #if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
 	early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
@@ -2535,7 +2544,7 @@ static struct {
 	 */
 	int active;
 	/* r/w apic fields */
-	unsigned int apic_id;
+	u32 apic_id;
 	unsigned int apic_taskpri;
 	unsigned int apic_ldr;
 	unsigned int apic_dfr;
@@ -18,7 +18,7 @@ u32 apic_flat_calc_apicid(unsigned int cpu)
 	return 1U << cpu;
 }
 
-bool default_check_apicid_used(physid_mask_t *map, int apicid)
+bool default_check_apicid_used(physid_mask_t *map, u32 apicid)
 {
 	return physid_isset(apicid, *map);
 }
@@ -28,7 +28,7 @@ void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
 	*retmap = *phys_map;
 }
 
-int default_cpu_present_to_apicid(int mps_cpu)
+u32 default_cpu_present_to_apicid(int mps_cpu)
 {
 	if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
 		return (int)per_cpu(x86_cpu_to_apicid, mps_cpu);
@@ -56,17 +56,17 @@ flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
 	_flat_send_IPI_mask(mask, vector);
 }
 
-static unsigned int flat_get_apic_id(unsigned long x)
+static u32 flat_get_apic_id(u32 x)
 {
 	return (x >> 24) & 0xFF;
 }
 
-static u32 set_apic_id(unsigned int id)
+static u32 set_apic_id(u32 id)
 {
 	return (id & 0xFF) << 24;
 }
 
-static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
+static u32 flat_phys_pkg_id(u32 initial_apic_id, int index_msb)
 {
 	return initial_apic_id >> index_msb;
 }
@@ -158,8 +158,6 @@ static struct apic apic_physflat __ro_after_init = {
 
 	.disable_esr			= 0,
 
-	.check_apicid_used		= NULL,
-	.ioapic_phys_id_map		= NULL,
 	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
 	.phys_pkg_id			= flat_phys_pkg_id,
@@ -18,6 +18,8 @@
 
 #include <asm/apic.h>
 
+#include "local.h"
+
 static void noop_send_IPI(int cpu, int vector) { }
 static void noop_send_IPI_mask(const struct cpumask *cpumask, int vector) { }
 static void noop_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) { }
@@ -25,10 +27,10 @@ static void noop_send_IPI_allbutself(int vector) { }
 static void noop_send_IPI_all(int vector) { }
 static void noop_send_IPI_self(int vector) { }
 static void noop_apic_icr_write(u32 low, u32 id) { }
-static int noop_wakeup_secondary_cpu(int apicid, unsigned long start_eip) { return -1; }
+static int noop_wakeup_secondary_cpu(u32 apicid, unsigned long start_eip) { return -1; }
 static u64 noop_apic_icr_read(void) { return 0; }
-static int noop_phys_pkg_id(int cpuid_apic, int index_msb) { return 0; }
-static unsigned int noop_get_apic_id(unsigned long x) { return 0; }
+static u32 noop_phys_pkg_id(u32 cpuid_apic, int index_msb) { return 0; }
+static u32 noop_get_apic_id(u32 apicid) { return 0; }
 static void noop_apic_eoi(void) { }
 
 static u32 noop_apic_read(u32 reg)
@@ -25,7 +25,7 @@ static const struct apic apic_numachip1;
 static const struct apic apic_numachip2;
 static void (*numachip_apic_icr_write)(int apicid, unsigned int val) __read_mostly;
 
-static unsigned int numachip1_get_apic_id(unsigned long x)
+static u32 numachip1_get_apic_id(u32 x)
 {
 	unsigned long value;
 	unsigned int id = (x >> 24) & 0xff;
@@ -38,12 +38,12 @@ static unsigned int numachip1_get_apic_id(unsigned long x)
 	return id;
 }
 
-static u32 numachip1_set_apic_id(unsigned int id)
+static u32 numachip1_set_apic_id(u32 id)
 {
 	return (id & 0xff) << 24;
 }
 
-static unsigned int numachip2_get_apic_id(unsigned long x)
+static u32 numachip2_get_apic_id(u32 x)
 {
 	u64 mcfg;
 
@@ -51,12 +51,12 @@ static unsigned int numachip2_get_apic_id(unsigned long x)
 	return ((mcfg >> (28 - 8)) & 0xfff00) | (x >> 24);
 }
 
-static u32 numachip2_set_apic_id(unsigned int id)
+static u32 numachip2_set_apic_id(u32 id)
 {
 	return id << 24;
 }
 
-static int numachip_phys_pkg_id(int initial_apic_id, int index_msb)
+static u32 numachip_phys_pkg_id(u32 initial_apic_id, int index_msb)
 {
 	return initial_apic_id >> index_msb;
 }
@@ -71,7 +71,7 @@ static void numachip2_apic_icr_write(int apicid, unsigned int val)
 	numachip2_write32_lcsr(NUMACHIP2_APIC_ICR, (apicid << 12) | val);
 }
 
-static int numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
+static int numachip_wakeup_secondary(u32 phys_apicid, unsigned long start_rip)
 {
 	numachip_apic_icr_write(phys_apicid, APIC_DM_INIT);
 	numachip_apic_icr_write(phys_apicid, APIC_DM_STARTUP |
@@ -161,7 +161,7 @@ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
 	u64 val;
 	u32 nodes = 1;
 
-	this_cpu_write(cpu_llc_id, node);
+	c->topo.llc_id = node;
 
 	/* Account for nodes per socket in multi-core-module processors */
 	if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
@@ -169,7 +169,7 @@ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
 		nodes = ((val >> 3) & 7) + 1;
 	}
 
-	c->phys_proc_id = node / nodes;
+	c->topo.pkg_id = node / nodes;
 }
 
 static int __init numachip_system_init(void)
@@ -13,12 +13,12 @@
 
 #include "local.h"
 
-static unsigned bigsmp_get_apic_id(unsigned long x)
+static u32 bigsmp_get_apic_id(u32 x)
 {
 	return (x >> 24) & 0xFF;
 }
 
-static bool bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
+static bool bigsmp_check_apicid_used(physid_mask_t *map, u32 apicid)
 {
 	return false;
 }
@@ -29,7 +29,7 @@ static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
 	physids_promote(0xFFL, retmap);
 }
 
-static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
+static u32 bigsmp_phys_pkg_id(u32 cpuid_apic, int index_msb)
 {
 	return cpuid_apic >> index_msb;
 }
@@ -281,7 +281,7 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
 }
 
 #ifdef CONFIG_SMP
-static int convert_apicid_to_cpu(int apic_id)
+static int convert_apicid_to_cpu(u32 apic_id)
 {
 	int i;
 
@@ -294,7 +294,8 @@ static int convert_apicid_to_cpu(int apic_id)
 
 int safe_smp_processor_id(void)
 {
-	int apicid, cpuid;
+	u32 apicid;
+	int cpuid;
 
 	if (!boot_cpu_has(X86_FEATURE_APIC))
 		return 0;
@@ -15,9 +15,9 @@
 
 /* X2APIC */
 void __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest);
-unsigned int x2apic_get_apic_id(unsigned long id);
-u32 x2apic_set_apic_id(unsigned int id);
-int x2apic_phys_pkg_id(int initial_apicid, int index_msb);
+u32 x2apic_get_apic_id(u32 id);
+u32 x2apic_set_apic_id(u32 id);
+u32 x2apic_phys_pkg_id(u32 initial_apicid, int index_msb);
 
 void x2apic_send_IPI_all(int vector);
 void x2apic_send_IPI_allbutself(int vector);
@@ -64,6 +64,7 @@ void default_send_IPI_all(int vector);
 void default_send_IPI_self(int vector);
 
 bool default_apic_id_registered(void);
+bool default_check_apicid_used(physid_mask_t *map, u32 apicid);
 
 #ifdef CONFIG_X86_32
 void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector);
@@ -18,11 +18,21 @@
 
 #include "local.h"
 
-static int default_phys_pkg_id(int cpuid_apic, int index_msb)
+static u32 default_phys_pkg_id(u32 cpuid_apic, int index_msb)
 {
 	return cpuid_apic >> index_msb;
 }
 
+static u32 default_get_apic_id(u32 x)
+{
+	unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
+
+	if (APIC_XAPIC(ver) || boot_cpu_has(X86_FEATURE_EXTD_APICID))
+		return (x >> 24) & 0xFF;
+	else
+		return (x >> 24) & 0x0F;
+}
+
 /* should be called last. */
 static int probe_default(void)
 {
@@ -124,17 +124,17 @@ static int x2apic_phys_probe(void)
 	return apic == &apic_x2apic_phys;
 }
 
-unsigned int x2apic_get_apic_id(unsigned long id)
+u32 x2apic_get_apic_id(u32 id)
 {
 	return id;
 }
 
-u32 x2apic_set_apic_id(unsigned int id)
+u32 x2apic_set_apic_id(u32 id)
 {
 	return id;
 }
 
-int x2apic_phys_pkg_id(int initial_apicid, int index_msb)
+u32 x2apic_phys_pkg_id(u32 initial_apicid, int index_msb)
 {
 	return initial_apicid >> index_msb;
 }
@@ -701,7 +701,7 @@ static __init void build_uv_gr_table(void)
 	}
 }
 
-static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
+static int uv_wakeup_secondary(u32 phys_apicid, unsigned long start_rip)
 {
 	unsigned long val;
 	int pnode;
@@ -779,7 +779,7 @@ static void uv_send_IPI_all(int vector)
 	uv_send_IPI_mask(cpu_online_mask, vector);
 }
 
-static u32 set_apic_id(unsigned int id)
+static u32 set_apic_id(u32 id)
 {
 	return id;
 }
@@ -789,7 +789,7 @@ static unsigned int uv_read_apic_id(void)
 	return x2apic_get_apic_id(apic_read(APIC_ID));
 }
 
-static int uv_phys_pkg_id(int initial_apicid, int index_msb)
+static u32 uv_phys_pkg_id(u32 initial_apicid, int index_msb)
 {
 	return uv_read_apic_id() >> index_msb;
 }
@@ -54,6 +54,8 @@ obj-$(CONFIG_X86_LOCAL_APIC)	+= perfctr-watchdog.o
 obj-$(CONFIG_HYPERVISOR_GUEST)	+= vmware.o hypervisor.o mshyperv.o
 obj-$(CONFIG_ACRN_GUEST)	+= acrn.o
 
+obj-$(CONFIG_DEBUG_FS)		+= debugfs.o
+
 quiet_cmd_mkcapflags = MKCAP   $@
       cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/mkcapflags.sh $@ $^
@@ -382,7 +382,7 @@ static int nearby_node(int apicid)
 #endif
 
 /*
- * Fix up cpu_core_id for pre-F17h systems to be in the
+ * Fix up topo::core_id for pre-F17h systems to be in the
 * [0 .. cores_per_node - 1] range. Not really needed but
 * kept so as not to break existing setups.
 */
@@ -394,7 +394,7 @@ static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
 		return;
 
 	cus_per_node = c->x86_max_cores / nodes_per_socket;
-	c->cpu_core_id %= cus_per_node;
+	c->topo.core_id %= cus_per_node;
 }
 
 /*
@@ -405,8 +405,6 @@ static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
 */
 static void amd_get_topology(struct cpuinfo_x86 *c)
 {
-	int cpu = smp_processor_id();
-
 	/* get information required for multi-node processors */
 	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 		int err;
@@ -414,13 +412,13 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
 
 		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
 
-		c->cpu_die_id  = ecx & 0xff;
+		c->topo.die_id  = ecx & 0xff;
 
 		if (c->x86 == 0x15)
-			c->cu_id = ebx & 0xff;
+			c->topo.cu_id = ebx & 0xff;
 
 		if (c->x86 >= 0x17) {
-			c->cpu_core_id = ebx & 0xff;
+			c->topo.core_id = ebx & 0xff;
 
 			if (smp_num_siblings > 1)
 				c->x86_max_cores /= smp_num_siblings;
@@ -434,15 +432,14 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
 		if (!err)
 			c->x86_coreid_bits = get_count_order(c->x86_max_cores);
 
-		cacheinfo_amd_init_llc_id(c, cpu);
+		cacheinfo_amd_init_llc_id(c);
 
 	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
 		u64 value;
 
 		rdmsrl(MSR_FAM10H_NODE_ID, value);
-		c->cpu_die_id = value & 7;
-
-		per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
+		c->topo.die_id = value & 7;
+		c->topo.llc_id = c->topo.die_id;
 	} else
 		return;
 
@@ -459,15 +456,14 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
 static void amd_detect_cmp(struct cpuinfo_x86 *c)
 {
 	unsigned bits;
-	int cpu = smp_processor_id();
 
 	bits = c->x86_coreid_bits;
 	/* Low order bits define the core id (index of core in socket) */
-	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
+	c->topo.core_id = c->topo.initial_apicid & ((1 << bits)-1);
 	/* Convert the initial APIC ID into the socket ID */
-	c->phys_proc_id = c->initial_apicid >> bits;
+	c->topo.pkg_id = c->topo.initial_apicid >> bits;
 	/* use socket ID also for last level cache */
-	per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
+	c->topo.llc_id = c->topo.die_id = c->topo.pkg_id;
 }
 
 u32 amd_get_nodes_per_socket(void)
@@ -481,11 +477,11 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
 #ifdef CONFIG_NUMA
 	int cpu = smp_processor_id();
 	int node;
-	unsigned apicid = c->apicid;
+	unsigned apicid = c->topo.apicid;
 
 	node = numa_cpu_node(cpu);
 	if (node == NUMA_NO_NODE)
-		node = get_llc_id(cpu);
+		node = per_cpu_llc_id(cpu);
 
 	/*
 	 * On multi-fabric platform (e.g. Numascale NumaChip) a
@@ -515,7 +511,7 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
 		 * through CPU mapping may alter the outcome, directly
 		 * access __apicid_to_node[].
 		 */
-		int ht_nodeid = c->initial_apicid;
+		int ht_nodeid = c->topo.initial_apicid;
 
 		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
 			node = __apicid_to_node[ht_nodeid];
@@ -1061,7 +1057,7 @@ static void init_amd(struct cpuinfo_x86 *c)
 	set_cpu_cap(c, X86_FEATURE_FSRS);
 
 	/* get apicid instead of initial apic id from cpuid */
-	c->apicid = read_apic_id();
+	c->topo.apicid = read_apic_id();
 
 	/* K6s reports MCEs but don't actually have all the MSRs */
 	if (c->x86 < 6)
@@ -661,7 +661,7 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
 	return i;
 }
 
-void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu)
+void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c)
 {
 	/*
 	 * We may have multiple LLCs if L3 caches exist, so check if we
@@ -672,13 +672,13 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu)
 
 	if (c->x86 < 0x17) {
 		/* LLC is at the node level. */
-		per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
+		c->topo.llc_id = c->topo.die_id;
 	} else if (c->x86 == 0x17 && c->x86_model <= 0x1F) {
 		/*
 		 * LLC is at the core complex level.
 		 * Core complex ID is ApicId[3] for these processors.
 		 */
-		per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
+		c->topo.llc_id = c->topo.apicid >> 3;
 	} else {
 		/*
 		 * LLC ID is calculated from the number of threads sharing the
@@ -694,12 +694,12 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu)
 		if (num_sharing_cache) {
 			int bits = get_count_order(num_sharing_cache);
 
-			per_cpu(cpu_llc_id, cpu) = c->apicid >> bits;
+			c->topo.llc_id = c->topo.apicid >> bits;
 		}
 	}
 }
 
-void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu)
+void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c)
 {
 	/*
	 * We may have multiple LLCs if L3 caches exist, so check if we
@@ -712,7 +712,7 @@ void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu)
	 * LLC is at the core complex level.
	 * Core complex ID is ApicId[3] for these processors.
	 */
-	per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
+	c->topo.llc_id = c->topo.apicid >> 3;
 }
 
 void init_amd_cacheinfo(struct cpuinfo_x86 *c)
@@ -740,9 +740,6 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
 	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
 	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
-#ifdef CONFIG_SMP
-	unsigned int cpu = c->cpu_index;
-#endif
 
 	if (c->cpuid_level > 3) {
 		static int is_initialized;
@@ -776,13 +773,13 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
 				new_l2 = this_leaf.size/1024;
 				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
 				index_msb = get_count_order(num_threads_sharing);
-				l2_id = c->apicid & ~((1 << index_msb) - 1);
+				l2_id = c->topo.apicid & ~((1 << index_msb) - 1);
 				break;
 			case 3:
 				new_l3 = this_leaf.size/1024;
 				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
 				index_msb = get_count_order(num_threads_sharing);
-				l3_id = c->apicid & ~((1 << index_msb) - 1);
+				l3_id = c->topo.apicid & ~((1 << index_msb) - 1);
 				break;
 			default:
 				break;
@@ -856,30 +853,24 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
 
 	if (new_l2) {
 		l2 = new_l2;
-#ifdef CONFIG_SMP
-		per_cpu(cpu_llc_id, cpu) = l2_id;
-		per_cpu(cpu_l2c_id, cpu) = l2_id;
-#endif
+		c->topo.llc_id = l2_id;
+		c->topo.l2c_id = l2_id;
 	}
 
 	if (new_l3) {
 		l3 = new_l3;
-#ifdef CONFIG_SMP
-		per_cpu(cpu_llc_id, cpu) = l3_id;
-#endif
+		c->topo.llc_id = l3_id;
 	}
 
-#ifdef CONFIG_SMP
 	/*
-	 * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
+	 * If llc_id is not yet set, this means cpuid_level < 4 which in
	 * turns means that the only possibility is SMT (as indicated in
	 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
	 * that SMT shares all caches, we can unconditionally set cpu_llc_id to
-	 * c->phys_proc_id.
+	 * c->topo.pkg_id.
	 */
-	if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
-		per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
-#endif
+	if (c->topo.llc_id == BAD_APICID)
+		c->topo.llc_id = c->topo.pkg_id;
 
 	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
@@ -915,7 +906,7 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
 	unsigned int apicid, nshared, first, last;
 
 	nshared = base->eax.split.num_threads_sharing + 1;
-	apicid = cpu_data(cpu).apicid;
+	apicid = cpu_data(cpu).topo.apicid;
 	first = apicid - (apicid % nshared);
 	last = first + nshared - 1;
 
@@ -924,14 +915,14 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
 		if (!this_cpu_ci->info_list)
 			continue;
 
-		apicid = cpu_data(i).apicid;
+		apicid = cpu_data(i).topo.apicid;
 		if ((apicid < first) || (apicid > last))
 			continue;
 
 		this_leaf = this_cpu_ci->info_list + index;
 
 		for_each_online_cpu(sibling) {
-			apicid = cpu_data(sibling).apicid;
+			apicid = cpu_data(sibling).topo.apicid;
 			if ((apicid < first) || (apicid > last))
 				continue;
 			cpumask_set_cpu(sibling,
@@ -969,7 +960,7 @@ static void __cache_cpumap_setup(unsigned int cpu, int index,
 	index_msb = get_count_order(num_threads_sharing);
 
 	for_each_online_cpu(i)
-		if (cpu_data(i).apicid >> index_msb == c->apicid >> index_msb) {
+		if (cpu_data(i).topo.apicid >> index_msb == c->topo.apicid >> index_msb) {
 			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
 
 			if (i == cpu || !sib_cpu_ci->info_list)
@@ -1024,7 +1015,7 @@ static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs)
 
 	num_threads_sharing = 1 + id4_regs->eax.split.num_threads_sharing;
 	index_msb = get_count_order(num_threads_sharing);
-	id4_regs->id = c->apicid >> index_msb;
+	id4_regs->id = c->topo.apicid >> index_msb;
 }
 
 int populate_cache_leaves(unsigned int cpu)
@@ -75,18 +75,6 @@ u32 elf_hwcap2 __read_mostly;
 int smp_num_siblings = 1;
 EXPORT_SYMBOL(smp_num_siblings);
 
-/* Last level cache ID of each logical CPU */
-DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
-
-u16 get_llc_id(unsigned int cpu)
-{
-	return per_cpu(cpu_llc_id, cpu);
-}
-EXPORT_SYMBOL_GPL(get_llc_id);
-
-/* L2 cache ID of each logical CPU */
-DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_l2c_id) = BAD_APICID;
-
 static struct ppin_info {
 	int feature;
 	int msr_ppin_ctl;
@@ -915,7 +903,7 @@ void detect_ht(struct cpuinfo_x86 *c)
 		return;
 
 	index_msb = get_count_order(smp_num_siblings);
-	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
+	c->topo.pkg_id = apic->phys_pkg_id(c->topo.initial_apicid, index_msb);
 
 	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
@@ -923,8 +911,8 @@ void detect_ht(struct cpuinfo_x86 *c)
 
 	core_bits = get_count_order(c->x86_max_cores);
 
-	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
-		((1 << core_bits) - 1);
+	c->topo.core_id = apic->phys_pkg_id(c->topo.initial_apicid, index_msb) &
+		((1 << core_bits) - 1);
 #endif
 }
 
@@ -1768,15 +1756,15 @@ static void generic_identify(struct cpuinfo_x86 *c)
 	get_cpu_address_sizes(c);
 
 	if (c->cpuid_level >= 0x00000001) {
-		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
+		c->topo.initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
 #ifdef CONFIG_X86_32
 # ifdef CONFIG_SMP
-		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+		c->topo.apicid = apic->phys_pkg_id(c->topo.initial_apicid, 0);
 # else
-		c->apicid = c->initial_apicid;
+		c->topo.apicid = c->topo.initial_apicid;
 # endif
 #endif
-		c->phys_proc_id = c->initial_apicid;
+		c->topo.pkg_id = c->topo.initial_apicid;
 	}
 
 	get_model_name(c); /* Default name */
@@ -1806,18 +1794,19 @@ static void generic_identify(struct cpuinfo_x86 *c)
 static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
-	unsigned int apicid, cpu = smp_processor_id();
+	unsigned int cpu = smp_processor_id();
+	u32 apicid;
 
 	apicid = apic->cpu_present_to_apicid(cpu);
 
-	if (apicid != c->apicid) {
+	if (apicid != c->topo.apicid) {
 		pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
-		       cpu, apicid, c->initial_apicid);
+		       cpu, apicid, c->topo.initial_apicid);
 	}
-	BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
-	BUG_ON(topology_update_die_map(c->cpu_die_id, cpu));
+	BUG_ON(topology_update_package_map(c->topo.pkg_id, cpu));
+	BUG_ON(topology_update_die_map(c->topo.die_id, cpu));
 #else
-	c->logical_proc_id = 0;
+	c->topo.logical_pkg_id = 0;
 #endif
 }
 
@@ -1836,7 +1825,9 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 	c->x86_model_id[0] = '\0';  /* Unset */
 	c->x86_max_cores = 1;
 	c->x86_coreid_bits = 0;
-	c->cu_id = 0xff;
+	c->topo.cu_id = 0xff;
+	c->topo.llc_id = BAD_APICID;
+	c->topo.l2c_id = BAD_APICID;
 #ifdef CONFIG_X86_64
 	c->x86_clflush_size = 64;
 	c->x86_phys_bits = 36;
@@ -1862,7 +1853,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 	apply_forced_caps(c);
 
 #ifdef CONFIG_X86_64
-	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+	c->topo.apicid = apic->phys_pkg_id(c->topo.initial_apicid, 0);
 #endif
 
 	/*
@@ -78,6 +78,9 @@ extern int detect_ht_early(struct cpuinfo_x86 *c);
 extern void detect_ht(struct cpuinfo_x86 *c);
 extern void check_null_seg_clears_base(struct cpuinfo_x86 *c);
 
+void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c);
+void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c);
+
 unsigned int aperfmperf_get_khz(int cpu);
 void cpu_select_mitigations(void);
arch/x86/kernel/cpu/debugfs.c (new file, 58 lines)
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/debugfs.h>
+
+#include <asm/apic.h>
+#include <asm/processor.h>
+
+static int cpu_debug_show(struct seq_file *m, void *p)
+{
+	unsigned long cpu = (unsigned long)m->private;
+	struct cpuinfo_x86 *c = per_cpu_ptr(&cpu_info, cpu);
+
+	seq_printf(m, "online: %d\n", cpu_online(cpu));
+	if (!c->initialized)
+		return 0;
+
+	seq_printf(m, "initial_apicid: %x\n", c->topo.initial_apicid);
+	seq_printf(m, "apicid: %x\n", c->topo.apicid);
+	seq_printf(m, "pkg_id: %u\n", c->topo.pkg_id);
+	seq_printf(m, "die_id: %u\n", c->topo.die_id);
+	seq_printf(m, "cu_id: %u\n", c->topo.cu_id);
+	seq_printf(m, "core_id: %u\n", c->topo.core_id);
+	seq_printf(m, "logical_pkg_id: %u\n", c->topo.logical_pkg_id);
+	seq_printf(m, "logical_die_id: %u\n", c->topo.logical_die_id);
+	seq_printf(m, "llc_id: %u\n", c->topo.llc_id);
+	seq_printf(m, "l2c_id: %u\n", c->topo.l2c_id);
+	seq_printf(m, "max_cores: %u\n", c->x86_max_cores);
+	seq_printf(m, "max_die_per_pkg: %u\n", __max_die_per_package);
+	seq_printf(m, "smp_num_siblings: %u\n", smp_num_siblings);
+	return 0;
+}
+
+static int cpu_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, cpu_debug_show, inode->i_private);
+}
+
+static const struct file_operations dfs_cpu_ops = {
+	.open		= cpu_debug_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static __init int cpu_init_debugfs(void)
+{
+	struct dentry *dir, *base = debugfs_create_dir("topo", arch_debugfs_dir);
+	unsigned long id;
+	char name[24];
+
+	dir = debugfs_create_dir("cpus", base);
+	for_each_possible_cpu(id) {
+		sprintf(name, "%lu", id);
+		debugfs_create_file(name, 0444, dir, (void *)id, &dfs_cpu_ops);
+	}
+	return 0;
+}
+late_initcall(cpu_init_debugfs);
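Usage note (an assumption based on the code above, with debugfs mounted at the conventional /sys/kernel/debug and arch_debugfs_dir being the x86 directory): cpu_init_debugfs() creates one read-only file per possible CPU, so the per-CPU topology dump produced by cpu_debug_show() can be read from /sys/kernel/debug/x86/topo/cpus/<N>.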
@@ -63,8 +63,6 @@ static void hygon_get_topology_early(struct cpuinfo_x86 *c)
 */
 static void hygon_get_topology(struct cpuinfo_x86 *c)
 {
-	int cpu = smp_processor_id();
-
 	/* get information required for multi-node processors */
 	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 		int err;
@@ -72,9 +70,9 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
 
 		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
 
-		c->cpu_die_id  = ecx & 0xff;
+		c->topo.die_id  = ecx & 0xff;
 
-		c->cpu_core_id = ebx & 0xff;
+		c->topo.core_id = ebx & 0xff;
 
 		if (smp_num_siblings > 1)
 			c->x86_max_cores /= smp_num_siblings;
@@ -87,17 +85,20 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
 		if (!err)
 			c->x86_coreid_bits = get_count_order(c->x86_max_cores);
 
-		/* Socket ID is ApicId[6] for these processors. */
-		c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
+		/*
+		 * Socket ID is ApicId[6] for the processors with model <= 0x3
+		 * when running on host.
+		 */
+		if (!boot_cpu_has(X86_FEATURE_HYPERVISOR) && c->x86_model <= 0x3)
+			c->topo.pkg_id = c->topo.apicid >> APICID_SOCKET_ID_BIT;
 
-		cacheinfo_hygon_init_llc_id(c, cpu);
+		cacheinfo_hygon_init_llc_id(c);
 	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
 		u64 value;
 
 		rdmsrl(MSR_FAM10H_NODE_ID, value);
-		c->cpu_die_id = value & 7;
-
-		per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
+		c->topo.die_id = value & 7;
+		c->topo.llc_id = c->topo.die_id;
 	} else
 		return;
 
@@ -112,15 +113,14 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
 static void hygon_detect_cmp(struct cpuinfo_x86 *c)
 {
 	unsigned int bits;
-	int cpu = smp_processor_id();
 
 	bits = c->x86_coreid_bits;
 	/* Low order bits define the core id (index of core in socket) */
-	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
+	c->topo.core_id = c->topo.initial_apicid & ((1 << bits)-1);
 	/* Convert the initial APIC ID into the socket ID */
-	c->phys_proc_id = c->initial_apicid >> bits;
-	/* use socket ID also for last level cache */
-	per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
+	c->topo.pkg_id = c->topo.initial_apicid >> bits;
+	/* Use package ID also for last level cache */
+	c->topo.llc_id = c->topo.die_id = c->topo.pkg_id;
 }
 
 static void srat_detect_node(struct cpuinfo_x86 *c)
@@ -128,11 +128,11 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
 #ifdef CONFIG_NUMA
 	int cpu = smp_processor_id();
 	int node;
-	unsigned int apicid = c->apicid;
+	unsigned int apicid = c->topo.apicid;
 
 	node = numa_cpu_node(cpu);
 	if (node == NUMA_NO_NODE)
-		node = per_cpu(cpu_llc_id, cpu);
+		node = c->topo.llc_id;
 
 	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
@@ -161,7 +161,7 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
	 * through CPU mapping may alter the outcome, directly
	 * access __apicid_to_node[].
	 */
-	int ht_nodeid = c->initial_apicid;
+	int ht_nodeid = c->topo.initial_apicid;
 
 	if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
 		node = __apicid_to_node[ht_nodeid];
@@ -303,7 +303,7 @@ static void init_hygon(struct cpuinfo_x86 *c)
 	set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 
 	/* get apicid instead of initial apic id from cpuid */
-	c->apicid = read_apic_id();
+	c->topo.apicid = read_apic_id();
 
 	/*
	 * XXX someone from Hygon needs to confirm this DTRT
@@ -314,19 +314,6 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 		setup_clear_cpu_cap(X86_FEATURE_PGE);
 	}
 
-	if (c->cpuid_level >= 0x00000001) {
-		u32 eax, ebx, ecx, edx;
-
-		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
-		/*
-		 * If HTT (EDX[28]) is set EBX[16:23] contain the number of
-		 * apicids which are reserved per package. Store the resulting
-		 * shift value for the package management code.
-		 */
-		if (edx & (1U << 28))
-			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
-	}
-
 	check_memory_type_self_snoop_errata(c);
 
 	/*
@@ -103,9 +103,9 @@ int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id)
 	m.socketid = -1;
 
 	for_each_possible_cpu(cpu) {
-		if (cpu_data(cpu).initial_apicid == lapic_id) {
+		if (cpu_data(cpu).topo.initial_apicid == lapic_id) {
 			m.extcpu = cpu;
-			m.socketid = cpu_data(m.extcpu).phys_proc_id;
+			m.socketid = cpu_data(m.extcpu).topo.pkg_id;
 			break;
 		}
 	}
@@ -123,8 +123,8 @@ void mce_setup(struct mce *m)
 	m->time = __ktime_get_real_seconds();
 	m->cpuvendor = boot_cpu_data.x86_vendor;
 	m->cpuid = cpuid_eax(1);
-	m->socketid = cpu_data(m->extcpu).phys_proc_id;
-	m->apicid = cpu_data(m->extcpu).initial_apicid;
+	m->socketid = cpu_data(m->extcpu).topo.pkg_id;
+	m->apicid = cpu_data(m->extcpu).topo.initial_apicid;
 	m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP);
 	m->ppin = cpu_data(m->extcpu).ppin;
 	m->microcode = boot_cpu_data.microcode;
@@ -20,13 +20,13 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
 			      unsigned int cpu)
 {
 #ifdef CONFIG_SMP
-	seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
+	seq_printf(m, "physical id\t: %d\n", c->topo.pkg_id);
 	seq_printf(m, "siblings\t: %d\n",
 		   cpumask_weight(topology_core_cpumask(cpu)));
-	seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
+	seq_printf(m, "core id\t\t: %d\n", c->topo.core_id);
 	seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
-	seq_printf(m, "apicid\t\t: %d\n", c->apicid);
-	seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid);
+	seq_printf(m, "apicid\t\t: %d\n", c->topo.apicid);
+	seq_printf(m, "initial apicid\t: %d\n", c->topo.initial_apicid);
 #endif
 }
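These seq_printf() calls feed /proc/cpuinfo; only the data source moves into the topo struct, the user-visible format stays identical. A quick userspace sketch that reads the affected fields back (illustration only):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/cpuinfo", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		/* "apicid" also matches "initial apicid" */
		if (strstr(line, "apicid") || strstr(line, "core id"))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}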
@@ -78,7 +78,7 @@ int detect_extended_topology_early(struct cpuinfo_x86 *c)
 	/*
 	 * initial apic id, which also represents 32-bit extended x2apic id.
 	 */
-	c->initial_apicid = edx;
+	c->topo.initial_apicid = edx;
 	smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx));
 #endif
 	return 0;
@@ -108,7 +108,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
 	 * Populate HT related information from sub-leaf level 0.
 	 */
 	cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
-	c->initial_apicid = edx;
+	c->topo.initial_apicid = edx;
 	core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
 	smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx));
 	core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
@@ -146,20 +146,19 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
 		die_select_mask = (~(-1 << die_plus_mask_width)) >>
 					core_plus_mask_width;
 
-	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid,
+	c->topo.core_id = apic->phys_pkg_id(c->topo.initial_apicid,
 				ht_mask_width) & core_select_mask;
 
 	if (die_level_present) {
-		c->cpu_die_id = apic->phys_pkg_id(c->initial_apicid,
+		c->topo.die_id = apic->phys_pkg_id(c->topo.initial_apicid,
 					core_plus_mask_width) & die_select_mask;
 	}
 
-	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid,
-				pkg_mask_width);
+	c->topo.pkg_id = apic->phys_pkg_id(c->topo.initial_apicid, pkg_mask_width);
 	/*
 	 * Reinit the apicid, now that we have extended initial_apicid.
 	 */
-	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+	c->topo.apicid = apic->phys_pkg_id(c->topo.initial_apicid, 0);
 
 	c->x86_max_cores = (core_level_siblings / smp_num_siblings);
 	__max_die_per_package = (die_level_siblings / core_level_siblings);
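detect_extended_topology() above works purely with shift widths reported by the extended topology CPUID leaf: the ID for a given level is the extended APIC ID shifted right by the accumulated width of the levels below it and masked to that level's width. A standalone sketch with assumed widths (not values read from real hardware):

#include <stdio.h>

int main(void)
{
	unsigned int apicid = 0x35;	/* assumed extended initial APIC ID */
	unsigned int ht_width = 1;	/* assumed SMT level shift width */
	unsigned int pkg_width = 5;	/* assumed package level shift width */

	unsigned int smt_id  = apicid & ((1u << ht_width) - 1);
	unsigned int core_id = (apicid >> ht_width) &
			       ((1u << (pkg_width - ht_width)) - 1);
	unsigned int pkg_id  = apicid >> pkg_width;

	printf("smt %u core %u pkg %u\n", smt_id, core_id, pkg_id);
	return 0;
}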
@@ -65,20 +65,6 @@ static void early_init_zhaoxin(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
 	}
-
-	if (c->cpuid_level >= 0x00000001) {
-		u32 eax, ebx, ecx, edx;
-
-		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
-		/*
-		 * If HTT (EDX[28]) is set EBX[16:23] contain the number of
-		 * apicids which are reserved per package. Store the resulting
-		 * shift value for the package management code.
-		 */
-		if (edx & (1U << 28))
-			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
-	}
-
 }
 
 static void init_zhaoxin(struct cpuinfo_x86 *c)
@@ -500,13 +500,13 @@ static bool pv_sched_yield_supported(void)
 static void __send_ipi_mask(const struct cpumask *mask, int vector)
 {
 	unsigned long flags;
-	int cpu, apic_id, icr;
-	int min = 0, max = 0;
+	int cpu, min = 0, max = 0;
 #ifdef CONFIG_X86_64
 	__uint128_t ipi_bitmap = 0;
 #else
 	u64 ipi_bitmap = 0;
 #endif
+	u32 apic_id, icr;
 	long ret;
 
 	if (cpumask_empty(mask))
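__send_ipi_mask() above encodes destination APIC IDs as bit offsets from a base ID, so one hypercall can target a whole range of CPUs; the hunk merely gives the APIC ID its native 32-bit type. A rough standalone sketch of the bitmap encoding (illustrative IDs, not the kernel implementation):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ipi_bitmap = 0;
	unsigned int min = 8;			/* assumed base APIC ID */
	unsigned int targets[] = { 8, 9, 12 };	/* assumed destinations */
	unsigned int i;

	for (i = 0; i < 3; i++)
		ipi_bitmap |= 1ull << (targets[i] - min);

	/* bits 0, 1 and 4 set -> 0x13 */
	printf("bitmap %#llx base %u\n", (unsigned long long)ipi_bitmap, min);
	return 0;
}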
@@ -1028,8 +1028,8 @@ arch_initcall(activate_jump_labels);
 /* Kick a cpu by its apicid. Used to wake up a halted vcpu */
 static void kvm_kick_cpu(int cpu)
 {
-	int apicid;
 	unsigned long flags = 0;
+	u32 apicid;
 
 	apicid = per_cpu(x86_cpu_to_apicid, cpu);
 	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
@@ -966,7 +966,7 @@ static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
 	free_page((unsigned long)vmsa);
 }
 
-static int wakeup_cpu_via_vmgexit(int apic_id, unsigned long start_ip)
+static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
 {
 	struct sev_es_save_area *cur_vmsa, *vmsa;
 	struct ghcb_state state;
@@ -125,7 +125,20 @@ struct mwait_cpu_dead {
  */
 static DEFINE_PER_CPU_ALIGNED(struct mwait_cpu_dead, mwait_cpu_dead);
 
-/* Logical package management. We might want to allocate that dynamically */
+/* Logical package management. */
+struct logical_maps {
+	u32	phys_pkg_id;
+	u32	phys_die_id;
+	u32	logical_pkg_id;
+	u32	logical_die_id;
+};
+
+/* Temporary workaround until the full topology mechanics is in place */
+static DEFINE_PER_CPU_READ_MOSTLY(struct logical_maps, logical_maps) = {
+	.phys_pkg_id = U32_MAX,
+	.phys_die_id = U32_MAX,
+};
+
 unsigned int __max_logical_packages __read_mostly;
 EXPORT_SYMBOL(__max_logical_packages);
 static unsigned int logical_packages __read_mostly;
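The new per CPU logical_maps table persists the physical to logical ID association independently of cpuinfo, which gets reinitialized on every offline/online cycle. A toy model of the first-come numbering scheme it backs (plain arrays instead of per CPU storage, not kernel code):

#include <stdio.h>

#define MAX_CPUS 8
#define INVALID  0xffffffffu

static unsigned int phys_pkg[MAX_CPUS] = { [0 ... MAX_CPUS - 1] = INVALID };
static unsigned int logical_pkg[MAX_CPUS];
static unsigned int next_logical;

/* Reuse the logical ID of any CPU already in this package, else hand out a new one */
static void update_map(unsigned int cpu, unsigned int pkg)
{
	unsigned int i;

	for (i = 0; i < MAX_CPUS; i++) {
		if (phys_pkg[i] == pkg) {
			logical_pkg[cpu] = logical_pkg[i];
			goto store;
		}
	}
	logical_pkg[cpu] = next_logical++;
store:
	phys_pkg[cpu] = pkg;
}

int main(void)
{
	update_map(0, 0x10);	/* logical 0 */
	update_map(1, 0x10);	/* same package -> logical 0 */
	update_map(2, 0x30);	/* new package  -> logical 1 */
	printf("%u %u %u\n", logical_pkg[0], logical_pkg[1], logical_pkg[2]);
	return 0;
}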
@@ -338,10 +351,8 @@ int topology_phys_to_logical_pkg(unsigned int phys_pkg)
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-		if (c->initialized && c->phys_proc_id == phys_pkg)
-			return c->logical_proc_id;
+		if (per_cpu(logical_maps.phys_pkg_id, cpu) == phys_pkg)
+			return per_cpu(logical_maps.logical_pkg_id, cpu);
 	}
 	return -1;
 }
@@ -356,14 +367,12 @@ EXPORT_SYMBOL(topology_phys_to_logical_pkg);
  */
 static int topology_phys_to_logical_die(unsigned int die_id, unsigned int cur_cpu)
 {
-	int cpu, proc_id = cpu_data(cur_cpu).phys_proc_id;
+	int cpu, proc_id = cpu_data(cur_cpu).topo.pkg_id;
 
 	for_each_possible_cpu(cpu) {
-		struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-		if (c->initialized && c->cpu_die_id == die_id &&
-		    c->phys_proc_id == proc_id)
-			return c->logical_die_id;
+		if (per_cpu(logical_maps.phys_pkg_id, cpu) == proc_id &&
+		    per_cpu(logical_maps.phys_die_id, cpu) == die_id)
+			return per_cpu(logical_maps.logical_die_id, cpu);
 	}
 	return -1;
 }
@@ -388,7 +397,9 @@ int topology_update_package_map(unsigned int pkg, unsigned int cpu)
 			cpu, pkg, new);
 	}
 found:
-	cpu_data(cpu).logical_proc_id = new;
+	per_cpu(logical_maps.phys_pkg_id, cpu) = pkg;
+	per_cpu(logical_maps.logical_pkg_id, cpu) = new;
+	cpu_data(cpu).topo.logical_pkg_id = new;
 	return 0;
 }
 /**
@@ -411,7 +422,9 @@ int topology_update_die_map(unsigned int die, unsigned int cpu)
 			cpu, die, new);
 	}
 found:
-	cpu_data(cpu).logical_die_id = new;
+	per_cpu(logical_maps.phys_die_id, cpu) = die;
+	per_cpu(logical_maps.logical_die_id, cpu) = new;
+	cpu_data(cpu).topo.logical_die_id = new;
 	return 0;
 }
@@ -422,8 +435,8 @@ static void __init smp_store_boot_cpu_info(void)
 
 	*c = boot_cpu_data;
 	c->cpu_index = id;
-	topology_update_package_map(c->phys_proc_id, id);
-	topology_update_die_map(c->cpu_die_id, id);
+	topology_update_package_map(c->topo.pkg_id, id);
+	topology_update_die_map(c->topo.die_id, id);
 	c->initialized = true;
 }
@@ -477,21 +490,21 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 
-		if (c->phys_proc_id == o->phys_proc_id &&
-		    c->cpu_die_id == o->cpu_die_id &&
-		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
-			if (c->cpu_core_id == o->cpu_core_id)
+		if (c->topo.pkg_id == o->topo.pkg_id &&
+		    c->topo.die_id == o->topo.die_id &&
+		    per_cpu_llc_id(cpu1) == per_cpu_llc_id(cpu2)) {
+			if (c->topo.core_id == o->topo.core_id)
 				return topology_sane(c, o, "smt");
 
-			if ((c->cu_id != 0xff) &&
-			    (o->cu_id != 0xff) &&
-			    (c->cu_id == o->cu_id))
+			if ((c->topo.cu_id != 0xff) &&
+			    (o->topo.cu_id != 0xff) &&
+			    (c->topo.cu_id == o->topo.cu_id))
 				return topology_sane(c, o, "smt");
 		}
 
-	} else if (c->phys_proc_id == o->phys_proc_id &&
-		   c->cpu_die_id == o->cpu_die_id &&
-		   c->cpu_core_id == o->cpu_core_id) {
+	} else if (c->topo.pkg_id == o->topo.pkg_id &&
+		   c->topo.die_id == o->topo.die_id &&
+		   c->topo.core_id == o->topo.core_id) {
 		return topology_sane(c, o, "smt");
 	}
@@ -500,8 +513,8 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 
 static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
-	if (c->phys_proc_id == o->phys_proc_id &&
-	    c->cpu_die_id == o->cpu_die_id)
+	if (c->topo.pkg_id == o->topo.pkg_id &&
+	    c->topo.die_id == o->topo.die_id)
 		return true;
 	return false;
 }
@@ -511,11 +524,11 @@ static bool match_l2c(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 
 	/* If the arch didn't set up l2c_id, fall back to SMT */
-	if (per_cpu(cpu_l2c_id, cpu1) == BAD_APICID)
+	if (per_cpu_l2c_id(cpu1) == BAD_APICID)
 		return match_smt(c, o);
 
 	/* Do not match if L2 cache id does not match: */
-	if (per_cpu(cpu_l2c_id, cpu1) != per_cpu(cpu_l2c_id, cpu2))
+	if (per_cpu_l2c_id(cpu1) != per_cpu_l2c_id(cpu2))
 		return false;
 
 	return topology_sane(c, o, "l2c");
@@ -528,7 +541,7 @@ static bool match_l2c(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
  */
 static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
-	if (c->phys_proc_id == o->phys_proc_id)
+	if (c->topo.pkg_id == o->topo.pkg_id)
 		return true;
 	return false;
 }
@@ -561,11 +574,11 @@ static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 	bool intel_snc = id && id->driver_data;
 
 	/* Do not match if we do not have a valid APICID for cpu: */
-	if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
+	if (per_cpu_llc_id(cpu1) == BAD_APICID)
 		return false;
 
 	/* Do not match if LLC id does not match: */
-	if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2))
+	if (per_cpu_llc_id(cpu1) != per_cpu_llc_id(cpu2))
 		return false;
 
 	/*
@@ -810,7 +823,7 @@ static void __init smp_quirk_init_udelay(void)
 /*
  * Wake up AP by INIT, INIT, STARTUP sequence.
  */
-static void send_init_sequence(int phys_apicid)
+static void send_init_sequence(u32 phys_apicid)
 {
 	int maxlvt = lapic_get_maxlvt();
@@ -836,7 +849,7 @@ static void send_init_sequence(int phys_apicid)
 /*
  * Wake up AP by INIT, INIT, STARTUP sequence.
  */
-static int wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
+static int wakeup_secondary_cpu_via_init(u32 phys_apicid, unsigned long start_eip)
 {
 	unsigned long send_status = 0, accept_status = 0;
 	int num_starts, j, maxlvt;
@@ -983,7 +996,7 @@ int common_cpu_up(unsigned int cpu, struct task_struct *idle)
  * Returns zero if startup was successfully sent, else error code from
  * ->wakeup_secondary_cpu.
  */
-static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
+static int do_boot_cpu(u32 apicid, int cpu, struct task_struct *idle)
 {
 	unsigned long start_ip = real_mode_header->trampoline_start;
 	int ret;
@@ -1051,7 +1064,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 
 int native_kick_ap(unsigned int cpu, struct task_struct *tidle)
 {
-	int apicid = apic->cpu_present_to_apicid(cpu);
+	u32 apicid = apic->cpu_present_to_apicid(cpu);
 	int err;
 
 	lockdep_assert_irqs_enabled();
@@ -1406,7 +1419,7 @@ static void remove_siblinginfo(int cpu)
 	cpumask_clear(topology_sibling_cpumask(cpu));
 	cpumask_clear(topology_core_cpumask(cpu));
 	cpumask_clear(topology_die_cpumask(cpu));
-	c->cpu_core_id = 0;
+	c->topo.core_id = 0;
 	c->booted_cores = 0;
 	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
 	recompute_smt_state();
@@ -127,7 +127,7 @@ static void __init vsmp_cap_cpus(void)
 #endif
 }
 
-static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
+static u32 apicid_phys_pkg_id(u32 initial_apic_id, int index_msb)
 {
 	return read_apic_id() >> index_msb;
 }
@@ -58,7 +58,7 @@ s16 __apicid_to_node[MAX_LOCAL_APIC] = {
 
 int numa_cpu_node(int cpu)
 {
-	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
+	u32 apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
 
 	if (apicid != BAD_APICID)
 		return __apicid_to_node[apicid];
@@ -783,7 +783,7 @@ void __init init_gi_nodes(void)
 void __init init_cpu_to_node(void)
 {
 	int cpu;
-	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
+	u32 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
 
 	BUG_ON(cpu_to_apicid == NULL);
@@ -33,13 +33,13 @@ static unsigned int xen_io_apic_read(unsigned apic, unsigned reg)
 	return 0xfd;
 }
 
-static u32 xen_set_apic_id(unsigned int x)
+static u32 xen_set_apic_id(u32 x)
 {
 	WARN_ON(1);
 	return x;
 }
 
-static unsigned int xen_get_apic_id(unsigned long x)
+static u32 xen_get_apic_id(u32 x)
 {
 	return ((x)>>24) & 0xFFu;
 }
@@ -110,15 +110,15 @@ static int xen_madt_oem_check(char *oem_id, char *oem_table_id)
 	return xen_pv_domain();
 }
 
-static int xen_phys_pkg_id(int initial_apic_id, int index_msb)
+static u32 xen_phys_pkg_id(u32 initial_apic_id, int index_msb)
 {
 	return initial_apic_id >> index_msb;
 }
 
-static int xen_cpu_present_to_apicid(int cpu)
+static u32 xen_cpu_present_to_apicid(int cpu)
 {
 	if (cpu_present(cpu))
-		return cpu_data(cpu).apicid;
+		return cpu_data(cpu).topo.apicid;
 	else
 		return BAD_APICID;
 }
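xen_get_apic_id() above follows the xAPIC register layout, in which the 8-bit APIC ID occupies bits 31:24 of the APIC ID register. A standalone sketch with an assumed register value (not a real register read):

#include <stdio.h>

int main(void)
{
	unsigned int apic_id_reg = 0x05000000;	/* assumed raw register, ID = 5 */

	printf("apic id %u\n", (apic_id_reg >> 24) & 0xffu);
	return 0;
}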
@@ -2218,7 +2218,7 @@ static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
 	if (first_cpu_of_numa_node >= nr_cpu_ids)
 		return -1;
 #ifdef CONFIG_X86_64
-	return cpu_data(first_cpu_of_numa_node).apicid;
+	return cpu_data(first_cpu_of_numa_node).topo.apicid;
 #else
 	return first_cpu_of_numa_node;
 #endif
@@ -17,6 +17,7 @@
 #include <linux/cpumask.h>
 #include <linux/time.h>
 #include <linux/sched.h>
+#include <linux/topology.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
@@ -134,15 +135,13 @@ static DEVICE_ATTR_RO(power1_crit);
 static void do_read_registers_on_cu(void *_data)
 {
 	struct fam15h_power_data *data = _data;
-	int cpu, cu;
-
-	cpu = smp_processor_id();
+	int cu;
 
 	/*
 	 * With the new x86 topology modelling, cpu core id actually
 	 * is compute unit id.
 	 */
-	cu = cpu_data(cpu).cpu_core_id;
+	cu = topology_core_id(smp_processor_id());
 
 	rdmsrl_safe(MSR_F15H_CU_PWR_ACCUMULATOR, &data->cu_acc_power[cu]);
 	rdmsrl_safe(MSR_F15H_PTSC, &data->cpu_sw_pwr_ptsc[cu]);
@@ -12442,9 +12442,6 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
 	int max_core_id, min_core_id;
 	struct lpfc_vector_map_info *cpup;
 	struct lpfc_vector_map_info *new_cpup;
-#ifdef CONFIG_X86
-	struct cpuinfo_x86 *cpuinfo;
-#endif
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	struct lpfc_hdwq_stat *c_stat;
 #endif
@@ -12458,9 +12455,8 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
 	for_each_present_cpu(cpu) {
 		cpup = &phba->sli4_hba.cpu_map[cpu];
 #ifdef CONFIG_X86
-		cpuinfo = &cpu_data(cpu);
-		cpup->phys_id = cpuinfo->phys_proc_id;
-		cpup->core_id = cpuinfo->cpu_core_id;
+		cpup->phys_id = topology_physical_package_id(cpu);
+		cpup->core_id = topology_core_id(cpu);
 		if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
 			cpup->flag |= LPFC_CPU_MAP_HYPER;
 #else
@@ -447,7 +447,7 @@ static ssize_t remove_cpu_store(struct device *dev,
 	if (cpu_online(cpu))
 		remove_cpu(cpu);
 
-	lapicid = cpu_data(cpu).apicid;
+	lapicid = cpu_data(cpu).topo.apicid;
 	dev_dbg(dev, "Try to remove cpu %lld with lapicid %lld\n", cpu, lapicid);
 	ret = hcall_sos_remove_cpu(lapicid);
 	if (ret < 0) {
kernel/cpu.c (18 changed lines)
@@ -659,11 +659,19 @@ static inline bool cpu_smt_thread_allowed(unsigned int cpu)
 #endif
 }
 
-static inline bool cpu_smt_allowed(unsigned int cpu)
+static inline bool cpu_bootable(unsigned int cpu)
 {
 	if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
 		return true;
 
+	/* All CPUs are bootable if controls are not configured */
+	if (cpu_smt_control == CPU_SMT_NOT_IMPLEMENTED)
+		return true;
+
+	/* All CPUs are bootable if CPU is not SMT capable */
+	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
+		return true;
+
 	if (topology_is_primary_thread(cpu))
 		return true;
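The reordered checks make the bringup decision robust against enumeration failures: 'not implemented' and 'not supported' now explicitly mean every CPU is bootable, so an empty primary thread mask (as seen on XEN/PV) can no longer veto AP bringup. A compact model of the resulting decision order (simplified stand-in enum, not the kernel implementation):

/* Stand-ins for the kernel's cpu_smt_control states */
enum smt_state { SMT_ENABLED, SMT_DISABLED, SMT_NOT_SUPPORTED, SMT_NOT_IMPLEMENTED };

static int bootable(enum smt_state ctrl, int smt_thread_allowed, int is_primary)
{
	if (ctrl == SMT_ENABLED && smt_thread_allowed)
		return 1;
	/* Controls not configured or CPU not SMT capable: boot everything */
	if (ctrl == SMT_NOT_IMPLEMENTED || ctrl == SMT_NOT_SUPPORTED)
		return 1;
	/* Otherwise only the primary sibling may come up */
	return is_primary;
}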
@@ -685,7 +693,7 @@ bool cpu_smt_possible(void)
 EXPORT_SYMBOL_GPL(cpu_smt_possible);
 
 #else
-static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
+static inline bool cpu_bootable(unsigned int cpu) { return true; }
 #endif
 
 static inline enum cpuhp_state
@@ -788,10 +796,10 @@ static int bringup_wait_for_ap_online(unsigned int cpu)
 	 * SMT soft disabling on X86 requires to bring the CPU out of the
 	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
 	 * CPU marked itself as booted_once in notify_cpu_starting() so the
-	 * cpu_smt_allowed() check will now return false if this is not the
+	 * cpu_bootable() check will now return false if this is not the
 	 * primary sibling.
 	 */
-	if (!cpu_smt_allowed(cpu))
+	if (!cpu_bootable(cpu))
 		return -ECANCELED;
 	return 0;
 }
@@ -1744,7 +1752,7 @@ static int cpu_up(unsigned int cpu, enum cpuhp_state target)
 		err = -EBUSY;
 		goto out;
 	}
-	if (!cpu_smt_allowed(cpu)) {
+	if (!cpu_bootable(cpu)) {
 		err = -EPERM;
 		goto out;
 	}