x86/CPU/AMD: Remove amd_get_nb_id()
The Last Level Cache ID is returned by amd_get_nb_id(). In practice, this value is the same as the AMD NodeId for callers of this function. The NodeId is saved in struct cpuinfo_x86.cpu_die_id. Replace calls to amd_get_nb_id() with the logical CPU's cpu_die_id and remove the function. Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com> Signed-off-by: Borislav Petkov <bp@suse.de> Link: https://lkml.kernel.org/r/20201109210659.754018-3-Yazen.Ghannam@amd.com
Commit: db970bd231 · Parent: 028c221ed1 · Committed by: Borislav Petkov
@@ -538,7 +538,7 @@ static void amd_pmu_cpu_starting(int cpu)
 	if (!x86_pmu.amd_nb_constraints)
 		return;
 
-	nb_id = amd_get_nb_id(cpu);
+	nb_id = topology_die_id(cpu);
 	WARN_ON_ONCE(nb_id == BAD_APICID);
 
 	for_each_online_cpu(i) {
@@ -813,10 +813,8 @@ extern int set_tsc_mode(unsigned int val);
 DECLARE_PER_CPU(u64, msr_misc_features_shadow);
 
 #ifdef CONFIG_CPU_SUP_AMD
-extern u16 amd_get_nb_id(int cpu);
 extern u32 amd_get_nodes_per_socket(void);
 #else
-static inline u16 amd_get_nb_id(int cpu) { return 0; }
 static inline u32 amd_get_nodes_per_socket(void) { return 0; }
 #endif
 
@@ -384,7 +384,7 @@ struct resource *amd_get_mmconfig_range(struct resource *res)
 
 int amd_get_subcaches(int cpu)
 {
-	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
+	struct pci_dev *link = node_to_amd_nb(topology_die_id(cpu))->link;
 	unsigned int mask;
 
 	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
@@ -398,7 +398,7 @@ int amd_get_subcaches(int cpu)
 int amd_set_subcaches(int cpu, unsigned long mask)
 {
 	static unsigned int reset, ban;
-	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
+	struct amd_northbridge *nb = node_to_amd_nb(topology_die_id(cpu));
 	unsigned int reg;
 	int cuid;
 
@@ -424,12 +424,6 @@ clear_ppin:
 	clear_cpu_cap(c, X86_FEATURE_AMD_PPIN);
 }
 
-u16 amd_get_nb_id(int cpu)
-{
-	return per_cpu(cpu_llc_id, cpu);
-}
-EXPORT_SYMBOL_GPL(amd_get_nb_id);
-
 u32 amd_get_nodes_per_socket(void)
 {
 	return nodes_per_socket;
@@ -580,7 +580,7 @@ static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
 	if (index < 3)
 		return;
 
-	node = amd_get_nb_id(smp_processor_id());
+	node = topology_die_id(smp_processor_id());
 	this_leaf->nb = node_to_amd_nb(node);
 	if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
 		amd_calc_l3_indices(this_leaf->nb);
@@ -1341,7 +1341,7 @@ static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu,
 		return -ENODEV;
 
 	if (is_shared_bank(bank)) {
-		nb = node_to_amd_nb(amd_get_nb_id(cpu));
+		nb = node_to_amd_nb(topology_die_id(cpu));
 
 		/* threshold descriptor already initialized on this node? */
 		if (nb && nb->bank4) {
@@ -1445,7 +1445,7 @@ static void threshold_remove_bank(struct threshold_bank *bank)
 		 * The last CPU on this node using the shared bank is going
 		 * away, remove that bank now.
 		 */
-		nb = node_to_amd_nb(amd_get_nb_id(smp_processor_id()));
+		nb = node_to_amd_nb(topology_die_id(smp_processor_id()));
 		nb->bank4 = NULL;
 	}
 
@@ -522,8 +522,8 @@ static void do_inject(void)
 	if (boot_cpu_has(X86_FEATURE_AMD_DCM) &&
 	    b == 4 &&
 	    boot_cpu_data.x86 < 0x17) {
-		toggle_nb_mca_mst_cpu(amd_get_nb_id(cpu));
-		cpu = get_nbc_for_node(amd_get_nb_id(cpu));
+		toggle_nb_mca_mst_cpu(topology_die_id(cpu));
+		cpu = get_nbc_for_node(topology_die_id(cpu));
 	}
 
 	get_online_cpus();
@@ -1133,7 +1133,7 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
 {
-	u16 mce_nid = amd_get_nb_id(m->extcpu);
+	u16 mce_nid = topology_die_id(m->extcpu);
 	struct mem_ctl_info *mci;
 	u8 start_bit = 1;
 	u8 end_bit = 47;
@@ -3046,7 +3046,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
 	int cpu;
 
 	for_each_online_cpu(cpu)
-		if (amd_get_nb_id(cpu) == nid)
+		if (topology_die_id(cpu) == nid)
 			cpumask_set_cpu(cpu, mask);
 }
 
@@ -869,7 +869,7 @@ static void decode_mc3_mce(struct mce *m)
 static void decode_mc4_mce(struct mce *m)
 {
 	unsigned int fam = x86_family(m->cpuid);
-	int node_id = amd_get_nb_id(m->extcpu);
+	int node_id = topology_die_id(m->extcpu);
 	u16 ec = EC(m->status);
 	u8 xec = XEC(m->status, 0x1f);
 	u8 offset = 0;
Reference in New Issue
Block a user