Mirror of https://github.com/torvalds/linux.git
Merge branch 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 apic updates from Ingo Molnar:
 "The main changes are:

   - Persistent CPU/node numbering across CPU hotplug/unplug events.
     This is a pretty involved series of changes that first fetches all
     the information during bootup and then uses it for the various
     hotplug/unplug methods. (Gu Zheng, Dou Liyang)

   - IO-APIC hot-add/remove fixes and enhancements. (Rui Wang)

   - ... various fixes, cleanups and enhancements"

* 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (22 commits)
  x86/apic: Fix silent & fatal merge conflict in __generic_processor_info()
  acpi: Fix broken error check in map_processor()
  acpi: Validate processor id when mapping the processor
  acpi: Provide mechanism to validate processors in the ACPI tables
  x86/acpi: Set persistent cpuid <-> nodeid mapping when booting
  x86/acpi: Enable MADT APIs to return disabled apicids
  x86/acpi: Introduce persistent storage for cpuid <-> apicid mapping
  x86/acpi: Enable acpi to register all possible cpus at boot time
  x86/numa: Online memory-less nodes at boot time
  x86/apic: Get rid of apic_version[] array
  x86/apic: Order irq_enter/exit() calls correctly vs. ack_APIC_irq()
  x86/ioapic: Ignore root bridges without a companion ACPI device
  x86/apic: Update comment about disabling processor focus
  x86/smpboot: Check APIC ID before setting up default routing
  x86/ioapic: Fix IOAPIC failing to request resource
  x86/ioapic: Fix lost IOAPIC resource after hot-removal and hotadd
  x86/ioapic: Fix setup_res() failing to get resource
  x86/ioapic: Support hot-removal of IOAPICs present during boot
  x86/ioapic: Change prototype of acpi_ioapic_add()
  x86/apic, ACPI: Fix incorrect assignment when handling apic/x2apic entries
  ...
commit 110a9e42b6
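The core of the "persistent numbering" series is a fixed cpuid <-> apicid table: a logical CPU ID, once handed out for a given APIC ID, is looked up and reused on every later hotplug event instead of being reassigned. As a rough standalone illustration of that lookup-or-allocate idea (not kernel code: MAX_IDS, id_to_apicid and allocate_id are made-up names standing in for nr_cpu_ids, cpuid_to_apicid[] and allocate_logical_cpuid() in the apic.c hunk further down), a user-space sketch might look like this:

/*
 * Sketch of the persistent ID allocation scheme. Slot 0 is reserved
 * for the boot CPU; other APIC IDs keep whatever slot they got first.
 */
#include <stdio.h>

#define MAX_IDS 8	/* stand-in for nr_cpu_ids */

static int id_to_apicid[MAX_IDS] = { [0 ... MAX_IDS - 1] = -1 };
static int nr_ids = 1;	/* slot 0 reserved for the BSP */

static int allocate_id(int apicid)
{
	int i;

	/* Reuse a previously allocated slot for a known APIC ID. */
	for (i = 0; i < nr_ids; i++)
		if (id_to_apicid[i] == apicid)
			return i;

	if (nr_ids >= MAX_IDS)
		return -1;	/* table full */

	id_to_apicid[nr_ids] = apicid;
	return nr_ids++;
}

int main(void)
{
	id_to_apicid[0] = 0;			/* BSP keeps logical ID 0 */
	printf("%d\n", allocate_id(4));		/* first hotplug: slot 1 */
	printf("%d\n", allocate_id(6));		/* slot 2 */
	printf("%d\n", allocate_id(4));		/* re-plug: slot 1 again */
	return 0;
}

Because a slot is never recycled, a CPU that is unplugged and plugged back in gets the same logical CPU number, which is what keeps the per-cpu and NUMA associations stable across hotplug.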
@@ -796,7 +796,7 @@ int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
  * ACPI based hotplug CPU support
  */
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
-static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
 	/*
@@ -650,8 +650,8 @@ static inline void entering_ack_irq(void)
 
 static inline void ipi_entering_ack_irq(void)
 {
-	ack_APIC_irq();
 	irq_enter();
+	ack_APIC_irq();
 }
 
 static inline void exiting_irq(void)
@@ -661,9 +661,8 @@ static inline void exiting_irq(void)
 
 static inline void exiting_ack_irq(void)
 {
-	irq_exit();
-	/* Ack only at the end to avoid potential reentry */
 	ack_APIC_irq();
+	irq_exit();
 }
 
 extern void ioapic_zap_locks(void);
@@ -6,7 +6,6 @@
 #include <asm/x86_init.h>
 #include <asm/apicdef.h>
 
-extern int apic_version[];
 extern int pic_mode;
 
 #ifdef CONFIG_X86_32
@@ -40,6 +39,7 @@ extern int mp_bus_id_to_type[MAX_MP_BUSSES];
 extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
 
 extern unsigned int boot_cpu_physical_apicid;
+extern u8 boot_cpu_apic_version;
 extern unsigned long mp_lapic_addr;
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -86,6 +86,7 @@ static inline void early_reserve_e820_mpc_new(void) { }
 #endif
 
 int generic_processor_info(int apicid, int version);
+int __generic_processor_info(int apicid, int version, bool enabled);
 
 #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_LOCAL_APIC)
 
@@ -176,15 +176,10 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
 		return -EINVAL;
 	}
 
-	if (!enabled) {
-		++disabled_cpus;
-		return -EINVAL;
-	}
-
 	if (boot_cpu_physical_apicid != -1U)
-		ver = apic_version[boot_cpu_physical_apicid];
+		ver = boot_cpu_apic_version;
 
-	cpu = generic_processor_info(id, ver);
+	cpu = __generic_processor_info(id, ver, enabled);
 	if (cpu >= 0)
 		early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid;
 
@@ -282,6 +277,8 @@ acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
 	if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
 		return -EINVAL;
 
+	acpi_table_print_madt_entry(header);
+
 	acpi_lapic_addr = lapic_addr_ovr->address;
 
 	return 0;
@@ -705,7 +702,7 @@ static void __init acpi_set_irq_model_ioapic(void)
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 #include <acpi/processor.h>
 
-static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
 	int nid;
@@ -716,6 +713,7 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 		numa_set_node(cpu, nid);
 	}
 #endif
+	return 0;
 }
 
 int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu)
@@ -998,21 +996,6 @@ static int __init acpi_parse_madt_lapic_entries(void)
 	if (!boot_cpu_has(X86_FEATURE_APIC))
 		return -ENODEV;
 
-	/*
-	 * Note that the LAPIC address is obtained from the MADT (32-bit value)
-	 * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
-	 */
-
-	count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
-				      acpi_parse_lapic_addr_ovr, 0);
-	if (count < 0) {
-		printk(KERN_ERR PREFIX
-		       "Error parsing LAPIC address override entry\n");
-		return count;
-	}
-
-	register_lapic_address(acpi_lapic_addr);
-
 	count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
 				      acpi_parse_sapic, MAX_LOCAL_APIC);
 
@@ -64,6 +64,8 @@ unsigned disabled_cpus;
 unsigned int boot_cpu_physical_apicid = -1U;
 EXPORT_SYMBOL_GPL(boot_cpu_physical_apicid);
 
+u8 boot_cpu_apic_version;
+
 /*
  * The highest APIC ID seen during enumeration.
  */
@@ -1374,7 +1376,6 @@ void setup_local_APIC(void)
 	 * Actually disabling the focus CPU check just makes the hang less
 	 * frequent as it makes the interrupt distributon model be more
 	 * like LRU than MRU (the short-term load is more even across CPUs).
-	 * See also the comment in end_level_ioapic_irq().  --macro
 	 */
 
 	/*
@@ -1816,8 +1817,7 @@ void __init init_apic_mappings(void)
 		 * since smp_sanity_check is prepared for such a case
 		 * and disable smp mode
 		 */
-		apic_version[new_apicid] =
-			 GET_APIC_VERSION(apic_read(APIC_LVR));
+		boot_cpu_apic_version = GET_APIC_VERSION(apic_read(APIC_LVR));
 	}
 }
 
@@ -1828,17 +1828,14 @@ void __init register_lapic_address(unsigned long address)
 	if (!x2apic_mode) {
 		set_fixmap_nocache(FIX_APIC_BASE, address);
 		apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
-			    APIC_BASE, mp_lapic_addr);
+			    APIC_BASE, address);
 	}
 	if (boot_cpu_physical_apicid == -1U) {
 		boot_cpu_physical_apicid = read_apic_id();
-		apic_version[boot_cpu_physical_apicid] =
-			 GET_APIC_VERSION(apic_read(APIC_LVR));
+		boot_cpu_apic_version = GET_APIC_VERSION(apic_read(APIC_LVR));
 	}
 }
 
-int apic_version[MAX_LOCAL_APIC];
-
 /*
  * Local APIC interrupts
  */
@@ -2027,7 +2024,53 @@ void disconnect_bsp_APIC(int virt_wire_setup)
 	apic_write(APIC_LVT1, value);
 }
 
-int generic_processor_info(int apicid, int version)
+/*
+ * The number of allocated logical CPU IDs. Since logical CPU IDs are allocated
+ * contiguously, it equals to current allocated max logical CPU ID plus 1.
+ * All allocated CPU ID should be in [0, nr_logical_cpuidi), so the maximum of
+ * nr_logical_cpuids is nr_cpu_ids.
+ *
+ * NOTE: Reserve 0 for BSP.
+ */
+static int nr_logical_cpuids = 1;
+
+/*
+ * Used to store mapping between logical CPU IDs and APIC IDs.
+ */
+static int cpuid_to_apicid[] = {
+	[0 ... NR_CPUS - 1] = -1,
+};
+
+/*
+ * Should use this API to allocate logical CPU IDs to keep nr_logical_cpuids
+ * and cpuid_to_apicid[] synchronized.
+ */
+static int allocate_logical_cpuid(int apicid)
+{
+	int i;
+
+	/*
+	 * cpuid <-> apicid mapping is persistent, so when a cpu is up,
+	 * check if the kernel has allocated a cpuid for it.
+	 */
+	for (i = 0; i < nr_logical_cpuids; i++) {
+		if (cpuid_to_apicid[i] == apicid)
+			return i;
+	}
+
+	/* Allocate a new cpuid. */
+	if (nr_logical_cpuids >= nr_cpu_ids) {
+		WARN_ONCE(1, "Only %d processors supported."
+			     "Processor %d/0x%x and the rest are ignored.\n",
+			     nr_cpu_ids - 1, nr_logical_cpuids, apicid);
+		return -1;
+	}
+
+	cpuid_to_apicid[nr_logical_cpuids] = apicid;
+	return nr_logical_cpuids++;
+}
+
+int __generic_processor_info(int apicid, int version, bool enabled)
 {
 	int cpu, max = nr_cpu_ids;
 	bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2102,8 +2145,16 @@ int generic_processor_info(int apicid, int version)
 		 * for BSP.
 		 */
 		cpu = 0;
-	} else
-		cpu = cpumask_next_zero(-1, cpu_present_mask);
+
+		/* Logical cpuid 0 is reserved for BSP. */
+		cpuid_to_apicid[0] = apicid;
+	} else {
+		cpu = allocate_logical_cpuid(apicid);
+		if (cpu < 0) {
+			disabled_cpus++;
+			return -EINVAL;
+		}
+	}
 
 	/*
 	 * This can happen on physical hotplug. The sanity check at boot time
@@ -2120,8 +2171,6 @@ int generic_processor_info(int apicid, int version)
 		return -ENOSPC;
 	}
 
-	num_processors++;
-
 	/*
 	 * Validate version
 	 */
@@ -2130,14 +2179,12 @@ int generic_processor_info(int apicid, int version)
 			   cpu, apicid);
 		version = 0x10;
 	}
-	apic_version[apicid] = version;
 
-	if (version != apic_version[boot_cpu_physical_apicid]) {
+	if (version != boot_cpu_apic_version) {
 		pr_warning("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n",
-			apic_version[boot_cpu_physical_apicid], cpu, version);
+			boot_cpu_apic_version, cpu, version);
 	}
 
-	physid_set(apicid, phys_cpu_present_map);
 	if (apicid > max_physical_apicid)
 		max_physical_apicid = apicid;
 
@@ -2150,11 +2197,23 @@ int generic_processor_info(int apicid, int version)
 		apic->x86_32_early_logical_apicid(cpu);
 #endif
 	set_cpu_possible(cpu, true);
-	set_cpu_present(cpu, true);
+
+	if (enabled) {
+		num_processors++;
+		physid_set(apicid, phys_cpu_present_map);
+		set_cpu_present(cpu, true);
+	} else {
+		disabled_cpus++;
+	}
 
 	return cpu;
 }
 
+int generic_processor_info(int apicid, int version)
+{
+	return __generic_processor_info(apicid, version, true);
+}
+
 int hard_smp_processor_id(void)
 {
 	return read_apic_id();
@@ -2277,7 +2336,7 @@ int __init APIC_init_uniprocessor(void)
 	 * Complain if the BIOS pretends there is one.
 	 */
 	if (!boot_cpu_has(X86_FEATURE_APIC) &&
-	    APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
+	    APIC_INTEGRATED(boot_cpu_apic_version)) {
 		pr_err("BIOS bug, local APIC 0x%x not detected!...\n",
 		       boot_cpu_physical_apicid);
 		return -1;
@@ -1593,7 +1593,7 @@ void __init setup_ioapic_ids_from_mpc(void)
 	 * no meaning without the serial APIC bus.
 	 */
 	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
+		|| APIC_XAPIC(boot_cpu_apic_version))
 		return;
 	setup_ioapic_ids_from_mpc_nocheck();
 }
@@ -2423,7 +2423,7 @@ static int io_apic_get_unique_id(int ioapic, int apic_id)
 static u8 io_apic_unique_id(int idx, u8 id)
 {
 	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
-	    !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
+	    !APIC_XAPIC(boot_cpu_apic_version))
 		return io_apic_get_unique_id(idx, id);
 	else
 		return id;
@@ -152,7 +152,7 @@ early_param("apic", parse_apic);
 
 void __init default_setup_apic_routing(void)
 {
-	int version = apic_version[boot_cpu_physical_apicid];
+	int version = boot_cpu_apic_version;
 
 	if (num_possible_cpus() > 8) {
 		switch (boot_cpu_data.x86_vendor) {
@@ -499,6 +499,9 @@ void __init default_get_smp_config(unsigned int early)
 {
 	struct mpf_intel *mpf = mpf_found;
 
+	if (!smp_found_config)
+		return;
+
 	if (!mpf)
 		return;
 
@@ -1219,8 +1219,7 @@ void __init setup_arch(char **cmdline_p)
 	/*
 	 * get boot-time SMP configuration:
 	 */
-	if (smp_found_config)
-		get_smp_config();
+	get_smp_config();
 
 	prefill_possible_map();
 
@@ -691,7 +691,7 @@ wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
 	 * Give the other CPU some time to accept the IPI.
 	 */
 	udelay(200);
-	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
+	if (APIC_INTEGRATED(boot_cpu_apic_version)) {
 		maxlvt = lapic_get_maxlvt();
 		if (maxlvt > 3)			/* Due to the Pentium erratum 3AP.  */
 			apic_write(APIC_ESR, 0);
@@ -718,7 +718,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 	/*
 	 * Be paranoid about clearing APIC errors.
 	 */
-	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
+	if (APIC_INTEGRATED(boot_cpu_apic_version)) {
 		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
 			apic_write(APIC_ESR, 0);
 		apic_read(APIC_ESR);
@@ -757,7 +757,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 	 * Determine this based on the APIC version.
 	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
 	 */
-	if (APIC_INTEGRATED(apic_version[phys_apicid]))
+	if (APIC_INTEGRATED(boot_cpu_apic_version))
 		num_starts = 2;
 	else
 		num_starts = 0;
@@ -995,7 +995,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 	/*
 	 * Be paranoid about clearing APIC errors.
 	 */
-	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
+	if (APIC_INTEGRATED(boot_cpu_apic_version)) {
 		apic_write(APIC_ESR, 0);
 		apic_read(APIC_ESR);
 	}
@@ -1250,7 +1250,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
 	/*
 	 * If we couldn't find a local APIC, then get out of here now!
 	 */
-	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
+	if (APIC_INTEGRATED(boot_cpu_apic_version) &&
 	    !boot_cpu_has(X86_FEATURE_APIC)) {
 		if (!disable_apic) {
 			pr_err("BIOS bug, local APIC #%d not detected!...\n",
@@ -1334,14 +1334,13 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 		break;
 	}
 
-	default_setup_apic_routing();
-
 	if (read_apic_id() != boot_cpu_physical_apicid) {
 		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
 		     read_apic_id(), boot_cpu_physical_apicid);
 		/* Or can we switch back to PIC here? */
 	}
 
+	default_setup_apic_routing();
 	cpu0_logical_apicid = apic_bsp_setup(false);
 
 	pr_info("CPU%d: ", 0);
@@ -52,21 +52,6 @@ static __init int find_northbridge(void)
 	return -ENOENT;
 }
 
-static __init void early_get_boot_cpu_id(void)
-{
-	/*
-	 * need to get the APIC ID of the BSP so can use that to
-	 * create apicid_to_node in amd_scan_nodes()
-	 */
-#ifdef CONFIG_X86_MPPARSE
-	/*
-	 * get boot-time SMP configuration:
-	 */
-	if (smp_found_config)
-		early_get_smp_config();
-#endif
-}
-
 int __init amd_numa_init(void)
 {
 	u64 start = PFN_PHYS(0);
@@ -180,8 +165,11 @@ int __init amd_numa_init(void)
 	cores = 1 << bits;
 	apicid_base = 0;
 
-	/* get the APIC ID of the BSP early for systems with apicid lifting */
-	early_get_boot_cpu_id();
+	/*
+	 * get boot-time SMP configuration:
+	 */
+	early_get_smp_config();
+
 	if (boot_cpu_physical_apicid > 0) {
 		pr_info("BSP APIC ID: %02x\n", boot_cpu_physical_apicid);
 		apicid_base = boot_cpu_physical_apicid;
@@ -722,22 +722,19 @@ void __init x86_numa_init(void)
 	numa_init(dummy_numa_init);
 }
 
-static __init int find_near_online_node(int node)
+static void __init init_memory_less_node(int nid)
 {
-	int n, val;
-	int min_val = INT_MAX;
-	int best_node = -1;
+	unsigned long zones_size[MAX_NR_ZONES] = {0};
+	unsigned long zholes_size[MAX_NR_ZONES] = {0};
 
-	for_each_online_node(n) {
-		val = node_distance(node, n);
+	/* Allocate and initialize node data. Memory-less node is now online.*/
+	alloc_node_data(nid);
+	free_area_init_node(nid, zones_size, 0, zholes_size);
 
-		if (val < min_val) {
-			min_val = val;
-			best_node = n;
-		}
-	}
-
-	return best_node;
+	/*
+	 * All zonelists will be built later in start_kernel() after per cpu
+	 * areas are initialized.
+	 */
 }
 
 /*
@@ -766,8 +763,10 @@ void __init init_cpu_to_node(void)
 
 		if (node == NUMA_NO_NODE)
 			continue;
+
 		if (!node_online(node))
-			node = find_near_online_node(node);
+			init_memory_less_node(node);
+
 		numa_set_node(cpu, node);
 	}
 }
@@ -182,6 +182,11 @@ int __weak arch_register_cpu(int cpu)
 
 void __weak arch_unregister_cpu(int cpu) {}
 
+int __weak acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+{
+	return -ENODEV;
+}
+
 static int acpi_processor_hotadd_init(struct acpi_processor *pr)
 {
 	unsigned long long sta;
@@ -300,8 +305,11 @@ static int acpi_processor_get_info(struct acpi_device *device)
 	 * Extra Processor objects may be enumerated on MP systems with
 	 * less than the max # of CPUs. They should be ignored _iff
 	 * they are physically not present.
+	 *
+	 * NOTE: Even if the processor has a cpuid, it may not be present
+	 * because cpuid <-> apicid mapping is persistent now.
 	 */
-	if (invalid_logical_cpuid(pr->id)) {
+	if (invalid_logical_cpuid(pr->id) || !cpu_present(pr->id)) {
 		int ret = acpi_processor_hotadd_init(pr);
 		if (ret)
 			return ret;
@@ -573,8 +581,102 @@ static struct acpi_scan_handler processor_container_handler = {
 	.attach = acpi_processor_container_attach,
 };
 
+/* The number of the unique processor IDs */
+static int nr_unique_ids __initdata;
+
+/* The number of the duplicate processor IDs */
+static int nr_duplicate_ids __initdata;
+
+/* Used to store the unique processor IDs */
+static int unique_processor_ids[] __initdata = {
+	[0 ... NR_CPUS - 1] = -1,
+};
+
+/* Used to store the duplicate processor IDs */
+static int duplicate_processor_ids[] __initdata = {
+	[0 ... NR_CPUS - 1] = -1,
+};
+
+static void __init processor_validated_ids_update(int proc_id)
+{
+	int i;
+
+	if (nr_unique_ids == NR_CPUS||nr_duplicate_ids == NR_CPUS)
+		return;
+
+	/*
+	 * Firstly, compare the proc_id with duplicate IDs, if the proc_id is
+	 * already in the IDs, do nothing.
+	 */
+	for (i = 0; i < nr_duplicate_ids; i++) {
+		if (duplicate_processor_ids[i] == proc_id)
+			return;
+	}
+
+	/*
+	 * Secondly, compare the proc_id with unique IDs, if the proc_id is in
+	 * the IDs, put it in the duplicate IDs.
+	 */
+	for (i = 0; i < nr_unique_ids; i++) {
+		if (unique_processor_ids[i] == proc_id) {
+			duplicate_processor_ids[nr_duplicate_ids] = proc_id;
+			nr_duplicate_ids++;
+			return;
+		}
+	}
+
+	/*
+	 * Lastly, the proc_id is a unique ID, put it in the unique IDs.
+	 */
+	unique_processor_ids[nr_unique_ids] = proc_id;
+	nr_unique_ids++;
+}
+
+static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
+						  u32 lvl,
+						  void *context,
+						  void **rv)
+{
+	acpi_status status;
+	union acpi_object object = { 0 };
+	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
+
+	status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+	if (ACPI_FAILURE(status))
+		acpi_handle_info(handle, "Not get the processor object\n");
+	else
+		processor_validated_ids_update(object.processor.proc_id);
+
+	return AE_OK;
+}
+
+static void __init acpi_processor_check_duplicates(void)
+{
+	/* Search all processor nodes in ACPI namespace */
+	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
+			    ACPI_UINT32_MAX,
+			    acpi_processor_ids_walk,
+			    NULL, NULL, NULL);
+}
+
+bool __init acpi_processor_validate_proc_id(int proc_id)
+{
+	int i;
+
+	/*
+	 * compare the proc_id with duplicate IDs, if the proc_id is already
+	 * in the duplicate IDs, return true, otherwise, return false.
+	 */
+	for (i = 0; i < nr_duplicate_ids; i++) {
+		if (duplicate_processor_ids[i] == proc_id)
+			return true;
+	}
+	return false;
+}
+
 void __init acpi_processor_init(void)
 {
+	acpi_processor_check_duplicates();
 	acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
 	acpi_scan_add_handler(&processor_container_handler);
 }
@@ -1195,6 +1195,7 @@ static int __init acpi_init(void)
 	acpi_wakeup_device_init();
 	acpi_debugger_init();
 	acpi_setup_sb_notify_handler();
+	acpi_set_processor_mapping();
 	return 0;
 }
 
@@ -40,10 +40,8 @@ int acpi_sysfs_init(void);
 void acpi_container_init(void);
 void acpi_memory_hotplug_init(void);
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
-int acpi_ioapic_add(struct acpi_pci_root *root);
 int acpi_ioapic_remove(struct acpi_pci_root *root);
 #else
-static inline int acpi_ioapic_add(struct acpi_pci_root *root) { return 0; }
 static inline int acpi_ioapic_remove(struct acpi_pci_root *root) { return 0; }
 #endif
 #ifdef CONFIG_ACPI_DOCK
@@ -46,7 +46,7 @@ static acpi_status setup_res(struct acpi_resource *acpi_res, void *data)
 	struct resource_win win;
 
 	res->flags = 0;
-	if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM) == 0)
+	if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM))
 		return AE_OK;
 
 	if (!acpi_dev_resource_memory(acpi_res, res)) {
@@ -97,7 +97,7 @@ static acpi_status handle_ioapic_add(acpi_handle handle, u32 lvl,
 	unsigned long long gsi_base;
 	struct acpi_pci_ioapic *ioapic;
 	struct pci_dev *dev = NULL;
-	struct resource *res = NULL;
+	struct resource *res = NULL, *pci_res = NULL, *crs_res;
 	char *type = NULL;
 
 	if (!acpi_is_ioapic(handle, &type))
@@ -137,23 +137,30 @@ static acpi_status handle_ioapic_add(acpi_handle handle, u32 lvl,
 		pci_set_master(dev);
 		if (pci_request_region(dev, 0, type))
 			goto exit_disable;
-		res = &dev->resource[0];
+		pci_res = &dev->resource[0];
 		ioapic->pdev = dev;
 	} else {
 		pci_dev_put(dev);
 		dev = NULL;
-
-		res = &ioapic->res;
-		acpi_walk_resources(handle, METHOD_NAME__CRS, setup_res, res);
-		if (res->flags == 0) {
-			acpi_handle_warn(handle, "failed to get resource\n");
-			goto exit_free;
-		} else if (request_resource(&iomem_resource, res)) {
-			acpi_handle_warn(handle, "failed to insert resource\n");
-			goto exit_free;
-		}
 	}
 
+	crs_res = &ioapic->res;
+	acpi_walk_resources(handle, METHOD_NAME__CRS, setup_res, crs_res);
+	crs_res->name = type;
+	crs_res->flags |= IORESOURCE_BUSY;
+	if (crs_res->flags == 0) {
+		acpi_handle_warn(handle, "failed to get resource\n");
+		goto exit_release;
+	} else if (insert_resource(&iomem_resource, crs_res)) {
+		acpi_handle_warn(handle, "failed to insert resource\n");
+		goto exit_release;
+	}
+
+	/* try pci resource first, then "_CRS" resource */
+	res = pci_res;
+	if (!res || !res->flags)
+		res = crs_res;
+
 	if (acpi_register_ioapic(handle, res->start, (u32)gsi_base)) {
 		acpi_handle_warn(handle, "failed to register IOAPIC\n");
 		goto exit_release;
@@ -174,14 +181,13 @@ done:
 exit_release:
 	if (dev)
 		pci_release_region(dev, 0);
-	else
-		release_resource(res);
+	if (ioapic->res.flags && ioapic->res.parent)
+		release_resource(&ioapic->res);
 exit_disable:
 	if (dev)
 		pci_disable_device(dev);
 exit_put:
 	pci_dev_put(dev);
 exit_free:
 	kfree(ioapic);
 exit:
 	mutex_unlock(&ioapic_list_lock);
@@ -189,13 +195,13 @@ exit:
 	return AE_OK;
 }
 
-int acpi_ioapic_add(struct acpi_pci_root *root)
+int acpi_ioapic_add(acpi_handle root_handle)
 {
 	acpi_status status, retval = AE_OK;
 
-	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, root->device->handle,
+	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, root_handle,
 				     UINT_MAX, handle_ioapic_add, NULL,
-				     root->device->handle, (void **)&retval);
+				     root_handle, (void **)&retval);
 
 	return ACPI_SUCCESS(status) && ACPI_SUCCESS(retval) ? 0 : -ENODEV;
 }
@@ -217,9 +223,9 @@ int acpi_ioapic_remove(struct acpi_pci_root *root)
 			pci_release_region(ioapic->pdev, 0);
 			pci_disable_device(ioapic->pdev);
 			pci_dev_put(ioapic->pdev);
-		} else if (ioapic->res.flags && ioapic->res.parent) {
-			release_resource(&ioapic->res);
 		}
+		if (ioapic->res.flags && ioapic->res.parent)
+			release_resource(&ioapic->res);
 		list_del(&ioapic->list);
 		kfree(ioapic);
 	}
@@ -614,7 +614,17 @@ static int acpi_pci_root_add(struct acpi_device *device,
 	if (hotadd) {
 		pcibios_resource_survey_bus(root->bus);
 		pci_assign_unassigned_root_bus_resources(root->bus);
-		acpi_ioapic_add(root);
+		/*
+		 * This is only called for the hotadd case. For the boot-time
+		 * case, we need to wait until after PCI initialization in
+		 * order to deal with IOAPICs mapped in on a PCI BAR.
+		 *
+		 * This is currently x86-specific, because acpi_ioapic_add()
+		 * is an empty function without CONFIG_ACPI_HOTPLUG_IOAPIC.
+		 * And CONFIG_ACPI_HOTPLUG_IOAPIC depends on CONFIG_X86_IO_APIC
+		 * (see drivers/acpi/Kconfig).
+		 */
+		acpi_ioapic_add(root->device->handle);
 	}
 
 	pci_lock_rescan_remove();
@@ -32,12 +32,12 @@ static struct acpi_table_madt *get_madt_table(void)
 }
 
 static int map_lapic_id(struct acpi_subtable_header *entry,
-		 u32 acpi_id, phys_cpuid_t *apic_id)
+		 u32 acpi_id, phys_cpuid_t *apic_id, bool ignore_disabled)
 {
 	struct acpi_madt_local_apic *lapic =
 		container_of(entry, struct acpi_madt_local_apic, header);
 
-	if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
+	if (ignore_disabled && !(lapic->lapic_flags & ACPI_MADT_ENABLED))
 		return -ENODEV;
 
 	if (lapic->processor_id != acpi_id)
@@ -48,12 +48,13 @@ static int map_lapic_id(struct acpi_subtable_header *entry,
 }
 
 static int map_x2apic_id(struct acpi_subtable_header *entry,
-		int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
+		int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
+		bool ignore_disabled)
 {
 	struct acpi_madt_local_x2apic *apic =
 		container_of(entry, struct acpi_madt_local_x2apic, header);
 
-	if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
+	if (ignore_disabled && !(apic->lapic_flags & ACPI_MADT_ENABLED))
 		return -ENODEV;
 
 	if (device_declaration && (apic->uid == acpi_id)) {
@@ -65,12 +66,13 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
 }
 
 static int map_lsapic_id(struct acpi_subtable_header *entry,
-	int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
+	int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
+	bool ignore_disabled)
 {
 	struct acpi_madt_local_sapic *lsapic =
 		container_of(entry, struct acpi_madt_local_sapic, header);
 
-	if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
+	if (ignore_disabled && !(lsapic->lapic_flags & ACPI_MADT_ENABLED))
 		return -ENODEV;
 
 	if (device_declaration) {
@@ -87,12 +89,13 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
  * Retrieve the ARM CPU physical identifier (MPIDR)
  */
 static int map_gicc_mpidr(struct acpi_subtable_header *entry,
-		int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr)
+		int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr,
+		bool ignore_disabled)
 {
 	struct acpi_madt_generic_interrupt *gicc =
 	    container_of(entry, struct acpi_madt_generic_interrupt, header);
 
-	if (!(gicc->flags & ACPI_MADT_ENABLED))
+	if (ignore_disabled && !(gicc->flags & ACPI_MADT_ENABLED))
 		return -ENODEV;
 
 	/* device_declaration means Device object in DSDT, in the
@@ -109,7 +112,7 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry,
 }
 
 static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
-				   int type, u32 acpi_id)
+				   int type, u32 acpi_id, bool ignore_disabled)
 {
 	unsigned long madt_end, entry;
 	phys_cpuid_t phys_id = PHYS_CPUID_INVALID;	/* CPU hardware ID */
@@ -127,16 +130,20 @@ static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
 		struct acpi_subtable_header *header =
 			(struct acpi_subtable_header *)entry;
 		if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
-			if (!map_lapic_id(header, acpi_id, &phys_id))
+			if (!map_lapic_id(header, acpi_id, &phys_id,
+					  ignore_disabled))
 				break;
 		} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
-			if (!map_x2apic_id(header, type, acpi_id, &phys_id))
+			if (!map_x2apic_id(header, type, acpi_id, &phys_id,
+					   ignore_disabled))
 				break;
 		} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
-			if (!map_lsapic_id(header, type, acpi_id, &phys_id))
+			if (!map_lsapic_id(header, type, acpi_id, &phys_id,
+					   ignore_disabled))
 				break;
 		} else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
-			if (!map_gicc_mpidr(header, type, acpi_id, &phys_id))
+			if (!map_gicc_mpidr(header, type, acpi_id, &phys_id,
+					    ignore_disabled))
 				break;
 		}
 		entry += header->length;
@@ -156,14 +163,15 @@ phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
 	if (!madt)
 		return PHYS_CPUID_INVALID;
 
-	rv = map_madt_entry(madt, 1, acpi_id);
+	rv = map_madt_entry(madt, 1, acpi_id, true);
 
 	early_acpi_os_unmap_memory(madt, tbl_size);
 
 	return rv;
 }
 
-static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
+static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,
+				  bool ignore_disabled)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
@@ -184,30 +192,38 @@ static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,
 
 	header = (struct acpi_subtable_header *)obj->buffer.pointer;
 	if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
-		map_lapic_id(header, acpi_id, &phys_id);
+		map_lapic_id(header, acpi_id, &phys_id, ignore_disabled);
 	else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
-		map_lsapic_id(header, type, acpi_id, &phys_id);
+		map_lsapic_id(header, type, acpi_id, &phys_id, ignore_disabled);
 	else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
-		map_x2apic_id(header, type, acpi_id, &phys_id);
+		map_x2apic_id(header, type, acpi_id, &phys_id, ignore_disabled);
 	else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT)
-		map_gicc_mpidr(header, type, acpi_id, &phys_id);
+		map_gicc_mpidr(header, type, acpi_id, &phys_id,
+			       ignore_disabled);
 
 exit:
 	kfree(buffer.pointer);
 	return phys_id;
 }
 
-phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
+static phys_cpuid_t __acpi_get_phys_id(acpi_handle handle, int type,
+				       u32 acpi_id, bool ignore_disabled)
 {
 	phys_cpuid_t phys_id;
 
-	phys_id = map_mat_entry(handle, type, acpi_id);
+	phys_id = map_mat_entry(handle, type, acpi_id, ignore_disabled);
 	if (invalid_phys_cpuid(phys_id))
-		phys_id = map_madt_entry(get_madt_table(), type, acpi_id);
+		phys_id = map_madt_entry(get_madt_table(), type, acpi_id,
+					   ignore_disabled);
 
 	return phys_id;
 }
 
+phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
+{
+	return __acpi_get_phys_id(handle, type, acpi_id, true);
+}
+
 int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id)
 {
 #ifdef CONFIG_SMP
@@ -264,6 +280,79 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
 }
 EXPORT_SYMBOL_GPL(acpi_get_cpuid);
 
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+static bool __init
+map_processor(acpi_handle handle, phys_cpuid_t *phys_id, int *cpuid)
+{
+	int type, id;
+	u32 acpi_id;
+	acpi_status status;
+	acpi_object_type acpi_type;
+	unsigned long long tmp;
+	union acpi_object object = { 0 };
+	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
+
+	status = acpi_get_type(handle, &acpi_type);
+	if (ACPI_FAILURE(status))
+		return false;
+
+	switch (acpi_type) {
+	case ACPI_TYPE_PROCESSOR:
+		status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+		if (ACPI_FAILURE(status))
+			return false;
+		acpi_id = object.processor.proc_id;
+
+		/* validate the acpi_id */
+		if(acpi_processor_validate_proc_id(acpi_id))
+			return false;
+		break;
+	case ACPI_TYPE_DEVICE:
+		status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
+		if (ACPI_FAILURE(status))
+			return false;
+		acpi_id = tmp;
+		break;
+	default:
+		return false;
+	}
+
+	type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
+
+	*phys_id = __acpi_get_phys_id(handle, type, acpi_id, false);
+	id = acpi_map_cpuid(*phys_id, acpi_id);
+
+	if (id < 0)
+		return false;
+	*cpuid = id;
+	return true;
+}
+
+static acpi_status __init
+set_processor_node_mapping(acpi_handle handle, u32 lvl, void *context,
+			   void **rv)
+{
+	phys_cpuid_t phys_id;
+	int cpu_id;
+
+	if (!map_processor(handle, &phys_id, &cpu_id))
+		return AE_ERROR;
+
+	acpi_map_cpu2node(handle, cpu_id, phys_id);
+	return AE_OK;
+}
+
+void __init acpi_set_processor_mapping(void)
+{
+	/* Set persistent cpu <-> node mapping for all processors. */
+	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
+			    ACPI_UINT32_MAX, set_processor_node_mapping,
+			    NULL, NULL, NULL);
+}
+#else
+void __init acpi_set_processor_mapping(void) {}
+#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
 static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
 			 u64 *phys_addr, int *ioapic_id)
@@ -25,6 +25,7 @@
 #include <linux/ioport.h>
 #include <linux/cache.h>
 #include <linux/slab.h>
+#include <linux/acpi.h>
 #include "pci.h"
 
 unsigned int pci_flags;
@@ -1852,8 +1853,13 @@ void __init pci_assign_unassigned_resources(void)
 {
 	struct pci_bus *root_bus;
 
-	list_for_each_entry(root_bus, &pci_root_buses, node)
+	list_for_each_entry(root_bus, &pci_root_buses, node) {
 		pci_assign_unassigned_root_bus_resources(root_bus);
+
+		/* Make sure the root bridge has a companion ACPI device: */
+		if (ACPI_HANDLE(root_bus->bridge))
+			acpi_ioapic_add(ACPI_HANDLE(root_bus->bridge));
+	}
 }
 
 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
@@ -269,12 +269,18 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
 	return phys_id == PHYS_CPUID_INVALID;
 }
 
+/* Validate the processor object's proc_id */
+bool acpi_processor_validate_proc_id(int proc_id);
+
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 /* Arch dependent functions for cpu hotplug support */
 int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu);
 int acpi_unmap_cpu(int cpu);
+int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid);
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
 
+void acpi_set_processor_mapping(void);
+
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
 int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
 #endif
@@ -758,6 +764,12 @@ static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb)
 
 #endif	/* !CONFIG_ACPI */
 
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+int acpi_ioapic_add(acpi_handle root);
+#else
+static inline int acpi_ioapic_add(acpi_handle root) { return 0; }
+#endif
+
 #ifdef CONFIG_ACPI
 void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
 			       u32 pm1a_ctrl, u32 pm1b_ctrl));