linux/arch/powerpc/platforms/powernv/opal-wrappers.S

/*
 * PowerNV OPAL API wrappers
 *
 * Copyright 2011 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/jump_label.h>
#include <asm/ppc_asm.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/opal.h>
.section ".text"
#ifdef CONFIG_TRACEPOINTS
#ifdef HAVE_JUMP_LABEL
#define OPAL_BRANCH(LABEL) \
ARCH_STATIC_BRANCH(LABEL, opal_tracepoint_key)
#else
.section ".toc","aw"
.globl opal_tracepoint_refcount
opal_tracepoint_refcount:
.llong 0
.section ".text"
/*
* We branch around this in early init by using an unconditional cpu
* feature.
*/
#define OPAL_BRANCH(LABEL) \
BEGIN_FTR_SECTION; \
b 1f; \
END_FTR_SECTION(0, 1); \
ld r11,opal_tracepoint_refcount@toc(r2); \
cmpdi r11,0; \
bne- LABEL; \
1:
#endif
#else
#define OPAL_BRANCH(LABEL)
#endif
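
/*
 * Summary of the above: OPAL_BRANCH(opal_tracepoint_entry) is a no-op unless
 * tracing is enabled.  With jump labels it is patched in and out via the
 * opal_tracepoint_key static key; otherwise it tests opal_tracepoint_refcount
 * and branches to the tracepoint entry only when the count is non-zero.
 */
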
/*
 * DO_OPAL_CALL assumes:
 * r0  = opal call token
 * r12 = msr
 * LR has been saved
 */
#define DO_OPAL_CALL()                  \
        mfcr    r11;                    \
        stw     r11,8(r1);              \
        li      r11,0;                  \
        ori     r11,r11,MSR_EE;         \
        std     r12,PACASAVEDMSR(r13);  \
        andc    r12,r12,r11;            \
        mtmsrd  r12,1;                  \
        LOAD_REG_ADDR(r11,opal_return); \
        mtlr    r11;                    \
        li      r11,MSR_DR|MSR_IR|MSR_LE;\
        andc    r12,r12,r11;            \
        mtspr   SPRN_HSRR1,r12;         \
        LOAD_REG_ADDR(r11,opal);        \
        ld      r12,8(r11);             \
        ld      r2,0(r11);              \
        mtspr   SPRN_HSRR0,r12;         \
        hrfid
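
/*
 * DO_OPAL_CALL (above) saves CR on the stack, records the current MSR in
 * PACASAVEDMSR, disables EE, and then hrfid's into OPAL with translation and
 * little-endian mode turned off, with r2 pointing at the OPAL base and LR set
 * so that OPAL returns through opal_return.
 */
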
#define OPAL_CALL(name, token)          \
_GLOBAL_TOC(name);                      \
        mfmsr   r12;                    \
        mflr    r0;                     \
        andi.   r11,r12,MSR_IR|MSR_DR;  \
        std     r0,PPC_LR_STKOFF(r1);   \
        li      r0,token;               \
        beq     opal_real_call;         \
        OPAL_BRANCH(opal_tracepoint_entry) \
        DO_OPAL_CALL()
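
/*
 * Illustrative expansion (not literal): OPAL_CALL(opal_rtc_read, OPAL_RTC_READ)
 * emits a global opal_rtc_read symbol that loads OPAL_RTC_READ into r0, saves
 * LR, and then takes opal_real_call when translation is already off, or the
 * virtual-mode DO_OPAL_CALL() path (optionally via opal_tracepoint_entry)
 * otherwise.
 */
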
opal_return:
        /*
         * Fixup endian on OPAL return... we should be able to simplify
         * this by instead converting the below trampoline to a set of
         * bytes (always BE) since MSR:LE will end up fixed up as a side
         * effect of the rfid.
         */
        FIXUP_ENDIAN
        ld      r2,PACATOC(r13);
        lwz     r4,8(r1);
        ld      r5,PPC_LR_STKOFF(r1);
        ld      r6,PACASAVEDMSR(r13);
        mtcr    r4;
        mtspr   SPRN_HSRR0,r5;
        mtspr   SPRN_HSRR1,r6;
        hrfid
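
/*
 * Real-mode variant: the MMU is already off here (the wrapper branched to
 * opal_real_call because MSR_IR|MSR_DR were clear), so only MSR_LE needs to
 * be cleared before entering OPAL, and the return trampoline ends with a
 * plain blr rather than another hrfid.
 */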
opal_real_call:
        mfcr    r11
        stw     r11,8(r1)

        /* Set opal return address */
        LOAD_REG_ADDR(r11, opal_return_realmode)
        mtlr    r11

        li      r11,MSR_LE
        andc    r12,r12,r11
        mtspr   SPRN_HSRR1,r12
        LOAD_REG_ADDR(r11,opal)
        ld      r12,8(r11)
        ld      r2,0(r11)
        mtspr   SPRN_HSRR0,r12
        hrfid
opal_return_realmode:
        FIXUP_ENDIAN
        ld      r2,PACATOC(r13);
        lwz     r11,8(r1);
        ld      r12,PPC_LR_STKOFF(r1)
        mtcr    r11;
        mtlr    r12
        blr
#ifdef CONFIG_TRACEPOINTS
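/*
 * Tracepoint path: save the call token and arguments, emit the entry
 * tracepoint, restore everything, then make the real OPAL call with the
 * saved return address pointing at opal_tracepoint_return so the exit
 * tracepoint sees the return value in r3.
 */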
opal_tracepoint_entry:
        stdu    r1,-STACKFRAMESIZE(r1)
        std     r0,STK_REG(R23)(r1)
        std     r3,STK_REG(R24)(r1)
        std     r4,STK_REG(R25)(r1)
        std     r5,STK_REG(R26)(r1)
        std     r6,STK_REG(R27)(r1)
        std     r7,STK_REG(R28)(r1)
        std     r8,STK_REG(R29)(r1)
        std     r9,STK_REG(R30)(r1)
        std     r10,STK_REG(R31)(r1)
        mr      r3,r0
        addi    r4,r1,STK_REG(R24)
        bl      __trace_opal_entry
        ld      r0,STK_REG(R23)(r1)
        ld      r3,STK_REG(R24)(r1)
        ld      r4,STK_REG(R25)(r1)
        ld      r5,STK_REG(R26)(r1)
        ld      r6,STK_REG(R27)(r1)
        ld      r7,STK_REG(R28)(r1)
        ld      r8,STK_REG(R29)(r1)
        ld      r9,STK_REG(R30)(r1)
        ld      r10,STK_REG(R31)(r1)

        /* setup LR so we return via tracepoint_return */
        LOAD_REG_ADDR(r11,opal_tracepoint_return)
        std     r11,16(r1)

        mfmsr   r12
        DO_OPAL_CALL()
opal_tracepoint_return:
        std     r3,STK_REG(R31)(r1)
        mr      r4,r3
        ld      r3,STK_REG(R23)(r1)
        bl      __trace_opal_exit
        ld      r3,STK_REG(R31)(r1)

        addi    r1,r1,STACKFRAMESIZE
        ld      r0,16(r1)
        mtlr    r0
        blr
#endif
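
/*
 * Each OPAL_CALL() below defines a symbol that C code calls like a normal
 * function; the prototypes live in asm/opal.h.  Illustrative example only
 * (check opal.h for the authoritative signature):
 *
 *      int64_t opal_rtc_read(__be32 *year_month_day,
 *                            __be64 *hour_minute_second_millisecond);
 */
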
OPAL_CALL(opal_invalid_call, OPAL_INVALID_CALL);
OPAL_CALL(opal_console_write, OPAL_CONSOLE_WRITE);
OPAL_CALL(opal_console_read, OPAL_CONSOLE_READ);
OPAL_CALL(opal_console_write_buffer_space, OPAL_CONSOLE_WRITE_BUFFER_SPACE);
OPAL_CALL(opal_rtc_read, OPAL_RTC_READ);
OPAL_CALL(opal_rtc_write, OPAL_RTC_WRITE);
OPAL_CALL(opal_cec_power_down, OPAL_CEC_POWER_DOWN);
OPAL_CALL(opal_cec_reboot, OPAL_CEC_REBOOT);
OPAL_CALL(opal_cec_reboot2, OPAL_CEC_REBOOT2);
OPAL_CALL(opal_read_nvram, OPAL_READ_NVRAM);
OPAL_CALL(opal_write_nvram, OPAL_WRITE_NVRAM);
OPAL_CALL(opal_handle_interrupt, OPAL_HANDLE_INTERRUPT);
OPAL_CALL(opal_poll_events, OPAL_POLL_EVENTS);
OPAL_CALL(opal_pci_set_hub_tce_memory, OPAL_PCI_SET_HUB_TCE_MEMORY);
OPAL_CALL(opal_pci_set_phb_tce_memory, OPAL_PCI_SET_PHB_TCE_MEMORY);
OPAL_CALL(opal_pci_config_read_byte, OPAL_PCI_CONFIG_READ_BYTE);
OPAL_CALL(opal_pci_config_read_half_word, OPAL_PCI_CONFIG_READ_HALF_WORD);
OPAL_CALL(opal_pci_config_read_word, OPAL_PCI_CONFIG_READ_WORD);
OPAL_CALL(opal_pci_config_write_byte, OPAL_PCI_CONFIG_WRITE_BYTE);
OPAL_CALL(opal_pci_config_write_half_word, OPAL_PCI_CONFIG_WRITE_HALF_WORD);
OPAL_CALL(opal_pci_config_write_word, OPAL_PCI_CONFIG_WRITE_WORD);
OPAL_CALL(opal_set_xive, OPAL_SET_XIVE);
OPAL_CALL(opal_get_xive, OPAL_GET_XIVE);
OPAL_CALL(opal_register_exception_handler, OPAL_REGISTER_OPAL_EXCEPTION_HANDLER);
OPAL_CALL(opal_pci_eeh_freeze_status, OPAL_PCI_EEH_FREEZE_STATUS);
OPAL_CALL(opal_pci_eeh_freeze_clear, OPAL_PCI_EEH_FREEZE_CLEAR);
OPAL_CALL(opal_pci_eeh_freeze_set, OPAL_PCI_EEH_FREEZE_SET);
OPAL_CALL(opal_pci_err_inject, OPAL_PCI_ERR_INJECT);
OPAL_CALL(opal_pci_shpc, OPAL_PCI_SHPC);
OPAL_CALL(opal_pci_phb_mmio_enable, OPAL_PCI_PHB_MMIO_ENABLE);
OPAL_CALL(opal_pci_set_phb_mem_window, OPAL_PCI_SET_PHB_MEM_WINDOW);
OPAL_CALL(opal_pci_map_pe_mmio_window, OPAL_PCI_MAP_PE_MMIO_WINDOW);
OPAL_CALL(opal_pci_set_phb_table_memory, OPAL_PCI_SET_PHB_TABLE_MEMORY);
OPAL_CALL(opal_pci_set_pe, OPAL_PCI_SET_PE);
OPAL_CALL(opal_pci_set_peltv, OPAL_PCI_SET_PELTV);
OPAL_CALL(opal_pci_set_mve, OPAL_PCI_SET_MVE);
OPAL_CALL(opal_pci_set_mve_enable, OPAL_PCI_SET_MVE_ENABLE);
OPAL_CALL(opal_pci_get_xive_reissue, OPAL_PCI_GET_XIVE_REISSUE);
OPAL_CALL(opal_pci_set_xive_reissue, OPAL_PCI_SET_XIVE_REISSUE);
OPAL_CALL(opal_pci_set_xive_pe, OPAL_PCI_SET_XIVE_PE);
OPAL_CALL(opal_get_xive_source, OPAL_GET_XIVE_SOURCE);
OPAL_CALL(opal_get_msi_32, OPAL_GET_MSI_32);
OPAL_CALL(opal_get_msi_64, OPAL_GET_MSI_64);
OPAL_CALL(opal_start_cpu, OPAL_START_CPU);
OPAL_CALL(opal_query_cpu_status, OPAL_QUERY_CPU_STATUS);
OPAL_CALL(opal_write_oppanel, OPAL_WRITE_OPPANEL);
OPAL_CALL(opal_pci_map_pe_dma_window, OPAL_PCI_MAP_PE_DMA_WINDOW);
OPAL_CALL(opal_pci_map_pe_dma_window_real, OPAL_PCI_MAP_PE_DMA_WINDOW_REAL);
OPAL_CALL(opal_pci_reset, OPAL_PCI_RESET);
OPAL_CALL(opal_pci_get_hub_diag_data, OPAL_PCI_GET_HUB_DIAG_DATA);
OPAL_CALL(opal_pci_get_phb_diag_data, OPAL_PCI_GET_PHB_DIAG_DATA);
OPAL_CALL(opal_pci_fence_phb, OPAL_PCI_FENCE_PHB);
OPAL_CALL(opal_pci_reinit, OPAL_PCI_REINIT);
OPAL_CALL(opal_pci_mask_pe_error, OPAL_PCI_MASK_PE_ERROR);
OPAL_CALL(opal_set_slot_led_status, OPAL_SET_SLOT_LED_STATUS);
OPAL_CALL(opal_get_epow_status, OPAL_GET_EPOW_STATUS);
OPAL_CALL(opal_get_dpo_status, OPAL_GET_DPO_STATUS);
OPAL_CALL(opal_set_system_attention_led, OPAL_SET_SYSTEM_ATTENTION_LED);
OPAL_CALL(opal_pci_next_error, OPAL_PCI_NEXT_ERROR);
OPAL_CALL(opal_pci_poll, OPAL_PCI_POLL);
OPAL_CALL(opal_pci_msi_eoi, OPAL_PCI_MSI_EOI);
OPAL_CALL(opal_pci_get_phb_diag_data2, OPAL_PCI_GET_PHB_DIAG_DATA2);
OPAL_CALL(opal_xscom_read, OPAL_XSCOM_READ);
OPAL_CALL(opal_xscom_write, OPAL_XSCOM_WRITE);
OPAL_CALL(opal_lpc_read, OPAL_LPC_READ);
OPAL_CALL(opal_lpc_write, OPAL_LPC_WRITE);
OPAL_CALL(opal_return_cpu, OPAL_RETURN_CPU);
OPAL_CALL(opal_reinit_cpus, OPAL_REINIT_CPUS);
OPAL_CALL(opal_read_elog, OPAL_ELOG_READ);
OPAL_CALL(opal_send_ack_elog, OPAL_ELOG_ACK);
OPAL_CALL(opal_get_elog_size, OPAL_ELOG_SIZE);
OPAL_CALL(opal_resend_pending_logs, OPAL_ELOG_RESEND);
OPAL_CALL(opal_write_elog, OPAL_ELOG_WRITE);
OPAL_CALL(opal_validate_flash, OPAL_FLASH_VALIDATE);
OPAL_CALL(opal_manage_flash, OPAL_FLASH_MANAGE);
OPAL_CALL(opal_update_flash, OPAL_FLASH_UPDATE);
OPAL_CALL(opal_resync_timebase, OPAL_RESYNC_TIMEBASE);
OPAL_CALL(opal_check_token, OPAL_CHECK_TOKEN);
OPAL_CALL(opal_dump_init, OPAL_DUMP_INIT);
OPAL_CALL(opal_dump_info, OPAL_DUMP_INFO);
OPAL_CALL(opal_dump_info2, OPAL_DUMP_INFO2);
OPAL_CALL(opal_dump_read, OPAL_DUMP_READ);
OPAL_CALL(opal_dump_ack, OPAL_DUMP_ACK);
OPAL_CALL(opal_get_msg, OPAL_GET_MSG);
OPAL_CALL(opal_write_oppanel_async, OPAL_WRITE_OPPANEL_ASYNC);
OPAL_CALL(opal_check_completion, OPAL_CHECK_ASYNC_COMPLETION);
OPAL_CALL(opal_dump_resend_notification, OPAL_DUMP_RESEND);
OPAL_CALL(opal_sync_host_reboot, OPAL_SYNC_HOST_REBOOT);
OPAL_CALL(opal_sensor_read, OPAL_SENSOR_READ);
OPAL_CALL(opal_get_param, OPAL_GET_PARAM);
OPAL_CALL(opal_set_param, OPAL_SET_PARAM);
OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI);
OPAL_CALL(opal_config_cpu_idle_state, OPAL_CONFIG_CPU_IDLE_STATE);
OPAL_CALL(opal_slw_set_reg, OPAL_SLW_SET_REG);
OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION);
OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION);
OPAL_CALL(opal_pci_set_phb_cxl_mode, OPAL_PCI_SET_PHB_CAPI_MODE);
OPAL_CALL(opal_tpo_write, OPAL_WRITE_TPO);
OPAL_CALL(opal_tpo_read, OPAL_READ_TPO);
OPAL_CALL(opal_ipmi_send, OPAL_IPMI_SEND);
OPAL_CALL(opal_ipmi_recv, OPAL_IPMI_RECV);
OPAL_CALL(opal_i2c_request, OPAL_I2C_REQUEST);
OPAL_CALL(opal_flash_read, OPAL_FLASH_READ);
OPAL_CALL(opal_flash_write, OPAL_FLASH_WRITE);
OPAL_CALL(opal_flash_erase, OPAL_FLASH_ERASE);
OPAL_CALL(opal_prd_msg, OPAL_PRD_MSG);
OPAL_CALL(opal_leds_get_ind, OPAL_LEDS_GET_INDICATOR);
OPAL_CALL(opal_leds_set_ind, OPAL_LEDS_SET_INDICATOR);
OPAL_CALL(opal_console_flush, OPAL_CONSOLE_FLUSH);
OPAL_CALL(opal_get_device_tree, OPAL_GET_DEVICE_TREE);
OPAL_CALL(opal_pci_get_presence_state, OPAL_PCI_GET_PRESENCE_STATE);
OPAL_CALL(opal_pci_get_power_state, OPAL_PCI_GET_POWER_STATE);
OPAL_CALL(opal_pci_set_power_state, OPAL_PCI_SET_POWER_STATE);
OPAL_CALL(opal_int_get_xirr, OPAL_INT_GET_XIRR);
OPAL_CALL(opal_int_set_cppr, OPAL_INT_SET_CPPR);
OPAL_CALL(opal_int_eoi, OPAL_INT_EOI);
OPAL_CALL(opal_int_set_mfrr, OPAL_INT_SET_MFRR);
OPAL_CALL(opal_pci_tce_kill, OPAL_PCI_TCE_KILL);
OPAL_CALL(opal_nmmu_set_ptcr, OPAL_NMMU_SET_PTCR);
OPAL_CALL(opal_xive_reset, OPAL_XIVE_RESET);
OPAL_CALL(opal_xive_get_irq_info, OPAL_XIVE_GET_IRQ_INFO);
OPAL_CALL(opal_xive_get_irq_config, OPAL_XIVE_GET_IRQ_CONFIG);
OPAL_CALL(opal_xive_set_irq_config, OPAL_XIVE_SET_IRQ_CONFIG);
OPAL_CALL(opal_xive_get_queue_info, OPAL_XIVE_GET_QUEUE_INFO);
OPAL_CALL(opal_xive_set_queue_info, OPAL_XIVE_SET_QUEUE_INFO);
OPAL_CALL(opal_xive_donate_page, OPAL_XIVE_DONATE_PAGE);
OPAL_CALL(opal_xive_alloc_vp_block, OPAL_XIVE_ALLOCATE_VP_BLOCK);
OPAL_CALL(opal_xive_free_vp_block, OPAL_XIVE_FREE_VP_BLOCK);
OPAL_CALL(opal_xive_allocate_irq, OPAL_XIVE_ALLOCATE_IRQ);
OPAL_CALL(opal_xive_free_irq, OPAL_XIVE_FREE_IRQ);
OPAL_CALL(opal_xive_get_vp_info, OPAL_XIVE_GET_VP_INFO);
OPAL_CALL(opal_xive_set_vp_info, OPAL_XIVE_SET_VP_INFO);
OPAL_CALL(opal_xive_sync, OPAL_XIVE_SYNC);
OPAL_CALL(opal_xive_dump, OPAL_XIVE_DUMP);
OPAL_CALL(opal_npu_init_context, OPAL_NPU_INIT_CONTEXT);
OPAL_CALL(opal_npu_destroy_context, OPAL_NPU_DESTROY_CONTEXT);
OPAL_CALL(opal_npu_map_lpar, OPAL_NPU_MAP_LPAR);