drm/nouveau/pmu: initialise SW state for falcon from constructor
This will allow us to register the falcon with ACR, and further customise its behaviour by providing the nvkm_falcon_func structure directly.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 989863d7cb
commit 2952a2b42e
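In short, the falcon is now embedded in struct nvkm_pmu and its SW state is set up by the subdev constructor rather than a oneinit hook: nvkm_pmu_ctor() hands the nvkm_falcon_func table named by nvkm_pmu_func::flcn (gt215_pmu_flcn for every chip touched here) to nvkm_falcon_ctor(), nvkm_pmu_dtor() tears it down with nvkm_falcon_dtor(), and existing users switch from pmu->falcon to &pmu->falcon. The sketch below is a condensed view stitched together from the hunks that follow, not a standalone compilable unit; the constructor parameters not visible in the hunk header (int index) are assumed from the surrounding tree.

/* pmu.h: the PMU now embeds its falcon instead of holding a pointer. */
struct nvkm_pmu {
	const struct nvkm_pmu_func *func;	/* ->flcn names the nvkm_falcon_func table */
	struct nvkm_subdev subdev;
	struct nvkm_falcon falcon;		/* was: struct nvkm_falcon *falcon */
	struct nvkm_msgqueue *queue;
	/* ... */
};

/* base.c: falcon SW state is initialised from the constructor... */
int
nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
	      int index, struct nvkm_pmu *pmu)
{
	/* ... firmware interface selection as before ... */
	pmu->func = fwif->func;

	return nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev,
				nvkm_subdev_name[pmu->subdev.index], 0x10a000,
				&pmu->falcon);
}

/* ...and torn down from the destructor; the oneinit hook is dropped. */
static void *
nvkm_pmu_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
	nvkm_msgqueue_del(&pmu->queue);
	nvkm_falcon_dtor(&pmu->falcon);
	return nvkm_pmu(subdev);
}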
@@ -2,12 +2,12 @@
 #ifndef __NVKM_PMU_H__
 #define __NVKM_PMU_H__
 #include <core/subdev.h>
-#include <engine/falcon.h>
+#include <core/falcon.h>
 
 struct nvkm_pmu {
 	const struct nvkm_pmu_func *func;
 	struct nvkm_subdev subdev;
-	struct nvkm_falcon *falcon;
+	struct nvkm_falcon falcon;
 	struct nvkm_msgqueue *queue;
 
 	struct {
@@ -134,19 +134,12 @@ nvkm_pmu_init(struct nvkm_subdev *subdev)
 	return ret;
 }
 
-static int
-nvkm_pmu_oneinit(struct nvkm_subdev *subdev)
-{
-	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
-	return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon);
-}
-
 static void *
 nvkm_pmu_dtor(struct nvkm_subdev *subdev)
 {
 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
 	nvkm_msgqueue_del(&pmu->queue);
-	nvkm_falcon_del(&pmu->falcon);
+	nvkm_falcon_dtor(&pmu->falcon);
 	return nvkm_pmu(subdev);
 }
 
@@ -154,7 +147,6 @@ static const struct nvkm_subdev_func
 nvkm_pmu = {
 	.dtor = nvkm_pmu_dtor,
 	.preinit = nvkm_pmu_preinit,
-	.oneinit = nvkm_pmu_oneinit,
 	.init = nvkm_pmu_init,
 	.fini = nvkm_pmu_fini,
 	.intr = nvkm_pmu_intr,
@@ -174,7 +166,10 @@ nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
 		return PTR_ERR(fwif);
 
 	pmu->func = fwif->func;
-	return 0;
+
+	return nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev,
+				nvkm_subdev_name[pmu->subdev.index], 0x10a000,
+				&pmu->falcon);
 }
 
 int
@@ -42,6 +42,7 @@ gf100_pmu_enabled(struct nvkm_pmu *pmu)
 
 static const struct nvkm_pmu_func
 gf100_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.code.data = gf100_pmu_code,
 	.code.size = sizeof(gf100_pmu_code),
 	.data.data = gf100_pmu_data,
@@ -26,6 +26,7 @@
 
 static const struct nvkm_pmu_func
 gf119_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.code.data = gf119_pmu_code,
 	.code.size = sizeof(gf119_pmu_code),
 	.data.data = gf119_pmu_data,
@@ -105,6 +105,7 @@ gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 
 static const struct nvkm_pmu_func
 gk104_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.code.data = gk104_pmu_code,
 	.code.size = sizeof(gk104_pmu_code),
 	.data.data = gk104_pmu_data,
@@ -84,6 +84,7 @@ gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 
 static const struct nvkm_pmu_func
 gk110_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.code.data = gk110_pmu_code,
 	.code.size = sizeof(gk110_pmu_code),
 	.data.data = gk110_pmu_data,
@@ -26,6 +26,7 @@
 
 static const struct nvkm_pmu_func
 gk208_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.code.data = gk208_pmu_code,
 	.code.size = sizeof(gk208_pmu_code),
 	.data.data = gk208_pmu_data,
@@ -95,7 +95,7 @@ static void
 gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
 			      struct gk20a_pmu_dvfs_dev_status *status)
 {
-	struct nvkm_falcon *falcon = pmu->base.falcon;
+	struct nvkm_falcon *falcon = &pmu->base.falcon;
 
 	status->busy = nvkm_falcon_rd32(falcon, 0x508 + (BUSY_SLOT * 0x10));
 	status->total= nvkm_falcon_rd32(falcon, 0x508 + (CLK_SLOT * 0x10));
@@ -104,7 +104,7 @@ gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
 static void
 gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu)
 {
-	struct nvkm_falcon *falcon = pmu->base.falcon;
+	struct nvkm_falcon *falcon = &pmu->base.falcon;
 
 	nvkm_falcon_wr32(falcon, 0x508 + (BUSY_SLOT * 0x10), 0x80000000);
 	nvkm_falcon_wr32(falcon, 0x508 + (CLK_SLOT * 0x10), 0x80000000);
@@ -160,7 +160,7 @@ gk20a_pmu_fini(struct nvkm_pmu *pmu)
 	struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
 	nvkm_timer_alarm(pmu->subdev.device->timer, 0, &gpmu->alarm);
 
-	nvkm_falcon_put(pmu->falcon, &pmu->subdev);
+	nvkm_falcon_put(&pmu->falcon, &pmu->subdev);
 }
 
 static int
@@ -169,7 +169,7 @@ gk20a_pmu_init(struct nvkm_pmu *pmu)
 	struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
 	struct nvkm_subdev *subdev = &pmu->subdev;
 	struct nvkm_device *device = pmu->subdev.device;
-	struct nvkm_falcon *falcon = pmu->falcon;
+	struct nvkm_falcon *falcon = &pmu->falcon;
 	int ret;
 
 	ret = nvkm_falcon_get(falcon, subdev);
@@ -196,6 +196,7 @@ gk20a_dvfs_data= {
 
 static const struct nvkm_pmu_func
 gk20a_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.enabled = gf100_pmu_enabled,
 	.init = gk20a_pmu_init,
 	.fini = gk20a_pmu_fini,
@@ -28,6 +28,7 @@
 
 static const struct nvkm_pmu_func
 gm107_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.code.data = gm107_pmu_code,
 	.code.size = sizeof(gm107_pmu_code),
 	.data.data = gm107_pmu_data,
@@ -41,6 +41,7 @@ gm20b_pmu_recv(struct nvkm_pmu *pmu)
 
 static const struct nvkm_pmu_func
 gm20b_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.enabled = gf100_pmu_enabled,
 	.intr = gt215_pmu_intr,
 	.recv = gm20b_pmu_recv,
@@ -55,7 +56,7 @@ MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
 int
 gm20b_pmu_load(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
 {
-	return nvkm_acr_lsfw_load_sig_image_desc(&pmu->subdev, pmu->falcon,
+	return nvkm_acr_lsfw_load_sig_image_desc(&pmu->subdev, &pmu->falcon,
 						 NVKM_ACR_LSF_PMU, "pmu/",
 						 ver, fwif->acr);
 }
@@ -25,6 +25,7 @@
 
 static const struct nvkm_pmu_func
 gp100_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.enabled = gf100_pmu_enabled,
 	.reset = gf100_pmu_reset,
 };
@@ -39,6 +39,7 @@ gp102_pmu_enabled(struct nvkm_pmu *pmu)
 
 static const struct nvkm_pmu_func
 gp102_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.enabled = gp102_pmu_enabled,
 	.reset = gp102_pmu_reset,
 };
@@ -28,6 +28,7 @@ gp10b_pmu_acr = {
 
 static const struct nvkm_pmu_func
 gp10b_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.enabled = gf100_pmu_enabled,
 	.intr = gt215_pmu_intr,
 	.recv = gm20b_pmu_recv,
@@ -241,8 +241,23 @@ gt215_pmu_init(struct nvkm_pmu *pmu)
 	return 0;
 }
 
+const struct nvkm_falcon_func
+gt215_pmu_flcn = {
+	.load_imem = nvkm_falcon_v1_load_imem,
+	.load_dmem = nvkm_falcon_v1_load_dmem,
+	.read_dmem = nvkm_falcon_v1_read_dmem,
+	.bind_context = nvkm_falcon_v1_bind_context,
+	.wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+	.clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+	.set_start_addr = nvkm_falcon_v1_set_start_addr,
+	.start = nvkm_falcon_v1_start,
+	.enable = nvkm_falcon_v1_enable,
+	.disable = nvkm_falcon_v1_disable,
+};
+
 static const struct nvkm_pmu_func
 gt215_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.code.data = gt215_pmu_code,
 	.code.size = sizeof(gt215_pmu_code),
 	.data.data = gt215_pmu_data,
@@ -6,6 +6,8 @@
 #include <subdev/pmu/fuc/os.h>
 
 struct nvkm_pmu_func {
+	const struct nvkm_falcon_func *flcn;
+
 	struct {
 		u32 *data;
 		u32 size;
@@ -27,6 +29,7 @@ struct nvkm_pmu_func {
 	void (*pgob)(struct nvkm_pmu *, bool);
 };
 
+extern const struct nvkm_falcon_func gt215_pmu_flcn;
 int gt215_pmu_init(struct nvkm_pmu *);
 void gt215_pmu_fini(struct nvkm_pmu *);
 void gt215_pmu_intr(struct nvkm_pmu *);
@@ -1150,7 +1150,7 @@ acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr,
 	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
 	addr_code = (base + pdesc->app_resident_code_offset) >> 8;
 	addr_data = (base + pdesc->app_resident_data_offset) >> 8;
-	addr_args = pmu->falcon->data.limit;
+	addr_args = pmu->falcon.data.limit;
 	addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
 
 	desc->dma_idx = FALCON_DMAIDX_UCODE;
@@ -126,7 +126,7 @@ acr_r361_generate_pmu_bl_desc(const struct nvkm_acr *acr,
 	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
 	addr_code = base + pdesc->app_resident_code_offset;
 	addr_data = base + pdesc->app_resident_data_offset;
-	addr_args = pmu->falcon->data.limit;
+	addr_args = pmu->falcon.data.limit;
 	addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
 
 	desc->dma_idx = FALCON_DMAIDX_UCODE;
@@ -40,7 +40,7 @@ acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
 	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
 	addr_code = base + pdesc->app_resident_code_offset;
 	addr_data = base + pdesc->app_resident_data_offset;
-	addr_args = pmu->falcon->data.limit;
+	addr_args = pmu->falcon.data.limit;
 	addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
 
 	desc->ctx_dma = FALCON_DMAIDX_UCODE;
@@ -133,13 +133,13 @@ nvkm_secboot_oneinit(struct nvkm_subdev *subdev)
 
 	switch (sb->acr->boot_falcon) {
 	case NVKM_SECBOOT_FALCON_PMU:
-		sb->halt_falcon = sb->boot_falcon = subdev->device->pmu->falcon;
+		sb->halt_falcon = sb->boot_falcon = &subdev->device->pmu->falcon;
 		break;
 	case NVKM_SECBOOT_FALCON_SEC2:
 		/* we must keep SEC2 alive forever since ACR will run on it */
 		nvkm_engine_ref(&subdev->device->sec2->engine);
 		sb->boot_falcon = subdev->device->sec2->falcon;
-		sb->halt_falcon = subdev->device->pmu->falcon;
+		sb->halt_falcon = &subdev->device->pmu->falcon;
 		break;
 	default:
 		nvkm_error(subdev, "Unmanaged boot falcon %s!\n",
@@ -110,7 +110,7 @@ acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, int maxver,
 		return ret;
 
 	/* Allocate the PMU queue corresponding to the FW version */
-	ret = nvkm_msgqueue_new(img->ucode_desc.app_version, pmu->falcon,
+	ret = nvkm_msgqueue_new(img->ucode_desc.app_version, &pmu->falcon,
 				sb, &pmu->queue);
 	if (ret)
 		return ret;
@@ -123,10 +123,10 @@ acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
 {
 	struct nvkm_device *device = sb->subdev.device;
 	struct nvkm_pmu *pmu = device->pmu;
-	u32 addr_args = pmu->falcon->data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE;
+	u32 addr_args = pmu->falcon.data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE;
 	int ret;
 
-	ret = acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args);
+	ret = acr_ls_msgqueue_post_run(pmu->queue, &pmu->falcon, addr_args);
 	if (ret)
 		return ret;
 