Merge branch 'linux-4.8' of git://github.com/skeggsb/linux into drm-next
Here's an initial drm-next pull for nouveau 4.8. Highlights:
- GK20A/GM20B volt and clock improvements.
- Initial support for GP100/GP104 GPUs; GP104 will not yet support acceleration, as NVIDIA has not released firmware for it yet.

* 'linux-4.8' of git://github.com/skeggsb/linux: (97 commits)
  drm/nouveau/bus: remove cpu_coherent flag
  drm/nouveau/ttm: remove special handling of coherent objects
  drm/nouveau: check for supported chipset before booting fbdev off the hw
  drm/nouveau/ce/gp104: initial support
  drm/nouveau/fifo/gp104: initial support
  drm/nouveau/disp/gp104: initial support
  drm/nouveau/dma/gp104: initial support
  drm/nouveau/ltc/gp104: initial support
  drm/nouveau/ibus/gp104: initial support
  drm/nouveau/i2c/gp104: initial support
  drm/nouveau/gpio/gp104: initial support
  drm/nouveau/fuse/gp104: initial support
  drm/nouveau/bus/gp104: initial support
  drm/nouveau/bar/gp104: initial support
  drm/nouveau/mmu/gp104: initial support
  drm/nouveau/fb/gp104: initial support
  drm/nouveau/imem/gp104: initial support
  drm/nouveau/devinit/gp104: initial support
  drm/nouveau/bios/gp104: initial support
  drm/nouveau/tmr/gp104: initial support
  ...
commit 1640142b3d
@@ -29,6 +29,7 @@ struct nv_device_info_v0 {
#define NV_DEVICE_INFO_V0_FERMI 0x07
#define NV_DEVICE_INFO_V0_KEPLER 0x08
#define NV_DEVICE_INFO_V0_MAXWELL 0x09
#define NV_DEVICE_INFO_V0_PASCAL 0x0a
__u8 family;
__u8 pad06[2];
__u64 ram_size;

@@ -39,6 +39,7 @@
#define KEPLER_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000a06f
#define KEPLER_CHANNEL_GPFIFO_B /* cla06f.h */ 0x0000a16f
#define MAXWELL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000b06f
#define PASCAL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000c06f

#define NV50_DISP /* cl5070.h */ 0x00005070
#define G82_DISP /* cl5070.h */ 0x00008270
@@ -50,6 +51,8 @@
#define GK110_DISP /* cl5070.h */ 0x00009270
#define GM107_DISP /* cl5070.h */ 0x00009470
#define GM200_DISP /* cl5070.h */ 0x00009570
#define GP100_DISP /* cl5070.h */ 0x00009770
#define GP104_DISP /* cl5070.h */ 0x00009870

#define NV31_MPEG 0x00003174
#define G82_MPEG 0x00008274
@@ -86,6 +89,8 @@
#define GK110_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000927d
#define GM107_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000947d
#define GM200_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000957d
#define GP100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000977d
#define GP104_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d

#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
@@ -105,6 +110,8 @@
#define MAXWELL_A /* cl9097.h */ 0x0000b097
#define MAXWELL_B /* cl9097.h */ 0x0000b197

#define PASCAL_A /* cl9097.h */ 0x0000c097

#define NV74_BSP 0x000074b0

#define GT212_MSVLD 0x000085b1
@@ -128,6 +135,8 @@
#define FERMI_DMA 0x000090b5
#define KEPLER_DMA_COPY_A 0x0000a0b5
#define MAXWELL_DMA_COPY_A 0x0000b0b5
#define PASCAL_DMA_COPY_A 0x0000c0b5
#define PASCAL_DMA_COPY_B 0x0000c1b5

#define FERMI_DECOMPRESS 0x000090b8

@@ -137,6 +146,7 @@
#define KEPLER_COMPUTE_B 0x0000a1c0
#define MAXWELL_COMPUTE_A 0x0000b0c0
#define MAXWELL_COMPUTE_B 0x0000b1c0
#define PASCAL_COMPUTE_A 0x0000c0c0

#define NV74_CIPHER 0x000074c1
#endif
@@ -33,7 +33,10 @@ enum nvkm_devidx {
NVKM_ENGINE_CE0,
NVKM_ENGINE_CE1,
NVKM_ENGINE_CE2,
NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE2,
NVKM_ENGINE_CE3,
NVKM_ENGINE_CE4,
NVKM_ENGINE_CE5,
NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE5,

NVKM_ENGINE_CIPHER,
NVKM_ENGINE_DISP,
@@ -50,7 +53,8 @@ enum nvkm_devidx {

NVKM_ENGINE_NVENC0,
NVKM_ENGINE_NVENC1,
NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC1,
NVKM_ENGINE_NVENC2,
NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC2,

NVKM_ENGINE_NVDEC,
NVKM_ENGINE_PM,
@@ -102,6 +106,7 @@ struct nvkm_device {
NV_C0 = 0xc0,
NV_E0 = 0xe0,
GM100 = 0x110,
GP100 = 0x130,
} card_type;
u32 chipset;
u8 chiprev;
@@ -136,7 +141,7 @@ struct nvkm_device {
struct nvkm_volt *volt;

struct nvkm_engine *bsp;
struct nvkm_engine *ce[3];
struct nvkm_engine *ce[6];
struct nvkm_engine *cipher;
struct nvkm_disp *disp;
struct nvkm_dma *dma;
@@ -149,7 +154,7 @@ struct nvkm_device {
struct nvkm_engine *mspdec;
struct nvkm_engine *msppp;
struct nvkm_engine *msvld;
struct nvkm_engine *nvenc[2];
struct nvkm_engine *nvenc[3];
struct nvkm_engine *nvdec;
struct nvkm_pm *pm;
struct nvkm_engine *sec;
@@ -170,7 +175,6 @@ struct nvkm_device_func {
void (*fini)(struct nvkm_device *, bool suspend);
resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
bool cpu_coherent;
};

struct nvkm_device_quirk {
@@ -206,7 +210,7 @@ struct nvkm_device_chip {
int (*volt )(struct nvkm_device *, int idx, struct nvkm_volt **);

int (*bsp )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*ce[3] )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*ce[6] )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*cipher )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*disp )(struct nvkm_device *, int idx, struct nvkm_disp **);
int (*dma )(struct nvkm_device *, int idx, struct nvkm_dma **);
@@ -219,7 +223,7 @@ struct nvkm_device_chip {
int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*nvenc[2])(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*nvdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **);
int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **);
@@ -28,6 +28,7 @@ struct nvkm_device_tegra {
} iommu;

int gpu_speedo;
int gpu_speedo_id;
};

struct nvkm_device_tegra_func {
@@ -7,4 +7,6 @@ int gf100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gp104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
#endif
@@ -32,4 +32,6 @@ int gk104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gp104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
#endif
@@ -66,4 +66,5 @@ int gk20a_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gm107_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
#endif
@@ -42,4 +42,5 @@ int gk20a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
#endif
@@ -7,6 +7,9 @@ struct nvkm_bios {
u32 size;
u8 *data;

u32 image0_size;
u32 imaged_addr;

u32 bmp_offset;
u32 bit_offset;

@@ -22,10 +25,9 @@ struct nvkm_bios {
u8 nvbios_checksum(const u8 *data, int size);
u16 nvbios_findstr(const u8 *data, int size, const char *str, int len);
int nvbios_memcmp(struct nvkm_bios *, u32 addr, const char *, u32 len);

#define nvbios_rd08(b,o) (b)->data[(o)]
#define nvbios_rd16(b,o) get_unaligned_le16(&(b)->data[(o)])
#define nvbios_rd32(b,o) get_unaligned_le32(&(b)->data[(o)])
u8 nvbios_rd08(struct nvkm_bios *, u32 addr);
u16 nvbios_rd16(struct nvkm_bios *, u32 addr);
u32 nvbios_rd32(struct nvkm_bios *, u32 addr);

int nvkm_bios_new(struct nvkm_device *, int, struct nvkm_bios **);
#endif
@@ -56,6 +56,8 @@ struct nvkm_fb {
int regions;
} tile;

u8 page;

struct nvkm_memory *mmu_rd;
struct nvkm_memory *mmu_wr;
};
@@ -91,6 +93,8 @@ int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);

#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>
@@ -38,4 +38,5 @@ int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gk20a_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
#endif
@@ -7,11 +7,14 @@ struct nvkm_mc {
struct nvkm_subdev subdev;
};

void nvkm_mc_intr(struct nvkm_mc *, bool *handled);
void nvkm_mc_intr_unarm(struct nvkm_mc *);
void nvkm_mc_intr_rearm(struct nvkm_mc *);
void nvkm_mc_reset(struct nvkm_mc *, enum nvkm_devidx);
void nvkm_mc_unk260(struct nvkm_mc *, u32 data);
void nvkm_mc_enable(struct nvkm_device *, enum nvkm_devidx);
void nvkm_mc_disable(struct nvkm_device *, enum nvkm_devidx);
void nvkm_mc_reset(struct nvkm_device *, enum nvkm_devidx);
void nvkm_mc_intr(struct nvkm_device *, bool *handled);
void nvkm_mc_intr_unarm(struct nvkm_device *);
void nvkm_mc_intr_rearm(struct nvkm_device *);
void nvkm_mc_intr_mask(struct nvkm_device *, enum nvkm_devidx, bool enable);
void nvkm_mc_unk260(struct nvkm_device *, u32 data);

int nv04_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int nv11_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
@@ -24,4 +27,5 @@ int gt215_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
#endif
@@ -47,6 +47,7 @@ int g94_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int gf106_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int gk104_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int gp100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);

/* pcie functions */
int nvkm_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8 width);
@@ -43,9 +43,8 @@ struct nvkm_secboot {
const struct nvkm_secboot_func *func;
struct nvkm_subdev subdev;

enum nvkm_devidx devidx;
u32 base;
u32 irq_mask;
u32 enable_mask;
};
#define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev)

@@ -8,10 +8,11 @@ struct nvkm_top {
struct list_head device;
};

u32 nvkm_top_reset(struct nvkm_top *, enum nvkm_devidx);
u32 nvkm_top_intr(struct nvkm_top *, u32 intr, u64 *subdevs);
enum nvkm_devidx nvkm_top_fault(struct nvkm_top *, int fault);
enum nvkm_devidx nvkm_top_engine(struct nvkm_top *, int, int *runl, int *engn);
u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_devidx);
u32 nvkm_top_intr(struct nvkm_device *, u32 intr, u64 *subdevs);
u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_devidx);
enum nvkm_devidx nvkm_top_fault(struct nvkm_device *, int fault);
enum nvkm_devidx nvkm_top_engine(struct nvkm_device *, int, int *runl, int *engn);

int gk104_top_new(struct nvkm_device *, int, struct nvkm_top **);
#endif
@@ -12,6 +12,9 @@ struct nvkm_volt {
u32 uv;
u8 vid;
} vid[256];

u32 max_uv;
u32 min_uv;
};

int nvkm_volt_get(struct nvkm_volt *);
@@ -100,6 +100,7 @@ nouveau_abi16_swclass(struct nouveau_drm *drm)
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
case NV_DEVICE_INFO_V0_MAXWELL:
case NV_DEVICE_INFO_V0_PASCAL:
return NVIF_CLASS_SW_GF100;
}

@@ -209,8 +209,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &drm->ttm.bdev;

if (!nvxx_device(&drm->device)->func->cpu_coherent)
nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;

nvbo->page_shift = 12;
if (drm->client.vm) {
@@ -424,13 +423,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
if (ret)
return ret;

/*
* TTM buffers allocated using the DMA API already have a mapping, let's
* use it instead.
*/
if (!nvbo->force_coherent)
ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
&nvbo->kmap);
ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);

ttm_bo_unreserve(&nvbo->bo);
return ret;
@@ -442,12 +435,7 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
if (!nvbo)
return;

/*
* TTM buffers allocated using the DMA API already had a coherent
* mapping which we used, no need to unmap.
*/
if (!nvbo->force_coherent)
ttm_bo_kunmap(&nvbo->kmap);
ttm_bo_kunmap(&nvbo->kmap);
}

void
@@ -506,35 +494,13 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
return 0;
}

static inline void *
_nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz)
{
struct ttm_dma_tt *dma_tt;
u8 *m = mem;

index *= sz;

if (m) {
/* kmap'd address, return the corresponding offset */
m += index;
} else {
/* DMA-API mapping, lookup the right address */
dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm;
m = dma_tt->cpu_address[index / PAGE_SIZE];
m += index % PAGE_SIZE;
}

return m;
}
#define nouveau_bo_mem_index(o, i, m) _nouveau_bo_mem_index(o, i, m, sizeof(*m))

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
bool is_iomem;
u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

mem = nouveau_bo_mem_index(nvbo, index, mem);
mem += index;

if (is_iomem)
iowrite16_native(val, (void __force __iomem *)mem);
@@ -548,7 +514,7 @@ nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
bool is_iomem;
u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

mem = nouveau_bo_mem_index(nvbo, index, mem);
mem += index;

if (is_iomem)
return ioread32_native((void __force __iomem *)mem);
@@ -562,7 +528,7 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
bool is_iomem;
u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

mem = nouveau_bo_mem_index(nvbo, index, mem);
mem += index;

if (is_iomem)
iowrite32_native(val, (void __force __iomem *)mem);
@@ -1103,6 +1069,10 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
struct ttm_mem_reg *, struct ttm_mem_reg *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
{ "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
@@ -1491,14 +1461,6 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
dev = drm->dev;
pdev = device->dev;

/*
* Objects matching this condition have been marked as force_coherent,
* so use the DMA API for them.
*/
if (!nvxx_device(&drm->device)->func->cpu_coherent &&
ttm->caching_state == tt_uncached)
return ttm_dma_populate(ttm_dma, dev->dev);

#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
return ttm_agp_tt_populate(ttm);
@@ -1556,16 +1518,6 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
dev = drm->dev;
pdev = device->dev;

/*
* Objects matching this condition have been marked as force_coherent,
* so use the DMA API for them.
*/
if (!nvxx_device(&drm->device)->func->cpu_coherent &&
ttm->caching_state == tt_uncached) {
ttm_dma_unpopulate(ttm_dma, dev->dev);
return;
}

#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
ttm_agp_tt_unpopulate(ttm);
@@ -191,7 +191,8 @@ static int
nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
u32 engine, struct nouveau_channel **pchan)
{
static const u16 oclasses[] = { MAXWELL_CHANNEL_GPFIFO_A,
static const u16 oclasses[] = { PASCAL_CHANNEL_GPFIFO_A,
MAXWELL_CHANNEL_GPFIFO_A,
KEPLER_CHANNEL_GPFIFO_B,
KEPLER_CHANNEL_GPFIFO_A,
FERMI_CHANNEL_GPFIFO,
@@ -495,6 +495,8 @@ nouveau_display_create(struct drm_device *dev)

if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
static const u16 oclass[] = {
GP104_DISP,
GP100_DISP,
GM200_DISP,
GM107_DISP,
GK110_DISP,
@@ -198,6 +198,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
case KEPLER_CHANNEL_GPFIFO_A:
case KEPLER_CHANNEL_GPFIFO_B:
case MAXWELL_CHANNEL_GPFIFO_A:
case PASCAL_CHANNEL_GPFIFO_A:
ret = nvc0_fence_create(drm);
break;
default:
@@ -316,7 +317,16 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;

/* remove conflicting drivers (vesafb, efifb etc) */
/* We need to check that the chipset is supported before booting
* fbdev off the hardware, as there's no way to put it back.
*/
ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
if (ret)
return ret;

nvkm_device_del(&device);

/* Remove conflicting drivers (vesafb, efifb etc). */
aper = alloc_apertures(3);
if (!aper)
return -ENOMEM;
@@ -430,6 +440,11 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
nouveau_vga_init(drm);

if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
if (!nvxx_device(&drm->device)->mmu) {
ret = -ENOSYS;
goto fail_device;
}

ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
0x1000, NULL, &drm->client.vm);
if (ret)
@@ -534,6 +534,40 @@ nouveau_hwmon_get_in0_input(struct device *d,
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO,
nouveau_hwmon_get_in0_input, NULL, 0);

static ssize_t
nouveau_hwmon_get_in0_min(struct device *d,
struct device_attribute *a, char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_volt *volt = nvxx_volt(&drm->device);

if (!volt || !volt->min_uv)
return -ENODEV;

return sprintf(buf, "%i\n", volt->min_uv / 1000);
}

static SENSOR_DEVICE_ATTR(in0_min, S_IRUGO,
nouveau_hwmon_get_in0_min, NULL, 0);

static ssize_t
nouveau_hwmon_get_in0_max(struct device *d,
struct device_attribute *a, char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_volt *volt = nvxx_volt(&drm->device);

if (!volt || !volt->max_uv)
return -ENODEV;

return sprintf(buf, "%i\n", volt->max_uv / 1000);
}

static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO,
nouveau_hwmon_get_in0_max, NULL, 0);

static ssize_t
nouveau_hwmon_get_in0_label(struct device *d,
struct device_attribute *a, char *buf)
@@ -594,6 +628,8 @@ static struct attribute *hwmon_pwm_fan_attributes[] = {

static struct attribute *hwmon_in0_attributes[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in0_min.dev_attr.attr,
&sensor_dev_attr_in0_max.dev_attr.attr,
&sensor_dev_attr_in0_label.dev_attr.attr,
NULL
};
@@ -164,6 +164,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
case NV_DEVICE_INFO_V0_MAXWELL:
case NV_DEVICE_INFO_V0_PASCAL:
node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
break;
default:
@@ -297,6 +297,8 @@ nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
.pushbuf = 0xb0007d00,
};
static const s32 oclass[] = {
GP104_DISP_CORE_CHANNEL_DMA,
GP100_DISP_CORE_CHANNEL_DMA,
GM200_DISP_CORE_CHANNEL_DMA,
GM107_DISP_CORE_CHANNEL_DMA,
GK110_DISP_CORE_CHANNEL_DMA,
@@ -57,6 +57,9 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
[NVKM_ENGINE_CE0 ] = "ce0",
[NVKM_ENGINE_CE1 ] = "ce1",
[NVKM_ENGINE_CE2 ] = "ce2",
[NVKM_ENGINE_CE3 ] = "ce3",
[NVKM_ENGINE_CE4 ] = "ce4",
[NVKM_ENGINE_CE5 ] = "ce5",
[NVKM_ENGINE_CIPHER ] = "cipher",
[NVKM_ENGINE_DISP ] = "disp",
[NVKM_ENGINE_DMAOBJ ] = "dma",
@@ -71,6 +74,7 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
[NVKM_ENGINE_MSVLD ] = "msvld",
[NVKM_ENGINE_NVENC0 ] = "nvenc0",
[NVKM_ENGINE_NVENC1 ] = "nvenc1",
[NVKM_ENGINE_NVENC2 ] = "nvenc2",
[NVKM_ENGINE_NVDEC ] = "nvdec",
[NVKM_ENGINE_PM ] = "pm",
[NVKM_ENGINE_SEC ] = "sec",
@@ -105,7 +109,7 @@ nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
}
}

nvkm_mc_reset(device->mc, subdev->index);
nvkm_mc_reset(device, subdev->index);

time = ktime_to_us(ktime_get()) - time;
nvkm_trace(subdev, "%s completed in %lldus\n", action, time);
@@ -3,3 +3,5 @@ nvkm-y += nvkm/engine/ce/gf100.o
nvkm-y += nvkm/engine/ce/gk104.o
nvkm-y += nvkm/engine/ce/gm107.o
nvkm-y += nvkm/engine/ce/gm200.o
nvkm-y += nvkm/engine/ce/gp100.o
nvkm-y += nvkm/engine/ce/gp104.o
drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c (new file, 102 lines)
@@ -0,0 +1,102 @@
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <core/enum.h>

#include <nvif/class.h>

static const struct nvkm_enum
gp100_ce_launcherr_report[] = {
{ 0x0, "NO_ERR" },
{ 0x1, "2D_LAYER_EXCEEDS_DEPTH" },
{ 0x2, "INVALID_ALIGNMENT" },
{ 0x3, "MEM2MEM_RECT_OUT_OF_BOUNDS" },
{ 0x4, "SRC_LINE_EXCEEDS_PITCH" },
{ 0x5, "SRC_LINE_EXCEEDS_NEG_PITCH" },
{ 0x6, "DST_LINE_EXCEEDS_PITCH" },
{ 0x7, "DST_LINE_EXCEEDS_NEG_PITCH" },
{ 0x8, "BAD_SRC_PIXEL_COMP_REF" },
{ 0x9, "INVALID_VALUE" },
{ 0xa, "UNUSED_FIELD" },
{ 0xb, "INVALID_OPERATION" },
{ 0xc, "NO_RESOURCES" },
{ 0xd, "INVALID_CONFIG" },
{}
};

static void
gp100_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base)
{
struct nvkm_subdev *subdev = &ce->subdev;
struct nvkm_device *device = subdev->device;
u32 stat = nvkm_rd32(device, 0x104418 + base);
const struct nvkm_enum *en =
nvkm_enum_find(gp100_ce_launcherr_report, stat & 0x0000000f);
nvkm_warn(subdev, "LAUNCHERR %08x [%s]\n", stat, en ? en->name : "");
}

void
gp100_ce_intr(struct nvkm_engine *ce)
{
const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x80;
struct nvkm_subdev *subdev = &ce->subdev;
struct nvkm_device *device = subdev->device;
u32 mask = nvkm_rd32(device, 0x10440c + base);
u32 intr = nvkm_rd32(device, 0x104410 + base) & mask;
if (intr & 0x00000001) { //XXX: guess
nvkm_warn(subdev, "BLOCKPIPE\n");
nvkm_wr32(device, 0x104410 + base, 0x00000001);
intr &= ~0x00000001;
}
if (intr & 0x00000002) { //XXX: guess
nvkm_warn(subdev, "NONBLOCKPIPE\n");
nvkm_wr32(device, 0x104410 + base, 0x00000002);
intr &= ~0x00000002;
}
if (intr & 0x00000004) {
gp100_ce_intr_launcherr(ce, base);
nvkm_wr32(device, 0x104410 + base, 0x00000004);
intr &= ~0x00000004;
}
if (intr) {
nvkm_warn(subdev, "intr %08x\n", intr);
nvkm_wr32(device, 0x104410 + base, intr);
}
}

static const struct nvkm_engine_func
gp100_ce = {
.intr = gp100_ce_intr,
.sclass = {
{ -1, -1, PASCAL_DMA_COPY_A },
{}
}
};

int
gp100_ce_new(struct nvkm_device *device, int index,
struct nvkm_engine **pengine)
{
return nvkm_engine_new_(&gp100_ce, device, index, true, pengine);
}
drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c (new file, 44 lines)
@@ -0,0 +1,44 @@
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <core/enum.h>

#include <nvif/class.h>

static const struct nvkm_engine_func
gp104_ce = {
.intr = gp100_ce_intr,
.sclass = {
{ -1, -1, PASCAL_DMA_COPY_B },
{ -1, -1, PASCAL_DMA_COPY_A },
{}
}
};

int
gp104_ce_new(struct nvkm_device *device, int index,
struct nvkm_engine **pengine)
{
return nvkm_engine_new_(&gp104_ce, device, index, true, pengine);
}
@@ -4,4 +4,5 @@

void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *);
void gk104_ce_intr(struct nvkm_engine *);
void gp100_ce_intr(struct nvkm_engine *);
#endif
@@ -2148,6 +2148,67 @@ nv12b_chipset = {
.sw = gf100_sw_new,
};

static const struct nvkm_device_chip
nv130_chipset = {
.name = "GP100",
.bar = gf100_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
.fb = gp100_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gm200_i2c_new,
.ibus = gm200_ibus_new,
.imem = nv50_instmem_new,
.ltc = gp100_ltc_new,
.mc = gp100_mc_new,
.mmu = gf100_mmu_new,
.secboot = gm200_secboot_new,
.pci = gp100_pci_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
.ce[0] = gp100_ce_new,
.ce[1] = gp100_ce_new,
.ce[2] = gp100_ce_new,
.ce[3] = gp100_ce_new,
.ce[4] = gp100_ce_new,
.ce[5] = gp100_ce_new,
.dma = gf119_dma_new,
.disp = gp100_disp_new,
.fifo = gp100_fifo_new,
.gr = gp100_gr_new,
.sw = gf100_sw_new,
};

static const struct nvkm_device_chip
nv134_chipset = {
.name = "GP104",
.bar = gf100_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
.fb = gp104_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gm200_i2c_new,
.ibus = gm200_ibus_new,
.imem = nv50_instmem_new,
.ltc = gp100_ltc_new,
.mc = gp100_mc_new,
.mmu = gf100_mmu_new,
.pci = gp100_pci_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
.ce[0] = gp104_ce_new,
.ce[1] = gp104_ce_new,
.ce[2] = gp104_ce_new,
.ce[3] = gp104_ce_new,
.disp = gp104_disp_new,
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
};

static int
nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
@@ -2221,6 +2282,9 @@ nvkm_device_engine(struct nvkm_device *device, int index)
_(CE0 , device->ce[0] , device->ce[0]);
_(CE1 , device->ce[1] , device->ce[1]);
_(CE2 , device->ce[2] , device->ce[2]);
_(CE3 , device->ce[3] , device->ce[3]);
_(CE4 , device->ce[4] , device->ce[4]);
_(CE5 , device->ce[5] , device->ce[5]);
_(CIPHER , device->cipher , device->cipher);
_(DISP , device->disp , &device->disp->engine);
_(DMAOBJ , device->dma , &device->dma->engine);
@@ -2235,6 +2299,7 @@ nvkm_device_engine(struct nvkm_device *device, int index)
_(MSVLD , device->msvld , device->msvld);
_(NVENC0 , device->nvenc[0], device->nvenc[0]);
_(NVENC1 , device->nvenc[1], device->nvenc[1]);
_(NVENC2 , device->nvenc[2], device->nvenc[2]);
_(NVDEC , device->nvdec , device->nvdec);
_(PM , device->pm , &device->pm->engine);
_(SEC , device->sec , device->sec);
@@ -2492,6 +2557,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x100: device->card_type = NV_E0; break;
case 0x110:
case 0x120: device->card_type = GM100; break;
case 0x130: device->card_type = GP100; break;
default:
break;
}
@@ -2576,6 +2642,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x124: device->chip = &nv124_chipset; break;
case 0x126: device->chip = &nv126_chipset; break;
case 0x12b: device->chip = &nv12b_chipset; break;
case 0x130: device->chip = &nv130_chipset; break;
case 0x134: device->chip = &nv134_chipset; break;
default:
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
goto done;
@@ -2659,6 +2727,9 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
_(NVKM_ENGINE_CE0 , ce[0]);
_(NVKM_ENGINE_CE1 , ce[1]);
_(NVKM_ENGINE_CE2 , ce[2]);
_(NVKM_ENGINE_CE3 , ce[3]);
_(NVKM_ENGINE_CE4 , ce[4]);
_(NVKM_ENGINE_CE5 , ce[5]);
_(NVKM_ENGINE_CIPHER , cipher);
_(NVKM_ENGINE_DISP , disp);
_(NVKM_ENGINE_DMAOBJ , dma);
@@ -2673,6 +2744,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
_(NVKM_ENGINE_MSVLD , msvld);
_(NVKM_ENGINE_NVENC0 , nvenc[0]);
_(NVKM_ENGINE_NVENC1 , nvenc[1]);
_(NVKM_ENGINE_NVENC2 , nvenc[2]);
_(NVKM_ENGINE_NVDEC , nvdec);
_(NVKM_ENGINE_PM , pm);
_(NVKM_ENGINE_SEC , sec);
@@ -1614,7 +1614,6 @@ nvkm_device_pci_func = {
.fini = nvkm_device_pci_fini,
.resource_addr = nvkm_device_pci_resource_addr,
.resource_size = nvkm_device_pci_resource_size,
.cpu_coherent = !IS_ENABLED(CONFIG_ARM),
};

int
@@ -191,13 +191,11 @@ static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
struct nvkm_device_tegra *tdev = arg;
struct nvkm_mc *mc = tdev->device.mc;
struct nvkm_device *device = &tdev->device;
bool handled = false;
if (likely(mc)) {
nvkm_mc_intr_unarm(mc);
nvkm_mc_intr(mc, &handled);
nvkm_mc_intr_rearm(mc);
}
nvkm_mc_intr_unarm(device);
nvkm_mc_intr(device, &handled);
nvkm_mc_intr_rearm(device);
return handled ? IRQ_HANDLED : IRQ_NONE;
}

@@ -247,7 +245,6 @@ nvkm_device_tegra_func = {
.fini = nvkm_device_tegra_fini,
.resource_addr = nvkm_device_tegra_resource_addr,
.resource_size = nvkm_device_tegra_resource_size,
.cpu_coherent = false,
};

int
@@ -313,6 +310,7 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
goto remove;

tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
NVKM_DEVICE_TEGRA, pdev->id, NULL,
cfg, dbg, detect, mmio, subdev_mask,
@@ -102,6 +102,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
default:
args->v0.family = 0;
break;
@@ -10,6 +10,8 @@ nvkm-y += nvkm/engine/disp/gk104.o
nvkm-y += nvkm/engine/disp/gk110.o
nvkm-y += nvkm/engine/disp/gm107.o
nvkm-y += nvkm/engine/disp/gm200.o
nvkm-y += nvkm/engine/disp/gp100.o
nvkm-y += nvkm/engine/disp/gp104.o

nvkm-y += nvkm/engine/disp/outp.o
nvkm-y += nvkm/engine/disp/outpdp.o
@@ -45,12 +47,15 @@ nvkm-y += nvkm/engine/disp/rootgk104.o
nvkm-y += nvkm/engine/disp/rootgk110.o
nvkm-y += nvkm/engine/disp/rootgm107.o
nvkm-y += nvkm/engine/disp/rootgm200.o
nvkm-y += nvkm/engine/disp/rootgp100.o
nvkm-y += nvkm/engine/disp/rootgp104.o

nvkm-y += nvkm/engine/disp/channv50.o
nvkm-y += nvkm/engine/disp/changf119.o

nvkm-y += nvkm/engine/disp/dmacnv50.o
nvkm-y += nvkm/engine/disp/dmacgf119.o
nvkm-y += nvkm/engine/disp/dmacgp104.o

nvkm-y += nvkm/engine/disp/basenv50.o
nvkm-y += nvkm/engine/disp/baseg84.o
@@ -59,6 +64,7 @@ nvkm-y += nvkm/engine/disp/basegt215.o
nvkm-y += nvkm/engine/disp/basegf119.o
nvkm-y += nvkm/engine/disp/basegk104.o
nvkm-y += nvkm/engine/disp/basegk110.o
nvkm-y += nvkm/engine/disp/basegp104.o

nvkm-y += nvkm/engine/disp/corenv50.o
nvkm-y += nvkm/engine/disp/coreg84.o
@@ -70,6 +76,8 @@ nvkm-y += nvkm/engine/disp/coregk104.o
nvkm-y += nvkm/engine/disp/coregk110.o
nvkm-y += nvkm/engine/disp/coregm107.o
nvkm-y += nvkm/engine/disp/coregm200.o
nvkm-y += nvkm/engine/disp/coregp100.o
nvkm-y += nvkm/engine/disp/coregp104.o

nvkm-y += nvkm/engine/disp/ovlynv50.o
nvkm-y += nvkm/engine/disp/ovlyg84.o
@@ -77,6 +85,7 @@ nvkm-y += nvkm/engine/disp/ovlygt200.o
nvkm-y += nvkm/engine/disp/ovlygt215.o
nvkm-y += nvkm/engine/disp/ovlygf119.o
nvkm-y += nvkm/engine/disp/ovlygk104.o
nvkm-y += nvkm/engine/disp/ovlygp104.o

nvkm-y += nvkm/engine/disp/piocnv50.o
nvkm-y += nvkm/engine/disp/piocgf119.o
drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c (new file, 38 lines)
@@ -0,0 +1,38 @@
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "dmacnv50.h"
#include "rootnv50.h"

#include <nvif/class.h>

const struct nv50_disp_dmac_oclass
gp104_disp_base_oclass = {
.base.oclass = GK110_DISP_BASE_CHANNEL_DMA,
.base.minver = 0,
.base.maxver = 0,
.ctor = nv50_disp_base_new,
.func = &gp104_disp_dmac_func,
.mthd = &gf119_disp_base_chan_mthd,
.chid = 1,
};
@@ -85,6 +85,7 @@ extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_pior;
extern const struct nv50_disp_chan_mthd gf119_disp_base_chan_mthd;

extern const struct nv50_disp_chan_mthd gk104_disp_core_chan_mthd;
extern const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd;

struct nv50_disp_pioc_oclass {
int (*ctor)(const struct nv50_disp_chan_func *,
@@ -171,7 +171,7 @@ gf119_disp_core_chan_mthd = {
}
};

static void
void
gf119_disp_core_fini(struct nv50_disp_dmac *chan)
{
struct nv50_disp *disp = chan->base.root->disp;
drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c (new file, 38 lines)
@@ -0,0 +1,38 @@
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "dmacnv50.h"
#include "rootnv50.h"

#include <nvif/class.h>

const struct nv50_disp_dmac_oclass
gp100_disp_core_oclass = {
.base.oclass = GP100_DISP_CORE_CHANNEL_DMA,
.base.minver = 0,
.base.maxver = 0,
.ctor = nv50_disp_core_new,
.func = &gf119_disp_core_func,
.mthd = &gk104_disp_core_chan_mthd,
.chid = 0,
};
drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c (new file, 78 lines)
@@ -0,0 +1,78 @@
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "dmacnv50.h"
#include "rootnv50.h"

#include <subdev/timer.h>

#include <nvif/class.h>

static int
gp104_disp_core_init(struct nv50_disp_dmac *chan)
{
struct nv50_disp *disp = chan->base.root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;

/* enable error reporting */
nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000001);

/* initialise channel for dma command submission */
nvkm_wr32(device, 0x611494, chan->push);
nvkm_wr32(device, 0x611498, 0x00010000);
nvkm_wr32(device, 0x61149c, 0x00000001);
nvkm_mask(device, 0x610490, 0x00000010, 0x00000010);
nvkm_wr32(device, 0x640000, 0x00000000);
nvkm_wr32(device, 0x610490, 0x01000013);

/* wait for it to go inactive */
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x610490) & 0x80000000))
break;
) < 0) {
nvkm_error(subdev, "core init: %08x\n",
nvkm_rd32(device, 0x610490));
return -EBUSY;
}

return 0;
}

const struct nv50_disp_dmac_func
gp104_disp_core_func = {
.init = gp104_disp_core_init,
.fini = gf119_disp_core_fini,
.bind = gf119_disp_dmac_bind,
};

const struct nv50_disp_dmac_oclass
gp104_disp_core_oclass = {
.base.oclass = GP104_DISP_CORE_CHANNEL_DMA,
.base.minver = 0,
.base.maxver = 0,
.ctor = nv50_disp_core_new,
.func = &gp104_disp_core_func,
.mthd = &gk104_disp_core_chan_mthd,
.chid = 0,
};
@@ -36,7 +36,7 @@ gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
chan->base.chid << 27 | 0x00000001);
}

static void
void
gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
{
struct nv50_disp *disp = chan->base.root->disp;
drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c (new file, 66 lines)
@@ -0,0 +1,66 @@
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "dmacnv50.h"
#include "rootnv50.h"

#include <subdev/timer.h>

static int
gp104_disp_dmac_init(struct nv50_disp_dmac *chan)
{
struct nv50_disp *disp = chan->base.root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
int chid = chan->base.chid;

/* enable error reporting */
nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);

/* initialise channel for dma command submission */
nvkm_wr32(device, 0x611494 + (chid * 0x0010), chan->push);
nvkm_wr32(device, 0x611498 + (chid * 0x0010), 0x00010000);
nvkm_wr32(device, 0x61149c + (chid * 0x0010), 0x00000001);
nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);

/* wait for it to go inactive */
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
break;
) < 0) {
nvkm_error(subdev, "ch %d init: %08x\n", chid,
nvkm_rd32(device, 0x610490 + (chid * 0x10)));
return -EBUSY;
}

return 0;
}

const struct nv50_disp_dmac_func
gp104_disp_dmac_func = {
.init = gp104_disp_dmac_init,
.fini = gf119_disp_dmac_fini,
.bind = gf119_disp_dmac_bind,
};
@@ -25,8 +25,12 @@ int nv50_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
extern const struct nv50_disp_dmac_func nv50_disp_core_func;

extern const struct nv50_disp_dmac_func gf119_disp_dmac_func;
void gf119_disp_dmac_fini(struct nv50_disp_dmac *);
int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
extern const struct nv50_disp_dmac_func gf119_disp_core_func;
void gf119_disp_core_fini(struct nv50_disp_dmac *);

extern const struct nv50_disp_dmac_func gp104_disp_dmac_func;

struct nv50_disp_dmac_oclass {
int (*ctor)(const struct nv50_disp_dmac_func *,
@@ -88,4 +92,10 @@ extern const struct nv50_disp_dmac_oclass gk110_disp_base_oclass;
extern const struct nv50_disp_dmac_oclass gm107_disp_core_oclass;

extern const struct nv50_disp_dmac_oclass gm200_disp_core_oclass;

extern const struct nv50_disp_dmac_oclass gp100_disp_core_oclass;

extern const struct nv50_disp_dmac_oclass gp104_disp_core_oclass;
extern const struct nv50_disp_dmac_oclass gp104_disp_base_oclass;
extern const struct nv50_disp_dmac_oclass gp104_disp_ovly_oclass;
#endif
@@ -76,12 +76,10 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
mask |= 0x0001 << or;
mask |= 0x0100 << head;

list_for_each_entry(outp, &disp->base.outp, head) {
if ((outp->info.hasht & 0xff) == type &&
(outp->info.hashm & mask) == mask) {
*data = nvbios_outp_match(bios, outp->info.hasht,
outp->info.hashm,
*data = nvbios_outp_match(bios, outp->info.hasht, mask,
ver, hdr, cnt, len, info);
if (!*data)
return NULL;
@@ -415,7 +413,7 @@ gf119_disp_intr_supervisor(struct work_struct *work)
nvkm_wr32(device, 0x6101d0, 0x80000000);
}

static void
void
gf119_disp_intr_error(struct nv50_disp *disp, int chid)
{
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
@@ -463,7 +461,7 @@ gf119_disp_intr(struct nv50_disp *disp)
u32 stat = nvkm_rd32(device, 0x61009c);
int chid = ffs(stat) - 1;
if (chid >= 0)
gf119_disp_intr_error(disp, chid);
disp->func->intr_error(disp, chid);
intr &= ~0x00000002;
}

@@ -507,6 +505,7 @@ gf119_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
static const struct nv50_disp_func
gf119_disp = {
.intr = gf119_disp_intr,
.intr_error = gf119_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gf119_disp_root_oclass,
@@ -27,6 +27,7 @@
static const struct nv50_disp_func
gk104_disp = {
.intr = gf119_disp_intr,
.intr_error = gf119_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gk104_disp_root_oclass,
@@ -27,6 +27,7 @@
static const struct nv50_disp_func
gk110_disp = {
.intr = gf119_disp_intr,
.intr_error = gf119_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gk110_disp_root_oclass,
@@ -27,6 +27,7 @@
static const struct nv50_disp_func
gm107_disp = {
.intr = gf119_disp_intr,
.intr_error = gf119_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gm107_disp_root_oclass,
@@ -27,6 +27,7 @@
static const struct nv50_disp_func
gm200_disp = {
.intr = gf119_disp_intr,
.intr_error = gf119_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gm200_disp_root_oclass,
55
drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
Normal file
55
drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
Normal file
@ -0,0 +1,55 @@
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "nv50.h"
#include "rootnv50.h"

static const struct nv50_disp_func
gp100_disp = {
.intr = gf119_disp_intr,
.intr_error = gf119_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gp100_disp_root_oclass,
.head.vblank_init = gf119_disp_vblank_init,
.head.vblank_fini = gf119_disp_vblank_fini,
.head.scanoutpos = gf119_disp_root_scanoutpos,
.outp.internal.crt = nv50_dac_output_new,
.outp.internal.tmds = nv50_sor_output_new,
.outp.internal.lvds = nv50_sor_output_new,
.outp.internal.dp = gm200_sor_dp_new,
.dac.nr = 3,
.dac.power = nv50_dac_power,
.dac.sense = nv50_dac_sense,
.sor.nr = 4,
.sor.power = nv50_sor_power,
.sor.hda_eld = gf119_hda_eld,
.sor.hdmi = gk104_hdmi_ctrl,
.sor.magic = gm200_sor_magic,
};

int
gp100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
return gf119_disp_new_(&gp100_disp, device, index, pdisp);
}
drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c (new file, 81 lines)
@ -0,0 +1,81 @@
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "nv50.h"
#include "rootnv50.h"

static void
gp104_disp_intr_error(struct nv50_disp *disp, int chid)
{
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
u32 mthd = nvkm_rd32(device, 0x6111f0 + (chid * 12));
u32 data = nvkm_rd32(device, 0x6111f4 + (chid * 12));
u32 unkn = nvkm_rd32(device, 0x6111f8 + (chid * 12));

nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
chid, (mthd & 0x0000ffc), data, mthd, unkn);

if (chid < ARRAY_SIZE(disp->chan)) {
switch (mthd & 0xffc) {
case 0x0080:
nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
break;
default:
break;
}
}

nvkm_wr32(device, 0x61009c, (1 << chid));
nvkm_wr32(device, 0x6111f0 + (chid * 12), 0x90000000);
}

static const struct nv50_disp_func
gp104_disp = {
.intr = gf119_disp_intr,
.intr_error = gp104_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gp104_disp_root_oclass,
.head.vblank_init = gf119_disp_vblank_init,
.head.vblank_fini = gf119_disp_vblank_fini,
.head.scanoutpos = gf119_disp_root_scanoutpos,
.outp.internal.crt = nv50_dac_output_new,
.outp.internal.tmds = nv50_sor_output_new,
.outp.internal.lvds = nv50_sor_output_new,
.outp.internal.dp = gm200_sor_dp_new,
.dac.nr = 3,
.dac.power = nv50_dac_power,
.dac.sense = nv50_dac_sense,
.sor.nr = 4,
.sor.power = nv50_sor_power,
.sor.hda_eld = gf119_hda_eld,
.sor.hdmi = gk104_hdmi_ctrl,
.sor.magic = gm200_sor_magic,
};

int
gp104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
return gf119_disp_new_(&gp104_disp, device, index, pdisp);
}
@ -32,6 +32,7 @@
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/devinit.h>
#include <subdev/timer.h>

static const struct nvkm_disp_oclass *
nv50_disp_root_(struct nvkm_disp *base)
@ -269,8 +270,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
list_for_each_entry(outp, &disp->base.outp, head) {
if ((outp->info.hasht & 0xff) == type &&
(outp->info.hashm & mask) == mask) {
*data = nvbios_outp_match(bios, outp->info.hasht,
outp->info.hashm,
*data = nvbios_outp_match(bios, outp->info.hasht, mask,
ver, hdr, cnt, len, info);
if (!*data)
return NULL;
@ -426,6 +426,134 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
|
||||
return outp;
|
||||
}
|
||||
|
||||
static bool
|
||||
nv50_disp_dptmds_war(struct nvkm_device *device)
|
||||
{
|
||||
switch (device->chipset) {
|
||||
case 0x94:
|
||||
case 0x96:
|
||||
case 0x98:
|
||||
case 0xaa:
|
||||
case 0xac:
|
||||
return true;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool
|
||||
nv50_disp_dptmds_war_needed(struct nv50_disp *disp, struct dcb_output *outp)
|
||||
{
|
||||
struct nvkm_device *device = disp->base.engine.subdev.device;
|
||||
const u32 soff = __ffs(outp->or) * 0x800;
|
||||
if (nv50_disp_dptmds_war(device) && outp->type == DCB_OUTPUT_TMDS) {
|
||||
switch (nvkm_rd32(device, 0x614300 + soff) & 0x00030000) {
|
||||
case 0x00000000:
|
||||
case 0x00030000:
|
||||
return true;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
|
||||
}
|
||||
|
||||
static void
|
||||
nv50_disp_dptmds_war_2(struct nv50_disp *disp, struct dcb_output *outp)
|
||||
{
|
||||
struct nvkm_device *device = disp->base.engine.subdev.device;
|
||||
const u32 soff = __ffs(outp->or) * 0x800;
|
||||
|
||||
if (!nv50_disp_dptmds_war_needed(disp, outp))
|
||||
return;
|
||||
|
||||
nvkm_mask(device, 0x00e840, 0x80000000, 0x80000000);
|
||||
nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x03000000);
|
||||
nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000001);
|
||||
|
||||
nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x00000000);
|
||||
nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x14000000);
|
||||
nvkm_usec(device, 400, NVKM_DELAY);
|
||||
nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x00000000);
|
||||
nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x01000000);
|
||||
|
||||
if (nvkm_rd32(device, 0x61c004 + soff) & 0x00000001) {
|
||||
u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
|
||||
u32 pu_pc = seqctl & 0x0000000f;
|
||||
nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f008000);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
nv50_disp_dptmds_war_3(struct nv50_disp *disp, struct dcb_output *outp)
|
||||
{
|
||||
struct nvkm_device *device = disp->base.engine.subdev.device;
|
||||
const u32 soff = __ffs(outp->or) * 0x800;
|
||||
u32 sorpwr;
|
||||
|
||||
if (!nv50_disp_dptmds_war_needed(disp, outp))
|
||||
return;
|
||||
|
||||
sorpwr = nvkm_rd32(device, 0x61c004 + soff);
|
||||
if (sorpwr & 0x00000001) {
|
||||
u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
|
||||
u32 pd_pc = (seqctl & 0x00000f00) >> 8;
|
||||
u32 pu_pc = seqctl & 0x0000000f;
|
||||
|
||||
nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x1f008000);
|
||||
|
||||
nvkm_msec(device, 2000,
|
||||
if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
|
||||
break;
|
||||
);
|
||||
nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000);
|
||||
nvkm_msec(device, 2000,
|
||||
if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
|
||||
break;
|
||||
);
|
||||
|
||||
nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x00002000);
|
||||
nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f000000);
|
||||
}
|
||||
|
||||
nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000000);
|
||||
nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x00000000);
|
||||
|
||||
if (sorpwr & 0x00000001) {
|
||||
nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000001);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
nv50_disp_update_sppll1(struct nv50_disp *disp)
|
||||
{
|
||||
struct nvkm_device *device = disp->base.engine.subdev.device;
|
||||
bool used = false;
|
||||
int sor;
|
||||
|
||||
if (!nv50_disp_dptmds_war(device))
|
||||
return;
|
||||
|
||||
for (sor = 0; sor < disp->func->sor.nr; sor++) {
|
||||
u32 clksor = nvkm_rd32(device, 0x614300 + (sor * 0x800));
|
||||
switch (clksor & 0x03000000) {
|
||||
case 0x02000000:
|
||||
case 0x03000000:
|
||||
used = true;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (used)
|
||||
return;
|
||||
|
||||
nvkm_mask(device, 0x00e840, 0x80000000, 0x00000000);
|
||||
}
|
||||
|
||||
static void
|
||||
nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head)
|
||||
{
|
||||
@ -679,6 +807,8 @@ nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
|
||||
|
||||
nvkm_mask(device, hreg, 0x0000000f, hval);
|
||||
nvkm_mask(device, oreg, mask, oval);
|
||||
|
||||
nv50_disp_dptmds_war_2(disp, &outp->info);
|
||||
}
|
||||
|
||||
/* If programming a TMDS output on a SOR that can also be configured for
|
||||
@ -720,6 +850,7 @@ nv50_disp_intr_unk40_0(struct nv50_disp *disp, int head)
|
||||
|
||||
if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS)
|
||||
nv50_disp_intr_unk40_0_tmds(disp, &outp->info);
|
||||
nv50_disp_dptmds_war_3(disp, &outp->info);
|
||||
}
|
||||
|
||||
void
|
||||
@ -767,6 +898,7 @@ nv50_disp_intr_supervisor(struct work_struct *work)
|
||||
continue;
|
||||
nv50_disp_intr_unk40_0(disp, head);
|
||||
}
|
||||
nv50_disp_update_sppll1(disp);
|
||||
}
|
||||
|
||||
nvkm_wr32(device, 0x610030, 0x80000000);
|
||||
|
@ -68,6 +68,7 @@ struct nv50_disp_func_outp {
|
||||
|
||||
struct nv50_disp_func {
|
||||
void (*intr)(struct nv50_disp *);
|
||||
void (*intr_error)(struct nv50_disp *, int chid);
|
||||
|
||||
const struct nvkm_event_func *uevent;
|
||||
void (*super)(struct work_struct *);
|
||||
@ -114,4 +115,5 @@ void gf119_disp_vblank_init(struct nv50_disp *, int);
|
||||
void gf119_disp_vblank_fini(struct nv50_disp *, int);
|
||||
void gf119_disp_intr(struct nv50_disp *);
|
||||
void gf119_disp_intr_supervisor(struct work_struct *);
|
||||
void gf119_disp_intr_error(struct nv50_disp *, int);
|
||||
#endif
|
||||
|
@ -80,7 +80,7 @@ gk104_disp_ovly_mthd_base = {
}
};

static const struct nv50_disp_chan_mthd
const struct nv50_disp_chan_mthd
gk104_disp_ovly_chan_mthd = {
.name = "Overlay",
.addr = 0x001000,
drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c (new file, 38 lines)
@ -0,0 +1,38 @@
|
||||
/*
|
||||
* Copyright 2012 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "dmacnv50.h"
|
||||
#include "rootnv50.h"
|
||||
|
||||
#include <nvif/class.h>
|
||||
|
||||
const struct nv50_disp_dmac_oclass
|
||||
gp104_disp_ovly_oclass = {
|
||||
.base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA,
|
||||
.base.minver = 0,
|
||||
.base.maxver = 0,
|
||||
.ctor = nv50_disp_ovly_new,
|
||||
.func = &gp104_disp_dmac_func,
|
||||
.mthd = &gk104_disp_ovly_chan_mthd,
|
||||
.chid = 5,
|
||||
};
|
drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c (new file, 58 lines)
@ -0,0 +1,58 @@
|
||||
/*
|
||||
* Copyright 2015 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs <bskeggs@redhat.com>
|
||||
*/
|
||||
#include "rootnv50.h"
|
||||
#include "dmacnv50.h"
|
||||
|
||||
#include <nvif/class.h>
|
||||
|
||||
static const struct nv50_disp_root_func
|
||||
gp100_disp_root = {
|
||||
.init = gf119_disp_root_init,
|
||||
.fini = gf119_disp_root_fini,
|
||||
.dmac = {
|
||||
&gp100_disp_core_oclass,
|
||||
&gk110_disp_base_oclass,
|
||||
&gk104_disp_ovly_oclass,
|
||||
},
|
||||
.pioc = {
|
||||
&gk104_disp_oimm_oclass,
|
||||
&gk104_disp_curs_oclass,
|
||||
},
|
||||
};
|
||||
|
||||
static int
|
||||
gp100_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
|
||||
void *data, u32 size, struct nvkm_object **pobject)
|
||||
{
|
||||
return nv50_disp_root_new_(&gp100_disp_root, disp, oclass,
|
||||
data, size, pobject);
|
||||
}
|
||||
|
||||
const struct nvkm_disp_oclass
|
||||
gp100_disp_root_oclass = {
|
||||
.base.oclass = GP100_DISP,
|
||||
.base.minver = -1,
|
||||
.base.maxver = -1,
|
||||
.ctor = gp100_disp_root_new,
|
||||
};
|
drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c (new file, 58 lines)
@ -0,0 +1,58 @@
|
||||
/*
|
||||
* Copyright 2016 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs <bskeggs@redhat.com>
|
||||
*/
|
||||
#include "rootnv50.h"
|
||||
#include "dmacnv50.h"
|
||||
|
||||
#include <nvif/class.h>
|
||||
|
||||
static const struct nv50_disp_root_func
|
||||
gp104_disp_root = {
|
||||
.init = gf119_disp_root_init,
|
||||
.fini = gf119_disp_root_fini,
|
||||
.dmac = {
|
||||
&gp104_disp_core_oclass,
|
||||
&gp104_disp_base_oclass,
|
||||
&gp104_disp_ovly_oclass,
|
||||
},
|
||||
.pioc = {
|
||||
&gk104_disp_oimm_oclass,
|
||||
&gk104_disp_curs_oclass,
|
||||
},
|
||||
};
|
||||
|
||||
static int
|
||||
gp104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
|
||||
void *data, u32 size, struct nvkm_object **pobject)
|
||||
{
|
||||
return nv50_disp_root_new_(&gp104_disp_root, disp, oclass,
|
||||
data, size, pobject);
|
||||
}
|
||||
|
||||
const struct nvkm_disp_oclass
|
||||
gp104_disp_root_oclass = {
|
||||
.base.oclass = GP104_DISP,
|
||||
.base.minver = -1,
|
||||
.base.maxver = -1,
|
||||
.ctor = gp104_disp_root_new,
|
||||
};
|
@ -40,4 +40,6 @@ extern const struct nvkm_disp_oclass gk104_disp_root_oclass;
extern const struct nvkm_disp_oclass gk110_disp_root_oclass;
extern const struct nvkm_disp_oclass gm107_disp_root_oclass;
extern const struct nvkm_disp_oclass gm200_disp_root_oclass;
extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
extern const struct nvkm_disp_oclass gp104_disp_root_oclass;
#endif
@ -13,6 +13,7 @@ nvkm-y += nvkm/engine/fifo/gk20a.o
nvkm-y += nvkm/engine/fifo/gm107.o
nvkm-y += nvkm/engine/fifo/gm200.o
nvkm-y += nvkm/engine/fifo/gm20b.o
nvkm-y += nvkm/engine/fifo/gp100.o

nvkm-y += nvkm/engine/fifo/chan.o
nvkm-y += nvkm/engine/fifo/channv50.o
@ -31,3 +32,4 @@ nvkm-y += nvkm/engine/fifo/gpfifogf100.o
nvkm-y += nvkm/engine/fifo/gpfifogk104.o
nvkm-y += nvkm/engine/fifo/gpfifogk110.o
nvkm-y += nvkm/engine/fifo/gpfifogm200.o
nvkm-y += nvkm/engine/fifo/gpfifogp100.o
@ -27,4 +27,5 @@ int gk104_fifo_gpfifo_new(struct nvkm_fifo *, const struct nvkm_oclass *,
extern const struct nvkm_fifo_chan_oclass gk104_fifo_gpfifo_oclass;
extern const struct nvkm_fifo_chan_oclass gk110_fifo_gpfifo_oclass;
extern const struct nvkm_fifo_chan_oclass gm200_fifo_gpfifo_oclass;
extern const struct nvkm_fifo_chan_oclass gp100_fifo_gpfifo_oclass;
#endif
@ -329,7 +329,7 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
}

if (eu == NULL) {
enum nvkm_devidx engidx = nvkm_top_fault(device->top, unit);
enum nvkm_devidx engidx = nvkm_top_fault(device, unit);
if (engidx < NVKM_SUBDEV_NR) {
const char *src = nvkm_subdev_name[engidx];
char *dst = en;
@ -589,7 +589,6 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
struct gk104_fifo *fifo = gk104_fifo(base);
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_top *top = device->top;
int engn, runl, pbid, ret, i, j;
enum nvkm_devidx engidx;
u32 *map;
@ -608,7 +607,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)

/* Determine runlist configuration from topology device info. */
i = 0;
while ((int)(engidx = nvkm_top_engine(top, i++, &runl, &engn)) >= 0) {
while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) {
/* Determine which PBDMA handles requests for this engine. */
for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
if (map[j] & (1 << runl)) {
@ -617,8 +616,8 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
}
}

nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d\n",
engn, runl, pbid);
nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n",
engn, runl, pbid, nvkm_subdev_name[engidx]);

fifo->engine[engn].engine = nvkm_device_engine(device, engidx);
fifo->engine[engn].runl = runl;
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c (new file, 67 lines)
@ -0,0 +1,67 @@
|
||||
/*
|
||||
* Copyright 2016 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "gk104.h"
|
||||
#include "changk104.h"
|
||||
|
||||
static const struct nvkm_enum
|
||||
gp100_fifo_fault_engine[] = {
|
||||
{ 0x01, "DISPLAY" },
|
||||
{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
|
||||
{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
|
||||
{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
|
||||
{ 0x06, "HOST0" },
|
||||
{ 0x07, "HOST1" },
|
||||
{ 0x08, "HOST2" },
|
||||
{ 0x09, "HOST3" },
|
||||
{ 0x0a, "HOST4" },
|
||||
{ 0x0b, "HOST5" },
|
||||
{ 0x0c, "HOST6" },
|
||||
{ 0x0d, "HOST7" },
|
||||
{ 0x0e, "HOST8" },
|
||||
{ 0x0f, "HOST9" },
|
||||
{ 0x10, "HOST10" },
|
||||
{ 0x13, "PERF" },
|
||||
{ 0x17, "PMU" },
|
||||
{ 0x18, "PTP" },
|
||||
{ 0x1f, "PHYSICAL" },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct gk104_fifo_func
|
||||
gp100_fifo = {
|
||||
.fault.engine = gp100_fifo_fault_engine,
|
||||
.fault.reason = gk104_fifo_fault_reason,
|
||||
.fault.hubclient = gk104_fifo_fault_hubclient,
|
||||
.fault.gpcclient = gk104_fifo_fault_gpcclient,
|
||||
.chan = {
|
||||
&gp100_fifo_gpfifo_oclass,
|
||||
NULL
|
||||
},
|
||||
};
|
||||
|
||||
int
|
||||
gp100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
|
||||
{
|
||||
return gk104_fifo_new_(&gp100_fifo, device, index, 4096, pfifo);
|
||||
}
|
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c (new file, 34 lines)
@ -0,0 +1,34 @@
|
||||
/*
|
||||
* Copyright 2016 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "changk104.h"
|
||||
|
||||
#include <nvif/class.h>
|
||||
|
||||
const struct nvkm_fifo_chan_oclass
|
||||
gp100_fifo_gpfifo_oclass = {
|
||||
.base.oclass = PASCAL_CHANNEL_GPFIFO_A,
|
||||
.base.minver = 0,
|
||||
.base.maxver = 0,
|
||||
.ctor = gk104_fifo_gpfifo_new,
|
||||
};
|
@ -31,6 +31,7 @@ nvkm-y += nvkm/engine/gr/gk20a.o
nvkm-y += nvkm/engine/gr/gm107.o
nvkm-y += nvkm/engine/gr/gm200.o
nvkm-y += nvkm/engine/gr/gm20b.o
nvkm-y += nvkm/engine/gr/gp100.o

nvkm-y += nvkm/engine/gr/ctxnv40.o
nvkm-y += nvkm/engine/gr/ctxnv50.o
@ -48,3 +49,4 @@ nvkm-y += nvkm/engine/gr/ctxgk20a.o
nvkm-y += nvkm/engine/gr/ctxgm107.o
nvkm-y += nvkm/engine/gr/ctxgm200.o
nvkm-y += nvkm/engine/gr/ctxgm20b.o
nvkm-y += nvkm/engine/gr/ctxgp100.o
@ -1240,7 +1240,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
|
||||
const struct gf100_grctx_func *grctx = gr->func->grctx;
|
||||
u32 idle_timeout;
|
||||
|
||||
nvkm_mc_unk260(device->mc, 0);
|
||||
nvkm_mc_unk260(device, 0);
|
||||
|
||||
gf100_gr_mmio(gr, grctx->hub);
|
||||
gf100_gr_mmio(gr, grctx->gpc);
|
||||
@ -1264,7 +1264,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
|
||||
gf100_gr_icmd(gr, grctx->icmd);
|
||||
nvkm_wr32(device, 0x404154, idle_timeout);
|
||||
gf100_gr_mthd(gr, grctx->mthd);
|
||||
nvkm_mc_unk260(device->mc, 1);
|
||||
nvkm_mc_unk260(device, 1);
|
||||
}
|
||||
|
||||
int
|
||||
|
@ -101,6 +101,8 @@ void gm200_grctx_generate_405b60(struct gf100_gr *);
|
||||
|
||||
extern const struct gf100_grctx_func gm20b_grctx;
|
||||
|
||||
extern const struct gf100_grctx_func gp100_grctx;
|
||||
|
||||
/* context init value lists */
|
||||
|
||||
extern const struct gf100_gr_pack gf100_grctx_pack_icmd[];
|
||||
|
@ -226,7 +226,7 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
|
||||
u32 idle_timeout;
|
||||
int i;
|
||||
|
||||
nvkm_mc_unk260(device->mc, 0);
|
||||
nvkm_mc_unk260(device, 0);
|
||||
|
||||
gf100_gr_mmio(gr, grctx->hub);
|
||||
gf100_gr_mmio(gr, grctx->gpc);
|
||||
@ -253,7 +253,7 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
|
||||
gf100_gr_icmd(gr, grctx->icmd);
|
||||
nvkm_wr32(device, 0x404154, idle_timeout);
|
||||
gf100_gr_mthd(gr, grctx->mthd);
|
||||
nvkm_mc_unk260(device->mc, 1);
|
||||
nvkm_mc_unk260(device, 1);
|
||||
}
|
||||
|
||||
const struct gf100_grctx_func
|
||||
|
@ -950,7 +950,7 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
|
||||
u32 idle_timeout;
|
||||
int i;
|
||||
|
||||
nvkm_mc_unk260(device->mc, 0);
|
||||
nvkm_mc_unk260(device, 0);
|
||||
|
||||
gf100_gr_mmio(gr, grctx->hub);
|
||||
gf100_gr_mmio(gr, grctx->gpc);
|
||||
@ -979,7 +979,7 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
|
||||
gf100_gr_icmd(gr, grctx->icmd);
|
||||
nvkm_wr32(device, 0x404154, idle_timeout);
|
||||
gf100_gr_mthd(gr, grctx->mthd);
|
||||
nvkm_mc_unk260(device->mc, 1);
|
||||
nvkm_mc_unk260(device, 1);
|
||||
|
||||
nvkm_mask(device, 0x418800, 0x00200000, 0x00200000);
|
||||
nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000);
|
||||
|
drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c (new file, 179 lines)
@ -0,0 +1,179 @@
|
||||
/*
|
||||
* Copyright 2016 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs <bskeggs@redhat.com>
|
||||
*/
|
||||
#include "ctxgf100.h"
|
||||
|
||||
#include <subdev/fb.h>
|
||||
|
||||
/*******************************************************************************
|
||||
* PGRAPH context implementation
|
||||
******************************************************************************/
|
||||
|
||||
static void
|
||||
gp100_grctx_generate_pagepool(struct gf100_grctx *info)
|
||||
{
|
||||
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
|
||||
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
|
||||
const int s = 8;
|
||||
const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
|
||||
mmio_refn(info, 0x40800c, 0x00000000, s, b);
|
||||
mmio_wr32(info, 0x408010, 0x80000000);
|
||||
mmio_refn(info, 0x419004, 0x00000000, s, b);
|
||||
mmio_wr32(info, 0x419008, 0x00000000);
|
||||
}
|
||||
|
||||
static void
|
||||
gp100_grctx_generate_attrib(struct gf100_grctx *info)
|
||||
{
|
||||
struct gf100_gr *gr = info->gr;
|
||||
const struct gf100_grctx_func *grctx = gr->func->grctx;
|
||||
const u32 alpha = grctx->alpha_nr;
|
||||
const u32 attrib = grctx->attrib_nr;
|
||||
const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
|
||||
const u32 size = roundup(gr->tpc_total * pertpc, 0x80);
|
||||
const u32 access = NV_MEM_ACCESS_RW;
|
||||
const int s = 12;
|
||||
const int b = mmio_vram(info, size, (1 << s), access);
|
||||
const int max_batches = 0xffff;
|
||||
u32 ao = 0;
|
||||
u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total;
|
||||
int gpc, ppc, n = 0;
|
||||
|
||||
mmio_refn(info, 0x418810, 0x80000000, s, b);
|
||||
mmio_refn(info, 0x419848, 0x10000000, s, b);
|
||||
mmio_refn(info, 0x419c2c, 0x10000000, s, b);
|
||||
mmio_refn(info, 0x419b00, 0x00000000, s, b);
|
||||
mmio_wr32(info, 0x419b04, 0x80000000 | size >> 7);
|
||||
mmio_wr32(info, 0x405830, attrib);
|
||||
mmio_wr32(info, 0x40585c, alpha);
|
||||
mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
|
||||
|
||||
for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
|
||||
for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
|
||||
const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc];
|
||||
const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
|
||||
const u32 u = 0x418ea0 + (n * 0x04);
|
||||
const u32 o = PPC_UNIT(gpc, ppc, 0);
|
||||
if (!(gr->ppc_mask[gpc] & (1 << ppc)))
|
||||
continue;
|
||||
mmio_wr32(info, o + 0xc0, bs);
|
||||
mmio_wr32(info, o + 0xf4, bo);
|
||||
mmio_wr32(info, o + 0xf0, bs);
|
||||
bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
|
||||
mmio_wr32(info, o + 0xe4, as);
|
||||
mmio_wr32(info, o + 0xf8, ao);
|
||||
ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
|
||||
mmio_wr32(info, u, bs);
|
||||
}
|
||||
}
|
||||
|
||||
mmio_wr32(info, 0x418eec, 0x00000000);
|
||||
mmio_wr32(info, 0x41befc, 0x00000000);
|
||||
}
|
||||
|
||||
static void
|
||||
gp100_grctx_generate_405b60(struct gf100_gr *gr)
|
||||
{
|
||||
struct nvkm_device *device = gr->base.engine.subdev.device;
|
||||
const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
|
||||
u32 dist[TPC_MAX / 4] = {};
|
||||
u32 gpcs[GPC_MAX * 2] = {};
|
||||
u8 tpcnr[GPC_MAX];
|
||||
int tpc, gpc, i;
|
||||
|
||||
memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
|
||||
|
||||
/* won't result in the same distribution as the binary driver where
|
||||
* some of the gpcs have more tpcs than others, but this shall do
|
||||
* for the moment. the code for earlier gpus has this issue too.
|
||||
*/
|
||||
for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
|
||||
do {
|
||||
gpc = (gpc + 1) % gr->gpc_nr;
|
||||
} while(!tpcnr[gpc]);
|
||||
tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
|
||||
|
||||
dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
|
||||
gpcs[gpc + (gr->gpc_nr * (tpc / 4))] |= i << (tpc * 8);
|
||||
}
|
||||
|
||||
for (i = 0; i < dist_nr; i++)
|
||||
nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]);
|
||||
for (i = 0; i < gr->gpc_nr * 2; i++)
|
||||
nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
|
||||
}
|
||||
|
||||
static void
|
||||
gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
|
||||
{
|
||||
struct nvkm_device *device = gr->base.engine.subdev.device;
|
||||
const struct gf100_grctx_func *grctx = gr->func->grctx;
|
||||
u32 idle_timeout, tmp;
|
||||
int i;
|
||||
|
||||
gf100_gr_mmio(gr, gr->fuc_sw_ctx);
|
||||
|
||||
idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
|
||||
|
||||
grctx->pagepool(info);
|
||||
grctx->bundle(info);
|
||||
grctx->attrib(info);
|
||||
grctx->unkn(gr);
|
||||
|
||||
gm200_grctx_generate_tpcid(gr);
|
||||
gf100_grctx_generate_r406028(gr);
|
||||
gk104_grctx_generate_r418bb8(gr);
|
||||
|
||||
for (i = 0; i < 8; i++)
|
||||
nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
|
||||
nvkm_wr32(device, 0x406500, 0x00000000);
|
||||
|
||||
nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
|
||||
|
||||
for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
|
||||
tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 5);
|
||||
nvkm_wr32(device, 0x4041c4, tmp);
|
||||
|
||||
gp100_grctx_generate_405b60(gr);
|
||||
|
||||
gf100_gr_icmd(gr, gr->fuc_bundle);
|
||||
nvkm_wr32(device, 0x404154, idle_timeout);
|
||||
gf100_gr_mthd(gr, gr->fuc_method);
|
||||
}
|
||||
|
||||
const struct gf100_grctx_func
|
||||
gp100_grctx = {
|
||||
.main = gp100_grctx_generate_main,
|
||||
.unkn = gk104_grctx_generate_unkn,
|
||||
.bundle = gm107_grctx_generate_bundle,
|
||||
.bundle_size = 0x3000,
|
||||
.bundle_min_gpm_fifo_depth = 0x180,
|
||||
.bundle_token_limit = 0x1080,
|
||||
.pagepool = gp100_grctx_generate_pagepool,
|
||||
.pagepool_size = 0x20000,
|
||||
.attrib = gp100_grctx_generate_attrib,
|
||||
.attrib_nr_max = 0x660,
|
||||
.attrib_nr = 0x440,
|
||||
.alpha_nr_max = 0xc00,
|
||||
.alpha_nr = 0x800,
|
||||
};
|
@ -1457,24 +1457,30 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
struct nvkm_device *device = subdev->device;
struct nvkm_secboot *sb = device->secboot;
int i;
int ret = 0;

if (gr->firmware) {
/* load fuc microcode */
nvkm_mc_unk260(device->mc, 0);
nvkm_mc_unk260(device, 0);

/* securely-managed falcons must be reset using secure boot */
if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
else
gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c,
&gr->fuc409d);
if (ret)
return ret;

if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
else
gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac,
&gr->fuc41ad);
if (ret)
return ret;

nvkm_mc_unk260(device->mc, 1);
nvkm_mc_unk260(device, 1);

/* start both of them running */
nvkm_wr32(device, 0x409840, 0xffffffff);
@ -1576,7 +1582,7 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
}

/* load HUB microcode */
nvkm_mc_unk260(device->mc, 0);
nvkm_mc_unk260(device, 0);
nvkm_wr32(device, 0x4091c0, 0x01000000);
for (i = 0; i < gr->func->fecs.ucode->data.size / 4; i++)
nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]);
@ -1599,7 +1605,7 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
nvkm_wr32(device, 0x41a188, i >> 6);
nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]);
}
nvkm_mc_unk260(device->mc, 1);
nvkm_mc_unk260(device, 1);

/* load register lists */
gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000);
@ -292,4 +292,6 @@ extern const struct gf100_gr_init gm107_gr_init_l1c_0[];
extern const struct gf100_gr_init gm107_gr_init_wwdx_0[];
extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
void gm107_gr_init_bios(struct gf100_gr *);

void gm200_gr_init_gpc_mmu(struct gf100_gr *);
#endif
@ -361,6 +361,5 @@ gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
if (ret)
return ret;


return 0;
}
@ -38,7 +38,7 @@ gm200_gr_rops(struct gf100_gr *gr)
return nvkm_rd32(gr->base.engine.subdev.device, 0x12006c);
}

static void
void
gm200_gr_init_gpc_mmu(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c (new file, 171 lines)
@ -0,0 +1,171 @@
|
||||
/*
|
||||
* Copyright 2016 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs <bskeggs@redhat.com>
|
||||
*/
|
||||
#include "gf100.h"
|
||||
#include "ctxgf100.h"
|
||||
|
||||
#include <nvif/class.h>
|
||||
|
||||
/*******************************************************************************
|
||||
* PGRAPH engine/subdev functions
|
||||
******************************************************************************/
|
||||
|
||||
static void
|
||||
gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
|
||||
{
|
||||
struct nvkm_device *device = gr->base.engine.subdev.device;
|
||||
/*XXX: otherwise identical to gm200 aside from mask.. do everywhere? */
|
||||
const u32 fbp_count = nvkm_rd32(device, 0x12006c) & 0x0000000f;
|
||||
nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
|
||||
nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
|
||||
}
|
||||
|
||||
static int
|
||||
gp100_gr_init(struct gf100_gr *gr)
|
||||
{
|
||||
struct nvkm_device *device = gr->base.engine.subdev.device;
|
||||
const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
|
||||
u32 data[TPC_MAX / 8] = {};
|
||||
u8 tpcnr[GPC_MAX];
|
||||
int gpc, tpc, rop;
|
||||
int i;
|
||||
|
||||
gr->func->init_gpc_mmu(gr);
|
||||
|
||||
gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
|
||||
|
||||
nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
|
||||
|
||||
memset(data, 0x00, sizeof(data));
|
||||
memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
|
||||
for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
|
||||
do {
|
||||
gpc = (gpc + 1) % gr->gpc_nr;
|
||||
} while (!tpcnr[gpc]);
|
||||
tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
|
||||
|
||||
data[i / 8] |= tpc << ((i % 8) * 4);
|
||||
}
|
||||
|
||||
nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
|
||||
nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
|
||||
nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
|
||||
nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
|
||||
|
||||
for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
|
||||
nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
|
||||
gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
|
||||
nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
|
||||
gr->tpc_total);
|
||||
nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
|
||||
}
|
||||
|
||||
nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
|
||||
nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
|
||||
nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
|
||||
|
||||
gr->func->init_rop_active_fbps(gr);
|
||||
|
||||
nvkm_wr32(device, 0x400500, 0x00010001);
|
||||
nvkm_wr32(device, 0x400100, 0xffffffff);
|
||||
nvkm_wr32(device, 0x40013c, 0xffffffff);
|
||||
nvkm_wr32(device, 0x400124, 0x00000002);
|
||||
nvkm_wr32(device, 0x409c24, 0x000f0002);
|
||||
nvkm_wr32(device, 0x405848, 0xc0000000);
|
||||
nvkm_mask(device, 0x40584c, 0x00000000, 0x00000001);
|
||||
nvkm_wr32(device, 0x404000, 0xc0000000);
|
||||
nvkm_wr32(device, 0x404600, 0xc0000000);
|
||||
nvkm_wr32(device, 0x408030, 0xc0000000);
|
||||
nvkm_wr32(device, 0x404490, 0xc0000000);
|
||||
nvkm_wr32(device, 0x406018, 0xc0000000);
|
||||
nvkm_wr32(device, 0x407020, 0x40000000);
|
||||
nvkm_wr32(device, 0x405840, 0xc0000000);
|
||||
nvkm_wr32(device, 0x405844, 0x00ffffff);
|
||||
nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
|
||||
|
||||
nvkm_mask(device, 0x419c9c, 0x00010000, 0x00010000);
|
||||
nvkm_mask(device, 0x419c9c, 0x00020000, 0x00020000);
|
||||
|
||||
gr->func->init_ppc_exceptions(gr);
|
||||
|
||||
for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
|
||||
nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
|
||||
nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
|
||||
nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
|
||||
nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
|
||||
for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
|
||||
nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
|
||||
nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
|
||||
nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
|
||||
nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
|
||||
nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
|
||||
nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
|
||||
nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
|
||||
nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000105);
|
||||
}
|
||||
nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
|
||||
nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
|
||||
}
|
||||
|
||||
for (rop = 0; rop < gr->rop_nr; rop++) {
|
||||
nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
|
||||
nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
|
||||
nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
|
||||
nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
|
||||
}
|
||||
|
||||
nvkm_wr32(device, 0x400108, 0xffffffff);
|
||||
nvkm_wr32(device, 0x400138, 0xffffffff);
|
||||
nvkm_wr32(device, 0x400118, 0xffffffff);
|
||||
nvkm_wr32(device, 0x400130, 0xffffffff);
|
||||
nvkm_wr32(device, 0x40011c, 0xffffffff);
|
||||
nvkm_wr32(device, 0x400134, 0xffffffff);
|
||||
|
||||
gf100_gr_zbc_init(gr);
|
||||
|
||||
return gf100_gr_init_ctxctl(gr);
|
||||
}
|
||||
|
||||
static const struct gf100_gr_func
|
||||
gp100_gr = {
|
||||
.init = gp100_gr_init,
|
||||
.init_gpc_mmu = gm200_gr_init_gpc_mmu,
|
||||
.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
|
||||
.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
|
||||
.rops = gm200_gr_rops,
|
||||
.ppc_nr = 2,
|
||||
.grctx = &gp100_grctx,
|
||||
.sclass = {
|
||||
{ -1, -1, FERMI_TWOD_A },
|
||||
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
|
||||
{ -1, -1, PASCAL_A, &gf100_fermi },
|
||||
{ -1, -1, PASCAL_COMPUTE_A },
|
||||
{}
|
||||
}
|
||||
};
|
||||
|
||||
int
|
||||
gp100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
|
||||
{
|
||||
return gm200_gr_new_(&gp100_gr, device, index, pgr);
|
||||
}
|
@ -26,6 +26,49 @@
|
||||
#include <subdev/bios.h>
|
||||
#include <subdev/bios/bmp.h>
|
||||
#include <subdev/bios/bit.h>
|
||||
#include <subdev/bios/image.h>
|
||||
|
||||
static bool
|
||||
nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
|
||||
{
|
||||
u32 p = *addr;
|
||||
|
||||
if (*addr > bios->image0_size && bios->imaged_addr) {
|
||||
*addr -= bios->image0_size;
|
||||
*addr += bios->imaged_addr;
|
||||
}
|
||||
|
||||
if (unlikely(*addr + size >= bios->size)) {
|
||||
nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
u8
|
||||
nvbios_rd08(struct nvkm_bios *bios, u32 addr)
|
||||
{
|
||||
if (likely(nvbios_addr(bios, &addr, 1)))
|
||||
return bios->data[addr];
|
||||
return 0x00;
|
||||
}
|
||||
|
||||
u16
|
||||
nvbios_rd16(struct nvkm_bios *bios, u32 addr)
|
||||
{
|
||||
if (likely(nvbios_addr(bios, &addr, 2)))
|
||||
return get_unaligned_le16(&bios->data[addr]);
|
||||
return 0x0000;
|
||||
}
|
||||
|
||||
u32
|
||||
nvbios_rd32(struct nvkm_bios *bios, u32 addr)
|
||||
{
|
||||
if (likely(nvbios_addr(bios, &addr, 4)))
|
||||
return get_unaligned_le32(&bios->data[addr]);
|
||||
return 0x00000000;
|
||||
}
|
||||
|
||||
u8
|
||||
nvbios_checksum(const u8 *data, int size)
|
||||
@ -100,8 +143,9 @@ int
|
||||
nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
|
||||
{
|
||||
struct nvkm_bios *bios;
|
||||
struct nvbios_image image;
|
||||
struct bit_entry bit_i;
|
||||
int ret;
|
||||
int ret, idx = 0;
|
||||
|
||||
if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL)))
|
||||
return -ENOMEM;
|
||||
@ -111,6 +155,19 @@ nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Some tables have weird pointers that need adjustment before
|
||||
* they're dereferenced. I'm not entirely sure why...
|
||||
*/
|
||||
if (nvbios_image(bios, idx++, &image)) {
|
||||
bios->image0_size = image.size;
|
||||
while (nvbios_image(bios, idx++, &image)) {
|
||||
if (image.type == 0xe0) {
|
||||
bios->imaged_addr = image.base;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* detect type of vbios we're dealing with */
|
||||
bios->bmp_offset = nvbios_findstr(bios->data, bios->size,
|
||||
"\xff\x7f""NV\0", 5);
|
||||
|
@ -40,6 +40,7 @@ nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
|
||||
case 0x30:
|
||||
case 0x40:
|
||||
case 0x41:
|
||||
case 0x42:
|
||||
*hdr = nvbios_rd08(bios, data + 0x01);
|
||||
*len = nvbios_rd08(bios, data + 0x02);
|
||||
*cnt = nvbios_rd08(bios, data + 0x03);
|
||||
@ -70,6 +71,7 @@ nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx,
|
||||
break;
|
||||
case 0x40:
|
||||
case 0x41:
|
||||
case 0x42:
|
||||
*hdr = nvbios_rd08(bios, data + 0x04);
|
||||
*cnt = 0;
|
||||
*len = 0;
|
||||
@ -109,6 +111,7 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx,
|
||||
break;
|
||||
case 0x40:
|
||||
case 0x41:
|
||||
case 0x42:
|
||||
info->flags = nvbios_rd08(bios, data + 0x04);
|
||||
info->script[0] = nvbios_rd16(bios, data + 0x05);
|
||||
info->script[1] = nvbios_rd16(bios, data + 0x07);
|
||||
@ -180,6 +183,11 @@ nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
|
||||
info->pe = nvbios_rd08(bios, data + 0x02);
|
||||
info->tx_pu = nvbios_rd08(bios, data + 0x03);
|
||||
break;
|
||||
case 0x42:
|
||||
info->dc = nvbios_rd08(bios, data + 0x00);
|
||||
info->pe = nvbios_rd08(bios, data + 0x01);
|
||||
info->tx_pu = nvbios_rd08(bios, data + 0x02);
|
||||
break;
|
||||
default:
|
||||
data = 0x0000;
|
||||
break;
|
||||
|
@ -68,11 +68,16 @@ nvbios_imagen(struct nvkm_bios *bios, struct nvbios_image *image)
bool
nvbios_image(struct nvkm_bios *bios, int idx, struct nvbios_image *image)
{
u32 imaged_addr = bios->imaged_addr;
memset(image, 0x00, sizeof(*image));
bios->imaged_addr = 0;
do {
image->base += image->size;
if (image->last || !nvbios_imagen(bios, image))
if (image->last || !nvbios_imagen(bios, image)) {
bios->imaged_addr = imaged_addr;
return false;
}
} while(idx--);
bios->imaged_addr = imaged_addr;
return true;
}
@ -77,15 +77,17 @@ g84_pll_mapping[] = {
|
||||
{}
|
||||
};
|
||||
|
||||
static u16
|
||||
static u32
|
||||
pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
|
||||
{
|
||||
struct bit_entry bit_C;
|
||||
u16 data = 0x0000;
|
||||
u32 data = 0x0000;
|
||||
|
||||
if (!bit_entry(bios, 'C', &bit_C)) {
|
||||
if (bit_C.version == 1 && bit_C.length >= 10)
|
||||
data = nvbios_rd16(bios, bit_C.offset + 8);
|
||||
if (bit_C.version == 2 && bit_C.length >= 4)
|
||||
data = nvbios_rd32(bios, bit_C.offset + 0);
|
||||
if (data) {
|
||||
*ver = nvbios_rd08(bios, data + 0);
|
||||
*hdr = nvbios_rd08(bios, data + 1);
|
||||
@ -137,12 +139,12 @@ pll_map(struct nvkm_bios *bios)
|
||||
}
|
||||
}
|
||||
|
||||
static u16
|
||||
static u32
|
||||
pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
|
||||
{
|
||||
struct pll_mapping *map;
|
||||
u8 hdr, cnt;
|
||||
u16 data;
|
||||
u32 data;
|
||||
|
||||
data = pll_limits_table(bios, ver, &hdr, &cnt, len);
|
||||
if (data && *ver >= 0x30) {
|
||||
@ -160,7 +162,7 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
|
||||
map = pll_map(bios);
|
||||
while (map && map->reg) {
|
||||
if (map->reg == reg && *ver >= 0x20) {
|
||||
u16 addr = (data += hdr);
|
||||
u32 addr = (data += hdr);
|
||||
*type = map->type;
|
||||
while (cnt--) {
|
||||
if (nvbios_rd32(bios, data) == map->reg)
|
||||
@ -179,12 +181,12 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
|
||||
return 0x0000;
|
||||
}
|
||||
|
||||
static u16
|
||||
static u32
|
||||
pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
|
||||
{
|
||||
struct pll_mapping *map;
|
||||
u8 hdr, cnt;
|
||||
u16 data;
|
||||
u32 data;
|
||||
|
||||
data = pll_limits_table(bios, ver, &hdr, &cnt, len);
|
||||
if (data && *ver >= 0x30) {
|
||||
@ -202,7 +204,7 @@ pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
|
||||
map = pll_map(bios);
|
||||
while (map && map->reg) {
|
||||
if (map->type == type && *ver >= 0x20) {
|
||||
u16 addr = (data += hdr);
|
||||
u32 addr = (data += hdr);
|
||||
*reg = map->reg;
|
||||
while (cnt--) {
|
||||
if (nvbios_rd32(bios, data) == map->reg)
|
||||
@ -228,7 +230,7 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
|
||||
struct nvkm_device *device = subdev->device;
|
||||
u8 ver, len;
|
||||
u32 reg = type;
|
||||
u16 data;
|
||||
u32 data;
|
||||
|
||||
if (type > PLL_MAX) {
|
||||
reg = type;
|
||||
|
@ -26,21 +26,6 @@
|
||||
#include <subdev/bios/image.h>
|
||||
#include <subdev/bios/pmu.h>
|
||||
|
||||
static u32
|
||||
weirdo_pointer(struct nvkm_bios *bios, u32 data)
|
||||
{
|
||||
struct nvbios_image image;
|
||||
int idx = 0;
|
||||
if (nvbios_image(bios, idx++, &image)) {
|
||||
data -= image.size;
|
||||
while (nvbios_image(bios, idx++, &image)) {
|
||||
if (image.type == 0xe0)
|
||||
return image.base + data;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
u32
|
||||
nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
|
||||
{
|
||||
@ -50,7 +35,7 @@ nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
|
||||
if (!bit_entry(bios, 'p', &bit_p)) {
|
||||
if (bit_p.version == 2 && bit_p.length >= 4)
|
||||
data = nvbios_rd32(bios, bit_p.offset + 0x00);
|
||||
if ((data = weirdo_pointer(bios, data))) {
|
||||
if (data) {
|
||||
*ver = nvbios_rd08(bios, data + 0x00); /* maybe? */
|
||||
*hdr = nvbios_rd08(bios, data + 0x01);
|
||||
*len = nvbios_rd08(bios, data + 0x02);
|
||||
@ -97,8 +82,7 @@ nvbios_pmuRm(struct nvkm_bios *bios, u8 type, struct nvbios_pmuR *info)
|
||||
u32 data;
|
||||
memset(info, 0x00, sizeof(*info));
|
||||
while ((data = nvbios_pmuEp(bios, idx++, &ver, &hdr, &pmuE))) {
|
||||
if ( pmuE.type == type &&
|
||||
(data = weirdo_pointer(bios, pmuE.data))) {
|
||||
if (pmuE.type == type && (data = pmuE.data)) {
|
||||
info->init_addr_pmu = nvbios_rd32(bios, data + 0x08);
|
||||
info->args_addr_pmu = nvbios_rd32(bios, data + 0x0c);
|
||||
info->boot_addr = data + 0x30;
|
||||
|
@ -30,11 +30,11 @@ nvbios_rammapTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
u16 rammap = 0x0000;
u32 rammap = 0x0000;

if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2)
rammap = nvbios_rd16(bios, bit_P.offset + 4);
rammap = nvbios_rd32(bios, bit_P.offset + 4);

if (rammap) {
*ver = nvbios_rd08(bios, rammap + 0);
@ -61,7 +61,7 @@ nvbios_rammapEe(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
u16 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz);
u32 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz);
if (rammap && idx < *cnt) {
rammap = rammap + *hdr + (idx * (*len + (snr * ssz)));
*hdr = *len;
@ -99,7 +99,7 @@ read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
|
||||
{
|
||||
struct nvkm_device *device = clk->base.subdev.device;
|
||||
u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
|
||||
u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
|
||||
u32 sclk, sctl, sdiv = 2;
|
||||
|
||||
switch (ssrc & 0x00000003) {
|
||||
case 0:
|
||||
@ -109,13 +109,21 @@ read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
|
||||
case 2:
|
||||
return 100000;
|
||||
case 3:
|
||||
if (sctl & 0x80000000) {
|
||||
u32 sclk = read_vco(clk, dsrc + (doff * 4));
|
||||
u32 sdiv = (sctl & 0x0000003f) + 2;
|
||||
return (sclk * 2) / sdiv;
|
||||
sclk = read_vco(clk, dsrc + (doff * 4));
|
||||
|
||||
/* Memclk has doff of 0 despite its alt. location */
|
||||
if (doff <= 2) {
|
||||
sctl = nvkm_rd32(device, dctl + (doff * 4));
|
||||
|
||||
if (sctl & 0x80000000) {
|
||||
if (ssrc & 0x100)
|
||||
sctl >>= 8;
|
||||
|
||||
sdiv = (sctl & 0x3f) + 2;
|
||||
}
|
||||
}
|
||||
|
||||
return read_vco(clk, dsrc + (doff * 4));
|
||||
return (sclk * 2) / sdiv;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
@ -366,11 +374,17 @@ gf100_clk_prog_2(struct gf100_clk *clk, int idx)
|
||||
if (info->coef) {
|
||||
nvkm_wr32(device, addr + 0x04, info->coef);
|
||||
nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
|
||||
|
||||
/* Test PLL lock */
|
||||
nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
|
||||
nvkm_msec(device, 2000,
|
||||
if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
|
||||
break;
|
||||
);
|
||||
nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
|
||||
nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);
|
||||
|
||||
/* Enable sync mode */
|
||||
nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -393,11 +393,17 @@ gk104_clk_prog_2(struct gk104_clk *clk, int idx)
|
||||
if (info->coef) {
|
||||
nvkm_wr32(device, addr + 0x04, info->coef);
|
||||
nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
|
||||
|
||||
/* Test PLL lock */
|
||||
nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
|
||||
nvkm_msec(device, 2000,
|
||||
if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
|
||||
break;
|
||||
);
|
||||
nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
|
||||
nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);
|
||||
|
||||
/* Enable sync mode */
|
||||
nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -28,69 +28,6 @@
|
||||
#include <core/tegra.h>
|
||||
#include <subdev/timer.h>
|
||||
|
||||
#define KHZ (1000)
|
||||
#define MHZ (KHZ * 1000)
|
||||
|
||||
#define MASK(w) ((1 << w) - 1)
|
||||
|
||||
#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0)
|
||||
#define GPCPLL_CFG_ENABLE BIT(0)
|
||||
#define GPCPLL_CFG_IDDQ BIT(1)
|
||||
#define GPCPLL_CFG_LOCK_DET_OFF BIT(4)
|
||||
#define GPCPLL_CFG_LOCK BIT(17)
|
||||
|
||||
#define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4)
|
||||
#define GPCPLL_COEFF_M_SHIFT 0
|
||||
#define GPCPLL_COEFF_M_WIDTH 8
|
||||
#define GPCPLL_COEFF_N_SHIFT 8
|
||||
#define GPCPLL_COEFF_N_WIDTH 8
|
||||
#define GPCPLL_COEFF_P_SHIFT 16
|
||||
#define GPCPLL_COEFF_P_WIDTH 6
|
||||
|
||||
#define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc)
|
||||
#define GPCPLL_CFG2_SETUP2_SHIFT 16
|
||||
#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24
|
||||
|
||||
#define GPCPLL_CFG3 (SYS_GPCPLL_CFG_BASE + 0x18)
|
||||
#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16
|
||||
|
||||
#define GPC_BCASE_GPCPLL_CFG_BASE 0x00132800
|
||||
#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c)
|
||||
#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0
|
||||
#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8
|
||||
#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16
|
||||
#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22
|
||||
#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31
|
||||
|
||||
#define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100)
|
||||
#define SEL_VCO_GPC2CLK_OUT_SHIFT 0
|
||||
|
||||
#define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250)
|
||||
#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1
|
||||
#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31
|
||||
#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1
|
||||
#define GPC2CLK_OUT_VCODIV_WIDTH 6
|
||||
#define GPC2CLK_OUT_VCODIV_SHIFT 8
|
||||
#define GPC2CLK_OUT_VCODIV1 0
|
||||
#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
|
||||
GPC2CLK_OUT_VCODIV_SHIFT)
|
||||
#define GPC2CLK_OUT_BYPDIV_WIDTH 6
|
||||
#define GPC2CLK_OUT_BYPDIV_SHIFT 0
|
||||
#define GPC2CLK_OUT_BYPDIV31 0x3c
|
||||
#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
|
||||
GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\
|
||||
| (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\
|
||||
| (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT))
|
||||
#define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \
|
||||
GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
|
||||
| (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \
|
||||
| (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT))
|
||||
|
||||
#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCASE_GPCPLL_CFG_BASE + 0xa0)
|
||||
#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24
|
||||
#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
|
||||
(0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
|
||||
|
||||
static const u8 _pl_to_div[] = {
|
||||
/* PL: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */
|
||||
/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
|
||||
@ -124,7 +61,7 @@ static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
|
||||
.min_pl = 1, .max_pl = 32,
|
||||
};
|
||||
|
||||
static void
|
||||
void
|
||||
gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll)
|
||||
{
|
||||
struct nvkm_device *device = clk->base.subdev.device;
|
||||
@ -136,20 +73,33 @@ gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll)
|
||||
pll->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
|
||||
}
|
||||
|
||||
static u32
|
||||
gk20a_pllg_calc_rate(struct gk20a_clk *clk)
|
||||
void
|
||||
gk20a_pllg_write_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
|
||||
{
|
||||
struct nvkm_device *device = clk->base.subdev.device;
|
||||
u32 val;
|
||||
|
||||
val = (pll->m & MASK(GPCPLL_COEFF_M_WIDTH)) << GPCPLL_COEFF_M_SHIFT;
|
||||
val |= (pll->n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
|
||||
val |= (pll->pl & MASK(GPCPLL_COEFF_P_WIDTH)) << GPCPLL_COEFF_P_SHIFT;
|
||||
nvkm_wr32(device, GPCPLL_COEFF, val);
|
||||
}
|
||||
|
||||
u32
|
||||
gk20a_pllg_calc_rate(struct gk20a_clk *clk, struct gk20a_pll *pll)
|
||||
{
|
||||
u32 rate;
|
||||
u32 divider;
|
||||
|
||||
rate = clk->parent_rate * clk->pll.n;
|
||||
divider = clk->pll.m * clk->pl_to_div(clk->pll.pl);
|
||||
rate = clk->parent_rate * pll->n;
|
||||
divider = pll->m * clk->pl_to_div(pll->pl);
|
||||
|
||||
return rate / divider / 2;
|
||||
}
|
||||
|
||||
static int
|
||||
gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
|
||||
int
|
||||
gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate,
|
||||
struct gk20a_pll *pll)
|
||||
{
|
||||
struct nvkm_subdev *subdev = &clk->base.subdev;
|
||||
u32 target_clk_f, ref_clk_f, target_freq;
|
||||
@ -163,16 +113,13 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
|
||||
target_clk_f = rate * 2 / KHZ;
|
||||
ref_clk_f = clk->parent_rate / KHZ;
|
||||
|
||||
max_vco_f = clk->params->max_vco;
|
||||
target_vco_f = target_clk_f + target_clk_f / 50;
|
||||
max_vco_f = max(clk->params->max_vco, target_vco_f);
|
||||
min_vco_f = clk->params->min_vco;
|
||||
best_m = clk->params->max_m;
|
||||
best_n = clk->params->min_n;
|
||||
best_pl = clk->params->min_pl;
|
||||
|
||||
target_vco_f = target_clk_f + target_clk_f / 50;
|
||||
if (max_vco_f < target_vco_f)
|
||||
max_vco_f = target_vco_f;
|
||||
|
||||
/* min_pl <= high_pl <= max_pl */
|
||||
high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
|
||||
high_pl = min(high_pl, clk->params->max_pl);
|
||||
@ -195,9 +142,7 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
|
||||
target_vco_f = target_clk_f * clk->pl_to_div(pl);
|
||||
|
||||
for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
|
||||
u32 u_f, vco_f;
|
||||
|
||||
u_f = ref_clk_f / m;
|
||||
u32 u_f = ref_clk_f / m;
|
||||
|
||||
if (u_f < clk->params->min_u)
|
||||
break;
|
||||
@ -211,6 +156,8 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
|
||||
break;
|
||||
|
||||
for (; n <= n2; n++) {
|
||||
u32 vco_f;
|
||||
|
||||
if (n < clk->params->min_n)
|
||||
continue;
|
||||
if (n > clk->params->max_n)
|
||||
@ -247,16 +194,16 @@ found_match:
|
||||
"no best match for target @ %dMHz on gpc_pll",
|
||||
target_clk_f / KHZ);
|
||||
|
||||
clk->pll.m = best_m;
|
||||
clk->pll.n = best_n;
|
||||
clk->pll.pl = best_pl;
|
||||
pll->m = best_m;
|
||||
pll->n = best_n;
|
||||
pll->pl = best_pl;
|
||||
|
||||
target_freq = gk20a_pllg_calc_rate(clk);
|
||||
target_freq = gk20a_pllg_calc_rate(clk, pll);
|
||||
|
||||
nvkm_debug(subdev,
|
||||
"actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
|
||||
target_freq / MHZ, clk->pll.m, clk->pll.n, clk->pll.pl,
|
||||
clk->pl_to_div(clk->pll.pl));
|
||||
"actual target freq %d KHz, M %d, N %d, PL %d(div%d)\n",
|
||||
target_freq / KHZ, pll->m, pll->n, pll->pl,
|
||||
clk->pl_to_div(pll->pl));
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -265,45 +212,36 @@ gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
|
||||
{
|
||||
struct nvkm_subdev *subdev = &clk->base.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
u32 val;
|
||||
int ramp_timeout;
|
||||
struct gk20a_pll pll;
|
||||
int ret = 0;
|
||||
|
||||
/* get old coefficients */
|
||||
val = nvkm_rd32(device, GPCPLL_COEFF);
|
||||
gk20a_pllg_read_mnp(clk, &pll);
|
||||
/* do nothing if NDIV is the same */
|
||||
if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
|
||||
if (n == pll.n)
|
||||
return 0;
|
||||
|
||||
/* setup */
|
||||
nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
|
||||
0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
|
||||
nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
|
||||
0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);
|
||||
|
||||
/* pll slowdown mode */
|
||||
nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
|
||||
BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
|
||||
BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
|
||||
|
||||
/* new ndiv ready for ramp */
|
||||
val = nvkm_rd32(device, GPCPLL_COEFF);
|
||||
val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
|
||||
val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
|
||||
pll.n = n;
|
||||
udelay(1);
|
||||
nvkm_wr32(device, GPCPLL_COEFF, val);
|
||||
gk20a_pllg_write_mnp(clk, &pll);
|
||||
|
||||
/* dynamic ramp to new ndiv */
|
||||
val = nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
|
||||
val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
|
||||
udelay(1);
|
||||
nvkm_wr32(device, GPCPLL_NDIV_SLOWDOWN, val);
|
||||
nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
|
||||
BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
|
||||
BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));
|
||||
|
||||
for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
|
||||
udelay(1);
|
||||
val = nvkm_rd32(device, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
|
||||
if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
|
||||
break;
|
||||
}
|
||||
/* wait for ramping to complete */
|
||||
if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
|
||||
GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
|
||||
GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
|
||||
ret = -ETIMEDOUT;
|
||||
|
||||
/* exit slowdown mode */
|
||||
nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
|
||||
@ -311,21 +249,35 @@ gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
|
||||
BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
|
||||
nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
|
||||
|
||||
if (ramp_timeout <= 0) {
|
||||
nvkm_error(subdev, "gpcpll dynamic ramp timeout\n");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
static int
|
||||
gk20a_pllg_enable(struct gk20a_clk *clk)
|
||||
{
|
||||
struct nvkm_device *device = clk->base.subdev.device;
|
||||
u32 val;
|
||||
|
||||
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
|
||||
nvkm_rd32(device, GPCPLL_CFG);
|
||||
|
||||
/* enable lock detection */
|
||||
val = nvkm_rd32(device, GPCPLL_CFG);
|
||||
if (val & GPCPLL_CFG_LOCK_DET_OFF) {
|
||||
val &= ~GPCPLL_CFG_LOCK_DET_OFF;
|
||||
nvkm_wr32(device, GPCPLL_CFG, val);
|
||||
}
|
||||
|
||||
/* wait for lock */
|
||||
if (nvkm_wait_usec(device, 300, GPCPLL_CFG, GPCPLL_CFG_LOCK,
|
||||
GPCPLL_CFG_LOCK) < 0)
|
||||
return -ETIMEDOUT;
|
||||
|
||||
/* switch to VCO mode */
|
||||
nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
|
||||
BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
@ -333,117 +285,81 @@ gk20a_pllg_disable(struct gk20a_clk *clk)
|
||||
{
|
||||
struct nvkm_device *device = clk->base.subdev.device;
|
||||
|
||||
/* put PLL in bypass before disabling it */
|
||||
nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
|
||||
|
||||
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
|
||||
nvkm_rd32(device, GPCPLL_CFG);
|
||||
}
|
||||
|
||||
static int
|
||||
_gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
|
||||
gk20a_pllg_program_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
|
||||
{
|
||||
struct nvkm_subdev *subdev = &clk->base.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
u32 val, cfg;
|
||||
struct gk20a_pll old_pll;
|
||||
u32 n_lo;
|
||||
struct gk20a_pll cur_pll;
|
||||
int ret;
|
||||
|
||||
/* get old coefficients */
|
||||
gk20a_pllg_read_mnp(clk, &old_pll);
|
||||
gk20a_pllg_read_mnp(clk, &cur_pll);
|
||||
|
||||
/* do NDIV slide if there is no change in M and PL */
|
||||
cfg = nvkm_rd32(device, GPCPLL_CFG);
|
||||
if (allow_slide && clk->pll.m == old_pll.m &&
|
||||
clk->pll.pl == old_pll.pl && (cfg & GPCPLL_CFG_ENABLE)) {
|
||||
return gk20a_pllg_slide(clk, clk->pll.n);
|
||||
}
|
||||
/* split VCO-to-bypass jump in half by setting out divider 1:2 */
|
||||
nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
|
||||
GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
|
||||
/* Intentional 2nd write to assure linear divider operation */
|
||||
nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
|
||||
GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
|
||||
nvkm_rd32(device, GPC2CLK_OUT);
|
||||
udelay(2);
|
||||
|
||||
/* slide down to NDIV_LO */
|
||||
if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
|
||||
int ret;
|
||||
gk20a_pllg_disable(clk);
|
||||
|
||||
n_lo = DIV_ROUND_UP(old_pll.m * clk->params->min_vco,
|
||||
clk->parent_rate / KHZ);
|
||||
ret = gk20a_pllg_slide(clk, n_lo);
|
||||
gk20a_pllg_write_mnp(clk, pll);
|
||||
|
||||
ret = gk20a_pllg_enable(clk);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* restore out divider 1:1 */
|
||||
udelay(2);
|
||||
nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
|
||||
GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
|
||||
/* Intentional 2nd write to assure linear divider operation */
|
||||
nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
|
||||
GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
|
||||
nvkm_rd32(device, GPC2CLK_OUT);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gk20a_pllg_program_mnp_slide(struct gk20a_clk *clk, const struct gk20a_pll *pll)
|
||||
{
|
||||
struct gk20a_pll cur_pll;
|
||||
int ret;
|
||||
|
||||
if (gk20a_pllg_is_enabled(clk)) {
|
||||
gk20a_pllg_read_mnp(clk, &cur_pll);
|
||||
|
||||
/* just do NDIV slide if there is no change to M and PL */
|
||||
if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
|
||||
return gk20a_pllg_slide(clk, pll->n);
|
||||
|
||||
/* slide down to current NDIV_LO */
|
||||
cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
|
||||
ret = gk20a_pllg_slide(clk, cur_pll.n);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* split FO-to-bypass jump in halfs by setting out divider 1:2 */
|
||||
nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
|
||||
0x2 << GPC2CLK_OUT_VCODIV_SHIFT);
|
||||
|
||||
/* put PLL in bypass before programming it */
|
||||
val = nvkm_rd32(device, SEL_VCO);
|
||||
val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
|
||||
udelay(2);
|
||||
nvkm_wr32(device, SEL_VCO, val);
|
||||
|
||||
/* get out from IDDQ */
|
||||
val = nvkm_rd32(device, GPCPLL_CFG);
|
||||
if (val & GPCPLL_CFG_IDDQ) {
|
||||
val &= ~GPCPLL_CFG_IDDQ;
|
||||
nvkm_wr32(device, GPCPLL_CFG, val);
|
||||
nvkm_rd32(device, GPCPLL_CFG);
|
||||
udelay(2);
|
||||
}
|
||||
|
||||
gk20a_pllg_disable(clk);
|
||||
|
||||
nvkm_debug(subdev, "%s: m=%d n=%d pl=%d\n", __func__,
|
||||
clk->pll.m, clk->pll.n, clk->pll.pl);
|
||||
|
||||
n_lo = DIV_ROUND_UP(clk->pll.m * clk->params->min_vco,
|
||||
clk->parent_rate / KHZ);
|
||||
val = clk->pll.m << GPCPLL_COEFF_M_SHIFT;
|
||||
val |= (allow_slide ? n_lo : clk->pll.n) << GPCPLL_COEFF_N_SHIFT;
|
||||
val |= clk->pll.pl << GPCPLL_COEFF_P_SHIFT;
|
||||
nvkm_wr32(device, GPCPLL_COEFF, val);
|
||||
|
||||
gk20a_pllg_enable(clk);
|
||||
|
||||
val = nvkm_rd32(device, GPCPLL_CFG);
|
||||
if (val & GPCPLL_CFG_LOCK_DET_OFF) {
|
||||
val &= ~GPCPLL_CFG_LOCK_DET_OFF;
|
||||
nvkm_wr32(device, GPCPLL_CFG, val);
|
||||
}
|
||||
|
||||
if (nvkm_usec(device, 300,
|
||||
if (nvkm_rd32(device, GPCPLL_CFG) & GPCPLL_CFG_LOCK)
|
||||
break;
|
||||
) < 0)
|
||||
return -ETIMEDOUT;
|
||||
|
||||
/* switch to VCO mode */
|
||||
nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
|
||||
BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
|
||||
|
||||
/* restore out divider 1:1 */
|
||||
val = nvkm_rd32(device, GPC2CLK_OUT);
|
||||
if ((val & GPC2CLK_OUT_VCODIV_MASK) !=
|
||||
(GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT)) {
|
||||
val &= ~GPC2CLK_OUT_VCODIV_MASK;
|
||||
val |= GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT;
|
||||
udelay(2);
|
||||
nvkm_wr32(device, GPC2CLK_OUT, val);
|
||||
/* Intentional 2nd write to assure linear divider operation */
|
||||
nvkm_wr32(device, GPC2CLK_OUT, val);
|
||||
nvkm_rd32(device, GPC2CLK_OUT);
|
||||
}
|
||||
/* program MNP with the new clock parameters and new NDIV_LO */
|
||||
cur_pll = *pll;
|
||||
cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
|
||||
ret = gk20a_pllg_program_mnp(clk, &cur_pll);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* slide up to new NDIV */
|
||||
return allow_slide ? gk20a_pllg_slide(clk, clk->pll.n) : 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gk20a_pllg_program_mnp(struct gk20a_clk *clk)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = _gk20a_pllg_program_mnp(clk, true);
|
||||
if (err)
|
||||
err = _gk20a_pllg_program_mnp(clk, false);
|
||||
|
||||
return err;
|
||||
return gk20a_pllg_slide(clk, pll->n);
|
||||
}
|
||||
|
||||
static struct nvkm_pstate
|
||||
@ -546,13 +462,14 @@ gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
|
||||
struct gk20a_clk *clk = gk20a_clk(base);
|
||||
struct nvkm_subdev *subdev = &clk->base.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
struct gk20a_pll pll;
|
||||
|
||||
switch (src) {
|
||||
case nv_clk_src_crystal:
|
||||
return device->crystal;
|
||||
case nv_clk_src_gpc:
|
||||
gk20a_pllg_read_mnp(clk, &clk->pll);
|
||||
return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
|
||||
gk20a_pllg_read_mnp(clk, &pll);
|
||||
return gk20a_pllg_calc_rate(clk, &pll) / GK20A_CLK_GPC_MDIV;
|
||||
default:
|
||||
nvkm_error(subdev, "invalid clock source %d\n", src);
|
||||
return -EINVAL;
|
||||
@ -565,15 +482,20 @@ gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
|
||||
struct gk20a_clk *clk = gk20a_clk(base);
|
||||
|
||||
return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
|
||||
GK20A_CLK_GPC_MDIV);
|
||||
GK20A_CLK_GPC_MDIV, &clk->pll);
|
||||
}
|
||||
|
||||
int
|
||||
gk20a_clk_prog(struct nvkm_clk *base)
|
||||
{
|
||||
struct gk20a_clk *clk = gk20a_clk(base);
|
||||
int ret;
|
||||
|
||||
return gk20a_pllg_program_mnp(clk);
|
||||
ret = gk20a_pllg_program_mnp_slide(clk, &clk->pll);
|
||||
if (ret)
|
||||
ret = gk20a_pllg_program_mnp(clk, &clk->pll);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void
|
||||
@ -581,29 +503,62 @@ gk20a_clk_tidy(struct nvkm_clk *base)
|
||||
{
|
||||
}
|
||||
|
||||
int
|
||||
gk20a_clk_setup_slide(struct gk20a_clk *clk)
|
||||
{
|
||||
struct nvkm_subdev *subdev = &clk->base.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
u32 step_a, step_b;
|
||||
|
||||
switch (clk->parent_rate) {
|
||||
case 12000000:
|
||||
case 12800000:
|
||||
case 13000000:
|
||||
step_a = 0x2b;
|
||||
step_b = 0x0b;
|
||||
break;
|
||||
case 19200000:
|
||||
step_a = 0x12;
|
||||
step_b = 0x08;
|
||||
break;
|
||||
case 38400000:
|
||||
step_a = 0x04;
|
||||
step_b = 0x05;
|
||||
break;
|
||||
default:
|
||||
nvkm_error(subdev, "invalid parent clock rate %u KHz",
|
||||
clk->parent_rate / KHZ);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
|
||||
step_a << GPCPLL_CFG2_PLL_STEPA_SHIFT);
|
||||
nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
|
||||
step_b << GPCPLL_CFG3_PLL_STEPB_SHIFT);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
gk20a_clk_fini(struct nvkm_clk *base)
|
||||
{
|
||||
struct nvkm_device *device = base->subdev.device;
|
||||
struct gk20a_clk *clk = gk20a_clk(base);
|
||||
u32 val;
|
||||
|
||||
/* slide to VCO min */
|
||||
val = nvkm_rd32(device, GPCPLL_CFG);
|
||||
if (val & GPCPLL_CFG_ENABLE) {
|
||||
if (gk20a_pllg_is_enabled(clk)) {
|
||||
struct gk20a_pll pll;
|
||||
u32 n_lo;
|
||||
|
||||
gk20a_pllg_read_mnp(clk, &pll);
|
||||
n_lo = DIV_ROUND_UP(pll.m * clk->params->min_vco,
|
||||
clk->parent_rate / KHZ);
|
||||
n_lo = gk20a_pllg_n_lo(clk, &pll);
|
||||
gk20a_pllg_slide(clk, n_lo);
|
||||
}
|
||||
|
||||
/* put PLL in bypass before disabling it */
|
||||
nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
|
||||
|
||||
gk20a_pllg_disable(clk);
|
||||
|
||||
/* set IDDQ */
|
||||
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -614,9 +569,18 @@ gk20a_clk_init(struct nvkm_clk *base)
|
||||
struct nvkm_device *device = subdev->device;
|
||||
int ret;
|
||||
|
||||
/* get out from IDDQ */
|
||||
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
|
||||
nvkm_rd32(device, GPCPLL_CFG);
|
||||
udelay(5);
|
||||
|
||||
nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
|
||||
GPC2CLK_OUT_INIT_VAL);
|
||||
|
||||
ret = gk20a_clk_setup_slide(clk);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Start with lowest frequency */
|
||||
base->func->calc(base, &base->func->pstates[0].base);
|
||||
ret = base->func->prog(&clk->base);
|
||||
@ -646,7 +610,7 @@ gk20a_clk = {
|
||||
};
|
||||
|
||||
int
|
||||
_gk20a_clk_ctor(struct nvkm_device *device, int index,
|
||||
gk20a_clk_ctor(struct nvkm_device *device, int index,
|
||||
const struct nvkm_clk_func *func,
|
||||
const struct gk20a_clk_pllg_params *params,
|
||||
struct gk20a_clk *clk)
|
||||
@ -685,7 +649,7 @@ gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
|
||||
return -ENOMEM;
|
||||
*pclk = &clk->base;
|
||||
|
||||
ret = _gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params,
|
||||
ret = gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params,
|
||||
clk);
|
||||
|
||||
clk->pl_to_div = pl_to_div;
|
||||
|
@ -24,9 +24,79 @@
|
||||
#ifndef __NVKM_CLK_GK20A_H__
|
||||
#define __NVKM_CLK_GK20A_H__
|
||||
|
||||
#define KHZ (1000)
|
||||
#define MHZ (KHZ * 1000)
|
||||
|
||||
#define MASK(w) ((1 << (w)) - 1)
|
||||
|
||||
#define GK20A_CLK_GPC_MDIV 1000
|
||||
|
||||
#define SYS_GPCPLL_CFG_BASE 0x00137000
|
||||
#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0)
|
||||
#define GPCPLL_CFG_ENABLE BIT(0)
|
||||
#define GPCPLL_CFG_IDDQ BIT(1)
|
||||
#define GPCPLL_CFG_LOCK_DET_OFF BIT(4)
|
||||
#define GPCPLL_CFG_LOCK BIT(17)
|
||||
|
||||
#define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc)
|
||||
#define GPCPLL_CFG2_SETUP2_SHIFT 16
|
||||
#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24
|
||||
|
||||
#define GPCPLL_CFG3 (SYS_GPCPLL_CFG_BASE + 0x18)
|
||||
#define GPCPLL_CFG3_VCO_CTRL_SHIFT 0
|
||||
#define GPCPLL_CFG3_VCO_CTRL_WIDTH 9
|
||||
#define GPCPLL_CFG3_VCO_CTRL_MASK \
|
||||
(MASK(GPCPLL_CFG3_VCO_CTRL_WIDTH) << GPCPLL_CFG3_VCO_CTRL_SHIFT)
|
||||
#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16
|
||||
#define GPCPLL_CFG3_PLL_STEPB_WIDTH 8
|
||||
|
||||
#define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4)
|
||||
#define GPCPLL_COEFF_M_SHIFT 0
|
||||
#define GPCPLL_COEFF_M_WIDTH 8
|
||||
#define GPCPLL_COEFF_N_SHIFT 8
|
||||
#define GPCPLL_COEFF_N_WIDTH 8
|
||||
#define GPCPLL_COEFF_N_MASK \
|
||||
(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT)
|
||||
#define GPCPLL_COEFF_P_SHIFT 16
|
||||
#define GPCPLL_COEFF_P_WIDTH 6
|
||||
|
||||
#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c)
|
||||
#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0
|
||||
#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8
|
||||
#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16
|
||||
#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22
|
||||
#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31
|
||||
|
||||
#define GPC_BCAST_GPCPLL_CFG_BASE 0x00132800
|
||||
#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCAST_GPCPLL_CFG_BASE + 0xa0)
|
||||
#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24
|
||||
#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
|
||||
(0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
|
||||
|
||||
#define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100)
|
||||
#define SEL_VCO_GPC2CLK_OUT_SHIFT 0
|
||||
|
||||
#define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250)
|
||||
#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1
|
||||
#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31
|
||||
#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1
|
||||
#define GPC2CLK_OUT_VCODIV_WIDTH 6
|
||||
#define GPC2CLK_OUT_VCODIV_SHIFT 8
|
||||
#define GPC2CLK_OUT_VCODIV1 0
|
||||
#define GPC2CLK_OUT_VCODIV2 2
|
||||
#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
|
||||
GPC2CLK_OUT_VCODIV_SHIFT)
|
||||
#define GPC2CLK_OUT_BYPDIV_WIDTH 6
|
||||
#define GPC2CLK_OUT_BYPDIV_SHIFT 0
|
||||
#define GPC2CLK_OUT_BYPDIV31 0x3c
|
||||
#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
|
||||
GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\
|
||||
| (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\
|
||||
| (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT))
|
||||
#define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \
|
||||
GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
|
||||
| (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \
|
||||
| (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT))
|
||||
|
||||
/* All frequencies in Khz */
|
||||
struct gk20a_clk_pllg_params {
|
||||
@ -54,7 +124,29 @@ struct gk20a_clk {
|
||||
};
|
||||
#define gk20a_clk(p) container_of((p), struct gk20a_clk, base)
|
||||
|
||||
int _gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *,
|
||||
u32 gk20a_pllg_calc_rate(struct gk20a_clk *, struct gk20a_pll *);
|
||||
int gk20a_pllg_calc_mnp(struct gk20a_clk *, unsigned long, struct gk20a_pll *);
|
||||
void gk20a_pllg_read_mnp(struct gk20a_clk *, struct gk20a_pll *);
|
||||
void gk20a_pllg_write_mnp(struct gk20a_clk *, const struct gk20a_pll *);
|
||||
|
||||
static inline bool
|
||||
gk20a_pllg_is_enabled(struct gk20a_clk *clk)
|
||||
{
|
||||
struct nvkm_device *device = clk->base.subdev.device;
|
||||
u32 val;
|
||||
|
||||
val = nvkm_rd32(device, GPCPLL_CFG);
|
||||
return val & GPCPLL_CFG_ENABLE;
|
||||
}
|
||||
|
||||
static inline u32
|
||||
gk20a_pllg_n_lo(struct gk20a_clk *clk, struct gk20a_pll *pll)
|
||||
{
|
||||
return DIV_ROUND_UP(pll->m * clk->params->min_vco,
|
||||
clk->parent_rate / KHZ);
|
||||
}
|
||||
|
||||
int gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *,
|
||||
const struct gk20a_clk_pllg_params *, struct gk20a_clk *);
|
||||
void gk20a_clk_fini(struct nvkm_clk *);
|
||||
int gk20a_clk_read(struct nvkm_clk *, enum nv_clk_src);
|
||||
@ -62,4 +154,6 @@ int gk20a_clk_calc(struct nvkm_clk *, struct nvkm_cstate *);
|
||||
int gk20a_clk_prog(struct nvkm_clk *);
|
||||
void gk20a_clk_tidy(struct nvkm_clk *);
|
||||
|
||||
int gk20a_clk_setup_slide(struct gk20a_clk *);
|
||||
|
||||
#endif
|
||||
|
@ -21,20 +21,123 @@
|
||||
*/
|
||||
|
||||
#include <subdev/clk.h>
|
||||
#include <subdev/volt.h>
|
||||
#include <subdev/timer.h>
|
||||
#include <core/device.h>
|
||||
#include <core/tegra.h>
|
||||
|
||||
#include "priv.h"
|
||||
#include "gk20a.h"
|
||||
|
||||
#define KHZ (1000)
|
||||
#define MHZ (KHZ * 1000)
|
||||
|
||||
#define MASK(w) ((1 << w) - 1)
|
||||
#define GPCPLL_CFG_SYNC_MODE BIT(2)
|
||||
|
||||
#define BYPASSCTRL_SYS (SYS_GPCPLL_CFG_BASE + 0x340)
|
||||
#define BYPASSCTRL_SYS_GPCPLL_SHIFT 0
|
||||
#define BYPASSCTRL_SYS_GPCPLL_WIDTH 1
|
||||
|
||||
#define GPCPLL_CFG2_SDM_DIN_SHIFT 0
|
||||
#define GPCPLL_CFG2_SDM_DIN_WIDTH 8
|
||||
#define GPCPLL_CFG2_SDM_DIN_MASK \
|
||||
(MASK(GPCPLL_CFG2_SDM_DIN_WIDTH) << GPCPLL_CFG2_SDM_DIN_SHIFT)
|
||||
#define GPCPLL_CFG2_SDM_DIN_NEW_SHIFT 8
|
||||
#define GPCPLL_CFG2_SDM_DIN_NEW_WIDTH 15
|
||||
#define GPCPLL_CFG2_SDM_DIN_NEW_MASK \
|
||||
(MASK(GPCPLL_CFG2_SDM_DIN_NEW_WIDTH) << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT)
|
||||
#define GPCPLL_CFG2_SETUP2_SHIFT 16
|
||||
#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24
|
||||
|
||||
#define GPCPLL_DVFS0 (SYS_GPCPLL_CFG_BASE + 0x10)
|
||||
#define GPCPLL_DVFS0_DFS_COEFF_SHIFT 0
|
||||
#define GPCPLL_DVFS0_DFS_COEFF_WIDTH 7
|
||||
#define GPCPLL_DVFS0_DFS_COEFF_MASK \
|
||||
(MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH) << GPCPLL_DVFS0_DFS_COEFF_SHIFT)
|
||||
#define GPCPLL_DVFS0_DFS_DET_MAX_SHIFT 8
|
||||
#define GPCPLL_DVFS0_DFS_DET_MAX_WIDTH 7
|
||||
#define GPCPLL_DVFS0_DFS_DET_MAX_MASK \
|
||||
(MASK(GPCPLL_DVFS0_DFS_DET_MAX_WIDTH) << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT)
|
||||
|
||||
#define GPCPLL_DVFS1 (SYS_GPCPLL_CFG_BASE + 0x14)
|
||||
#define GPCPLL_DVFS1_DFS_EXT_DET_SHIFT 0
|
||||
#define GPCPLL_DVFS1_DFS_EXT_DET_WIDTH 7
|
||||
#define GPCPLL_DVFS1_DFS_EXT_STRB_SHIFT 7
|
||||
#define GPCPLL_DVFS1_DFS_EXT_STRB_WIDTH 1
|
||||
#define GPCPLL_DVFS1_DFS_EXT_CAL_SHIFT 8
|
||||
#define GPCPLL_DVFS1_DFS_EXT_CAL_WIDTH 7
|
||||
#define GPCPLL_DVFS1_DFS_EXT_SEL_SHIFT 15
|
||||
#define GPCPLL_DVFS1_DFS_EXT_SEL_WIDTH 1
|
||||
#define GPCPLL_DVFS1_DFS_CTRL_SHIFT 16
|
||||
#define GPCPLL_DVFS1_DFS_CTRL_WIDTH 12
|
||||
#define GPCPLL_DVFS1_EN_SDM_SHIFT 28
|
||||
#define GPCPLL_DVFS1_EN_SDM_WIDTH 1
|
||||
#define GPCPLL_DVFS1_EN_SDM_BIT BIT(28)
|
||||
#define GPCPLL_DVFS1_EN_DFS_SHIFT 29
|
||||
#define GPCPLL_DVFS1_EN_DFS_WIDTH 1
|
||||
#define GPCPLL_DVFS1_EN_DFS_BIT BIT(29)
|
||||
#define GPCPLL_DVFS1_EN_DFS_CAL_SHIFT 30
|
||||
#define GPCPLL_DVFS1_EN_DFS_CAL_WIDTH 1
|
||||
#define GPCPLL_DVFS1_EN_DFS_CAL_BIT BIT(30)
|
||||
#define GPCPLL_DVFS1_DFS_CAL_DONE_SHIFT 31
|
||||
#define GPCPLL_DVFS1_DFS_CAL_DONE_WIDTH 1
|
||||
#define GPCPLL_DVFS1_DFS_CAL_DONE_BIT BIT(31)
|
||||
|
||||
#define GPC_BCAST_GPCPLL_DVFS2 (GPC_BCAST_GPCPLL_CFG_BASE + 0x20)
|
||||
#define GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT BIT(16)
|
||||
|
||||
#define GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT 24
|
||||
#define GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH 7
|
||||
|
||||
#define DFS_DET_RANGE 6 /* -2^6 ... 2^6-1 */
|
||||
#define SDM_DIN_RANGE 12 /* -2^12 ... 2^12-1 */
|
||||
|
||||
struct gm20b_clk_dvfs_params {
|
||||
s32 coeff_slope;
|
||||
s32 coeff_offs;
|
||||
u32 vco_ctrl;
|
||||
};
|
||||
|
||||
static const struct gm20b_clk_dvfs_params gm20b_dvfs_params = {
|
||||
.coeff_slope = -165230,
|
||||
.coeff_offs = 214007,
|
||||
.vco_ctrl = 0x7 << 3,
|
||||
};
|
||||
|
||||
/*
|
||||
* base.n is now the *integer* part of the N factor.
|
||||
* sdm_din contains n's decimal part.
|
||||
*/
|
||||
struct gm20b_pll {
|
||||
struct gk20a_pll base;
|
||||
u32 sdm_din;
|
||||
};
|
||||
|
||||
struct gm20b_clk_dvfs {
|
||||
u32 dfs_coeff;
|
||||
s32 dfs_det_max;
|
||||
s32 dfs_ext_cal;
|
||||
};
|
||||
|
||||
struct gm20b_clk {
|
||||
/* currently applied parameters */
|
||||
struct gk20a_clk base;
|
||||
struct gm20b_clk_dvfs dvfs;
|
||||
u32 uv;
|
||||
|
||||
/* new parameters to apply */
|
||||
struct gk20a_pll new_pll;
|
||||
struct gm20b_clk_dvfs new_dvfs;
|
||||
u32 new_uv;
|
||||
|
||||
const struct gm20b_clk_dvfs_params *dvfs_params;
|
||||
|
||||
/* fused parameters */
|
||||
s32 uvdet_slope;
|
||||
s32 uvdet_offs;
|
||||
|
||||
/* safe frequency we can use at minimum voltage */
|
||||
u32 safe_fmax_vmin;
|
||||
};
|
||||
#define gm20b_clk(p) container_of((gk20a_clk(p)), struct gm20b_clk, base)
|
||||
|
||||
static u32 pl_to_div(u32 pl)
|
||||
{
|
||||
return pl;
|
||||
@ -53,6 +156,484 @@ static const struct gk20a_clk_pllg_params gm20b_pllg_params = {
|
||||
.min_pl = 1, .max_pl = 31,
|
||||
};
|
||||
|
||||
static void
|
||||
gm20b_pllg_read_mnp(struct gm20b_clk *clk, struct gm20b_pll *pll)
|
||||
{
|
||||
struct nvkm_subdev *subdev = &clk->base.base.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
u32 val;
|
||||
|
||||
gk20a_pllg_read_mnp(&clk->base, &pll->base);
|
||||
val = nvkm_rd32(device, GPCPLL_CFG2);
|
||||
pll->sdm_din = (val >> GPCPLL_CFG2_SDM_DIN_SHIFT) &
|
||||
MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);
|
||||
}
|
||||
|
||||
static void
|
||||
gm20b_pllg_write_mnp(struct gm20b_clk *clk, const struct gm20b_pll *pll)
|
||||
{
|
||||
struct nvkm_device *device = clk->base.base.subdev.device;
|
||||
|
||||
nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
|
||||
pll->sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);
|
||||
gk20a_pllg_write_mnp(&clk->base, &pll->base);
|
||||
}
|
||||
|
||||
/*
|
||||
* Determine DFS_COEFF for the requested voltage. Always select external
|
||||
* calibration override equal to the voltage, and set maximum detection
|
||||
* limit "0" (to make sure that PLL output remains under F/V curve when
|
||||
* voltage increases).
|
||||
*/
|
||||
static void
|
||||
gm20b_dvfs_calc_det_coeff(struct gm20b_clk *clk, s32 uv,
|
||||
struct gm20b_clk_dvfs *dvfs)
|
||||
{
|
||||
struct nvkm_subdev *subdev = &clk->base.base.subdev;
|
||||
const struct gm20b_clk_dvfs_params *p = clk->dvfs_params;
|
||||
u32 coeff;
|
||||
/* Work with mv as uv would likely trigger an overflow */
|
||||
s32 mv = DIV_ROUND_CLOSEST(uv, 1000);
|
||||
|
||||
/* coeff = slope * voltage + offset */
|
||||
coeff = DIV_ROUND_CLOSEST(mv * p->coeff_slope, 1000) + p->coeff_offs;
|
||||
coeff = DIV_ROUND_CLOSEST(coeff, 1000);
|
||||
dvfs->dfs_coeff = min_t(u32, coeff, MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH));
|
||||
|
||||
dvfs->dfs_ext_cal = DIV_ROUND_CLOSEST(uv - clk->uvdet_offs,
|
||||
clk->uvdet_slope);
|
||||
/* should never happen */
|
||||
if (abs(dvfs->dfs_ext_cal) >= BIT(DFS_DET_RANGE))
|
||||
nvkm_error(subdev, "dfs_ext_cal overflow!\n");
|
||||
|
||||
dvfs->dfs_det_max = 0;
|
||||
|
||||
nvkm_debug(subdev, "%s uv: %d coeff: %x, ext_cal: %d, det_max: %d\n",
|
||||
__func__, uv, dvfs->dfs_coeff, dvfs->dfs_ext_cal,
|
||||
dvfs->dfs_det_max);
|
||||
}
|
||||
|
||||
/*
|
||||
* Solve equation for integer and fractional part of the effective NDIV:
|
||||
*
|
||||
* n_eff = n_int + 1/2 + (SDM_DIN / 2^(SDM_DIN_RANGE + 1)) +
|
||||
* (DVFS_COEFF * DVFS_DET_DELTA) / 2^DFS_DET_RANGE
|
||||
*
|
||||
* The SDM_DIN LSB is finally shifted out, since it is not accessible by sw.
|
||||
*/
|
||||
static void
|
||||
gm20b_dvfs_calc_ndiv(struct gm20b_clk *clk, u32 n_eff, u32 *n_int, u32 *sdm_din)
|
||||
{
|
||||
struct nvkm_subdev *subdev = &clk->base.base.subdev;
|
||||
const struct gk20a_clk_pllg_params *p = clk->base.params;
|
||||
u32 n;
|
||||
s32 det_delta;
|
||||
u32 rem, rem_range;
|
||||
|
||||
/* calculate current ext_cal and subtract previous one */
|
||||
det_delta = DIV_ROUND_CLOSEST(((s32)clk->uv) - clk->uvdet_offs,
|
||||
clk->uvdet_slope);
|
||||
det_delta -= clk->dvfs.dfs_ext_cal;
|
||||
det_delta = min(det_delta, clk->dvfs.dfs_det_max);
|
||||
det_delta *= clk->dvfs.dfs_coeff;
|
||||
|
||||
/* integer part of n */
|
||||
n = (n_eff << DFS_DET_RANGE) - det_delta;
|
||||
/* should never happen! */
|
||||
if (n <= 0) {
|
||||
nvkm_error(subdev, "ndiv <= 0 - setting to 1...\n");
|
||||
n = 1 << DFS_DET_RANGE;
|
||||
}
|
||||
if (n >> DFS_DET_RANGE > p->max_n) {
|
||||
nvkm_error(subdev, "ndiv > max_n - setting to max_n...\n");
|
||||
n = p->max_n << DFS_DET_RANGE;
|
||||
}
|
||||
*n_int = n >> DFS_DET_RANGE;
|
||||
|
||||
/* fractional part of n */
|
||||
rem = ((u32)n) & MASK(DFS_DET_RANGE);
|
||||
rem_range = SDM_DIN_RANGE + 1 - DFS_DET_RANGE;
|
||||
/* subtract 2^SDM_DIN_RANGE to account for the 1/2 of the equation */
|
||||
rem = (rem << rem_range) - BIT(SDM_DIN_RANGE);
|
||||
/* lose 8 LSB and clip - sdm_din only keeps the most significant byte */
|
||||
*sdm_din = (rem >> BITS_PER_BYTE) & MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);
|
||||
|
||||
nvkm_debug(subdev, "%s n_eff: %d, n_int: %d, sdm_din: %d\n", __func__,
|
||||
n_eff, *n_int, *sdm_din);
|
||||
}
|
||||
|
||||
static int
|
||||
gm20b_pllg_slide(struct gm20b_clk *clk, u32 n)
|
||||
{
|
||||
struct nvkm_subdev *subdev = &clk->base.base.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
struct gm20b_pll pll;
|
||||
u32 n_int, sdm_din;
|
||||
int ret = 0;
|
||||
|
||||
/* calculate the new n_int/sdm_din for this n/uv */
|
||||
gm20b_dvfs_calc_ndiv(clk, n, &n_int, &sdm_din);
|
||||
|
||||
/* get old coefficients */
|
||||
gm20b_pllg_read_mnp(clk, &pll);
|
||||
/* do nothing if NDIV is the same */
|
||||
if (n_int == pll.base.n && sdm_din == pll.sdm_din)
|
||||
return 0;
|
||||
|
||||
/* pll slowdown mode */
|
||||
nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
|
||||
BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
|
||||
BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
|
||||
|
||||
/* new ndiv ready for ramp */
|
||||
/* in DVFS mode SDM is updated via "new" field */
|
||||
nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_NEW_MASK,
|
||||
sdm_din << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT);
|
||||
pll.base.n = n_int;
|
||||
udelay(1);
|
||||
gk20a_pllg_write_mnp(&clk->base, &pll.base);
|
||||
|
||||
/* dynamic ramp to new ndiv */
|
||||
udelay(1);
|
||||
nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
|
||||
BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
|
||||
BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));
|
||||
|
||||
/* wait for ramping to complete */
|
||||
if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
|
||||
GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
|
||||
GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
|
||||
ret = -ETIMEDOUT;
|
||||
|
||||
/* in DVFS mode complete SDM update */
|
||||
nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
|
||||
sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);
|
||||
|
||||
/* exit slowdown mode */
|
||||
nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
|
||||
BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
|
||||
BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
|
||||
nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
gm20b_pllg_enable(struct gm20b_clk *clk)
|
||||
{
|
||||
struct nvkm_device *device = clk->base.base.subdev.device;
|
||||
|
||||
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
|
||||
nvkm_rd32(device, GPCPLL_CFG);
|
||||
|
||||
/* In DVFS mode lock cannot be used - so just delay */
|
||||
udelay(40);
|
||||
|
||||
/* set SYNC_MODE for glitchless switch out of bypass */
|
||||
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE,
|
||||
GPCPLL_CFG_SYNC_MODE);
|
||||
nvkm_rd32(device, GPCPLL_CFG);
|
||||
|
||||
/* switch to VCO mode */
|
||||
nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
|
||||
BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
gm20b_pllg_disable(struct gm20b_clk *clk)
|
||||
{
|
||||
struct nvkm_device *device = clk->base.base.subdev.device;
|
||||
|
||||
/* put PLL in bypass before disabling it */
|
||||
nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
|
||||
|
||||
/* clear SYNC_MODE before disabling PLL */
|
||||
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE, 0);
|
||||
|
||||
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
|
||||
nvkm_rd32(device, GPCPLL_CFG);
|
||||
}
|
||||
|
||||
static int
|
||||
gm20b_pllg_program_mnp(struct gm20b_clk *clk, const struct gk20a_pll *pll)
|
||||
{
|
||||
struct nvkm_subdev *subdev = &clk->base.base.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
struct gm20b_pll cur_pll;
|
||||
u32 n_int, sdm_din;
|
||||
/* if we only change pdiv, we can do a glitchless transition */
|
||||
bool pdiv_only;
|
||||
int ret;
|
||||
|
||||
gm20b_dvfs_calc_ndiv(clk, pll->n, &n_int, &sdm_din);
|
||||
gm20b_pllg_read_mnp(clk, &cur_pll);
|
||||
pdiv_only = cur_pll.base.n == n_int && cur_pll.sdm_din == sdm_din &&
|
||||
cur_pll.base.m == pll->m;
|
||||
|
||||
/* need full sequence if clock not enabled yet */
|
||||
if (!gk20a_pllg_is_enabled(&clk->base))
|
||||
pdiv_only = false;
|
||||
|
||||
/* split VCO-to-bypass jump in half by setting out divider 1:2 */
|
||||
nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
|
||||
GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
|
||||
/* Intentional 2nd write to assure linear divider operation */
|
||||
nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
|
||||
GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
|
||||
nvkm_rd32(device, GPC2CLK_OUT);
|
||||
udelay(2);
|
||||
|
||||
if (pdiv_only) {
|
||||
u32 old = cur_pll.base.pl;
|
||||
u32 new = pll->pl;
|
||||
|
||||
/*
|
||||
* we can do a glitchless transition only if the old and new PL
|
||||
* parameters share at least one bit set to 1. If this is not
|
||||
* the case, calculate and program an interim PL that will allow
|
||||
* us to respect that rule.
|
||||
*/
|
||||
if ((old & new) == 0) {
|
||||
cur_pll.base.pl = min(old | BIT(ffs(new) - 1),
|
||||
new | BIT(ffs(old) - 1));
|
||||
gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
|
||||
}
|
||||
|
||||
cur_pll.base.pl = new;
|
||||
gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
|
||||
} else {
|
||||
/* disable before programming if more than pdiv changes */
|
||||
gm20b_pllg_disable(clk);
|
||||
|
||||
cur_pll.base = *pll;
|
||||
cur_pll.base.n = n_int;
|
||||
cur_pll.sdm_din = sdm_din;
|
||||
gm20b_pllg_write_mnp(clk, &cur_pll);
|
||||
|
||||
ret = gm20b_pllg_enable(clk);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* restore out divider 1:1 */
|
||||
udelay(2);
|
||||
nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
|
||||
GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
|
||||
/* Intentional 2nd write to assure linear divider operation */
|
||||
nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
|
||||
GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
|
||||
nvkm_rd32(device, GPC2CLK_OUT);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gm20b_pllg_program_mnp_slide(struct gm20b_clk *clk, const struct gk20a_pll *pll)
|
||||
{
|
||||
struct gk20a_pll cur_pll;
|
||||
int ret;
|
||||
|
||||
if (gk20a_pllg_is_enabled(&clk->base)) {
|
||||
gk20a_pllg_read_mnp(&clk->base, &cur_pll);
|
||||
|
||||
/* just do NDIV slide if there is no change to M and PL */
|
||||
if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
|
||||
return gm20b_pllg_slide(clk, pll->n);
|
||||
|
||||
/* slide down to current NDIV_LO */
|
||||
cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
|
||||
ret = gm20b_pllg_slide(clk, cur_pll.n);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* program MNP with the new clock parameters and new NDIV_LO */
|
||||
cur_pll = *pll;
|
||||
cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
|
||||
ret = gm20b_pllg_program_mnp(clk, &cur_pll);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* slide up to new NDIV */
|
||||
return gm20b_pllg_slide(clk, pll->n);
|
||||
}
|
||||
|
||||
static int
|
||||
gm20b_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
|
||||
{
|
||||
struct gm20b_clk *clk = gm20b_clk(base);
|
||||
struct nvkm_subdev *subdev = &base->subdev;
|
||||
struct nvkm_volt *volt = base->subdev.device->volt;
|
||||
int ret;
|
||||
|
||||
ret = gk20a_pllg_calc_mnp(&clk->base, cstate->domain[nv_clk_src_gpc] *
|
||||
GK20A_CLK_GPC_MDIV, &clk->new_pll);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
clk->new_uv = volt->vid[cstate->voltage].uv;
|
||||
gm20b_dvfs_calc_det_coeff(clk, clk->new_uv, &clk->new_dvfs);
|
||||
|
||||
nvkm_debug(subdev, "%s uv: %d uv\n", __func__, clk->new_uv);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Compute PLL parameters that are always safe for the current voltage
|
||||
*/
|
||||
static void
|
||||
gm20b_dvfs_calc_safe_pll(struct gm20b_clk *clk, struct gk20a_pll *pll)
|
||||
{
|
||||
u32 rate = gk20a_pllg_calc_rate(&clk->base, pll) / KHZ;
|
||||
u32 parent_rate = clk->base.parent_rate / KHZ;
|
||||
u32 nmin, nsafe;
|
||||
|
||||
/* remove a safe margin of 10% */
|
||||
if (rate > clk->safe_fmax_vmin)
|
||||
rate = rate * (100 - 10) / 100;
|
||||
|
||||
/* gpc2clk */
|
||||
rate *= 2;
|
||||
|
||||
nmin = DIV_ROUND_UP(pll->m * clk->base.params->min_vco, parent_rate);
|
||||
nsafe = pll->m * rate / (clk->base.parent_rate);
|
||||
|
||||
if (nsafe < nmin) {
|
||||
pll->pl = DIV_ROUND_UP(nmin * parent_rate, pll->m * rate);
|
||||
nsafe = nmin;
|
||||
}
|
||||
|
||||
pll->n = nsafe;
|
||||
}
|
||||
|
||||
static void
|
||||
gm20b_dvfs_program_coeff(struct gm20b_clk *clk, u32 coeff)
|
||||
{
|
||||
struct nvkm_device *device = clk->base.base.subdev.device;
|
||||
|
||||
/* strobe to read external DFS coefficient */
|
||||
nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
|
||||
GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
|
||||
GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);
|
||||
|
||||
nvkm_mask(device, GPCPLL_DVFS0, GPCPLL_DVFS0_DFS_COEFF_MASK,
|
||||
coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT);
|
||||
|
||||
udelay(1);
|
||||
nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
|
||||
GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);
|
||||
}
|
||||
|
||||
static void
|
||||
gm20b_dvfs_program_ext_cal(struct gm20b_clk *clk, u32 dfs_det_cal)
|
||||
{
|
||||
struct nvkm_device *device = clk->base.base.subdev.device;
|
||||
u32 val;
|
||||
|
||||
nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, MASK(DFS_DET_RANGE + 1),
|
||||
dfs_det_cal);
|
||||
udelay(1);
|
||||
|
||||
val = nvkm_rd32(device, GPCPLL_DVFS1);
|
||||
if (!(val & BIT(25))) {
|
||||
/* Use external value to overwrite calibration value */
|
||||
val |= BIT(25) | BIT(16);
|
||||
nvkm_wr32(device, GPCPLL_DVFS1, val);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
gm20b_dvfs_program_dfs_detection(struct gm20b_clk *clk,
|
||||
struct gm20b_clk_dvfs *dvfs)
|
||||
{
|
||||
struct nvkm_device *device = clk->base.base.subdev.device;
|
||||
|
||||
/* strobe to read external DFS coefficient */
|
||||
nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
|
||||
GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
|
||||
GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);
|
||||
|
||||
nvkm_mask(device, GPCPLL_DVFS0,
|
||||
GPCPLL_DVFS0_DFS_COEFF_MASK | GPCPLL_DVFS0_DFS_DET_MAX_MASK,
|
||||
dvfs->dfs_coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT |
|
||||
dvfs->dfs_det_max << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT);
|
||||
|
||||
udelay(1);
|
||||
nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
|
||||
GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);
|
||||
|
||||
gm20b_dvfs_program_ext_cal(clk, dvfs->dfs_ext_cal);
|
||||
}
|
||||
|
||||
static int
|
||||
gm20b_clk_prog(struct nvkm_clk *base)
|
||||
{
|
||||
struct gm20b_clk *clk = gm20b_clk(base);
|
||||
u32 cur_freq;
|
||||
int ret;
|
||||
|
||||
/* No change in DVFS settings? */
|
||||
if (clk->uv == clk->new_uv)
|
||||
goto prog;
|
||||
|
||||
/*
|
||||
* Interim step for changing DVFS detection settings: low enough
|
||||
* frequency to be safe at at DVFS coeff = 0.
|
||||
*
|
||||
* 1. If voltage is increasing:
|
||||
* - safe frequency target matches the lowest - old - frequency
|
||||
* - DVFS settings are still old
|
||||
* - Voltage already increased to new level by volt, but maximum
|
||||
* detection limit assures PLL output remains under F/V curve
|
||||
*
|
||||
* 2. If voltage is decreasing:
|
||||
* - safe frequency target matches the lowest - new - frequency
|
||||
* - DVFS settings are still old
|
||||
* - Voltage is also old, it will be lowered by volt afterwards
|
||||
*
|
||||
* Interim step can be skipped if old frequency is below safe minimum,
|
||||
* i.e., it is low enough to be safe at any voltage in operating range
|
||||
* with zero DVFS coefficient.
|
||||
*/
|
||||
cur_freq = nvkm_clk_read(&clk->base.base, nv_clk_src_gpc);
|
||||
if (cur_freq > clk->safe_fmax_vmin) {
|
||||
struct gk20a_pll pll_safe;
|
||||
|
||||
if (clk->uv < clk->new_uv)
|
||||
/* voltage will raise: safe frequency is current one */
|
||||
pll_safe = clk->base.pll;
|
||||
else
|
||||
/* voltage will drop: safe frequency is new one */
|
||||
pll_safe = clk->new_pll;
|
||||
|
||||
gm20b_dvfs_calc_safe_pll(clk, &pll_safe);
|
||||
ret = gm20b_pllg_program_mnp_slide(clk, &pll_safe);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* DVFS detection settings transition:
|
||||
* - Set DVFS coefficient zero
|
||||
* - Set calibration level to new voltage
|
||||
* - Set DVFS coefficient to match new voltage
|
||||
*/
|
||||
gm20b_dvfs_program_coeff(clk, 0);
|
||||
gm20b_dvfs_program_ext_cal(clk, clk->new_dvfs.dfs_ext_cal);
|
||||
gm20b_dvfs_program_coeff(clk, clk->new_dvfs.dfs_coeff);
|
||||
gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs);
|
||||
|
||||
prog:
|
||||
clk->uv = clk->new_uv;
|
||||
clk->dvfs = clk->new_dvfs;
|
||||
clk->base.pll = clk->new_pll;
|
||||
|
||||
return gm20b_pllg_program_mnp_slide(clk, &clk->base.pll);
|
||||
}
|
||||
|
||||
static struct nvkm_pstate
|
||||
gm20b_pstates[] = {
|
||||
{
|
||||
@ -133,9 +714,99 @@ gm20b_pstates[] = {
|
||||
.voltage = 12,
|
||||
},
|
||||
},
|
||||
|
||||
};
|
||||
|
||||
static void
|
||||
gm20b_clk_fini(struct nvkm_clk *base)
|
||||
{
|
||||
struct nvkm_device *device = base->subdev.device;
|
||||
struct gm20b_clk *clk = gm20b_clk(base);
|
||||
|
||||
/* slide to VCO min */
|
||||
if (gk20a_pllg_is_enabled(&clk->base)) {
|
||||
struct gk20a_pll pll;
|
||||
u32 n_lo;
|
||||
|
||||
gk20a_pllg_read_mnp(&clk->base, &pll);
|
||||
n_lo = gk20a_pllg_n_lo(&clk->base, &pll);
|
||||
gm20b_pllg_slide(clk, n_lo);
|
||||
}
|
||||
|
||||
gm20b_pllg_disable(clk);
|
||||
|
||||
/* set IDDQ */
|
||||
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
|
||||
}
|
||||
|
||||
static int
|
||||
gm20b_clk_init_dvfs(struct gm20b_clk *clk)
|
||||
{
|
||||
struct nvkm_subdev *subdev = &clk->base.base.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
bool fused = clk->uvdet_offs && clk->uvdet_slope;
|
||||
static const s32 ADC_SLOPE_UV = 10000; /* default ADC detection slope */
|
||||
u32 data;
|
||||
int ret;
|
||||
|
||||
/* Enable NA DVFS */
|
||||
nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_BIT,
|
||||
GPCPLL_DVFS1_EN_DFS_BIT);
|
||||
|
||||
/* Set VCO_CTRL */
|
||||
if (clk->dvfs_params->vco_ctrl)
|
||||
nvkm_mask(device, GPCPLL_CFG3, GPCPLL_CFG3_VCO_CTRL_MASK,
|
||||
clk->dvfs_params->vco_ctrl << GPCPLL_CFG3_VCO_CTRL_SHIFT);
|
||||
|
||||
if (fused) {
|
||||
/* Start internal calibration, but ignore results */
|
||||
nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
|
||||
GPCPLL_DVFS1_EN_DFS_CAL_BIT);
|
||||
|
||||
/* got uvdev parameters from fuse, skip calibration */
|
||||
goto calibrated;
|
||||
}
|
||||
|
||||
/*
|
||||
* If calibration parameters are not fused, start internal calibration,
|
||||
* wait for completion, and use results along with default slope to
|
||||
* calculate ADC offset during boot.
|
||||
*/
|
||||
nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
|
||||
GPCPLL_DVFS1_EN_DFS_CAL_BIT);
|
||||
|
||||
/* Wait for internal calibration done (spec < 2us). */
|
||||
ret = nvkm_wait_usec(device, 10, GPCPLL_DVFS1,
|
||||
GPCPLL_DVFS1_DFS_CAL_DONE_BIT,
|
||||
GPCPLL_DVFS1_DFS_CAL_DONE_BIT);
|
||||
if (ret < 0) {
|
||||
nvkm_error(subdev, "GPCPLL calibration timeout\n");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
data = nvkm_rd32(device, GPCPLL_CFG3) >>
|
||||
GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT;
|
||||
data &= MASK(GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH);
|
||||
|
||||
clk->uvdet_slope = ADC_SLOPE_UV;
|
||||
clk->uvdet_offs = ((s32)clk->uv) - data * ADC_SLOPE_UV;
|
||||
|
||||
nvkm_debug(subdev, "calibrated DVFS parameters: offs %d, slope %d\n",
|
||||
clk->uvdet_offs, clk->uvdet_slope);
|
||||
|
||||
calibrated:
|
||||
/* Compute and apply initial DVFS parameters */
|
||||
gm20b_dvfs_calc_det_coeff(clk, clk->uv, &clk->dvfs);
|
||||
gm20b_dvfs_program_coeff(clk, 0);
|
||||
gm20b_dvfs_program_ext_cal(clk, clk->dvfs.dfs_ext_cal);
|
||||
gm20b_dvfs_program_coeff(clk, clk->dvfs.dfs_coeff);
|
||||
gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Forward declaration to detect speedo >=1 in gm20b_clk_init() */
|
||||
static const struct nvkm_clk_func gm20b_clk;
|
||||
|
||||
static int
|
||||
gm20b_clk_init(struct nvkm_clk *base)
|
||||
{
|
||||
@ -143,15 +814,56 @@ gm20b_clk_init(struct nvkm_clk *base)
|
||||
struct nvkm_subdev *subdev = &clk->base.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
int ret;
|
||||
u32 data;
|
||||
|
||||
/* get out from IDDQ */
|
||||
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
|
||||
nvkm_rd32(device, GPCPLL_CFG);
|
||||
udelay(5);
|
||||
|
||||
nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
|
||||
GPC2CLK_OUT_INIT_VAL);
|
||||
|
||||
/* Set the global bypass control to VCO */
|
||||
nvkm_mask(device, BYPASSCTRL_SYS,
|
||||
MASK(BYPASSCTRL_SYS_GPCPLL_WIDTH) << BYPASSCTRL_SYS_GPCPLL_SHIFT,
|
||||
0);
|
||||
|
||||
ret = gk20a_clk_setup_slide(clk);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* If not fused, set RAM SVOP PDP data 0x2, and enable fuse override */
|
||||
data = nvkm_rd32(device, 0x021944);
|
||||
if (!(data & 0x3)) {
|
||||
data |= 0x2;
|
||||
nvkm_wr32(device, 0x021944, data);
|
||||
|
||||
data = nvkm_rd32(device, 0x021948);
|
||||
data |= 0x1;
|
||||
nvkm_wr32(device, 0x021948, data);
|
||||
}
|
||||
|
||||
/* Disable idle slow down */
|
||||
nvkm_mask(device, 0x20160, 0x003f0000, 0x0);
|
||||
|
||||
/* speedo >= 1? */
|
||||
if (clk->base.func == &gm20b_clk) {
|
||||
struct gm20b_clk *_clk = gm20b_clk(base);
|
||||
struct nvkm_volt *volt = device->volt;
|
||||
|
||||
/* Get current voltage */
|
||||
_clk->uv = nvkm_volt_get(volt);
|
||||
|
||||
/* Initialize DVFS */
|
||||
ret = gm20b_clk_init_dvfs(_clk);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Start with lowest frequency */
|
||||
base->func->calc(base, &base->func->pstates[0].base);
|
||||
ret = base->func->prog(&clk->base);
|
||||
ret = base->func->prog(base);
|
||||
if (ret) {
|
||||
nvkm_error(subdev, "cannot initialize clock\n");
|
||||
return ret;
|
||||
@ -169,6 +881,7 @@ gm20b_clk_speedo0 = {
|
||||
.prog = gk20a_clk_prog,
|
||||
.tidy = gk20a_clk_tidy,
|
||||
.pstates = gm20b_pstates,
|
||||
/* Speedo 0 only supports 12 voltages */
|
||||
.nr_pstates = ARRAY_SIZE(gm20b_pstates) - 1,
|
||||
.domains = {
|
||||
{ nv_clk_src_crystal, 0xff },
|
||||
@ -177,8 +890,26 @@ gm20b_clk_speedo0 = {
|
||||
},
|
||||
};
|
||||
|
||||
int
|
||||
gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
|
||||
static const struct nvkm_clk_func
|
||||
gm20b_clk = {
|
||||
.init = gm20b_clk_init,
|
||||
.fini = gm20b_clk_fini,
|
||||
.read = gk20a_clk_read,
|
||||
.calc = gm20b_clk_calc,
|
||||
.prog = gm20b_clk_prog,
|
||||
.tidy = gk20a_clk_tidy,
|
||||
.pstates = gm20b_pstates,
|
||||
.nr_pstates = ARRAY_SIZE(gm20b_pstates),
|
||||
.domains = {
|
||||
{ nv_clk_src_crystal, 0xff },
|
||||
{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
|
||||
{ nv_clk_src_max },
|
||||
},
|
||||
};
|
||||
|
||||
static int
|
||||
gm20b_clk_new_speedo0(struct nvkm_device *device, int index,
|
||||
struct nvkm_clk **pclk)
|
||||
{
|
||||
struct gk20a_clk *clk;
|
||||
int ret;
|
||||
@ -188,11 +919,156 @@ gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
|
||||
return -ENOMEM;
|
||||
*pclk = &clk->base;
|
||||
|
||||
ret = _gk20a_clk_ctor(device, index, &gm20b_clk_speedo0,
|
||||
&gm20b_pllg_params, clk);
|
||||
ret = gk20a_clk_ctor(device, index, &gm20b_clk_speedo0,
|
||||
&gm20b_pllg_params, clk);
|
||||
|
||||
clk->pl_to_div = pl_to_div;
|
||||
clk->div_to_pl = div_to_pl;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* FUSE register */
|
||||
#define FUSE_RESERVED_CALIB0 0x204
|
||||
#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT 0
|
||||
#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH 4
|
||||
#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT 4
|
||||
#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH 10
|
||||
#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT 14
|
||||
#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH 10
|
||||
#define FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT 24
|
||||
#define FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH 6
|
||||
#define FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT 30
|
||||
#define FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH 2
|
||||
|
||||
static int
|
||||
gm20b_clk_init_fused_params(struct gm20b_clk *clk)
|
||||
{
|
||||
struct nvkm_subdev *subdev = &clk->base.base.subdev;
|
||||
u32 val = 0;
|
||||
u32 rev = 0;
|
||||
|
||||
#if IS_ENABLED(CONFIG_ARCH_TEGRA)
|
||||
tegra_fuse_readl(FUSE_RESERVED_CALIB0, &val);
|
||||
rev = (val >> FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT) &
|
||||
MASK(FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH);
|
||||
#endif
|
||||
|
||||
/* No fused parameters, we will calibrate later */
|
||||
if (rev == 0)
|
||||
return -EINVAL;
|
||||
|
||||
/* Integer part in mV + fractional part in uV */
|
||||
clk->uvdet_slope = ((val >> FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT) &
|
||||
MASK(FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH)) * 1000 +
|
||||
((val >> FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT) &
|
||||
MASK(FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH));
|
||||
|
||||
/* Integer part in mV + fractional part in 100uV */
|
||||
clk->uvdet_offs = ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT) &
|
||||
MASK(FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH)) * 1000 +
|
||||
((val >> FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT) &
|
||||
MASK(FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH)) * 100;
|
||||
|
||||
nvkm_debug(subdev, "fused calibration data: slope %d, offs %d\n",
|
||||
clk->uvdet_slope, clk->uvdet_offs);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
gm20b_clk_init_safe_fmax(struct gm20b_clk *clk)
{
struct nvkm_subdev *subdev = &clk->base.base.subdev;
struct nvkm_volt *volt = subdev->device->volt;
struct nvkm_pstate *pstates = clk->base.base.func->pstates;
int nr_pstates = clk->base.base.func->nr_pstates;
int vmin, id = 0;
u32 fmax = 0;
int i;

/* find lowest voltage we can use */
vmin = volt->vid[0].uv;
for (i = 1; i < volt->vid_nr; i++) {
if (volt->vid[i].uv <= vmin) {
vmin = volt->vid[i].uv;
id = volt->vid[i].vid;
}
}

/* find max frequency at this voltage */
for (i = 0; i < nr_pstates; i++)
if (pstates[i].base.voltage == id)
fmax = max(fmax,
pstates[i].base.domain[nv_clk_src_gpc]);

if (!fmax) {
nvkm_error(subdev, "failed to evaluate safe fmax\n");
return -EINVAL;
}

/* we are safe at 90% of the max frequency */
clk->safe_fmax_vmin = fmax * (100 - 10) / 100;
nvkm_debug(subdev, "safe fmax @ vmin = %u Khz\n", clk->safe_fmax_vmin);

return 0;
}
int
gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
struct gm20b_clk *clk;
struct nvkm_subdev *subdev;
struct gk20a_clk_pllg_params *clk_params;
int ret;

/* Speedo 0 GPUs cannot use noise-aware PLL */
if (tdev->gpu_speedo_id == 0)
return gm20b_clk_new_speedo0(device, index, pclk);

/* Speedo >= 1, use NAPLL */
clk = kzalloc(sizeof(*clk) + sizeof(*clk_params), GFP_KERNEL);
if (!clk)
return -ENOMEM;
*pclk = &clk->base.base;
subdev = &clk->base.base.subdev;

/* duplicate the clock parameters since we will patch them below */
clk_params = (void *) (clk + 1);
*clk_params = gm20b_pllg_params;
ret = gk20a_clk_ctor(device, index, &gm20b_clk, clk_params,
&clk->base);
if (ret)
return ret;

/*
* NAPLL can only work with max_u, clamp the m range so
* gk20a_pllg_calc_mnp always uses it
*/
clk_params->max_m = clk_params->min_m = DIV_ROUND_UP(clk_params->max_u,
(clk->base.parent_rate / KHZ));
if (clk_params->max_m == 0) {
nvkm_warn(subdev, "cannot use NAPLL, using legacy clock...\n");
kfree(clk);
return gm20b_clk_new_speedo0(device, index, pclk);
}

clk->base.pl_to_div = pl_to_div;
clk->base.div_to_pl = div_to_pl;

clk->dvfs_params = &gm20b_dvfs_params;

ret = gm20b_clk_init_fused_params(clk);
/*
* we will calibrate during init - should never happen on
* prod parts
*/
if (ret)
nvkm_warn(subdev, "no fused calibration parameters\n");

ret = gm20b_clk_init_safe_fmax(clk);
if (ret)
return ret;

return 0;
}
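Quick sanity check (illustration only, not part of the commit) of the m-range clamp above. The numbers are hypothetical; the real ones come from gm20b_pllg_params and the Tegra parent clock.

#include <stdio.h>

#define KHZ 1000
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))	/* same rounding as the kernel macro */

int main(void)
{
	unsigned long parent_rate = 38400000;	/* hypothetical 38.4 MHz reference */
	unsigned int max_u = 38400;		/* hypothetical max_u, in kHz */

	/* e.g. max_u = 38400 kHz and a 38.4 MHz parent -> min_m = max_m = 1 */
	unsigned int m = DIV_ROUND_UP(max_u, parent_rate / KHZ);
	printf("min_m = max_m = %u\n", m);
	return 0;
}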
@ -24,6 +24,8 @@ nvkm-y += nvkm/subdev/fb/gk104.o
nvkm-y += nvkm/subdev/fb/gk20a.o
nvkm-y += nvkm/subdev/fb/gm107.o
nvkm-y += nvkm/subdev/fb/gm200.o
nvkm-y += nvkm/subdev/fb/gp100.o
nvkm-y += nvkm/subdev/fb/gp104.o

nvkm-y += nvkm/subdev/fb/ram.o
nvkm-y += nvkm/subdev/fb/ramnv04.o
@ -41,6 +43,7 @@ nvkm-y += nvkm/subdev/fb/rammcp77.o
nvkm-y += nvkm/subdev/fb/ramgf100.o
nvkm-y += nvkm/subdev/fb/ramgk104.o
nvkm-y += nvkm/subdev/fb/ramgm107.o
nvkm-y += nvkm/subdev/fb/ramgp100.o
nvkm-y += nvkm/subdev/fb/sddr2.o
nvkm-y += nvkm/subdev/fb/sddr3.o
nvkm-y += nvkm/subdev/fb/gddr3.o
@ -25,6 +25,7 @@
#include "ram.h"

#include <core/memory.h>
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/M0203.h>
#include <engine/gr.h>
@ -134,6 +135,10 @@ nvkm_fb_init(struct nvkm_subdev *subdev)

if (fb->func->init)
fb->func->init(fb);
if (fb->func->init_page)
fb->func->init_page(fb);
if (fb->func->init_unkn)
fb->func->init_unkn(fb);
return 0;
}

@ -171,6 +176,7 @@ nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
nvkm_subdev_ctor(&nvkm_fb, device, index, &fb->subdev);
fb->func = func;
fb->tile.regions = fb->func->tile.regions;
fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", 0);
}

int
@ -71,6 +71,22 @@ gf100_fb_oneinit(struct nvkm_fb *fb)
return 0;
}

void
gf100_fb_init_page(struct nvkm_fb *fb)
{
struct nvkm_device *device = fb->subdev.device;
switch (fb->page) {
case 16:
nvkm_mask(device, 0x100c80, 0x00000001, 0x00000001);
break;
case 17:
default:
nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000);
fb->page = 17;
break;
}
}

void
gf100_fb_init(struct nvkm_fb *base)
{
@ -79,8 +95,6 @@ gf100_fb_init(struct nvkm_fb *base)

if (fb->r100c10_page)
nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);

nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
}

void *
@ -125,6 +139,7 @@ gf100_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gf100_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
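Note on the new hook (illustration only, not part of the commit): fb->page appears to hold log2 of the big-page size (16 = 64 KiB, 17 = 128 KiB), seeded from the NvFbBigPage option added to nvkm_fb_ctor() above. The sketch below shows how that value maps onto the 0x100c80 bit on gf100-class boards; the "nouveau.config=NvFbBigPage=16" syntax is an assumption about how the cfgopt string is usually passed.

#include <stdio.h>

/* page = log2(big page size), mirroring gf100_fb_init_page() above.
 * Selecting 64 KiB pages would look like "nouveau.config=NvFbBigPage=16"
 * on the kernel command line (assumed syntax for nvkm_longopt()'s cfgopt).
 */
static int big_page_bit(int page)
{
	switch (page) {
	case 16: return 1;	/* 64 KiB large pages: set bit 0 of 0x100c80 */
	case 17:
	default: return 0;	/* 128 KiB large pages (default) */
	}
}

int main(void)
{
	printf("NvFbBigPage=16 -> 0x100c80 bit0 = %d\n", big_page_bit(16));
	printf("NvFbBigPage=17 -> 0x100c80 bit0 = %d\n", big_page_bit(17));
	return 0;
}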
@ -14,4 +14,6 @@ int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *,
void *gf100_fb_dtor(struct nvkm_fb *);
void gf100_fb_init(struct nvkm_fb *);
void gf100_fb_intr(struct nvkm_fb *);

void gp100_fb_init(struct nvkm_fb *);
#endif
@ -29,6 +29,7 @@ gk104_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gk104_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
@ -27,7 +27,6 @@ static void
gk20a_fb_init(struct nvkm_fb *fb)
{
struct nvkm_device *device = fb->subdev.device;
nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->mmu_wr) >> 8);
nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->mmu_rd) >> 8);
}
@ -36,6 +35,7 @@ static const struct nvkm_fb_func
gk20a_fb = {
.oneinit = gf100_fb_oneinit,
.init = gk20a_fb_init,
.init_page = gf100_fb_init_page,
.memtype_valid = gf100_fb_memtype_valid,
};
@ -29,6 +29,7 @@ gm107_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gm107_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
@ -26,6 +26,24 @@

#include <core/memory.h>

void
gm200_fb_init_page(struct nvkm_fb *fb)
{
struct nvkm_device *device = fb->subdev.device;
switch (fb->page) {
case 16:
nvkm_mask(device, 0x100c80, 0x00000801, 0x00000001);
break;
case 17:
nvkm_mask(device, 0x100c80, 0x00000801, 0x00000000);
break;
default:
nvkm_mask(device, 0x100c80, 0x00000800, 0x00000800);
fb->page = 0;
break;
}
}

static void
gm200_fb_init(struct nvkm_fb *base)
{
@ -48,6 +66,7 @@ gm200_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gm200_fb_init,
.init_page = gm200_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gm107_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c (new file, 69 lines)
@ -0,0 +1,69 @@
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "gf100.h"
#include "ram.h"

#include <core/memory.h>

static void
gp100_fb_init_unkn(struct nvkm_fb *base)
{
struct nvkm_device *device = gf100_fb(base)->base.subdev.device;
nvkm_wr32(device, 0x1fac80, nvkm_rd32(device, 0x100c80));
nvkm_wr32(device, 0x1facc4, nvkm_rd32(device, 0x100cc4));
nvkm_wr32(device, 0x1facc8, nvkm_rd32(device, 0x100cc8));
nvkm_wr32(device, 0x1faccc, nvkm_rd32(device, 0x100ccc));
}

void
gp100_fb_init(struct nvkm_fb *base)
{
struct gf100_fb *fb = gf100_fb(base);
struct nvkm_device *device = fb->base.subdev.device;

if (fb->r100c10_page)
nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);

nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->base.mmu_wr) >> 8);
nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->base.mmu_rd) >> 8);
nvkm_mask(device, 0x100cc4, 0x00060000,
max(nvkm_memory_size(fb->base.mmu_rd) >> 16, (u64)2) << 17);
}

static const struct nvkm_fb_func
gp100_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gp100_fb_init,
.init_page = gm200_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
.ram_new = gp100_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
};

int
gp100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
return gf100_fb_new_(&gp100_fb, device, index, pfb);
}
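Numeric check (illustration only, not part of the commit) of the 0x100cc4 write in gp100_fb_init() above, with a made-up mmu_rd buffer size; nvkm_mask() only updates the bits covered by 0x00060000.

#include <stdio.h>
#include <stdint.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	uint64_t mmu_rd_size = 0x8000;	/* hypothetical 32 KiB buffer */
	uint32_t field = (uint32_t)(max(mmu_rd_size >> 16, (uint64_t)2) << 17);

	/* only bits 17:18 of 0x100cc4 are touched by the nvkm_mask() call */
	printf("0x100cc4 field value: 0x%08x (masked: 0x%08x)\n",
	       field, field & 0x00060000);
	return 0;
}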
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c (new file, 43 lines)
@ -0,0 +1,43 @@
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "gf100.h"
#include "ram.h"

#include <core/memory.h>

static const struct nvkm_fb_func
gp104_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gp100_fb_init,
.init_page = gm200_fb_init_page,
.ram_new = gp100_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
};

int
gp104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
return gf100_fb_new_(&gp104_fb, device, index, pfb);
}
@ -8,6 +8,8 @@ struct nvkm_fb_func {
void *(*dtor)(struct nvkm_fb *);
int (*oneinit)(struct nvkm_fb *);
void (*init)(struct nvkm_fb *);
void (*init_page)(struct nvkm_fb *);
void (*init_unkn)(struct nvkm_fb *);
void (*intr)(struct nvkm_fb *);

struct {
@ -60,5 +62,8 @@ void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nvkm_fb_tile *);

int gf100_fb_oneinit(struct nvkm_fb *);
void gf100_fb_init_page(struct nvkm_fb *);
bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);

void gm200_fb_init_page(struct nvkm_fb *);
#endif

@ -47,4 +47,5 @@ int mcp77_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gf100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gp100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
#endif
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c (new file, 146 lines)
@ -0,0 +1,146 @@
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "ram.h"

#include <subdev/bios.h>
#include <subdev/bios/init.h>
#include <subdev/bios/rammap.h>

static int
gp100_ram_init(struct nvkm_ram *ram)
{
struct nvkm_subdev *subdev = &ram->fb->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bios *bios = device->bios;
u8 ver, hdr, cnt, len, snr, ssz;
u32 data;
int i;

/* run a bunch of tables from rammap table. there's actually
* individual pointers for each rammap entry too, but, nvidia
* seem to just run the last two entries' scripts early on in
* their init, and never again.. we'll just run 'em all once
* for now.
*
* i strongly suspect that each script is for a separate mode
* (likely selected by 0x9a065c's lower bits?), and the
* binary driver skips the one that's already been setup by
* the init tables.
*/
data = nvbios_rammapTe(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
if (!data || hdr < 0x15)
return -EINVAL;

cnt = nvbios_rd08(bios, data + 0x14); /* guess at count */
data = nvbios_rd32(bios, data + 0x10); /* guess u32... */
if (cnt) {
u32 save = nvkm_rd32(device, 0x9a065c) & 0x000000f0;
for (i = 0; i < cnt; i++, data += 4) {
if (i != save >> 4) {
nvkm_mask(device, 0x9a065c, 0x000000f0, i << 4);
nvbios_exec(&(struct nvbios_init) {
.subdev = subdev,
.bios = bios,
.offset = nvbios_rd32(bios, data),
.execute = 1,
});
}
}
nvkm_mask(device, 0x9a065c, 0x000000f0, save);
}

nvkm_mask(device, 0x9a0584, 0x11000000, 0x00000000);
nvkm_wr32(device, 0x10ecc0, 0xffffffff);
nvkm_mask(device, 0x9a0160, 0x00000010, 0x00000010);
return 0;
}

static const struct nvkm_ram_func
gp100_ram_func = {
.init = gp100_ram_init,
.get = gf100_ram_get,
.put = gf100_ram_put,
};

int
gp100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_ram *ram;
struct nvkm_subdev *subdev = &fb->subdev;
struct nvkm_device *device = subdev->device;
enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios);
const u32 rsvd_head = ( 256 * 1024); /* vga memory */
const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
u32 fbpa_num = nvkm_rd32(device, 0x022438), fbpa;
u32 fbio_opt = nvkm_rd32(device, 0x021c14);
u64 part, size = 0, comm = ~0ULL;
bool mixed = false;
int ret;

nvkm_debug(subdev, "022438: %08x\n", fbpa_num);
nvkm_debug(subdev, "021c14: %08x\n", fbio_opt);
for (fbpa = 0; fbpa < fbpa_num; fbpa++) {
if (!(fbio_opt & (1 << fbpa))) {
part = nvkm_rd32(device, 0x90020c + (fbpa * 0x4000));
nvkm_debug(subdev, "fbpa %02x: %lld MiB\n", fbpa, part);
part = part << 20;
if (part != comm) {
if (comm != ~0ULL)
mixed = true;
comm = min(comm, part);
}
size = size + part;
}
}

ret = nvkm_ram_new_(&gp100_ram_func, fb, type, size, 0, &ram);
*pram = ram;
if (ret)
return ret;

nvkm_mm_fini(&ram->vram);

if (mixed) {
ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
((comm * fbpa_num) - rsvd_head) >>
NVKM_RAM_MM_SHIFT, 1);
if (ret)
return ret;

ret = nvkm_mm_init(&ram->vram, (0x1000000000ULL + comm) >>
NVKM_RAM_MM_SHIFT,
(size - (comm * fbpa_num) - rsvd_tail) >>
NVKM_RAM_MM_SHIFT, 1);
if (ret)
return ret;
} else {
ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
(size - rsvd_head - rsvd_tail) >>
NVKM_RAM_MM_SHIFT, 1);
if (ret)
return ret;
}

return 0;
}
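For illustration (not part of the commit): with mixed FBPA sizes the code above appears to create two VRAM ranges, one covering the amount every FBPA can back and one for the remainder placed past the 64 GiB mark. The sketch below uses made-up FBPA sizes (4 GiB + 4 GiB + 2 GiB) and prints the byte offsets and sizes that would be shifted by NVKM_RAM_MM_SHIFT before being handed to nvkm_mm_init().

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t GiB = 1ULL << 30;
	uint64_t part[] = { 4 * GiB, 4 * GiB, 2 * GiB };	/* hypothetical FBPA sizes */
	uint64_t rsvd_head = 256 * 1024, rsvd_tail = 1024 * 1024;
	uint64_t size = 0, comm = ~0ULL;
	int fbpa_num = 3, i;

	for (i = 0; i < fbpa_num; i++) {
		if (part[i] < comm)
			comm = part[i];
		size += part[i];
	}

	/* lower range: what every FBPA can back, minus the VGA hole */
	printf("low:  offset %#llx, size %#llx\n",
	       (unsigned long long)rsvd_head,
	       (unsigned long long)(comm * fbpa_num - rsvd_head));
	/* upper range: the remainder, mapped above the 64 GiB mark */
	printf("high: offset %#llx, size %#llx\n",
	       (unsigned long long)(0x1000000000ULL + comm),
	       (unsigned long long)(size - comm * fbpa_num - rsvd_tail));
	return 0;
}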
@ -3,3 +3,4 @@ nvkm-y += nvkm/subdev/ltc/gf100.o
nvkm-y += nvkm/subdev/ltc/gk104.o
nvkm-y += nvkm/subdev/ltc/gm107.o
nvkm-y += nvkm/subdev/ltc/gm200.o
nvkm-y += nvkm/subdev/ltc/gp100.o
@ -23,7 +23,6 @@
*/
#include "priv.h"

#include <core/enum.h>
#include <subdev/fb.h>
#include <subdev/timer.h>

@ -71,7 +70,7 @@ gf100_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
nvkm_wr32(device, 0x17ea58, depth);
}

static const struct nvkm_bitfield
const struct nvkm_bitfield
gf100_ltc_lts_intr_name[] = {
{ 0x00000001, "IDLE_ERROR_IQ" },
{ 0x00000002, "IDLE_ERROR_CBC" },
@ -68,18 +68,22 @@ gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
nvkm_wr32(device, 0x17e34c, depth);
}

static void
void
gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s)
{
struct nvkm_subdev *subdev = &ltc->subdev;
struct nvkm_device *device = subdev->device;
u32 base = 0x140400 + (c * 0x2000) + (s * 0x200);
u32 stat = nvkm_rd32(device, base + 0x00c);
u32 intr = nvkm_rd32(device, base + 0x00c);
u16 stat = intr & 0x0000ffff;
char msg[128];

if (stat) {
nvkm_error(subdev, "LTC%d_LTS%d: %08x\n", c, s, stat);
nvkm_wr32(device, base + 0x00c, stat);
nvkm_snprintbf(msg, sizeof(msg), gf100_ltc_lts_intr_name, stat);
nvkm_error(subdev, "LTC%d_LTS%d: %08x [%s]\n", c, s, intr, msg);
}

nvkm_wr32(device, base + 0x00c, intr);
}

void
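The reworked handler formats the low 16 status bits through nvkm_snprintbf() and the gf100_ltc_lts_intr_name[] table exported above. Below is a rough standalone re-implementation of that bit-to-name formatting (illustration only, not part of the commit; output format approximated, only two table entries shown).

#include <stdio.h>
#include <stdint.h>

struct bitfield { uint32_t mask; const char *name; };

static const struct bitfield lts_intr_name[] = {
	{ 0x00000001, "IDLE_ERROR_IQ" },
	{ 0x00000002, "IDLE_ERROR_CBC" },
	{ 0, NULL },
};

/* append the name of every set bit, roughly what nvkm_snprintbf() does */
static void snprintbf(char *buf, size_t len, const struct bitfield *bf, uint32_t val)
{
	size_t used = 0;
	buf[0] = '\0';
	for (; bf->name; bf++) {
		if ((val & bf->mask) && used < len)
			used += snprintf(buf + used, len - used, "%s%s",
					 used ? " " : "", bf->name);
	}
}

int main(void)
{
	char msg[128];
	uint16_t stat = 0x0003;	/* hypothetical interrupt status */

	snprintbf(msg, sizeof(msg), lts_intr_name, stat);
	printf("LTC0_LTS0: %08x [%s]\n", stat, msg);
	return 0;
}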
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c (new file, 75 lines)
@ -0,0 +1,75 @@
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"

static void
gp100_ltc_intr(struct nvkm_ltc *ltc)
{
struct nvkm_device *device = ltc->subdev.device;
u32 mask;

mask = nvkm_rd32(device, 0x0001c0);
while (mask) {
u32 s, c = __ffs(mask);
for (s = 0; s < ltc->lts_nr; s++)
gm107_ltc_intr_lts(ltc, c, s);
mask &= ~(1 << c);
}
}

static int
gp100_ltc_oneinit(struct nvkm_ltc *ltc)
{
struct nvkm_device *device = ltc->subdev.device;
ltc->ltc_nr = nvkm_rd32(device, 0x12006c);
ltc->lts_nr = nvkm_rd32(device, 0x17e280) >> 28;
/*XXX: tagram allocation - TBD */
return nvkm_mm_init(&ltc->tags, 0, 0, 1);
}

static void
gp100_ltc_init(struct nvkm_ltc *ltc)
{
/*XXX: PMU LS call to setup tagram address */
}

static const struct nvkm_ltc_func
gp100_ltc = {
.oneinit = gp100_ltc_oneinit,
.init = gp100_ltc_init,
.intr = gp100_ltc_intr,
.cbc_clear = gm107_ltc_cbc_clear,
.cbc_wait = gm107_ltc_cbc_wait,
.zbc = 16,
.zbc_clear_color = gm107_ltc_zbc_clear_color,
.zbc_clear_depth = gm107_ltc_zbc_clear_depth,
.invalidate = gf100_ltc_invalidate,
.flush = gf100_ltc_flush,
};

int
gp100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
{
return nvkm_ltc_new_(&gp100_ltc, device, index, pltc);
}
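Illustration only (not part of the commit): gp100_ltc_intr() above walks the per-LTC pending mask one set bit at a time. The same pattern in plain C, with __builtin_ctz() standing in for the kernel's __ffs():

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x00000015;	/* hypothetical: LTCs 0, 2 and 4 pending */

	while (mask) {
		unsigned int c = __builtin_ctz(mask);	/* index of lowest set bit */
		printf("servicing LTC%u\n", c);
		mask &= ~(1u << c);
	}
	return 0;
}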