author    | Dave Airlie <airlied@redhat.com> | 2020-04-30 11:08:54 +1000
committer | Dave Airlie <airlied@redhat.com> | 2020-04-30 11:08:54 +1000
commit    | 937eea297e26effac6809a0bf8c20e6ca9d90b9a (patch)
tree      | 5e2d4ddc284776b56355d36c8d2c5a757956b3d4
parent    | 126a34061eec2df05a5a28052edefd4e6125f31c (diff)
parent    | e748f07d00c1c4a9106acafac52df7ea4ecf6264 (diff)
download  | linux-next-937eea297e26effac6809a0bf8c20e6ca9d90b9a.tar.gz
Merge tag 'amd-drm-next-5.8-2020-04-24' of git://people.freedesktop.org/~agd5f/linux into drm-next
amd-drm-next-5.8-2020-04-24:
amdgpu:
- Documentation improvements
- Enable FRU chip access on boards that support it (a read-back sketch follows the diffstat below)
- RAS updates
- SR-IOV updates
- Powerplay locking fixes for older SMU versions
- VCN DPG (dynamic powergating) cleanup
- VCN 2.5 DPG enablement
- Rework GPU scheduler handling
- Improve scheduler priority handling (the new table lookup is sketched after the diffstat)
- Add SPM (streaming performance monitor) golden settings for navi
- GFX10 clockgating fixes
- DC ABM (automatic backlight modulation) fixes
- DC cursor and plane fixes
- DC watermark fixes
- DC clock handling fixes
- DC color management fixes
- GPU reset fixes
- Clean up MMIO access macros
- EEPROM access fixes
- Misc code cleanups
amdkfd:
- Misc code cleanups
radeon:
- Clean up safe reg list generation
- Misc code cleanups
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200424190827.4542-1-alexander.deucher@amd.com
260 files changed, 9302 insertions, 3868 deletions
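
The FRU EEPROM support added here (amdgpu_fru_eeprom.c, plus the sysfs documentation hunk below) exposes product_name, product_number and serial_number as read-only attributes on the device node. A minimal userspace sketch of reading them back — the card0 path and buffer sizes are assumptions for illustration; on boards without a FRU the attributes exist but read back empty:

#include <stdio.h>

static void print_attr(const char *name)
{
	char path[128], buf[64];
	FILE *f;

	/* The drm device's parent PCI device carries the new attributes;
	 * card0 is assumed here. */
	snprintf(path, sizeof(path), "/sys/class/drm/card0/device/%s", name);
	f = fopen(path, "r");
	if (!f) {
		printf("%s: <unavailable>\n", name);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", name, buf);
	fclose(f);
}

int main(void)
{
	print_attr("product_name");
	print_attr("product_number");
	print_attr("serial_number");
	return 0;
}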
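
The scheduler rework (amdgpu_ctx.c below) replaces the per-IP scheduler arrays with a single gpu_sched[hw_ip][prio] table and sanitizes the user-controlled hw_ip index with array_index_nospec(). A standalone sketch of that lookup pattern — the table sizes are stand-ins and the kernel's nospec helper is stubbed with a plain bounds clamp:

#include <stdio.h>

#define HW_IP_NUM	  4	/* stand-in for AMDGPU_HW_IP_NUM */
#define RING_PRIO_MAX	  3	/* stand-in for AMDGPU_RING_PRIO_MAX */
#define RING_PRIO_DEFAULT 1

/* Userspace stub for the kernel's array_index_nospec(): an out-of-range
 * index collapses to 0 instead of being usable under speculation. */
#define array_index_nospec(idx, sz) ((idx) < (sz) ? (idx) : 0)

struct gpu_sched {
	unsigned int num_scheds;	/* 0 means no ring at this priority */
	const char *sched;		/* placeholder for drm_gpu_scheduler ** */
};

static struct gpu_sched gpu_sched[HW_IP_NUM][RING_PRIO_MAX];

/* Mirrors the merged lookup: fall back to the default priority when no
 * scheduler is registered for the requested one. */
static const struct gpu_sched *pick_sched(unsigned int hw_ip,
					  unsigned int hw_prio)
{
	hw_ip = array_index_nospec(hw_ip, HW_IP_NUM);
	if (gpu_sched[hw_ip][hw_prio].num_scheds == 0)
		hw_prio = RING_PRIO_DEFAULT;
	return &gpu_sched[hw_ip][hw_prio];
}

int main(void)
{
	gpu_sched[0][RING_PRIO_DEFAULT] =
		(struct gpu_sched){ 1, "gfx default-prio sched list" };

	/* No high-priority entry registered, so this falls back. */
	printf("%s\n", pick_sched(0, 2)->sched);
	return 0;
}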
diff --git a/Documentation/gpu/amdgpu.rst b/Documentation/gpu/amdgpu.rst index 0efede580039..4cc74325bf91 100644 --- a/Documentation/gpu/amdgpu.rst +++ b/Documentation/gpu/amdgpu.rst @@ -202,3 +202,91 @@ busy_percent .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c :doc: busy_percent + +GPU Product Information +======================= + +Information about the GPU can be obtained on certain cards +via sysfs + +product_name +------------ + +.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c + :doc: product_name + +product_number +-------------- + +.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c + :doc: product_name + +serial_number +------------- + +.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c + :doc: serial_number + +unique_id +--------- + +.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c + :doc: unique_id + +GPU Memory Usage Information +============================ + +Various memory accounting can be accessed via sysfs + +mem_info_vram_total +------------------- + +.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c + :doc: mem_info_vram_total + +mem_info_vram_used +------------------ + +.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c + :doc: mem_info_vram_used + +mem_info_vis_vram_total +----------------------- + +.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c + :doc: mem_info_vis_vram_total + +mem_info_vis_vram_used +---------------------- + +.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c + :doc: mem_info_vis_vram_used + +mem_info_gtt_total +------------------ + +.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c + :doc: mem_info_gtt_total + +mem_info_gtt_used +----------------- + +.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c + :doc: mem_info_gtt_used + +PCIe Accounting Information +=========================== + +pcie_bw +------- + +.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c + :doc: pcie_bw + +pcie_replay_count +----------------- + +.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c + :doc: pcie_replay_count + + diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index c2bbcdd9c875..210d57a4afc8 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -55,7 +55,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \ amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \ amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \ - amdgpu_umc.o smu_v11_0_i2c.o + amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 2992a49ad4a5..56da8920195d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -28,6 +28,18 @@ #ifndef __AMDGPU_H__ #define __AMDGPU_H__ +#ifdef pr_fmt +#undef pr_fmt +#endif + +#define pr_fmt(fmt) "amdgpu: " fmt + +#ifdef dev_fmt +#undef dev_fmt +#endif + +#define dev_fmt(fmt) "amdgpu: " fmt + #include "amdgpu_ctx.h" #include <linux/atomic.h> @@ -388,6 +400,13 @@ struct amdgpu_sa_bo { int amdgpu_fence_slab_init(void); void amdgpu_fence_slab_fini(void); +enum amdgpu_ib_pool_type { + AMDGPU_IB_POOL_NORMAL = 0, + AMDGPU_IB_POOL_VM, + AMDGPU_IB_POOL_DIRECT, + + AMDGPU_IB_POOL_MAX +}; /* * IRQS. 
*/ @@ -439,7 +458,9 @@ struct amdgpu_fpriv { int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv); int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, - unsigned size, struct amdgpu_ib *ib); + unsigned size, + enum amdgpu_ib_pool_type pool, + struct amdgpu_ib *ib); void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct dma_fence *f); int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, @@ -512,7 +533,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p, /* * Writeback */ -#define AMDGPU_MAX_WB 128 /* Reserve at most 128 WB slots for amdgpu-owned rings. */ +#define AMDGPU_MAX_WB 256 /* Reserve at most 256 WB slots for amdgpu-owned rings. */ struct amdgpu_wb { struct amdgpu_bo *wb_obj; @@ -843,7 +864,8 @@ struct amdgpu_device { unsigned num_rings; struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; bool ib_pool_ready; - struct amdgpu_sa_manager ring_tmp_bo; + struct amdgpu_sa_manager ring_tmp_bo[AMDGPU_IB_POOL_MAX]; + struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX]; /* interrupts */ struct amdgpu_irq irq; @@ -935,9 +957,6 @@ struct amdgpu_device { /* link all shadow bo */ struct list_head shadow_list; struct mutex shadow_list_lock; - /* keep an lru list of rings by HW IP */ - struct list_head ring_lru_list; - spinlock_t ring_lru_list_lock; /* record hw reset is performed */ bool has_hw_reset; @@ -946,8 +965,6 @@ struct amdgpu_device { /* s3/s4 mask */ bool in_suspend; - /* record last mm index being written through WREG32*/ - unsigned long last_mm_index; bool in_gpu_reset; enum pp_mp1_state mp1_state; struct mutex lock_reset; @@ -966,14 +983,17 @@ struct amdgpu_device { uint64_t unique_id; uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS]; - /* device pstate */ - int pstate; /* enable runtime pm on the device */ bool runpm; bool in_runpm; bool pm_sysfs_en; bool ucode_sysfs_en; + + /* Chip product information */ + char product_number[16]; + char product_name[32]; + char serial[16]; }; static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) @@ -990,10 +1010,10 @@ int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev); void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, uint32_t *buf, size_t size, bool write); -uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, +uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg, + uint32_t acc_flags); +void amdgpu_device_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags); -void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, - uint32_t acc_flags); void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags); void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value); @@ -1010,25 +1030,20 @@ int emu_soc_asic_init(struct amdgpu_device *adev); /* * Registers read & write functions. 
*/ - -#define AMDGPU_REGS_IDX (1<<0) #define AMDGPU_REGS_NO_KIQ (1<<1) -#define AMDGPU_REGS_KIQ (1<<2) -#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ) -#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ) +#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ) +#define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ) -#define RREG32_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_KIQ) -#define WREG32_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_KIQ) +#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg)) +#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v)) #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg)) #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v)) -#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0) -#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX) -#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0)) -#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0) -#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_IDX) +#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0) +#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0)) +#define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0) #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg)) @@ -1065,7 +1080,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev); tmp_ |= ((val) & ~(mask)); \ WREG32_PLL(reg, tmp_); \ } while (0) -#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false)) +#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false)) #define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg)) #define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index 4ec6d0c03201..691c89705bcd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -543,6 +543,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t temp; struct v10_compute_mqd *m = get_mqd(mqd); + if (adev->in_gpu_reset) + return -EIO; + #if 0 unsigned long flags; int retry; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 9dff792c9290..0768b7eb7683 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -362,13 +362,13 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm) ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate, &param); if (ret) { - pr_err("amdgpu: failed to validate PT BOs\n"); + pr_err("failed to validate PT BOs\n"); return ret; } ret = amdgpu_amdkfd_validate(&param, pd); if (ret) { - pr_err("amdgpu: failed to validate PD\n"); + pr_err("failed to validate PD\n"); return ret; } @@ -377,7 +377,7 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm) if (vm->use_cpu_for_update) { ret = amdgpu_bo_kmap(pd, NULL); if (ret) { - pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret); + pr_err("failed to kmap PD, ret=%d\n", ret); return ret; } } @@ -660,15 +660,15 @@ static int reserve_bo_and_vm(struct kgd_mem *mem, ret =
ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list, false, &ctx->duplicates); - if (!ret) - ctx->reserved = true; - else { - pr_err("Failed to reserve buffers in ttm\n"); + if (ret) { + pr_err("Failed to reserve buffers in ttm.\n"); kfree(ctx->vm_pd); ctx->vm_pd = NULL; + return ret; } - return ret; + ctx->reserved = true; + return 0; } /** @@ -733,17 +733,15 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem, ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list, false, &ctx->duplicates); - if (!ret) - ctx->reserved = true; - else - pr_err("Failed to reserve buffers in ttm.\n"); - if (ret) { + pr_err("Failed to reserve buffers in ttm.\n"); kfree(ctx->vm_pd); ctx->vm_pd = NULL; + return ret; } - return ret; + ctx->reserved = true; + return 0; } /** @@ -1286,22 +1284,22 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( struct kfd_bo_va_list *entry, *tmp; struct bo_vm_reservation_context ctx; struct ttm_validate_buffer *bo_list_entry; + unsigned int mapped_to_gpu_memory; int ret; mutex_lock(&mem->lock); + mapped_to_gpu_memory = mem->mapped_to_gpu_memory; + mutex_unlock(&mem->lock); + /* lock is not needed after this, since mem is unused and will + * be freed anyway + */ - if (mem->mapped_to_gpu_memory > 0) { + if (mapped_to_gpu_memory > 0) { pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n", mem->va, bo_size); - mutex_unlock(&mem->lock); return -EBUSY; } - mutex_unlock(&mem->lock); - /* lock is not needed after this, since mem is unused and will - * be freed anyway - */ - /* No more MMU notifiers */ amdgpu_mn_unregister(mem->bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 031b094607bd..78ac6dbe70d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -60,8 +60,6 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device, { CGS_FUNC_ADEV; switch (space) { - case CGS_IND_REG__MMIO: - return RREG32_IDX(index); case CGS_IND_REG__PCIE: return RREG32_PCIE(index); case CGS_IND_REG__SMC: @@ -77,6 +75,8 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device, case CGS_IND_REG__AUDIO_ENDPT: DRM_ERROR("audio endpt register access not implemented.\n"); return 0; + default: + BUG(); } WARN(1, "Invalid indirect register space"); return 0; @@ -88,8 +88,6 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device, { CGS_FUNC_ADEV; switch (space) { - case CGS_IND_REG__MMIO: - return WREG32_IDX(index, value); case CGS_IND_REG__PCIE: return WREG32_PCIE(index, value); case CGS_IND_REG__SMC: @@ -105,6 +103,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device, case CGS_IND_REG__AUDIO_ENDPT: DRM_ERROR("audio endpt register access not implemented.\n"); return; + default: + BUG(); } WARN(1, "Invalid indirect register space"); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index af91627b19b0..3eee5c7d83e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -924,7 +924,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, ring = to_amdgpu_ring(entity->rq->sched); r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ? 
- chunk_ib->ib_bytes : 0, ib); + chunk_ib->ib_bytes : 0, AMDGPU_IB_POOL_NORMAL, ib); if (r) { DRM_ERROR("Failed to get ib !\n"); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 6ed36a2c5f73..8842c55d4490 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -26,6 +26,7 @@ #include "amdgpu.h" #include "amdgpu_sched.h" #include "amdgpu_ras.h" +#include <linux/nospec.h> #define to_amdgpu_ctx_entity(e) \ container_of((e), struct amdgpu_ctx_entity, entity) @@ -72,13 +73,30 @@ static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sch } } -static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring) +static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev, + enum drm_sched_priority prio, + u32 hw_ip) +{ + unsigned int hw_prio; + + hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ? + amdgpu_ctx_sched_prio_to_compute_prio(prio) : + AMDGPU_RING_PRIO_DEFAULT; + hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM); + if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0) + hw_prio = AMDGPU_RING_PRIO_DEFAULT; + + return hw_prio; +} + +static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip, + const u32 ring) { struct amdgpu_device *adev = ctx->adev; struct amdgpu_ctx_entity *entity; struct drm_gpu_scheduler **scheds = NULL, *sched = NULL; unsigned num_scheds = 0; - enum gfx_pipe_priority hw_prio; + unsigned int hw_prio; enum drm_sched_priority priority; int r; @@ -90,52 +108,16 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const entity->sequence = 1; priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ? ctx->init_priority : ctx->override_priority; - switch (hw_ip) { - case AMDGPU_HW_IP_GFX: - sched = &adev->gfx.gfx_ring[0].sched; - scheds = &sched; - num_scheds = 1; - break; - case AMDGPU_HW_IP_COMPUTE: - hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority); - scheds = adev->gfx.compute_prio_sched[hw_prio]; - num_scheds = adev->gfx.num_compute_sched[hw_prio]; - break; - case AMDGPU_HW_IP_DMA: - scheds = adev->sdma.sdma_sched; - num_scheds = adev->sdma.num_sdma_sched; - break; - case AMDGPU_HW_IP_UVD: - sched = &adev->uvd.inst[0].ring.sched; - scheds = &sched; - num_scheds = 1; - break; - case AMDGPU_HW_IP_VCE: - sched = &adev->vce.ring[0].sched; - scheds = &sched; - num_scheds = 1; - break; - case AMDGPU_HW_IP_UVD_ENC: - sched = &adev->uvd.inst[0].ring_enc[0].sched; - scheds = &sched; - num_scheds = 1; - break; - case AMDGPU_HW_IP_VCN_DEC: - sched = drm_sched_pick_best(adev->vcn.vcn_dec_sched, - adev->vcn.num_vcn_dec_sched); - scheds = &sched; - num_scheds = 1; - break; - case AMDGPU_HW_IP_VCN_ENC: - sched = drm_sched_pick_best(adev->vcn.vcn_enc_sched, - adev->vcn.num_vcn_enc_sched); + hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, hw_ip); + + hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM); + scheds = adev->gpu_sched[hw_ip][hw_prio].sched; + num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds; + + if (hw_ip == AMDGPU_HW_IP_VCN_ENC || hw_ip == AMDGPU_HW_IP_VCN_DEC) { + sched = drm_sched_pick_best(scheds, num_scheds); scheds = &sched; num_scheds = 1; - break; - case AMDGPU_HW_IP_VCN_JPEG: - scheds = adev->jpeg.jpeg_sched; - num_scheds = adev->jpeg.num_jpeg_sched; - break; } r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds, @@ -178,7 +160,6 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, ctx->override_priority = 
DRM_SCHED_PRIORITY_UNSET; return 0; - } static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity) @@ -525,7 +506,7 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx, enum drm_sched_priority priority) { struct amdgpu_device *adev = ctx->adev; - enum gfx_pipe_priority hw_prio; + unsigned int hw_prio; struct drm_gpu_scheduler **scheds = NULL; unsigned num_scheds; @@ -534,9 +515,11 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx, /* set hw priority */ if (hw_ip == AMDGPU_HW_IP_COMPUTE) { - hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority); - scheds = adev->gfx.compute_prio_sched[hw_prio]; - num_scheds = adev->gfx.num_compute_sched[hw_prio]; + hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, + AMDGPU_HW_IP_COMPUTE); + hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX); + scheds = adev->gpu_sched[hw_ip][hw_prio].sched; + num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds; drm_sched_entity_modify_sched(&aentity->entity, scheds, num_scheds); } @@ -665,78 +648,3 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr) idr_destroy(&mgr->ctx_handles); mutex_destroy(&mgr->lock); } - - -static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev) -{ - int num_compute_sched_normal = 0; - int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1; - int i; - - /* use one drm sched array, gfx.compute_sched to store both high and - * normal priority drm compute schedulers */ - for (i = 0; i < adev->gfx.num_compute_rings; i++) { - if (!adev->gfx.compute_ring[i].has_high_prio) - adev->gfx.compute_sched[num_compute_sched_normal++] = - &adev->gfx.compute_ring[i].sched; - else - adev->gfx.compute_sched[num_compute_sched_high--] = - &adev->gfx.compute_ring[i].sched; - } - - /* compute ring only has two priority for now */ - i = AMDGPU_GFX_PIPE_PRIO_NORMAL; - adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0]; - adev->gfx.num_compute_sched[i] = num_compute_sched_normal; - - i = AMDGPU_GFX_PIPE_PRIO_HIGH; - if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) { - /* When compute has no high priority rings then use */ - /* normal priority sched array */ - adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0]; - adev->gfx.num_compute_sched[i] = num_compute_sched_normal; - } else { - adev->gfx.compute_prio_sched[i] = - &adev->gfx.compute_sched[num_compute_sched_high - 1]; - adev->gfx.num_compute_sched[i] = - adev->gfx.num_compute_rings - num_compute_sched_normal; - } -} - -void amdgpu_ctx_init_sched(struct amdgpu_device *adev) -{ - int i, j; - - amdgpu_ctx_init_compute_sched(adev); - for (i = 0; i < adev->gfx.num_gfx_rings; i++) { - adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched; - adev->gfx.num_gfx_sched++; - } - - for (i = 0; i < adev->sdma.num_instances; i++) { - adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched; - adev->sdma.num_sdma_sched++; - } - - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] = - &adev->vcn.inst[i].ring_dec.sched; - } - - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - for (j = 0; j < adev->vcn.num_enc_rings; ++j) - adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] = - &adev->vcn.inst[i].ring_enc[j].sched; - } - - for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) - continue; - adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] = - 
&adev->jpeg.inst[i].ring_dec.sched; - } -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h index de490f183af2..f54e10314661 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h @@ -88,7 +88,4 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr); long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout); void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr); -void amdgpu_ctx_init_sched(struct amdgpu_device *adev); - - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index c0f9a651dc06..1a4894fa3693 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -152,11 +152,16 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, if (r < 0) return r; + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) + return r; + if (use_bank) { if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) { pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return -EINVAL; } mutex_lock(&adev->grbm_idx_mutex); @@ -207,6 +212,7 @@ end: pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -255,6 +261,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, if (r < 0) return r; + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) + return r; + while (size) { uint32_t value; @@ -263,6 +273,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, if (r) { pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return r; } @@ -275,6 +286,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -304,6 +316,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user if (r < 0) return r; + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) + return r; + while (size) { uint32_t value; @@ -311,6 +327,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user if (r) { pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return r; } @@ -325,6 +342,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -354,6 +372,10 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, if (r < 0) return r; + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) + return r; + while (size) { uint32_t value; @@ -362,6 +384,7 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, if (r) { pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return r; } @@ -374,6 +397,7 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, 
pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -403,6 +427,10 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user if (r < 0) return r; + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) + return r; + while (size) { uint32_t value; @@ -410,6 +438,7 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user if (r) { pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return r; } @@ -424,6 +453,7 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -453,6 +483,10 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, if (r < 0) return r; + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) + return r; + while (size) { uint32_t value; @@ -461,6 +495,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, if (r) { pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return r; } @@ -473,6 +508,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -502,6 +538,10 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * if (r < 0) return r; + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) + return r; + while (size) { uint32_t value; @@ -509,6 +549,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * if (r) { pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return r; } @@ -523,6 +564,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -651,16 +693,24 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, if (r < 0) return r; + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) + return r; + r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize); pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); - if (r) + if (r) { + amdgpu_virt_disable_access_debugfs(adev); return r; + } - if (size > valuesize) + if (size > valuesize) { + amdgpu_virt_disable_access_debugfs(adev); return -EINVAL; + } outsize = 0; x = 0; @@ -673,6 +723,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, } } + amdgpu_virt_disable_access_debugfs(adev); return !r ? 
outsize : r; } @@ -720,6 +771,10 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, if (r < 0) return r; + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) + return r; + /* switch to the specific se/sh/cu */ mutex_lock(&adev->grbm_idx_mutex); amdgpu_gfx_select_se_sh(adev, se, sh, cu); @@ -734,16 +789,20 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); - if (!x) + if (!x) { + amdgpu_virt_disable_access_debugfs(adev); return -EINVAL; + } while (size && (offset < x * 4)) { uint32_t value; value = data[offset >> 2]; r = put_user(value, (uint32_t *)buf); - if (r) + if (r) { + amdgpu_virt_disable_access_debugfs(adev); return r; + } result += 4; buf += 4; @@ -751,6 +810,7 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, size -= 4; } + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -805,6 +865,10 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, if (r < 0) return r; + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) + return r; + /* switch to the specific se/sh/cu */ mutex_lock(&adev->grbm_idx_mutex); amdgpu_gfx_select_se_sh(adev, se, sh, cu); @@ -840,6 +904,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, err: kfree(data); + amdgpu_virt_disable_access_debugfs(adev); return result; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index f84f9e35a73b..f9b315e7e004 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -64,6 +64,7 @@ #include "amdgpu_xgmi.h" #include "amdgpu_ras.h" #include "amdgpu_pmu.h" +#include "amdgpu_fru_eeprom.h" #include <linux/suspend.h> #include <drm/task_barrier.h> @@ -138,6 +139,72 @@ static DEVICE_ATTR(pcie_replay_count, S_IRUGO, static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev); /** + * DOC: product_name + * + * The amdgpu driver provides a sysfs API for reporting the product name + * for the device + * The file serial_number is used for this and returns the product name + * as returned from the FRU. + * NOTE: This is only available for certain server cards + */ + +static ssize_t amdgpu_device_get_product_name(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + + return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name); +} + +static DEVICE_ATTR(product_name, S_IRUGO, + amdgpu_device_get_product_name, NULL); + +/** + * DOC: product_number + * + * The amdgpu driver provides a sysfs API for reporting the part number + * for the device + * The file serial_number is used for this and returns the part number + * as returned from the FRU. 
+ * NOTE: This is only available for certain server cards + */ + +static ssize_t amdgpu_device_get_product_number(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + + return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number); +} + +static DEVICE_ATTR(product_number, S_IRUGO, + amdgpu_device_get_product_number, NULL); + +/** + * DOC: serial_number + * + * The amdgpu driver provides a sysfs API for reporting the serial number + * for the device + * The file serial_number is used for this and returns the serial number + * as returned from the FRU. + * NOTE: This is only available for certain server cards + */ + +static ssize_t amdgpu_device_get_serial_number(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + + return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial); +} + +static DEVICE_ATTR(serial_number, S_IRUGO, + amdgpu_device_get_serial_number, NULL); + +/** * amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control * * @dev: drm_device pointer @@ -187,32 +254,6 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, uint32_t hi = ~0; uint64_t last; - -#ifdef CONFIG_64BIT - last = min(pos + size, adev->gmc.visible_vram_size); - if (last > pos) { - void __iomem *addr = adev->mman.aper_base_kaddr + pos; - size_t count = last - pos; - - if (write) { - memcpy_toio(addr, buf, count); - mb(); - amdgpu_asic_flush_hdp(adev, NULL); - } else { - amdgpu_asic_invalidate_hdp(adev, NULL); - mb(); - memcpy_fromio(buf, addr, count); - } - - if (count == size) - return; - - pos += count; - buf += count / 4; - size -= count; - } -#endif - spin_lock_irqsave(&adev->mmio_idx_lock, flags); for (last = pos + size; pos < last; pos += 4) { uint32_t tmp = pos >> 31; @@ -231,10 +272,10 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, } /* - * MMIO register access helper functions. + * device register access helper functions. */ /** - * amdgpu_mm_rreg - read a memory mapped IO register + * amdgpu_device_rreg - read a register * * @adev: amdgpu_device pointer * @reg: dword aligned register offset @@ -242,25 +283,19 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, * * Returns the 32 bit value from the offset specified. 
*/ -uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, - uint32_t acc_flags) +uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg, + uint32_t acc_flags) { uint32_t ret; - if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))) + if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) return amdgpu_kiq_rreg(adev, reg); - if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) + if ((reg * 4) < adev->rmmio_size) ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); - else { - unsigned long flags; - - spin_lock_irqsave(&adev->mmio_idx_lock, flags); - writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); - ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); - spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); - } - trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret); + else + ret = adev->pcie_rreg(adev, (reg * 4)); + trace_amdgpu_device_rreg(adev->pdev->device, reg, ret); return ret; } @@ -306,28 +341,19 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) BUG(); } -void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags) +void static inline amdgpu_device_wreg_no_kiq(struct amdgpu_device *adev, uint32_t reg, + uint32_t v, uint32_t acc_flags) { - trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); + trace_amdgpu_device_wreg(adev->pdev->device, reg, v); - if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) + if ((reg * 4) < adev->rmmio_size) writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); - else { - unsigned long flags; - - spin_lock_irqsave(&adev->mmio_idx_lock, flags); - writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); - writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); - spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); - } - - if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { - udelay(500); - } + else + adev->pcie_wreg(adev, (reg * 4), v); } /** - * amdgpu_mm_wreg - write to a memory mapped IO register + * amdgpu_device_wreg - write to a register * * @adev: amdgpu_device pointer * @reg: dword aligned register offset @@ -336,17 +362,13 @@ void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, * * Writes the value specified to the offset specified. 
*/ -void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, - uint32_t acc_flags) +void amdgpu_device_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, + uint32_t acc_flags) { - if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { - adev->last_mm_index = v; - } - - if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))) + if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) return amdgpu_kiq_wreg(adev, reg, v); - amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags); + amdgpu_device_wreg_no_kiq(adev, reg, v, acc_flags); } /* @@ -365,7 +387,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v); } - amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags); + amdgpu_device_wreg_no_kiq(adev, reg, v, acc_flags); } /** @@ -397,20 +419,12 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg) */ void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v) { - if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { - adev->last_mm_index = v; - } - if ((reg * 4) < adev->rio_mem_size) iowrite32(v, adev->rio_mem + (reg * 4)); else { iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4)); iowrite32(v, adev->rio_mem + (mmMM_DATA * 4)); } - - if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { - udelay(500); - } } /** @@ -1147,7 +1161,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero return; if (state == VGA_SWITCHEROO_ON) { - pr_info("amdgpu: switched on\n"); + pr_info("switched on\n"); /* don't suspend or resume card normally */ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; @@ -1161,7 +1175,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero dev->switch_power_state = DRM_SWITCH_POWER_ON; drm_kms_helper_poll_enable(dev); } else { - pr_info("amdgpu: switched off\n"); + pr_info("switched off\n"); drm_kms_helper_poll_disable(dev); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; amdgpu_device_suspend(dev, true); @@ -1731,9 +1745,28 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) amdgpu_amdkfd_device_probe(adev); if (amdgpu_sriov_vf(adev)) { + /* handle vbios stuff prior full access mode for new handshake */ + if (adev->virt.req_init_data_ver == 1) { + if (!amdgpu_get_bios(adev)) { + DRM_ERROR("failed to get vbios\n"); + return -EINVAL; + } + + r = amdgpu_atombios_init(adev); + if (r) { + dev_err(adev->dev, "amdgpu_atombios_init failed\n"); + amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); + return r; + } + } + } + + /* we need to send REQ_GPU here for legacy handshaker otherwise the vbios + * will not be prepared by host for this VF */ + if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver < 1) { r = amdgpu_virt_request_full_gpu(adev, true); if (r) - return -EAGAIN; + return r; } adev->pm.pp_feature = amdgpu_pp_feature_mask; @@ -1763,6 +1796,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) } /* get the vbios after the asic_funcs are set up */ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { + /* skip vbios handling for new handshake */ + if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver == 1) + continue; + /* Read BIOS */ if (!amdgpu_get_bios(adev)) return -EINVAL; @@ -1889,6 +1926,12 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) if (r) return r; + if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver > 0) { + r = 
amdgpu_virt_request_full_gpu(adev, true); + if (r) + return -EAGAIN; + } + for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_blocks[i].status.valid) continue; @@ -1975,6 +2018,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) amdgpu_xgmi_add_device(adev); amdgpu_amdkfd_device_init(adev); + amdgpu_fru_get_product_info(adev); + init_failed: if (amdgpu_sriov_vf(adev)) amdgpu_virt_release_full_gpu(adev, true); @@ -2171,6 +2216,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) adev->ip_blocks[i].status.late_initialized = true; } + amdgpu_ras_set_error_query_ready(adev, true); + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE); amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE); @@ -2203,7 +2250,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) if (gpu_instance->adev->flags & AMD_IS_APU) continue; - r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0); + r = amdgpu_xgmi_set_pstate(gpu_instance->adev, + AMDGPU_XGMI_PSTATE_MIN); if (r) { DRM_ERROR("pstate setting failed (%d).\n", r); break; @@ -2785,12 +2833,12 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) * By default timeout for non compute jobs is 10000. * And there is no timeout enforced on compute jobs. * In SR-IOV or passthrough mode, timeout for compute - * jobs are 10000 by default. + * jobs are 60000 by default. */ adev->gfx_timeout = msecs_to_jiffies(10000); adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) - adev->compute_timeout = adev->gfx_timeout; + adev->compute_timeout = msecs_to_jiffies(60000); else adev->compute_timeout = MAX_SCHEDULE_TIMEOUT; @@ -2942,9 +2990,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, INIT_LIST_HEAD(&adev->shadow_list); mutex_init(&adev->shadow_list_lock); - INIT_LIST_HEAD(&adev->ring_lru_list); - spin_lock_init(&adev->ring_lru_list_lock); - INIT_DELAYED_WORK(&adev->delayed_init_work, amdgpu_device_delayed_init_work_handler); INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work, @@ -3002,18 +3047,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10) adev->enable_mes = true; - if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) { - r = amdgpu_discovery_init(adev); - if (r) { - dev_err(adev->dev, "amdgpu_discovery_init failed\n"); - return r; - } - } - - /* early init functions */ - r = amdgpu_device_ip_early_init(adev); - if (r) - return r; + /* detect hw virtualization here */ + amdgpu_detect_virtualization(adev); r = amdgpu_device_get_job_timeout_settings(adev); if (r) { @@ -3021,6 +3056,11 @@ int amdgpu_device_init(struct amdgpu_device *adev, return r; } + /* early init functions */ + r = amdgpu_device_ip_early_init(adev); + if (r) + return r; + /* doorbell bar mapping and doorbell index init*/ amdgpu_device_doorbell_init(adev); @@ -3127,14 +3167,13 @@ fence_driver_init: goto failed; } - DRM_DEBUG("SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n", + dev_info(adev->dev, + "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n", adev->gfx.config.max_shader_engines, adev->gfx.config.max_sh_per_se, adev->gfx.config.max_cu_per_sh, adev->gfx.cu_info.number); - amdgpu_ctx_init_sched(adev); - adev->accel_working = true; amdgpu_vm_check_compute_bug(adev); @@ -3205,6 +3244,24 @@ fence_driver_init: return r; } + r = device_create_file(adev->dev, &dev_attr_product_name); + if (r) { + dev_err(adev->dev, "Could not create product_name"); + return r; + } + + r = 
device_create_file(adev->dev, &dev_attr_product_number); + if (r) { + dev_err(adev->dev, "Could not create product_number"); + return r; + } + + r = device_create_file(adev->dev, &dev_attr_serial_number); + if (r) { + dev_err(adev->dev, "Could not create serial_number"); + return r; + } + if (IS_ENABLED(CONFIG_PERF_EVENTS)) r = amdgpu_pmu_init(adev); if (r) @@ -3287,6 +3344,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev) device_remove_file(adev->dev, &dev_attr_pcie_replay_count); if (adev->ucode_sysfs_en) amdgpu_ucode_sysfs_fini(adev); + device_remove_file(adev->dev, &dev_attr_product_name); + device_remove_file(adev->dev, &dev_attr_product_number); + device_remove_file(adev->dev, &dev_attr_serial_number); if (IS_ENABLED(CONFIG_PERF_EVENTS)) amdgpu_pmu_fini(adev); if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) @@ -3757,6 +3817,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, if (r) return r; + amdgpu_amdkfd_pre_reset(adev); + /* Resume IP prior to SMC */ r = amdgpu_device_ip_reinit_early_sriov(adev); if (r) @@ -4070,7 +4132,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job) { struct list_head device_list, *device_list_handle = NULL; - bool need_full_reset, job_signaled; + bool need_full_reset = false; + bool job_signaled = false; struct amdgpu_hive_info *hive = NULL; struct amdgpu_device *tmp_adev = NULL; int i, r = 0; @@ -4091,16 +4154,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, emergency_restart(); } - need_full_reset = job_signaled = false; - INIT_LIST_HEAD(&device_list); - dev_info(adev->dev, "GPU %s begin!\n", (in_ras_intr && !use_baco) ? "jobs stop":"reset"); - cancel_delayed_work_sync(&adev->delayed_init_work); - - hive = amdgpu_get_xgmi_hive(adev, false); - /* * Here we trylock to avoid chain of resets executing from * either trigger by jobs on different adevs in XGMI hive or jobs on @@ -4108,39 +4164,25 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, * We always reset all schedulers for device and all devices for XGMI * hive so that should take care of them too. */ - + hive = amdgpu_get_xgmi_hive(adev, true); if (hive && !mutex_trylock(&hive->reset_lock)) { DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress", job ? job->base.id : -1, hive->hive_id); + mutex_unlock(&hive->hive_lock); return 0; } - /* Start with adev pre asic reset first for soft reset check.*/ - if (!amdgpu_device_lock_adev(adev, !hive)) { - DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress", - job ? job->base.id : -1); - return 0; - } - - /* Block kfd: SRIOV would do it separately */ - if (!amdgpu_sriov_vf(adev)) - amdgpu_amdkfd_pre_reset(adev); - - /* Build list of devices to reset */ - if (adev->gmc.xgmi.num_physical_nodes > 1) { - if (!hive) { - /*unlock kfd: SRIOV would do it separately */ - if (!amdgpu_sriov_vf(adev)) - amdgpu_amdkfd_post_reset(adev); - amdgpu_device_unlock_adev(adev); + /* + * Build list of devices to reset. + * In case we are in XGMI hive mode, resort the device list + * to put adev in the 1st position. + */ + INIT_LIST_HEAD(&device_list); + if (adev->gmc.xgmi.num_physical_nodes > 1) { + if (!hive) return -ENODEV; - } - - /* - * In case we are in XGMI hive mode device reset is done for all the - * nodes in the hive to retrain all XGMI links and hence the reset - * sequence is executed in loop on all nodes. 
- */ + if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list)) + list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list); device_list_handle = &hive->device_list; } else { list_add_tail(&adev->gmc.xgmi.head, &device_list); @@ -4149,19 +4191,27 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, /* block all schedulers and reset given job's ring */ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { - if (tmp_adev != adev) { - amdgpu_device_lock_adev(tmp_adev, false); - if (!amdgpu_sriov_vf(tmp_adev)) - amdgpu_amdkfd_pre_reset(tmp_adev); + if (!amdgpu_device_lock_adev(tmp_adev, !hive)) { + DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress", + job ? job->base.id : -1); + mutex_unlock(&hive->hive_lock); + return 0; } + amdgpu_ras_set_error_query_ready(tmp_adev, false); + + cancel_delayed_work_sync(&tmp_adev->delayed_init_work); + + if (!amdgpu_sriov_vf(tmp_adev)) + amdgpu_amdkfd_pre_reset(tmp_adev); + /* * Mark these ASICs to be reseted as untracked first * And add them back after reset completed */ amdgpu_unregister_gpu_instance(tmp_adev); - amdgpu_fbdev_set_suspend(adev, 1); + amdgpu_fbdev_set_suspend(tmp_adev, 1); /* disable ras on ALL IPs */ if (!(in_ras_intr && !use_baco) && @@ -4181,7 +4231,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, } } - if (in_ras_intr && !use_baco) goto skip_sched_resume; @@ -4192,30 +4241,14 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, * job->base holds a reference to parent fence */ if (job && job->base.s_fence->parent && - dma_fence_is_signaled(job->base.s_fence->parent)) + dma_fence_is_signaled(job->base.s_fence->parent)) { job_signaled = true; - - if (job_signaled) { dev_info(adev->dev, "Guilty job already signaled, skipping HW reset"); goto skip_hw_reset; } - - /* Guilty job will be freed after this*/ - r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset); - if (r) { - /*TODO Should we stop ?*/ - DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ", - r, adev->ddev->unique); - adev->asic_reset_res = r; - } - retry: /* Rest of adevs pre asic reset from XGMI hive. 
*/ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { - - if (tmp_adev == adev) - continue; - r = amdgpu_device_pre_asic_reset(tmp_adev, NULL, &need_full_reset); @@ -4280,8 +4313,10 @@ skip_sched_resume: amdgpu_device_unlock_adev(tmp_adev); } - if (hive) + if (hive) { mutex_unlock(&hive->reset_lock); + mutex_unlock(&hive->hive_lock); + } if (r) dev_info(adev->dev, "GPU reset end with ret = %d\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 27d8ae19a7a4..43bb22ad8add 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -23,9 +23,7 @@ #include "amdgpu.h" #include "amdgpu_discovery.h" -#include "soc15_common.h" #include "soc15_hw_ip.h" -#include "nbio/nbio_2_3_offset.h" #include "discovery.h" #define mmRCC_CONFIG_MEMSIZE 0xde3 @@ -158,7 +156,7 @@ static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size return !!(amdgpu_discovery_calculate_checksum(data, size) == expected); } -int amdgpu_discovery_init(struct amdgpu_device *adev) +static int amdgpu_discovery_init(struct amdgpu_device *adev) { struct table_info *info; struct binary_header *bhdr; @@ -257,10 +255,12 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) uint8_t num_base_address; int hw_ip; int i, j, k; + int r; - if (!adev->discovery) { - DRM_ERROR("ip discovery uninitialized\n"); - return -EINVAL; + r = amdgpu_discovery_init(adev); + if (r) { + DRM_ERROR("amdgpu_discovery_init failed\n"); + return r; } bhdr = (struct binary_header *)adev->discovery; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h index ba78e15d9b05..d50d597c45ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h @@ -26,7 +26,6 @@ #define DISCOVERY_TMR_SIZE (64 << 10) -int amdgpu_discovery_init(struct amdgpu_device *adev); void amdgpu_discovery_fini(struct amdgpu_device *adev); int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev); int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c new file mode 100644 index 000000000000..815c072ac4da --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c @@ -0,0 +1,185 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include <linux/pci.h> + +#include "amdgpu.h" +#include "amdgpu_i2c.h" +#include "smu_v11_0_i2c.h" +#include "atom.h" + +#define I2C_PRODUCT_INFO_ADDR 0xAC +#define I2C_PRODUCT_INFO_ADDR_SIZE 0x2 +#define I2C_PRODUCT_INFO_OFFSET 0xC0 + +bool is_fru_eeprom_supported(struct amdgpu_device *adev) +{ + /* TODO: Gaming SKUs don't have the FRU EEPROM. + * Use this hack to address hangs on modprobe on gaming SKUs + * until a proper solution can be implemented by only supporting + * the explicit chip IDs for VG20 Server cards + * + * TODO: Add list of supported Arcturus DIDs once confirmed + */ + if ((adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a0) || + (adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a1) || + (adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a4)) + return true; + return false; +} + +int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr, + unsigned char *buff) +{ + int ret, size; + struct i2c_msg msg = { + .addr = I2C_PRODUCT_INFO_ADDR, + .flags = I2C_M_RD, + .buf = buff, + }; + buff[0] = 0; + buff[1] = addrptr; + msg.len = I2C_PRODUCT_INFO_ADDR_SIZE + 1; + ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1); + + if (ret < 1) { + DRM_WARN("FRU: Failed to get size field"); + return ret; + } + + /* The size returned by the i2c requires subtraction of 0xC0 since the + * size apparently always reports as 0xC0+actual size. + */ + size = buff[2] - I2C_PRODUCT_INFO_OFFSET; + /* Add 1 since address field was 1 byte */ + buff[1] = addrptr + 1; + + msg.len = I2C_PRODUCT_INFO_ADDR_SIZE + size; + ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1); + + if (ret < 1) { + DRM_WARN("FRU: Failed to get data field"); + return ret; + } + + return size; +} + +int amdgpu_fru_get_product_info(struct amdgpu_device *adev) +{ + unsigned char buff[34]; + int addrptr = 0, size = 0; + + if (!is_fru_eeprom_supported(adev)) + return 0; + + /* If algo exists, it means that the i2c_adapter's initialized */ + if (!adev->pm.smu_i2c.algo) { + DRM_WARN("Cannot access FRU, EEPROM accessor not initialized"); + return 0; + } + + /* There's a lot of repetition here. This is due to the FRU having + * variable-length fields. To get the information, we have to find the + * size of each field, and then keep reading along and reading along + * until we get all of the data that we want. We use addrptr to track + * the address as we go + */ + + /* The first fields are all of size 1-byte, from 0-7 are offsets that + * contain information that isn't useful to us. + * Bytes 8-a are all 1-byte and refer to the size of the entire struct, + * and the language field, so just start from 0xb, manufacturer size + */ + addrptr = 0xb; + size = amdgpu_fru_read_eeprom(adev, addrptr, buff); + if (size < 1) { + DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size); + return size; + } + + /* Increment the addrptr by the size of the field, and 1 due to the + * size field being 1 byte. This pattern continues below. + */ + addrptr += size + 1; + size = amdgpu_fru_read_eeprom(adev, addrptr, buff); + if (size < 1) { + DRM_ERROR("Failed to read FRU product name, ret:%d", size); + return size; + } + + /* Product name should only be 32 characters. Any more, + * and something could be wrong. 
Cap it at 32 to be safe + */ + if (size > 32) { + DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake"); + size = 32; + } + /* Start at 2 due to buff using fields 0 and 1 for the address */ + memcpy(adev->product_name, &buff[2], size); + adev->product_name[size] = '\0'; + + addrptr += size + 1; + size = amdgpu_fru_read_eeprom(adev, addrptr, buff); + if (size < 1) { + DRM_ERROR("Failed to read FRU product number, ret:%d", size); + return size; + } + + /* Product number should only be 16 characters. Any more, + * and something could be wrong. Cap it at 16 to be safe + */ + if (size > 16) { + DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake"); + size = 16; + } + memcpy(adev->product_number, &buff[2], size); + adev->product_number[size] = '\0'; + + addrptr += size + 1; + size = amdgpu_fru_read_eeprom(adev, addrptr, buff); + + if (size < 1) { + DRM_ERROR("Failed to read FRU product version, ret:%d", size); + return size; + } + + addrptr += size + 1; + size = amdgpu_fru_read_eeprom(adev, addrptr, buff); + + if (size < 1) { + DRM_ERROR("Failed to read FRU serial number, ret:%d", size); + return size; + } + + /* Serial number should only be 16 characters. Any more, + * and something could be wrong. Cap it at 16 to be safe + */ + if (size > 16) { + DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake"); + size = 16; + } + memcpy(adev->serial, &buff[2], size); + adev->serial[size] = '\0'; + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h new file mode 100644 index 000000000000..968115c97e33 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h @@ -0,0 +1,29 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __AMDGPU_PRODINFO_H__ +#define __AMDGPU_PRODINFO_H__ + +int amdgpu_fru_get_product_info(struct amdgpu_device *adev); + +#endif // __AMDGPU_PRODINFO_H__ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index e42608115c99..245aec521388 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -162,16 +162,17 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, struct amdgpu_bo_list_entry vm_pd; struct list_head list, duplicates; + struct dma_fence *fence = NULL; struct ttm_validate_buffer tv; struct ww_acquire_ctx ticket; struct amdgpu_bo_va *bo_va; - int r; + long r; INIT_LIST_HEAD(&list); INIT_LIST_HEAD(&duplicates); tv.bo = &bo->tbo; - tv.num_shared = 1; + tv.num_shared = 2; list_add(&tv.head, &list); amdgpu_vm_get_pd_bo(vm, &list, &vm_pd); @@ -179,28 +180,34 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates); if (r) { dev_err(adev->dev, "leaking bo va because " - "we fail to reserve bo (%d)\n", r); + "we fail to reserve bo (%ld)\n", r); return; } bo_va = amdgpu_vm_bo_find(vm, bo); - if (bo_va && --bo_va->ref_count == 0) { - amdgpu_vm_bo_rmv(adev, bo_va); - - if (amdgpu_vm_ready(vm)) { - struct dma_fence *fence = NULL; + if (!bo_va || --bo_va->ref_count) + goto out_unlock; - r = amdgpu_vm_clear_freed(adev, vm, &fence); - if (unlikely(r)) { - dev_err(adev->dev, "failed to clear page " - "tables on GEM object close (%d)\n", r); - } + amdgpu_vm_bo_rmv(adev, bo_va); + if (!amdgpu_vm_ready(vm)) + goto out_unlock; - if (fence) { - amdgpu_bo_fence(bo, fence, true); - dma_fence_put(fence); - } - } + fence = dma_resv_get_excl(bo->tbo.base.resv); + if (fence) { + amdgpu_bo_fence(bo, fence, true); + fence = NULL; } + + r = amdgpu_vm_clear_freed(adev, vm, &fence); + if (r || !fence) + goto out_unlock; + + amdgpu_bo_fence(bo, fence, true); + dma_fence_put(fence); + +out_unlock: + if (unlikely(r < 0)) + dev_err(adev->dev, "failed to clear page " + "tables on GEM object close (%ld)\n", r); ttm_eu_backoff_reservation(&ticket, &list); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 6b9c9193cdfa..a721b0e0ff69 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -304,10 +304,6 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, spin_lock_init(&kiq->ring_lock); - r = amdgpu_device_wb_get(adev, &kiq->reg_val_offs); - if (r) - return r; - ring->adev = NULL; ring->ring_obj = NULL; ring->use_doorbell = true; @@ -318,9 +314,11 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, return r; ring->eop_gpu_addr = kiq->eop_gpu_addr; + ring->no_scheduler = true; sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue); r = amdgpu_ring_init(adev, ring, 1024, - irq, AMDGPU_CP_KIQ_IRQ_DRIVER0); + irq, AMDGPU_CP_KIQ_IRQ_DRIVER0, + AMDGPU_RING_PRIO_DEFAULT); if (r) dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r); @@ -329,7 +327,6 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring) { - amdgpu_device_wb_free(ring->adev, ring->adev->gfx.kiq.reg_val_offs); amdgpu_ring_fini(ring); } @@ -670,15 +667,20 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) { signed long r, cnt = 0; unsigned long flags; - uint32_t seq; + uint32_t seq, reg_val_offs = 0, value = 0; struct amdgpu_kiq *kiq = &adev->gfx.kiq; struct amdgpu_ring *ring 
= &kiq->ring; BUG_ON(!ring->funcs->emit_rreg); spin_lock_irqsave(&kiq->ring_lock, flags); + if (amdgpu_device_wb_get(adev, &reg_val_offs)) { + spin_unlock_irqrestore(&kiq->ring_lock, flags); + pr_err("critical bug! too many kiq readers\n"); + goto failed_kiq_read; + } amdgpu_ring_alloc(ring, 32); - amdgpu_ring_emit_rreg(ring, reg); + amdgpu_ring_emit_rreg(ring, reg, reg_val_offs); amdgpu_fence_emit_polling(ring, &seq); amdgpu_ring_commit(ring); spin_unlock_irqrestore(&kiq->ring_lock, flags); @@ -705,7 +707,10 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) if (cnt > MAX_KIQ_REG_TRY) goto failed_kiq_read; - return adev->wb.wb[kiq->reg_val_offs]; + mb(); + value = adev->wb.wb[reg_val_offs]; + amdgpu_device_wb_free(adev, reg_val_offs); + return value; failed_kiq_read: pr_err("failed to read reg:%x\n", reg); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 5825692d07e4..ee698f0246d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -103,7 +103,6 @@ struct amdgpu_kiq { struct amdgpu_ring ring; struct amdgpu_irq_src irq; const struct kiq_pm4_funcs *pmf; - uint32_t reg_val_offs; }; /* @@ -286,13 +285,8 @@ struct amdgpu_gfx { bool me_fw_write_wait; bool cp_fw_write_wait; struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; - struct drm_gpu_scheduler *gfx_sched[AMDGPU_MAX_GFX_RINGS]; - uint32_t num_gfx_sched; unsigned num_gfx_rings; struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; - struct drm_gpu_scheduler **compute_prio_sched[AMDGPU_GFX_PIPE_PRIO_MAX]; - struct drm_gpu_scheduler *compute_sched[AMDGPU_MAX_COMPUTE_RINGS]; - uint32_t num_compute_sched[AMDGPU_GFX_PIPE_PRIO_MAX]; unsigned num_compute_rings; struct amdgpu_irq_src eop_irq; struct amdgpu_irq_src priv_reg_irq; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 5884ab590486..a8ca808f453b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -136,8 +136,8 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo) /** * amdgpu_gmc_vram_location - try to find VRAM location * - * @adev: amdgpu device structure holding all necessary informations - * @mc: memory controller structure holding memory informations + * @adev: amdgpu device structure holding all necessary information + * @mc: memory controller structure holding memory information * @base: base address at which to put VRAM * * Function will try to place VRAM at base address provided @@ -165,8 +165,8 @@ void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc, /** * amdgpu_gmc_gart_location - try to find GART location * - * @adev: amdgpu device structure holding all necessary informations - * @mc: memory controller structure holding memory informations + * @adev: amdgpu device structure holding all necessary information + * @mc: memory controller structure holding memory information * * Function will try to place GART before or after VRAM.
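 * (In short: the helper compares the free MC address space below and
 * above VRAM, placing GART at address 0 when the gap below VRAM is the
 * better fit, and at the top of the address range otherwise.)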
* @@ -207,8 +207,8 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) /** * amdgpu_gmc_agp_location - try to find AGP location - * @adev: amdgpu device structure holding all necessary informations - * @mc: memory controller structure holding memory informations + * @adev: amdgpu device structure holding all necessary information + * @mc: memory controller structure holding memory information * * Function will try to find a place for the AGP BAR in the MC address * space. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index ccbd7acfc4cb..1adaac972190 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -61,12 +61,14 @@ * Returns 0 on success, error on failure. */ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, - unsigned size, struct amdgpu_ib *ib) + unsigned size, + enum amdgpu_ib_pool_type pool_type, + struct amdgpu_ib *ib) { int r; if (size) { - r = amdgpu_sa_bo_new(&adev->ring_tmp_bo, + r = amdgpu_sa_bo_new(&adev->ring_tmp_bo[pool_type], &ib->sa_bo, size, 256); if (r) { dev_err(adev->dev, "failed to get a new IB (%d)\n", r); @@ -280,19 +282,27 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, */ int amdgpu_ib_pool_init(struct amdgpu_device *adev) { - int r; + int r, i; + unsigned size; if (adev->ib_pool_ready) { return 0; } - r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo, - AMDGPU_IB_POOL_SIZE*64*1024, - AMDGPU_GPU_PAGE_SIZE, - AMDGPU_GEM_DOMAIN_GTT); - if (r) { - return r; + for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) { + if (i == AMDGPU_IB_POOL_DIRECT) + size = PAGE_SIZE * 2; + else + size = AMDGPU_IB_POOL_SIZE*64*1024; + r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo[i], + size, + AMDGPU_GPU_PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT); + if (r) { + for (i--; i >= 0; i--) + amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo[i]); + return r; + } } - adev->ib_pool_ready = true; return 0; @@ -308,8 +318,11 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev) */ void amdgpu_ib_pool_fini(struct amdgpu_device *adev) { + int i; + if (adev->ib_pool_ready) { - amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo); + for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) + amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo[i]); adev->ib_pool_ready = false; } } @@ -406,7 +419,12 @@ static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = dev->dev_private; - amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m); + seq_printf(m, "-------------------- NORMAL -------------------- \n"); + amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo[AMDGPU_IB_POOL_NORMAL], m); + seq_printf(m, "---------------------- VM ---------------------- \n"); + amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo[AMDGPU_IB_POOL_VM], m); + seq_printf(m, "-------------------- DIRECT -------------------- \n"); + amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo[AMDGPU_IB_POOL_DIRECT], m); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 5ed4227f304b..0cc4c67f95f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -260,7 +260,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev) nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags); if (nvec > 0) { adev->irq.msi_enabled = true; - dev_dbg(adev->dev, "amdgpu: using MSI/MSI-X.\n"); + dev_dbg(adev->dev, "using MSI/MSI-X.\n"); } } diff --git
a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 4981e443a884..47207188c569 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -33,6 +33,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job) struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); struct amdgpu_job *job = to_amdgpu_job(s_job); struct amdgpu_task_info ti; + struct amdgpu_device *adev = ring->adev; memset(&ti, 0, sizeof(struct amdgpu_task_info)); @@ -49,10 +50,13 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job) DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n", ti.process_name, ti.tgid, ti.task_name, ti.pid); - if (amdgpu_device_should_recover_gpu(ring->adev)) + if (amdgpu_device_should_recover_gpu(ring->adev)) { amdgpu_device_gpu_recover(ring->adev, job); - else + } else { drm_sched_suspend_timeout(&ring->sched); + if (amdgpu_sriov_vf(adev)) + adev->virt.tdr_debug = true; + } } int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, @@ -87,7 +91,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, } int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, - struct amdgpu_job **job) + enum amdgpu_ib_pool_type pool_type, + struct amdgpu_job **job) { int r; @@ -95,7 +100,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, if (r) return r; - r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]); + r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]); if (r) kfree(*job); @@ -140,7 +145,6 @@ void amdgpu_job_free(struct amdgpu_job *job) int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, void *owner, struct dma_fence **f) { - enum drm_sched_priority priority; int r; if (!f) @@ -152,7 +156,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, *f = dma_fence_get(&job->base.s_fence->finished); amdgpu_job_free_resources(job); - priority = job->base.s_priority; drm_sched_entity_push_job(&job->base, entity); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index 3f7b8433d179..d4262069d501 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -38,6 +38,7 @@ #define AMDGPU_JOB_GET_VMID(job) ((job) ? 
(job)->vmid : 0) struct amdgpu_fence; +enum amdgpu_ib_pool_type; struct amdgpu_job { struct drm_sched_job base; @@ -67,8 +68,7 @@ struct amdgpu_job { int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, struct amdgpu_job **job, struct amdgpu_vm *vm); int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, - struct amdgpu_job **job); - + enum amdgpu_ib_pool_type pool, struct amdgpu_job **job); void amdgpu_job_free_resources(struct amdgpu_job *job); void amdgpu_job_free(struct amdgpu_job *job); int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index 5727f00afc8e..d31d65e6b039 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -144,7 +144,8 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle, const unsigned ib_size_dw = 16; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h index bd9ef9cc86de..5131a0a1bc8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -43,8 +43,6 @@ struct amdgpu_jpeg { uint8_t num_jpeg_inst; struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES]; struct amdgpu_jpeg_reg internal; - struct drm_gpu_scheduler *jpeg_sched[AMDGPU_MAX_JPEG_INSTANCES]; - uint32_t num_jpeg_sched; unsigned harvest_config; struct delayed_work idle_work; enum amd_powergating_state cur_state; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index fd1dc3236eca..ea7e72ecaefa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -183,12 +183,10 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) /* Call ACPI methods: require modeset init * but failure is not fatal */ - if (!r) { - acpi_status = amdgpu_acpi_init(adev); - if (acpi_status) - dev_dbg(&dev->pdev->dev, - "Error during ACPI methods call\n"); - } + + acpi_status = amdgpu_acpi_init(adev); + if (acpi_status) + dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n"); if (adev->runpm) { dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index 919bd566ba3c..edaac242ff85 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -77,7 +77,6 @@ struct amdgpu_nbio_funcs { u32 *flags); void (*ih_control)(struct amdgpu_device *adev); void (*init_registers)(struct amdgpu_device *adev); - void (*detect_hw_virt)(struct amdgpu_device *adev); void (*remap_hdp_registers)(struct amdgpu_device *adev); void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev); void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index abe94a55ecad..49e2e43f2e4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -444,8 +444,11 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev, ret = smu_get_power_num_states(&adev->smu, &data); if (ret) return ret; - } else if (adev->powerplay.pp_funcs->get_pp_num_states) + } 
else if (adev->powerplay.pp_funcs->get_pp_num_states) { amdgpu_dpm_get_pp_num_states(adev, &data); + } else { + memset(&data, 0, sizeof(data)); + } pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index deaa26808841..730f98aab11b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -37,11 +37,11 @@ #include "amdgpu_ras.h" -static void psp_set_funcs(struct amdgpu_device *adev); - static int psp_sysfs_init(struct amdgpu_device *adev); static void psp_sysfs_fini(struct amdgpu_device *adev); +static int psp_load_smu_fw(struct psp_context *psp); + /* * Due to DF Cstate management centralized to PMFW, the firmware * loading sequence will be updated as below: @@ -80,8 +80,6 @@ static int psp_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct psp_context *psp = &adev->psp; - psp_set_funcs(adev); - switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: @@ -201,6 +199,7 @@ psp_cmd_submit_buf(struct psp_context *psp, int index; int timeout = 2000; bool ras_intr = false; + bool skip_unsupport = false; mutex_lock(&psp->mutex); @@ -232,6 +231,9 @@ psp_cmd_submit_buf(struct psp_context *psp, amdgpu_asic_invalidate_hdp(psp->adev, NULL); } + /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command in SRIOV */ + skip_unsupport = (psp->cmd_buf_mem->resp.status == 0xffff000a) && amdgpu_sriov_vf(psp->adev); + /* In some cases, psp response status is not 0 even there is no * problem while the command is submitted. Some version of PSP FW * doesn't write 0 to that field. @@ -239,7 +241,7 @@ psp_cmd_submit_buf(struct psp_context *psp, * during psp initialization to avoid breaking hw_init and it doesn't * return -EINVAL. */ - if ((psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) { + if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) { if (ucode) DRM_WARN("failed to load ucode id (%d) ", ucode->ucode_id); @@ -268,7 +270,7 @@ static void psp_prep_tmr_cmd_buf(struct psp_context *psp, struct psp_gfx_cmd_resp *cmd, uint64_t tmr_mc, uint32_t size) { - if (psp_support_vmr_ring(psp)) + if (amdgpu_sriov_vf(psp->adev)) cmd->cmd_id = GFX_CMD_ID_SETUP_VMR; else cmd->cmd_id = GFX_CMD_ID_SETUP_TMR; @@ -884,6 +886,7 @@ static int psp_hdcp_load(struct psp_context *psp) if (!ret) { psp->hdcp_context.hdcp_initialized = true; psp->hdcp_context.session_id = cmd->resp.session_id; + mutex_init(&psp->hdcp_context.mutex); } kfree(cmd); @@ -1029,6 +1032,7 @@ static int psp_dtm_load(struct psp_context *psp) if (!ret) { psp->dtm_context.dtm_initialized = true; psp->dtm_context.session_id = cmd->resp.session_id; + mutex_init(&psp->dtm_context.mutex); } kfree(cmd); @@ -1169,16 +1173,20 @@ static int psp_hw_start(struct psp_context *psp) } /* - * For those ASICs with DF Cstate management centralized + * For ASICs with DF Cstate management centralized * to PMFW, TMR setup should be performed after PMFW * loaded and before other non-psp firmware loaded. 
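 * (Concretely, the hunk below enforces that order: when
 * pmfw_centralized_cstate_management is set, psp_load_smu_fw() runs
 * first, and psp_tmr_load() now runs unconditionally afterwards in
 * either case.)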
*/ - if (!psp->pmfw_centralized_cstate_management) { - ret = psp_tmr_load(psp); - if (ret) { - DRM_ERROR("PSP load tmr failed!\n"); + if (psp->pmfw_centralized_cstate_management) { + ret = psp_load_smu_fw(psp); + if (ret) return ret; - } + } + + ret = psp_tmr_load(psp); + if (ret) { + DRM_ERROR("PSP load tmr failed!\n"); + return ret; } return 0; @@ -1355,7 +1363,7 @@ static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode, } static int psp_execute_np_fw_load(struct psp_context *psp, - struct amdgpu_firmware_info *ucode) + struct amdgpu_firmware_info *ucode) { int ret = 0; @@ -1369,64 +1377,95 @@ static int psp_execute_np_fw_load(struct psp_context *psp, return ret; } +static int psp_load_smu_fw(struct psp_context *psp) +{ + int ret; + struct amdgpu_device* adev = psp->adev; + struct amdgpu_firmware_info *ucode = + &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; + + if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) + return 0; + + + if (adev->in_gpu_reset) { + ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); + if (ret) { + DRM_WARN("Failed to set MP1 state prepare for reload\n"); + } + } + + ret = psp_execute_np_fw_load(psp, ucode); + + if (ret) + DRM_ERROR("PSP load smu failed!\n"); + + return ret; +} + +static bool fw_load_skip_check(struct psp_context *psp, + struct amdgpu_firmware_info *ucode) +{ + if (!ucode->fw) + return true; + + if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && + (psp_smu_reload_quirk(psp) || + psp->autoload_supported || + psp->pmfw_centralized_cstate_management)) + return true; + + if (amdgpu_sriov_vf(psp->adev) && + (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0 + || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 + || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 + || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3 + || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4 + || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5 + || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6 + || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7 + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM + || ucode->ucode_id == AMDGPU_UCODE_ID_SMC)) + /*skip ucode loading in SRIOV VF */ + return true; + + if (psp->autoload_supported && + (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || + ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) + /* skip mec JT when autoload is enabled */ + return true; + + return false; +} + static int psp_np_fw_load(struct psp_context *psp) { int i, ret; struct amdgpu_firmware_info *ucode; struct amdgpu_device* adev = psp->adev; - if (psp->autoload_supported || - psp->pmfw_centralized_cstate_management) { - ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; - if (!ucode->fw || amdgpu_sriov_vf(adev)) - goto out; - - ret = psp_execute_np_fw_load(psp, ucode); + if (psp->autoload_supported && + !psp->pmfw_centralized_cstate_management) { + ret = psp_load_smu_fw(psp); if (ret) return ret; } - if (psp->pmfw_centralized_cstate_management) { - ret = psp_tmr_load(psp); - if (ret) { - DRM_ERROR("PSP load tmr failed!\n"); - return ret; - } - } - -out: for (i = 0; i < adev->firmware.max_ucodes; i++) { ucode = &adev->firmware.ucode[i]; - if (!ucode->fw) - continue; if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && - (psp_smu_reload_quirk(psp) || - psp->autoload_supported || - psp->pmfw_centralized_cstate_management)) - continue; - - if (amdgpu_sriov_vf(adev) && - (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0 - || ucode->ucode_id == 
AMDGPU_UCODE_ID_SDMA1 - || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 - || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3 - || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4 - || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5 - || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6 - || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7 - || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G - || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL - || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM - || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM - || ucode->ucode_id == AMDGPU_UCODE_ID_SMC)) - /*skip ucode loading in SRIOV VF */ + !fw_load_skip_check(psp, ucode)) { + ret = psp_load_smu_fw(psp); + if (ret) + return ret; continue; + } - if (psp->autoload_supported && - (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || - ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) - /* skip mec JT when autoload is enabled */ + if (fw_load_skip_check(psp, ucode)) continue; psp_print_fw_hdr(psp, ucode); @@ -1444,11 +1483,6 @@ out: return ret; } } -#if 0 - /* check if firmware loaded sucessfully */ - if (!amdgpu_psp_check_fw_loading_status(adev, i)) - return -EINVAL; -#endif } return 0; @@ -1806,19 +1840,110 @@ int psp_ring_cmd_submit(struct psp_context *psp, return 0; } -static bool psp_check_fw_loading_status(struct amdgpu_device *adev, - enum AMDGPU_UCODE_ID ucode_type) +int psp_init_asd_microcode(struct psp_context *psp, + const char *chip_name) +{ + struct amdgpu_device *adev = psp->adev; + char fw_name[30]; + const struct psp_firmware_header_v1_0 *asd_hdr; + int err = 0; + + if (!chip_name) { + dev_err(adev->dev, "invalid chip name for asd microcode\n"); + return -EINVAL; + } + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); + err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev); + if (err) + goto out; + + err = amdgpu_ucode_validate(adev->psp.asd_fw); + if (err) + goto out; + + asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; + adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version); + adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version); + adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes); + adev->psp.asd_start_addr = (uint8_t *)asd_hdr + + le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); + return 0; +out: + dev_err(adev->dev, "fail to initialize asd microcode\n"); + release_firmware(adev->psp.asd_fw); + adev->psp.asd_fw = NULL; + return err; +} + +int psp_init_sos_microcode(struct psp_context *psp, + const char *chip_name) { - struct amdgpu_firmware_info *ucode = NULL; + struct amdgpu_device *adev = psp->adev; + char fw_name[30]; + const struct psp_firmware_header_v1_0 *sos_hdr; + const struct psp_firmware_header_v1_1 *sos_hdr_v1_1; + const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; + int err = 0; + + if (!chip_name) { + dev_err(adev->dev, "invalid chip name for sos microcode\n"); + return -EINVAL; + } - if (!adev->firmware.fw_size) - return false; + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name); + err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev); + if (err) + goto out; + + err = amdgpu_ucode_validate(adev->psp.sos_fw); + if (err) + goto out; + + sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; + amdgpu_ucode_print_psp_hdr(&sos_hdr->header); + + switch (sos_hdr->header.header_version_major) { + case 1: + adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version); + adev->psp.sos_feature_version = 
le32_to_cpu(sos_hdr->ucode_feature_version); + adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes); + adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes); + adev->psp.sys_start_addr = (uint8_t *)sos_hdr + + le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); + adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr + + le32_to_cpu(sos_hdr->sos_offset_bytes); + if (sos_hdr->header.header_version_minor == 1) { + sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; + adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes); + adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr + + le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes); + adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes); + adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr + + le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes); + } + if (sos_hdr->header.header_version_minor == 2) { + sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; + adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes); + adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr + + le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes); + } + break; + default: + dev_err(adev->dev, + "unsupported psp sos firmware\n"); + err = -EINVAL; + goto out; + } - ucode = &adev->firmware.ucode[ucode_type]; - if (!ucode->fw || !ucode->ucode_size) - return false; + return 0; +out: + dev_err(adev->dev, + "failed to init sos firmware\n"); + release_firmware(adev->psp.sos_fw); + adev->psp.sos_fw = NULL; - return psp_compare_sram_data(&adev->psp, ucode, ucode_type); + return err; } static int psp_set_clockgating_state(void *handle, @@ -1957,16 +2082,6 @@ static void psp_sysfs_fini(struct amdgpu_device *adev) device_remove_file(adev->dev, &dev_attr_usbc_pd_fw); } -static const struct amdgpu_psp_funcs psp_funcs = { - .check_fw_loading_status = psp_check_fw_loading_status, -}; - -static void psp_set_funcs(struct amdgpu_device *adev) -{ - if (NULL == adev->firmware.funcs) - adev->firmware.funcs = &psp_funcs; -} - const struct amdgpu_ip_block_version psp_v3_1_ip_block = { .type = AMD_IP_BLOCK_TYPE_PSP, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 297435c0c7c1..7fcd63d5432c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -93,9 +93,6 @@ struct psp_funcs enum psp_ring_type ring_type); int (*ring_destroy)(struct psp_context *psp, enum psp_ring_type ring_type); - bool (*compare_sram_data)(struct psp_context *psp, - struct amdgpu_firmware_info *ucode, - enum AMDGPU_UCODE_ID ucode_type); bool (*smu_reload_quirk)(struct psp_context *psp); int (*mode1_reset)(struct psp_context *psp); int (*xgmi_get_node_id)(struct psp_context *psp, uint64_t *node_id); @@ -104,7 +101,6 @@ struct psp_funcs struct psp_xgmi_topology_info *topology); int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices, struct psp_xgmi_topology_info *topology); - bool (*support_vmr_ring)(struct psp_context *psp); int (*ras_trigger_error)(struct psp_context *psp, struct ta_ras_trigger_error_input *info); int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr); @@ -161,6 +157,7 @@ struct psp_hdcp_context { struct amdgpu_bo *hdcp_shared_bo; uint64_t hdcp_shared_mc_addr; void *hdcp_shared_buf; + struct mutex mutex; }; struct psp_dtm_context { @@ -169,6 +166,7 @@ struct psp_dtm_context { struct amdgpu_bo *dtm_shared_bo; uint64_t dtm_shared_mc_addr; void 
*dtm_shared_buf; + struct mutex mutex; }; #define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942 @@ -306,8 +304,6 @@ struct amdgpu_psp_funcs { #define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type)) #define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type)) #define psp_ring_destroy(psp, type) ((psp)->funcs->ring_destroy((psp), (type))) -#define psp_compare_sram_data(psp, ucode, type) \ - (psp)->funcs->compare_sram_data((psp), (ucode), (type)) #define psp_init_microcode(psp) \ ((psp)->funcs->init_microcode ? (psp)->funcs->init_microcode((psp)) : 0) #define psp_bootloader_load_kdb(psp) \ @@ -318,8 +314,6 @@ struct amdgpu_psp_funcs { ((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0) #define psp_smu_reload_quirk(psp) \ ((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false) -#define psp_support_vmr_ring(psp) \ - ((psp)->funcs->support_vmr_ring ? (psp)->funcs->support_vmr_ring((psp)) : false) #define psp_mode1_reset(psp) \ ((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false) #define psp_xgmi_get_node_id(psp, node_id) \ @@ -341,8 +335,6 @@ struct amdgpu_psp_funcs { #define psp_mem_training(psp, ops) \ ((psp)->funcs->mem_training ? (psp)->funcs->mem_training((psp), (ops)) : 0) -#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i)) - #define psp_ras_trigger_error(psp, info) \ ((psp)->funcs->ras_trigger_error ? \ (psp)->funcs->ras_trigger_error((psp), (info)) : -EINVAL) @@ -393,4 +385,8 @@ int psp_ring_cmd_submit(struct psp_context *psp, uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, int index); +int psp_init_asd_microcode(struct psp_context *psp, + const char *chip_name); +int psp_init_sos_microcode(struct psp_context *psp, + const char *chip_name); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index ab379b44679c..8b14aee370c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -80,6 +80,20 @@ atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0); static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, uint64_t addr); +void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready) +{ + if (adev && amdgpu_ras_get_context(adev)) + amdgpu_ras_get_context(adev)->error_query_ready = ready; +} + +bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev) +{ + if (adev && amdgpu_ras_get_context(adev)) + return amdgpu_ras_get_context(adev)->error_query_ready; + + return false; +} + static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -281,8 +295,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * struct ras_debug_if data; int ret = 0; - if (amdgpu_ras_intr_triggered()) { - DRM_WARN("RAS WARN: error injection currently inaccessible\n"); + if (!amdgpu_ras_get_error_query_ready(adev)) { + dev_warn(adev->dev, "RAS WARN: error injection " + "currently inaccessible\n"); return size; } @@ -310,7 +325,8 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * /* umc ce/ue error injection for a bad page is not allowed */ if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) && amdgpu_ras_check_bad_page(adev, data.inject.address)) { - DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n", + dev_warn(adev->dev, "RAS WARN: 0x%llx has been marked " + "as bad before error injection!\n", data.inject.address); 
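 /* The target address is already on the bad-page list, so the
  * injection request is simply dropped here after the warning. */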
break; } @@ -399,7 +415,7 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev, .head = obj->head, }; - if (amdgpu_ras_intr_triggered()) + if (!amdgpu_ras_get_error_query_ready(obj->adev)) return snprintf(buf, PAGE_SIZE, "Query currently inaccessible\n"); @@ -576,7 +592,8 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev, if (!amdgpu_ras_intr_triggered()) { ret = psp_ras_enable_features(&adev->psp, &info, enable); if (ret) { - DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n", + dev_err(adev->dev, "RAS ERROR: %s %s feature " + "failed ret %d\n", enable ? "enable":"disable", ras_block_str(head->block), ret); @@ -618,7 +635,8 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev, if (ret == -EINVAL) { ret = __amdgpu_ras_feature_enable(adev, head, 1); if (!ret) - DRM_INFO("RAS INFO: %s setup object\n", + dev_info(adev->dev, + "RAS INFO: %s setup object\n", ras_block_str(head->block)); } } else { @@ -744,12 +762,17 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, info->ce_count = obj->err_data.ce_count; if (err_data.ce_count) { - dev_info(adev->dev, "%ld correctable errors detected in %s block\n", - obj->err_data.ce_count, ras_block_str(info->head.block)); + dev_info(adev->dev, "%ld correctable hardware errors " + "detected in %s block, no user " + "action is needed.\n", + obj->err_data.ce_count, + ras_block_str(info->head.block)); } if (err_data.ue_count) { - dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n", - obj->err_data.ue_count, ras_block_str(info->head.block)); + dev_info(adev->dev, "%ld uncorrectable hardware errors " + "detected in %s block\n", + obj->err_data.ue_count, + ras_block_str(info->head.block)); } return 0; @@ -793,13 +816,13 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, ret = psp_ras_trigger_error(&adev->psp, &block_info); break; default: - DRM_INFO("%s error injection is not supported yet\n", + dev_info(adev->dev, "%s error injection is not supported yet\n", ras_block_str(info->head.block)); ret = -EINVAL; } if (ret) - DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n", + dev_err(adev->dev, "RAS ERROR: inject %s error failed ret %d\n", ras_block_str(info->head.block), ret); @@ -1430,9 +1453,10 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, false); /* Build list of devices to query RAS related errors */ - if (hive && adev->gmc.xgmi.num_physical_nodes > 1) { + if (hive && adev->gmc.xgmi.num_physical_nodes > 1) device_list_handle = &hive->device_list; - } else { + else { + INIT_LIST_HEAD(&device_list); list_add_tail(&adev->gmc.xgmi.head, &device_list); device_list_handle = &device_list; } @@ -1535,7 +1559,7 @@ static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) &data->bps[control->num_recs], true, save_count)) { - DRM_ERROR("Failed to save EEPROM table data!"); + dev_err(adev->dev, "Failed to save EEPROM table data!"); return -EIO; } @@ -1563,7 +1587,7 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) if (amdgpu_ras_eeprom_process_recods(control, bps, false, control->num_recs)) { - DRM_ERROR("Failed to load EEPROM table records!"); + dev_err(adev->dev, "Failed to load EEPROM table records!"); ret = -EIO; goto out; } @@ -1637,7 +1661,8 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev) AMDGPU_GPU_PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &bo, NULL)) - DRM_WARN("RAS WARN: reserve vram for retired page %llx fail\n", bp); + dev_warn(adev->dev, "RAS WARN: reserve vram for " + "retired page 
%llx fail\n", bp); + data->bps_bo[i] = bo; data->last_reserved = i + 1; @@ -1725,7 +1750,7 @@ free: kfree(*data); con->eh_data = NULL; out: - DRM_WARN("Failed to initialize ras recovery!\n"); + dev_warn(adev->dev, "Failed to initialize ras recovery!\n"); return ret; } @@ -1787,18 +1812,18 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev, return; if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { - DRM_INFO("HBM ECC is active.\n"); + dev_info(adev->dev, "HBM ECC is active.\n"); *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC | 1 << AMDGPU_RAS_BLOCK__DF); } else - DRM_INFO("HBM ECC is not presented.\n"); + dev_info(adev->dev, "HBM ECC is not present.\n"); if (amdgpu_atomfirmware_sram_ecc_supported(adev)) { - DRM_INFO("SRAM ECC is active.\n"); + dev_info(adev->dev, "SRAM ECC is active.\n"); *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC | 1 << AMDGPU_RAS_BLOCK__DF); } else - DRM_INFO("SRAM ECC is not presented.\n"); + dev_info(adev->dev, "SRAM ECC is not present.\n"); /* hw_supported needs to be aligned with RAS block mask. */ *hw_supported &= AMDGPU_RAS_BLOCK_MASK; @@ -1855,7 +1880,7 @@ int amdgpu_ras_init(struct amdgpu_device *adev) if (amdgpu_ras_fs_init(adev)) goto fs_out; - DRM_INFO("RAS INFO: ras initialized successfully, " + dev_info(adev->dev, "RAS INFO: ras initialized successfully, " "hardware ability[%x] ras_mask[%x]\n", con->hw_supported, con->supported); return 0; @@ -2037,7 +2062,8 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) return; if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { - DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n"); + dev_info(adev->dev, "uncorrectable hardware error " "(ERREVENT_ATHUB_INTERRUPT) detected!\n"); amdgpu_ras_reset_gpu(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 55c3eceb390d..e7df5d8429f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -334,6 +334,8 @@ struct amdgpu_ras { uint32_t flags; bool reboot; struct amdgpu_ras_eeprom_control eeprom_control; + + bool error_query_ready; }; struct ras_fs_data { @@ -629,4 +631,6 @@ static inline void amdgpu_ras_intr_cleared(void) void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev); +void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index a7e1d0425ed0..13ea8ebc421c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -162,11 +162,13 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring) * Returns 0 on success, error on failure.
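 * (New in this patch: unless ring->no_scheduler is set, the ring also
 * registers its GPU scheduler in adev->gpu_sched[hw_ip][hw_prio], the
 * per-IP, per-priority list that scheduler entities later select from.)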
*/ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, - unsigned max_dw, struct amdgpu_irq_src *irq_src, - unsigned irq_type) + unsigned int max_dw, struct amdgpu_irq_src *irq_src, + unsigned int irq_type, unsigned int hw_prio) { int r, i; int sched_hw_submission = amdgpu_sched_hw_submission; + u32 *num_sched; + u32 hw_ip; /* Set the hw submission limit higher for KIQ because * it's used for a number of gfx/compute tasks by both @@ -258,6 +260,13 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, ring->priority = DRM_SCHED_PRIORITY_NORMAL; mutex_init(&ring->priority_mutex); + if (!ring->no_scheduler) { + hw_ip = ring->funcs->type; + num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds; + adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] = + &ring->sched; + } + for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i) atomic_set(&ring->num_jobs[i], 0); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 9a443013d70d..e1d894f0d654 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -30,11 +30,15 @@ /* max number of rings */ #define AMDGPU_MAX_RINGS 28 +#define AMDGPU_MAX_HWIP_RINGS 8 #define AMDGPU_MAX_GFX_RINGS 2 #define AMDGPU_MAX_COMPUTE_RINGS 8 #define AMDGPU_MAX_VCE_RINGS 3 #define AMDGPU_MAX_UVD_ENC_RINGS 2 +#define AMDGPU_RING_PRIO_DEFAULT 1 +#define AMDGPU_RING_PRIO_MAX AMDGPU_GFX_PIPE_PRIO_MAX + /* some special values for the owner field */ #define AMDGPU_FENCE_OWNER_UNDEFINED ((void *)0ul) #define AMDGPU_FENCE_OWNER_VM ((void *)1ul) @@ -47,16 +51,16 @@ #define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched) enum amdgpu_ring_type { - AMDGPU_RING_TYPE_GFX, - AMDGPU_RING_TYPE_COMPUTE, - AMDGPU_RING_TYPE_SDMA, - AMDGPU_RING_TYPE_UVD, - AMDGPU_RING_TYPE_VCE, - AMDGPU_RING_TYPE_KIQ, - AMDGPU_RING_TYPE_UVD_ENC, - AMDGPU_RING_TYPE_VCN_DEC, - AMDGPU_RING_TYPE_VCN_ENC, - AMDGPU_RING_TYPE_VCN_JPEG + AMDGPU_RING_TYPE_GFX = AMDGPU_HW_IP_GFX, + AMDGPU_RING_TYPE_COMPUTE = AMDGPU_HW_IP_COMPUTE, + AMDGPU_RING_TYPE_SDMA = AMDGPU_HW_IP_DMA, + AMDGPU_RING_TYPE_UVD = AMDGPU_HW_IP_UVD, + AMDGPU_RING_TYPE_VCE = AMDGPU_HW_IP_VCE, + AMDGPU_RING_TYPE_UVD_ENC = AMDGPU_HW_IP_UVD_ENC, + AMDGPU_RING_TYPE_VCN_DEC = AMDGPU_HW_IP_VCN_DEC, + AMDGPU_RING_TYPE_VCN_ENC = AMDGPU_HW_IP_VCN_ENC, + AMDGPU_RING_TYPE_VCN_JPEG = AMDGPU_HW_IP_VCN_JPEG, + AMDGPU_RING_TYPE_KIQ }; struct amdgpu_device; @@ -65,6 +69,11 @@ struct amdgpu_ib; struct amdgpu_cs_parser; struct amdgpu_job; +struct amdgpu_sched { + u32 num_scheds; + struct drm_gpu_scheduler *sched[AMDGPU_MAX_HWIP_RINGS]; +}; + /* * Fences. 
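 * (Illustrative sketch, not part of this patch: a context that wants a
 * compute entity at a given priority would hand the per-IP scheduler
 * list from the amdgpu_sched container above to the DRM scheduler,
 * assuming the drm_sched_entity_init() signature of this kernel series:
 *
 *	struct amdgpu_sched *s = &adev->gpu_sched[AMDGPU_HW_IP_COMPUTE][hw_prio];
 *
 *	r = drm_sched_entity_init(&entity, priority,
 *				  s->sched, s->num_scheds, &guilty);
 * )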
*/ @@ -159,7 +168,8 @@ struct amdgpu_ring_funcs { void (*end_use)(struct amdgpu_ring *ring); void (*emit_switch_buffer) (struct amdgpu_ring *ring); void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags); - void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg); + void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg, + uint32_t reg_val_offs); void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val, uint32_t mask); @@ -214,12 +224,12 @@ struct amdgpu_ring { unsigned vm_inv_eng; struct dma_fence *vmid_wait; bool has_compute_vm_bug; + bool no_scheduler; atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX]; struct mutex priority_mutex; /* protected by priority_mutex */ int priority; - bool has_high_prio; #if defined(CONFIG_DEBUG_FS) struct dentry *ent; @@ -241,7 +251,7 @@ struct amdgpu_ring { #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r)) #define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r)) #define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d)) -#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d)) +#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o)) #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v)) #define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m)) #define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m)) @@ -257,8 +267,8 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); void amdgpu_ring_commit(struct amdgpu_ring *ring); void amdgpu_ring_undo(struct amdgpu_ring *ring); int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, - unsigned ring_size, struct amdgpu_irq_src *irq_src, - unsigned irq_type); + unsigned int ring_size, struct amdgpu_irq_src *irq_src, + unsigned int irq_type, unsigned int prio); void amdgpu_ring_fini(struct amdgpu_ring *ring); void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring, uint32_t reg0, uint32_t val0, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 4b352206354b..2a152516504a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -61,8 +61,6 @@ struct amdgpu_sdma_ras_funcs { struct amdgpu_sdma { struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES]; - struct drm_gpu_scheduler *sdma_sched[AMDGPU_MAX_SDMA_INSTANCES]; - uint32_t num_sdma_sched; struct amdgpu_irq_src trap_irq; struct amdgpu_irq_src illegal_inst_irq; struct amdgpu_irq_src ecc_irq; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 63e734a125fb..5da20fc166d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -35,7 +35,7 @@ #define AMDGPU_JOB_GET_TIMELINE_NAME(job) \ job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished) -TRACE_EVENT(amdgpu_mm_rreg, +TRACE_EVENT(amdgpu_device_rreg, TP_PROTO(unsigned did, uint32_t reg, uint32_t value), TP_ARGS(did, reg, value), TP_STRUCT__entry( @@ -54,7 +54,7 @@ TRACE_EVENT(amdgpu_mm_rreg, (unsigned long)__entry->value) ); -TRACE_EVENT(amdgpu_mm_wreg, +TRACE_EVENT(amdgpu_device_wreg, TP_PROTO(unsigned did, uint32_t reg, uint32_t value), TP_ARGS(did, reg, value), TP_STRUCT__entry( diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 6309ff72bd78..6880c023ca8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -2042,7 +2042,8 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo, num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8); num_bytes = num_pages * 8; - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job); + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, + AMDGPU_IB_POOL_NORMAL, &job); if (r) return r; @@ -2101,7 +2102,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, num_loops = DIV_ROUND_UP(byte_count, max_bytes); num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8); - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, + direct_submit ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job); if (r) return r; @@ -2190,7 +2192,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, /* for IB padding */ num_dw += 64; - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_NORMAL, &job); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 9ef312428231..65bb25e31d45 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -403,8 +403,8 @@ FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version); FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version); FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos_fw_version); FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_fw_version); -FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_fw_version); -FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_fw_version); +FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_ras_ucode_version); +FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_xgmi_ucode_version); FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version); FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version); FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index 9dd51f0d2c11..af1b1ccf613c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -110,7 +110,8 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, * even NOMEM error is encountered */ if(!err_data->err_addr) - DRM_WARN("Failed to alloc memory for umc error address record!\n"); + dev_warn(adev->dev, "Failed to alloc memory for " + "umc error address record!\n"); /* umc query_ras_error_address is also responsible for clearing * error status @@ -120,10 +121,14 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, /* only uncorrectable error needs gpu reset */ if (err_data->ue_count) { + dev_info(adev->dev, "%ld uncorrectable hardware errors " + "detected in UMC block\n", + err_data->ue_count); + if (err_data->err_addr_cnt && amdgpu_ras_add_bad_pages(adev, err_data->err_addr, err_data->err_addr_cnt)) - DRM_WARN("Failed to add ras bad page!\n"); + dev_warn(adev->dev, "Failed to add ras bad page!\n"); amdgpu_ras_reset_gpu(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 5fd32ad1c575..550282d9c1fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1056,7 +1056,8 @@ static int 
amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, goto err; } - r = amdgpu_job_alloc_with_ib(adev, 64, &job); + r = amdgpu_job_alloc_with_ib(adev, 64, + direct ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job); if (r) goto err; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 59ddba137946..d090455282e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -446,7 +446,8 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; @@ -524,7 +525,8 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, struct dma_fence *f = NULL; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, + direct ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index a41272fbcba2..2de99b441601 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -56,13 +56,17 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work); int amdgpu_vcn_sw_init(struct amdgpu_device *adev) { - unsigned long bo_size; + unsigned long bo_size, fw_shared_bo_size; const char *fw_name; const struct common_firmware_header *hdr; unsigned char fw_check; int i, r; INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler); + mutex_init(&adev->vcn.vcn_pg_lock); + atomic_set(&adev->vcn.total_submission_cnt, 0); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) + atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0); switch (adev->asic_type) { case CHIP_RAVEN: @@ -178,6 +182,17 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) return r; } } + + r = amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].fw_shared_bo, + &adev->vcn.inst[i].fw_shared_gpu_addr, &adev->vcn.inst[i].fw_shared_cpu_addr); + if (r) { + dev_err(adev->dev, "VCN %d (%d) failed to allocate firmware shared bo\n", i, r); + return r; + } + + fw_shared_bo_size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo); + adev->vcn.inst[i].saved_shm_bo = kvmalloc(fw_shared_bo_size, GFP_KERNEL); } return 0; @@ -192,6 +207,12 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { if (adev->vcn.harvest_config & (1 << j)) continue; + + kvfree(adev->vcn.inst[j].saved_shm_bo); + amdgpu_bo_free_kernel(&adev->vcn.inst[j].fw_shared_bo, + &adev->vcn.inst[j].fw_shared_gpu_addr, + (void **)&adev->vcn.inst[j].fw_shared_cpu_addr); + if (adev->vcn.indirect_sram) { amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo, &adev->vcn.inst[j].dpg_sram_gpu_addr, @@ -210,6 +231,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) } release_firmware(adev->vcn.fw); + mutex_destroy(&adev->vcn.vcn_pg_lock); return 0; } @@ -236,6 +258,17 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev) return -ENOMEM; memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size); + + if (adev->vcn.inst[i].fw_shared_bo == NULL) + return 0; + + if (!adev->vcn.inst[i].saved_shm_bo) + return -ENOMEM; + + size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo); + ptr = 
adev->vcn.inst[i].fw_shared_cpu_addr; + + memcpy_fromio(adev->vcn.inst[i].saved_shm_bo, ptr, size); } return 0; } @@ -273,6 +306,17 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev) } memset_io(ptr, 0, size); } + + if (adev->vcn.inst[i].fw_shared_bo == NULL) + return -EINVAL; + + size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo); + ptr = adev->vcn.inst[i].fw_shared_cpu_addr; + + if (adev->vcn.inst[i].saved_shm_bo != NULL) + memcpy_toio(ptr, adev->vcn.inst[i].saved_shm_bo, size); + else + memset_io(ptr, 0, size); } return 0; } @@ -295,7 +339,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { struct dpg_pause_state new_state; - if (fence[j]) + if (fence[j] || + unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt))) new_state.fw_based = VCN_DPG_STATE__PAUSE; else new_state.fw_based = VCN_DPG_STATE__UNPAUSE; @@ -307,8 +352,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) fences += fence[j]; } - if (fences == 0) { - amdgpu_gfx_off_ctrl(adev, true); + if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) { amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, AMD_PG_STATE_GATE); } else { @@ -319,36 +363,46 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work); - if (set_clocks) { - amdgpu_gfx_off_ctrl(adev, false); - amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, - AMD_PG_STATE_UNGATE); - } + atomic_inc(&adev->vcn.total_submission_cnt); + cancel_delayed_work_sync(&adev->vcn.idle_work); + + mutex_lock(&adev->vcn.vcn_pg_lock); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, + AMD_PG_STATE_UNGATE); if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { struct dpg_pause_state new_state; - unsigned int fences = 0; - unsigned int i; - for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]); - } - if (fences) + if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) { + atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt); new_state.fw_based = VCN_DPG_STATE__PAUSE; - else - new_state.fw_based = VCN_DPG_STATE__UNPAUSE; + } else { + unsigned int fences = 0; + unsigned int i; - if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) - new_state.fw_based = VCN_DPG_STATE__PAUSE; + for (i = 0; i < adev->vcn.num_enc_rings; ++i) + fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]); + + if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt)) + new_state.fw_based = VCN_DPG_STATE__PAUSE; + else + new_state.fw_based = VCN_DPG_STATE__UNPAUSE; + } adev->vcn.pause_dpg_mode(adev, ring->me, &new_state); } + mutex_unlock(&adev->vcn.vcn_pg_lock); } void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring) { + if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG && + ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) + atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt); + + atomic_dec(&ring->adev->vcn.total_submission_cnt); + schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT); } @@ -390,7 +444,8 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(adev, 64, &job); + r = amdgpu_job_alloc_with_ib(adev, 64, + AMDGPU_IB_POOL_DIRECT, &job); if (r) goto err; @@ -557,7 +612,8 @@ static 
@@ -557,7 +612,8 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 	uint64_t addr;
 	int i, r;

-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+				     AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
 		return r;

@@ -610,7 +666,8 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 	uint64_t addr;
 	int i, r;

-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+				     AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
 		return r;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 6fe057329de2..90aa12b22725 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -132,6 +132,13 @@
 	}						\
 	} while (0)

+#define AMDGPU_VCN_MULTI_QUEUE_FLAG (1 << 8)
+
+enum fw_queue_mode {
+	FW_QUEUE_RING_RESET = 1,
+	FW_QUEUE_DPG_HOLD_OFF = 2,
+};
+
 enum engine_status_constants {
 	UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0,
 	UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0 = 0xAAAA0,
@@ -179,10 +186,15 @@ struct amdgpu_vcn_inst {
 	struct amdgpu_irq_src	irq;
 	struct amdgpu_vcn_reg	external;
 	struct amdgpu_bo	*dpg_sram_bo;
+	struct amdgpu_bo	*fw_shared_bo;
 	struct dpg_pause_state	pause_state;
 	void			*dpg_sram_cpu_addr;
 	uint64_t		dpg_sram_gpu_addr;
 	uint32_t		*dpg_sram_curr_addr;
+	atomic_t		dpg_enc_submission_cnt;
+	void			*fw_shared_cpu_addr;
+	uint64_t		fw_shared_gpu_addr;
+	void			*saved_shm_bo;
 };

 struct amdgpu_vcn {
@@ -196,16 +208,28 @@ struct amdgpu_vcn {
 	uint8_t	num_vcn_inst;
 	struct amdgpu_vcn_inst	 inst[AMDGPU_MAX_VCN_INSTANCES];
 	struct amdgpu_vcn_reg	 internal;
-	struct drm_gpu_scheduler *vcn_enc_sched[AMDGPU_MAX_VCN_ENC_RINGS];
-	struct drm_gpu_scheduler *vcn_dec_sched[AMDGPU_MAX_VCN_INSTANCES];
-	uint32_t		 num_vcn_enc_sched;
-	uint32_t		 num_vcn_dec_sched;
+	struct mutex		 vcn_pg_lock;
+	atomic_t		 total_submission_cnt;
 	unsigned	harvest_config;
 	int (*pause_dpg_mode)(struct amdgpu_device *adev,
 		int inst_idx, struct dpg_pause_state *new_state);
 };

+struct amdgpu_fw_shared_multi_queue {
+	uint8_t decode_queue_mode;
+	uint8_t encode_generalpurpose_queue_mode;
+	uint8_t encode_lowlatency_queue_mode;
+	uint8_t encode_realtime_queue_mode;
+	uint8_t padding[4];
+};
+
+struct amdgpu_fw_shared {
+	uint32_t present_flag_0;
+	uint8_t pad[53];
+	struct amdgpu_fw_shared_multi_queue multi_queue;
+} __attribute__((__packed__));
+
 int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
 int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
 int amdgpu_vcn_suspend(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index adc813cde8e2..8c10084f44ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -38,7 +38,8 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 {
 	/* enable virtual display */
-	adev->mode_info.num_crtc = 1;
+	if (adev->mode_info.num_crtc == 0)
+		adev->mode_info.num_crtc = 1;
 	adev->enable_virtual_display = true;
 	adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
 	adev->cg_flags = 0;
@@ -152,6 +153,19 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
 	return 0;
 }

+void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
+{
+	struct amdgpu_virt *virt = &adev->virt;
+
+	if (virt->ops && virt->ops->req_init_data)
+		virt->ops->req_init_data(adev);
+
+	if (adev->virt.req_init_data_ver > 0)
+		DRM_INFO("host supports REQ_INIT_DATA handshake\n");
+	else
+		DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
+}
+
 /**
  * amdgpu_virt_wait_reset() - wait for reset gpu completed
  * @amdgpu: amdgpu device.
@@ -287,3 +301,66 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
 			}
 		}
 	}
+
+void amdgpu_detect_virtualization(struct amdgpu_device *adev)
+{
+	uint32_t reg;
+
+	switch (adev->asic_type) {
+	case CHIP_TONGA:
+	case CHIP_FIJI:
+		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+		break;
+	case CHIP_VEGA10:
+	case CHIP_VEGA20:
+	case CHIP_NAVI10:
+	case CHIP_NAVI12:
+	case CHIP_ARCTURUS:
+		reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
+		break;
+	default: /* other chips don't support SRIOV */
+		reg = 0;
+		break;
+	}
+
+	if (reg & 1)
+		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
+
+	if (reg & 0x80000000)
+		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
+
+	if (!reg) {
+		if (is_virtual_machine()) /* passthrough mode excludes sriov mode */
+			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
+	}
+}
+
+bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
+{
+	return amdgpu_sriov_is_debug(adev) ? true : false;
+}
+
+bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
+{
+	return amdgpu_sriov_is_normal(adev) ? true : false;
+}
+
+int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
+{
+	if (!amdgpu_sriov_vf(adev) ||
+	    amdgpu_virt_access_debugfs_is_kiq(adev))
+		return 0;
+
+	if (amdgpu_virt_access_debugfs_is_mmio(adev))
+		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+	else
+		return -EPERM;
+
+	return 0;
+}
+
+void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
+{
+	if (amdgpu_sriov_vf(adev))
+		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index f0128f745bd2..de27308802c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -30,6 +30,11 @@
 #define AMDGPU_PASSTHROUGH_MODE        (1 << 3) /* the whole GPU is passed through to the VM */
 #define AMDGPU_SRIOV_CAPS_RUNTIME      (1 << 4) /* is out of full access mode */

+/* all ASICs after AI use this offset */
+#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5
+/* tonga/fiji use this offset */
+#define mmBIF_IOV_FUNC_IDENTIFIER 0x1503
+
 struct amdgpu_mm_table {
 	struct amdgpu_bo	*bo;
 	uint32_t		*cpu_addr;
@@ -54,6 +59,7 @@ struct amdgpu_vf_error_buffer {
 struct amdgpu_virt_ops {
 	int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
 	int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
+	int (*req_init_data)(struct amdgpu_device *adev);
 	int (*reset_gpu)(struct amdgpu_device *adev);
 	int (*wait_reset)(struct amdgpu_device *adev);
 	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
@@ -83,6 +89,8 @@ enum AMDGIM_FEATURE_FLAG {
 	AMDGIM_FEATURE_GIM_LOAD_UCODES   = 0x2,
 	/* VRAM LOST by GIM */
 	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
+	/* MM bandwidth */
+	AMDGIM_FEATURE_GIM_MM_BW_MGR = 0x8,
 	/* PP ONE VF MODE in GIM */
 	AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
 };
@@ -256,6 +264,8 @@ struct amdgpu_virt {
 	struct amdgpu_virt_fw_reserve	fw_reserve;
 	uint32_t gim_feature;
 	uint32_t reg_access_mode;
+	int req_init_data_ver;
+	bool tdr_debug;
 };

 #define amdgpu_sriov_enabled(adev) \
@@ -287,6 +297,10 @@ static inline bool is_virtual_machine(void)
 #define amdgpu_sriov_is_pp_one_vf(adev) \
 	((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
+#define amdgpu_sriov_is_debug(adev) \
+	((!adev->in_gpu_reset) && adev->virt.tdr_debug)
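/*
 * Editorial sketch (not part of the patch): how the enable/disable pair
 * above is meant to bracket a debugfs handler on an SR-IOV VF.  The function
 * debugfs_read_reg() and its register read are hypothetical; the pattern
 * (fail with -EPERM unless direct MMIO access is currently safe, then
 * restore the RUNTIME cap) is what the two helpers implement.
 */
static int debugfs_read_reg(struct amdgpu_device *adev, uint32_t offset,
			    uint32_t *value)
{
	int r;

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0)
		return r;	/* VF is in runtime mode: MMIO would be unsafe */

	*value = RREG32(offset);	/* full-access window or TDR-debug mode */

	amdgpu_virt_disable_access_debugfs(adev);
	return 0;
}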
+#define amdgpu_sriov_is_normal(adev) \
+	((!adev->in_gpu_reset) && (!adev->virt.tdr_debug))

 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
@@ -296,6 +310,7 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
+void amdgpu_virt_request_init_data(struct amdgpu_device *adev);
 int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
 int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
 void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
@@ -303,4 +318,9 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
 					unsigned int key, unsigned int chksum);
 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+void amdgpu_detect_virtualization(struct amdgpu_device *adev);
+
+bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
+int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
+void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6d9252a27916..bcdaf5204d05 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -82,7 +82,7 @@ struct amdgpu_prt_cb {
 	struct dma_fence_cb cb;
 };

-/**
+/*
 * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
 * happens while holding this lock anywhere to prevent deadlocks when
 * an MMU notifier runs in reclaim-FS context.
@@ -2124,11 +2124,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
 	if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
 	    (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
 		bo_va->is_xgmi = true;
-		mutex_lock(&adev->vm_manager.lock_pstate);
 		/* Power up XGMI if it could potentially be used */
-		if (++adev->vm_manager.xgmi_map_counter == 1)
-			amdgpu_xgmi_set_pstate(adev, 1);
-		mutex_unlock(&adev->vm_manager.lock_pstate);
+		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
 	}

 	return bo_va;
@@ -2551,12 +2548,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,

 	dma_fence_put(bo_va->last_pt_update);

-	if (bo && bo_va->is_xgmi) {
-		mutex_lock(&adev->vm_manager.lock_pstate);
-		if (--adev->vm_manager.xgmi_map_counter == 0)
-			amdgpu_xgmi_set_pstate(adev, 0);
-		mutex_unlock(&adev->vm_manager.lock_pstate);
-	}
+	if (bo && bo_va->is_xgmi)
+		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);

 	kfree(bo_va);
 }
@@ -3166,9 +3159,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)

 	idr_init(&adev->vm_manager.pasid_idr);
 	spin_lock_init(&adev->vm_manager.pasid_lock);
-
-	adev->vm_manager.xgmi_map_counter = 0;
-	mutex_init(&adev->vm_manager.lock_pstate);
 }

 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 06fe30e1492d..b13c14d6b820 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -349,10 +349,6 @@ struct amdgpu_vm_manager {
 	 */
 	struct idr pasid_idr;
 	spinlock_t pasid_lock;
-
-	/* counter of mapped memory through xgmi */
-	uint32_t xgmi_map_counter;
-	struct mutex lock_pstate;
 };

 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index cf96c335b258..fbd451f3559a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -64,7 +64,8 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
 	unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
 	int r;

-	r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+	r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4,
+			p->direct ? AMDGPU_IB_POOL_VM : AMDGPU_IB_POOL_NORMAL, &p->job);
 	if (r)
 		return r;

@@ -223,7 +224,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 		ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
 		ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);

-		r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+		r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4,
+				p->direct ? AMDGPU_IB_POOL_VM : AMDGPU_IB_POOL_NORMAL, &p->job);
 		if (r)
 			return r;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 95b3327168ac..54d8a3e7e75c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -373,7 +373,13 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lo
 	if (lock)
 		mutex_lock(&tmp->hive_lock);
-	tmp->pstate = -1;
+	tmp->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
+	tmp->hi_req_gpu = NULL;
+	/*
+	 * The hive pstate on boot is high in Vega20, so we have to drop it
+	 * to the low pstate after boot.
+	 */
+	tmp->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
 	mutex_unlock(&xgmi_mutex);

 	return tmp;
@@ -383,50 +389,51 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
 {
 	int ret = 0;
 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
-	struct amdgpu_device *tmp_adev;
-	bool update_hive_pstate = true;
-	bool is_high_pstate = pstate && adev->asic_type == CHIP_VEGA20;
+	struct amdgpu_device *request_adev = hive->hi_req_gpu ?
+						hive->hi_req_gpu : adev;
+	bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20;
+	bool init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN;

-	if (!hive)
+	/* fw bug, so temporarily disable pstate switching */
+	if (!hive || adev->asic_type == CHIP_VEGA20)
 		return 0;

 	mutex_lock(&hive->hive_lock);

-	if (hive->pstate == pstate) {
-		adev->pstate = is_high_pstate ? pstate : adev->pstate;
+	if (is_hi_req)
+		hive->hi_req_count++;
+	else
+		hive->hi_req_count--;
+
+	/*
+	 * Vega20 only needs a single peer to request the high pstate for the
+	 * hive to go high, but all peers must request the low pstate for the
+	 * hive to go low.
+	 */
+	if (hive->pstate == pstate ||
+	    (!is_hi_req && hive->hi_req_count && !init_low))
 		goto out;
-	}

-	dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
+	dev_dbg(request_adev->dev, "Set xgmi pstate %d.\n", pstate);

-	ret = amdgpu_dpm_set_xgmi_pstate(adev, pstate);
+	ret = amdgpu_dpm_set_xgmi_pstate(request_adev, pstate);
 	if (ret) {
-		dev_err(adev->dev,
+		dev_err(request_adev->dev,
 			"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
-			adev->gmc.xgmi.node_id,
-			adev->gmc.xgmi.hive_id, ret);
+			request_adev->gmc.xgmi.node_id,
+			request_adev->gmc.xgmi.hive_id, ret);
 		goto out;
 	}

-	/* Update device pstate */
-	adev->pstate = pstate;
-
-	/*
-	 * Update the hive pstate only all devices of the hive
-	 * are in the same pstate
-	 */
-	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
-		if (tmp_adev->pstate != adev->pstate) {
-			update_hive_pstate = false;
-			break;
-		}
-	}
-	if (update_hive_pstate || is_high_pstate)
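/*
 * Editorial sketch (not part of the patch): the voting rule the new code
 * below implements, reduced to its core.  hi_req_count is a per-hive
 * counter; hive_lock serializes the updates in the real driver.  One
 * outstanding high request is enough to raise the hive, and it drops only
 * once every peer has withdrawn its request.
 */
static int hi_req_count;	/* mirrors hive->hi_req_count */

static int hive_wants_high(int peer_requests_high)
{
	if (peer_requests_high)
		hi_req_count++;
	else
		hi_req_count--;

	/* any remaining high vote keeps the hive in the high pstate */
	return hi_req_count > 0;
}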
+	if (init_low)
+		hive->pstate = hive->hi_req_count ?
+					hive->pstate : AMDGPU_XGMI_PSTATE_MIN;
+	else {
 		hive->pstate = pstate;
-
+		hive->hi_req_gpu = pstate != AMDGPU_XGMI_PSTATE_MIN ?
+							adev : NULL;
+	}
 out:
 	mutex_unlock(&hive->hive_lock);
-
 	return ret;
 }

@@ -507,9 +514,6 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 		goto exit;
 	}

-	/* Set default device pstate */
-	adev->pstate = -1;
-
 	top_info = &adev->psp.xgmi_context.top_info;
 	list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
@@ -604,6 +608,8 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
 	    adev->gmc.xgmi.num_physical_nodes == 0)
 		return 0;

+	amdgpu_xgmi_reset_ras_error_count(adev);
+
 	if (!adev->gmc.xgmi.ras_if) {
 		adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
 		if (!adev->gmc.xgmi.ras_if)
@@ -668,6 +674,32 @@ uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
 	return addr + dram_base_addr;
 }

+static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg)
+{
+	WREG32_PCIE(pcs_status_reg, 0xFFFFFFFF);
+	WREG32_PCIE(pcs_status_reg, 0);
+}
+
+void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
+{
+	uint32_t i;
+
+	switch (adev->asic_type) {
+	case CHIP_ARCTURUS:
+		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++)
+			pcs_clear_status(adev,
+					 xgmi_pcs_err_status_reg_arct[i]);
+		break;
+	case CHIP_VEGA20:
+		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++)
+			pcs_clear_status(adev,
+					 xgmi_pcs_err_status_reg_vg20[i]);
+		break;
+	default:
+		break;
+	}
+}
+
 static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
 					      uint32_t value,
 					      uint32_t *ue_count,
@@ -758,6 +790,8 @@ int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
 		break;
 	}

+	amdgpu_xgmi_reset_ras_error_count(adev);
+
 	err_data->ue_count += ue_cnt;
 	err_data->ce_count += ce_cnt;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 4a92067fe595..6999eab16a72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -25,6 +25,7 @@
 #include <drm/task_barrier.h>
 #include "amdgpu_psp.h"

+
 struct amdgpu_hive_info {
 	uint64_t hive_id;
 	struct list_head device_list;
@@ -33,8 +34,14 @@ struct amdgpu_hive_info {
 	struct kobject *kobj;
 	struct device_attribute dev_attr;
 	struct amdgpu_device *adev;
-	int pstate; /*0 -- low , 1 -- high , -1 unknown*/
+	int hi_req_count;
+	struct amdgpu_device *hi_req_gpu;
 	struct task_barrier tb;
+	enum {
+		AMDGPU_XGMI_PSTATE_MIN,
+		AMDGPU_XGMI_PSTATE_MAX_VEGA20,
+		AMDGPU_XGMI_PSTATE_UNKNOWN
+	} pstate;
 };

 struct amdgpu_pcs_ras_field {
@@ -56,6 +63,7 @@ uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
 					   uint64_t addr);
 int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
 				      void *ras_error_status);
+void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev);

 static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
 					 struct amdgpu_device *bo_adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index cae426c7c086..4cfc786699c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -54,6 +54,8 @@
 #define PLL_INDEX	2
 #define PLL_DATA	3

+#define ATOM_CMD_TIMEOUT_SEC	20
+
 typedef struct {
 	struct atom_context *ctx;
 	uint32_t *ps, *ws;
@@ -744,8 +746,9 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
 		cjiffies = jiffies;
 		if (time_after(cjiffies, ctx->last_jump_jiffies)) {
 			cjiffies -= ctx->last_jump_jiffies;
-			if ((jiffies_to_msecs(cjiffies) > 10000)) {
-				DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n");
+			if ((jiffies_to_msecs(cjiffies) > ATOM_CMD_TIMEOUT_SEC*1000)) {
+				DRM_ERROR("atombios stuck in loop for more than %dsecs aborting\n",
+					  ATOM_CMD_TIMEOUT_SEC);
 				ctx->abort = true;
 			}
 		} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 62635e58e45e..fe306d0f73f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1809,12 +1809,6 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
 		>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
 }

-static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
-{
-	if (is_virtual_machine()) /* passthrough mode */
-		adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
 static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
 	if (!ring || !ring->funcs->emit_wreg) {
@@ -2177,8 +2171,6 @@ static const struct amdgpu_ip_block_version cik_common_ip_block =

 int cik_set_ip_blocks(struct amdgpu_device *adev)
 {
-	cik_detect_hw_virtualization(adev);
-
 	switch (adev->asic_type) {
 	case CHIP_BONAIRE:
 		amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 580d3f93d670..7c2ee84fa4a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -320,8 +320,6 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
 	}
-	sdma0->sched.ready = false;
-	sdma1->sched.ready = false;
 }

 /**
@@ -679,7 +677,8 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 	memset(&ib, 0, sizeof(ib));
-	r = amdgpu_ib_get(adev, NULL, 256, &ib);
+	r = amdgpu_ib_get(adev, NULL, 256,
+			  AMDGPU_IB_POOL_DIRECT, &ib);
 	if (r)
 		goto err0;
@@ -980,7 +979,8 @@ static int cik_sdma_sw_init(void *handle)
 				     &adev->sdma.trap_irq,
 				     (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
-				     AMDGPU_SDMA_IRQ_INSTANCE1);
+				     AMDGPU_SDMA_IRQ_INSTANCE1,
+				     AMDGPU_RING_PRIO_DEFAULT);
 		if (r)
 			return r;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 2512e7ebfedf..e38744d06f4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2303,9 +2303,9 @@ static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
 	struct amdgpu_device *adev = crtc->dev->dev_private;
 	u32 tmp;

-	tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+	tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
 	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
-	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 }

 static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
@@ -2319,10 +2319,10 @@ static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
 	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
 	       lower_32_bits(amdgpu_crtc->cursor_addr));

-	tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+	tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
 	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
 	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
-	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 }

 static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 0dde22db9848..2584ff74423b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2382,9 +2382,9 @@ static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
 	struct amdgpu_device *adev = crtc->dev->dev_private;
 	u32 tmp;

-	tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+	tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
 	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
-	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 }

 static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
@@ -2398,10 +2398,10 @@ static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
 	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
 	       lower_32_bits(amdgpu_crtc->cursor_addr));

-	tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+	tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
 	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
 	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
-	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 }

 static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 84219534bd38..d05c39f9ae40 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2194,9 +2194,9 @@ static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct amdgpu_device *adev = crtc->dev->dev_private;

-	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
-		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
-		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));

 }
@@ -2211,10 +2211,10 @@ static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
 	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
 	       lower_32_bits(amdgpu_crtc->cursor_addr));

-	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
-		   CUR_CONTROL__CURSOR_EN_MASK |
-		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
-		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+	       CUR_CONTROL__CURSOR_EN_MASK |
+	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));

 }
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 3a640702d7d1..ad0f8adb6a2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2205,9 +2205,9 @@ static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct amdgpu_device *adev = crtc->dev->dev_private;

-	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
-		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
-		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
 }

 static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
@@ -2220,10 +2220,10 @@ static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
 	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
 	       lower_32_bits(amdgpu_crtc->cursor_addr));

-	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
-		   CUR_CONTROL__CURSOR_EN_MASK |
-		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
-		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+	       CUR_CONTROL__CURSOR_EN_MASK |
+	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
 }

 static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 13e12be667fc..d5ff7b6331ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -172,8 +172,9 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

-	dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	drm_crtc_vblank_off(crtc);

+	amdgpu_crtc->enabled = false;
 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
 	amdgpu_crtc->encoder = NULL;
 	amdgpu_crtc->connector = NULL;
@@ -286,7 +287,7 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
 	static const struct mode_size {
 		int w;
 		int h;
-	} common_modes[17] = {
+	} common_modes[21] = {
 		{ 640,  480},
 		{ 720,  480},
 		{ 800,  600},
@@ -303,10 +304,14 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
 		{1680, 1050},
 		{1600, 1200},
 		{1920, 1080},
-		{1920, 1200}
+		{1920, 1200},
+		{4096, 3112},
+		{3656, 2664},
+		{3840, 2160},
+		{4096, 2160},
 	};

-	for (i = 0; i < 17; i++) {
+	for (i = 0; i < 21; i++) {
 		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
 		drm_mode_probed_add(connector, mode);
 	}
 }
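/*
 * Editorial sketch (not part of the patch): the golden-settings tables in
 * the gfx_v10_0.c diff below are applied entry by entry as read-modify-write
 * operations.  This is a paraphrase of how a (register, and_mask, or_value)
 * triple is typically programmed (cf. soc15_program_register_sequence());
 * reg_read()/reg_write() are hypothetical stand-ins for RREG32()/WREG32(),
 * and the exact masking details may differ from the real helper.
 */
#include <stddef.h>
#include <stdint.h>

struct golden_reg {
	uint32_t reg;		/* register offset, e.g. mmGRBM_GFX_INDEX */
	uint32_t and_mask;	/* bit field this entry owns               */
	uint32_t or_value;	/* value to program into the owned bits    */
};

static uint32_t reg_read(uint32_t reg);			/* stand-in for RREG32 */
static void reg_write(uint32_t reg, uint32_t val);	/* stand-in for WREG32 */

static void apply_golden_settings(const struct golden_reg *regs, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		uint32_t tmp;

		if (regs[i].and_mask == 0xffffffff) {
			tmp = regs[i].or_value;	/* full-width write, skip the read */
		} else {
			tmp = reg_read(regs[i].reg);
			tmp &= ~regs[i].and_mask;
			tmp |= (regs[i].or_value & regs[i].and_mask);
		}
		reg_write(regs[i].reg, tmp);
	}
}
/*
 * Note how the SPM table interleaves mmGRBM_GFX_INDEX writes with the
 * mmRLC_SPM_*_SAMPLEDELAY writes: the index register appears to steer each
 * following per-SE write to a specific shader engine (0x0 vs 0x10000).
 */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index f92c158d89a1..e1687e408e1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -138,6 +138,1062 @@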
static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] = /* Pending on emulation bring up */ }; +static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_0_nv10[] = +{ + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, 
mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 
0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84), + SOC15_REG_GOLDEN_VALUE(GC, 0, 
mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + 
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_SAMPLE_SKEW, 0x000000FF, 0x19),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_MUXSEL_SKEW, 0x000000FF, 0x20),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x5),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x14),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x19),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_DESER_START_SKEW, 0x000000FF, 0x33),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
+};
+
 static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
 {
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
@@ -272,11 +1328,1691 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
 	/* Pending on emulation bring up */
 };
 
+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_nv14[] =
+{
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000L, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff,
0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, 
mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 
0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, 
mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1a0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1a4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1b0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1b4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1a8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1ac), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 
0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1b8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1bc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1cc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_SAMPLE_SKEW, 0x000000FF, 0x26), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, 
mmRLC_SPM_GLOBALS_MUXSEL_SKEW, 0x000000FF, 0x28), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0xf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x15), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x1f), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x25), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_DESER_START_SKEW, 0x000000FF, 0x3b), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000) +}; + static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] = { /* Pending on emulation bring up */ }; +static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] = +{ + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000L, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 
0xFFFFFFFF, 0xd), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 
0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, 
mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 
0x1c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_SAMPLE_SKEW, 0x000000FF, 0x1f), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_MUXSEL_SKEW, 0x000000FF, 0x22), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x1), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), 
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x15), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_DESER_START_SKEW, 0x000000FF, 0x35), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000) +}; + #define DEFAULT_SH_MEM_CONFIG \ ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \ (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \ @@ -431,6 +3167,9 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev) soc15_program_register_sequence(adev, golden_settings_gc_10_0_nv10, (const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10)); + soc15_program_register_sequence(adev, + golden_settings_gc_rlc_spm_10_0_nv10, + (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_0_nv10)); break; case CHIP_NAVI14: soc15_program_register_sequence(adev, @@ -439,6 +3178,9 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev) soc15_program_register_sequence(adev, golden_settings_gc_10_1_nv14, (const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14)); + soc15_program_register_sequence(adev, + golden_settings_gc_rlc_spm_10_1_nv14, + (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_nv14)); break; case CHIP_NAVI12: soc15_program_register_sequence(adev, @@ -447,6 +3189,9 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev) soc15_program_register_sequence(adev, golden_settings_gc_10_1_2_nv12, (const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12)); + soc15_program_register_sequence(adev, + golden_settings_gc_rlc_spm_10_1_2_nv12, + (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_2_nv12)); break; default: break; @@ -557,7 +3302,8 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) gpu_addr = adev->wb.gpu_addr + (index * 4); adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 16, &ib); + r = amdgpu_ib_get(adev, NULL, 16, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) goto err1; @@ -1298,7 +4044,8 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id, irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe; r = amdgpu_ring_init(adev, ring, 1024, - &adev->gfx.eop_irq, irq_type); + &adev->gfx.eop_irq, irq_type, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; return 0; @@ -1309,7 +4056,8 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, { int r; unsigned irq_type; - struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; + struct amdgpu_ring *ring; + unsigned int hw_prio; ring = &adev->gfx.compute_ring[ring_id]; @@ -1328,10 +4076,11 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) + ring->pipe; - + hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ? + AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL; /* type-2 packets are deprecated on MEC, use type-3 instead */ r = amdgpu_ring_init(adev, ring, 1024, - &adev->gfx.eop_irq, irq_type); + &adev->gfx.eop_irq, irq_type, hw_prio); if (r) return r; @@ -2441,10 +5190,6 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 
0 : 1); - if (!enable) { - for (i = 0; i < adev->gfx.num_gfx_rings; i++) - adev->gfx.gfx_ring[i].sched.ready = false; - } WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp); for (i = 0; i < adev->usec_timeout; i++) { @@ -2923,16 +5668,12 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) { - int i; - if (enable) { WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0); } else { WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); - for (i = 0; i < adev->gfx.num_compute_rings; i++) - adev->gfx.compute_ring[i].sched.ready = false; adev->gfx.kiq.ring.sched.ready = false; } udelay(50); @@ -3268,11 +6009,8 @@ static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) { mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; - ring->has_high_prio = true; mqd->cp_hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; - } else { - ring->has_high_prio = false; } } } @@ -4292,14 +7030,21 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev, static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) { - u32 data; + u32 reg, data; - data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL); + reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL); + if (amdgpu_sriov_is_pp_one_vf(adev)) + data = RREG32_NO_KIQ(reg); + else + data = RREG32(reg); data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; - WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); + if (amdgpu_sriov_is_pp_one_vf(adev)) + WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data); + else + WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); } static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev, @@ -4854,10 +7599,10 @@ static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start) amdgpu_ring_write(ring, FRAME_CMD(start ? 
0 : 1)); /* frame_end */ } -static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) +static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, + uint32_t reg_val_offs) { struct amdgpu_device *adev = ring->adev; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); amdgpu_ring_write(ring, 0 | /* src: register*/ @@ -4866,9 +7611,9 @@ static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) amdgpu_ring_write(ring, reg); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + - kiq->reg_val_offs * 4)); + reg_val_offs * 4)); amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + - kiq->reg_val_offs * 4)); + reg_val_offs * 4)); } static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 31f44d05e606..aa1e1be852dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1914,7 +1914,8 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) WREG32(scratch, 0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) goto err1; @@ -1950,7 +1951,6 @@ err1: static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) { - int i; if (enable) { WREG32(mmCP_ME_CNTL, 0); } else { @@ -1958,10 +1958,6 @@ static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK)); WREG32(mmSCRATCH_UMSK, 0); - for (i = 0; i < adev->gfx.num_gfx_rings; i++) - adev->gfx.gfx_ring[i].sched.ready = false; - for (i = 0; i < adev->gfx.num_compute_rings; i++) - adev->gfx.compute_ring[i].sched.ready = false; } udelay(50); } @@ -3114,7 +3110,9 @@ static int gfx_v6_0_sw_init(void *handle) ring->ring_obj = NULL; sprintf(ring->name, "gfx"); r = amdgpu_ring_init(adev, ring, 1024, - &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP); + &adev->gfx.eop_irq, + AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; } @@ -3136,7 +3134,8 @@ static int gfx_v6_0_sw_init(void *handle) sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; r = amdgpu_ring_init(adev, ring, 1024, - &adev->gfx.eop_irq, irq_type); + &adev->gfx.eop_irq, irq_type, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 733d398c61cc..e5a88cad44cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2364,7 +2364,8 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) WREG32(scratch, 0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) goto err1; @@ -2431,15 +2432,12 @@ err1: */ static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) { - int i; - - if (enable) { + if (enable) WREG32(mmCP_ME_CNTL, 0); - } else { - WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK)); - for (i = 0; i < adev->gfx.num_gfx_rings; i++) - adev->gfx.gfx_ring[i].sched.ready = false; - } + else + WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | + CP_ME_CNTL__PFP_HALT_MASK | + 
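[Annotation] Every ring-test hunk in this series (gfx v6 through v10) now threads an explicit pool through amdgpu_ib_get(); direct submissions such as IB tests draw from the DIRECT pool so they never contend with scheduler-managed jobs. A condensed sketch of the updated call pattern, with the test payload elided:

static int ring_test_ib_sketch(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	int r;

	memset(&ib, 0, sizeof(ib));

	/* AMDGPU_IB_POOL_DIRECT: IBs submitted straight to the ring,
	 * bypassing the GPU scheduler (tests, init-time work). */
	r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		return r;

	/* ... fill the IB and submit, as the per-ASIC tests do ... */

	amdgpu_ib_free(adev, &ib, NULL);
	return 0;
}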
CP_ME_CNTL__CE_HALT_MASK)); udelay(50); } @@ -2700,15 +2698,11 @@ static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring) */ static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) { - int i; - - if (enable) { + if (enable) WREG32(mmCP_MEC_CNTL, 0); - } else { - WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); - for (i = 0; i < adev->gfx.num_compute_rings; i++) - adev->gfx.compute_ring[i].sched.ready = false; - } + else + WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | + CP_MEC_CNTL__MEC_ME2_HALT_MASK)); udelay(50); } @@ -4439,7 +4433,8 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, /* type-2 packets are deprecated on MEC, use type-3 instead */ r = amdgpu_ring_init(adev, ring, 1024, - &adev->gfx.eop_irq, irq_type); + &adev->gfx.eop_irq, irq_type, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; @@ -4511,7 +4506,9 @@ static int gfx_v7_0_sw_init(void *handle) ring->ring_obj = NULL; sprintf(ring->name, "gfx"); r = amdgpu_ring_init(adev, ring, 1024, - &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP); + &adev->gfx.eop_irq, + AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index fc32586ef80b..2fcf6865abba 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -888,7 +888,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) gpu_addr = adev->wb.gpu_addr + (index * 4); adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 16, &ib); + r = amdgpu_ib_get(adev, NULL, 16, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) goto err1; @@ -1550,7 +1551,8 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* allocate an indirect buffer to put the commands in */ memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, total_size, &ib); + r = amdgpu_ib_get(adev, NULL, total_size, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); return r; @@ -1892,6 +1894,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, int r; unsigned irq_type; struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; + unsigned int hw_prio; ring = &adev->gfx.compute_ring[ring_id]; @@ -1911,9 +1914,11 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) + ring->pipe; + hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ? 
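[Annotation] The compute_ring_init hunks now resolve a hardware priority per queue before calling amdgpu_ring_init(), which takes that priority as its new final argument. A sketch of the selection as it appears in the gfx v9/v10 paths (gfx v8, as in the hunk below, falls back to AMDGPU_RING_PRIO_DEFAULT instead of PIPE_PRIO_NORMAL):

static int compute_ring_init_sketch(struct amdgpu_device *adev,
				    struct amdgpu_ring *ring,
				    unsigned int irq_type)
{
	unsigned int hw_prio;

	/* Queues flagged high-priority get the HIGH pipe priority;
	 * the rest stay at normal/default. */
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
		  AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;

	/* Ring priority is now a parameter of amdgpu_ring_init();
	 * gfx and jpeg rings pass AMDGPU_RING_PRIO_DEFAULT. */
	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
				irq_type, hw_prio);
}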
+ AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT; /* type-2 packets are deprecated on MEC, use type-3 instead */ r = amdgpu_ring_init(adev, ring, 1024, - &adev->gfx.eop_irq, irq_type); + &adev->gfx.eop_irq, irq_type, hw_prio); if (r) return r; @@ -2017,7 +2022,8 @@ static int gfx_v8_0_sw_init(void *handle) } r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, - AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP); + AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; } @@ -4120,7 +4126,6 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev) static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) { - int i; u32 tmp = RREG32(mmCP_ME_CNTL); if (enable) { @@ -4131,8 +4136,6 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1); - for (i = 0; i < adev->gfx.num_gfx_rings; i++) - adev->gfx.gfx_ring[i].sched.ready = false; } WREG32(mmCP_ME_CNTL, tmp); udelay(50); @@ -4320,14 +4323,10 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) { - int i; - if (enable) { WREG32(mmCP_MEC_CNTL, 0); } else { WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); - for (i = 0; i < adev->gfx.num_compute_rings; i++) - adev->gfx.compute_ring[i].sched.ready = false; adev->gfx.kiq.ring.sched.ready = false; } udelay(50); @@ -4437,11 +4436,8 @@ static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *m if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) { mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; - ring->has_high_prio = true; mqd->cp_hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; - } else { - ring->has_high_prio = false; } } } @@ -5619,12 +5615,18 @@ static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) { u32 data; - data = RREG32(mmRLC_SPM_VMID); + if (amdgpu_sriov_is_pp_one_vf(adev)) + data = RREG32_NO_KIQ(mmRLC_SPM_VMID); + else + data = RREG32(mmRLC_SPM_VMID); data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK; data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT; - WREG32(mmRLC_SPM_VMID, data); + if (amdgpu_sriov_is_pp_one_vf(adev)) + WREG32_NO_KIQ(mmRLC_SPM_VMID, data); + else + WREG32(mmRLC_SPM_VMID, data); } static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { @@ -6387,10 +6389,10 @@ static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne ring->ring[offset] = (ring->ring_size >> 2) - offset + cur; } -static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) +static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, + uint32_t reg_val_offs) { struct amdgpu_device *adev = ring->adev; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); amdgpu_ring_write(ring, 0 | /* src: register*/ @@ -6399,9 +6401,9 @@ static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) amdgpu_ring_write(ring, reg); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + - kiq->reg_val_offs * 4)); + reg_val_offs * 4)); amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + - kiq->reg_val_offs * 4)); + reg_val_offs * 4)); } static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring 
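[Annotation] With priorities resolved at ring-init time, the mqd_set_priority hunks above drop the ring->has_high_prio bookkeeping and only program the hardware queue descriptor fields. A sketch of the simplified shape; the MQD type varies per ASIC (vi_mqd, v9_mqd, v10_compute_mqd), v9 is used here as a stand-in:

static void mqd_set_priority_sketch(struct amdgpu_ring *ring,
				    struct v9_mqd *mqd)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
		return;

	if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
		mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
		mqd->cp_hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
		/* no has_high_prio mirror: the scheduler rework derives
		 * this from the priority set at ring init */
	}
}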
*ring, uint32_t reg, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 0c390485bc10..496205a8ee0c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -50,18 +50,14 @@ #include "gfx_v9_4.h" +#include "asic_reg/pwr/pwr_10_0_offset.h" +#include "asic_reg/pwr/pwr_10_0_sh_mask.h" + #define GFX9_NUM_GFX_RINGS 1 #define GFX9_MEC_HPD_SIZE 4096 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L #define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L -#define mmPWR_MISC_CNTL_STATUS 0x0183 -#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0 -#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0 -#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1 -#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L -#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L - #define mmGCEA_PROBE_MAP 0x070c #define mmGCEA_PROBE_MAP_BASE_IDX 0 @@ -1082,7 +1078,8 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) gpu_addr = adev->wb.gpu_addr + (index * 4); adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 16, &ib); + r = amdgpu_ib_get(adev, NULL, 16, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) goto err1; @@ -2197,6 +2194,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, int r; unsigned irq_type; struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; + unsigned int hw_prio; ring = &adev->gfx.compute_ring[ring_id]; @@ -2215,10 +2213,11 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) + ring->pipe; - + hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ? + AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL; /* type-2 packets are deprecated on MEC, use type-3 instead */ r = amdgpu_ring_init(adev, ring, 1024, - &adev->gfx.eop_irq, irq_type); + &adev->gfx.eop_irq, irq_type, hw_prio); if (r) return r; @@ -2312,7 +2311,9 @@ static int gfx_v9_0_sw_init(void *handle) ring->use_doorbell = true; ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; r = amdgpu_ring_init(adev, ring, 1024, - &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP); + &adev->gfx.eop_irq, + AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; } @@ -3102,16 +3103,11 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) { - int i; u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 
0 : 1); - if (!enable) { - for (i = 0; i < adev->gfx.num_gfx_rings; i++) - adev->gfx.gfx_ring[i].sched.ready = false; - } WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp); udelay(50); } @@ -3307,15 +3303,11 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev) static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) { - int i; - if (enable) { WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0); } else { WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); - for (i = 0; i < adev->gfx.num_compute_rings; i++) - adev->gfx.compute_ring[i].sched.ready = false; adev->gfx.kiq.ring.sched.ready = false; } udelay(50); @@ -3385,11 +3377,8 @@ static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *m if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) { mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; - ring->has_high_prio = true; mqd->cp_hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; - } else { - ring->has_high_prio = false; } } } @@ -4056,13 +4045,19 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev) { signed long r, cnt = 0; unsigned long flags; - uint32_t seq; + uint32_t seq, reg_val_offs = 0; + uint64_t value = 0; struct amdgpu_kiq *kiq = &adev->gfx.kiq; struct amdgpu_ring *ring = &kiq->ring; BUG_ON(!ring->funcs->emit_rreg); spin_lock_irqsave(&kiq->ring_lock, flags); + if (amdgpu_device_wb_get(adev, ®_val_offs)) { + spin_unlock_irqrestore(&kiq->ring_lock, flags); + pr_err("critical bug! too many kiq readers\n"); + goto failed_kiq_read; + } amdgpu_ring_alloc(ring, 32); amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); amdgpu_ring_write(ring, 9 | /* src: register*/ @@ -4072,9 +4067,9 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev) amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + - kiq->reg_val_offs * 4)); + reg_val_offs * 4)); amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + - kiq->reg_val_offs * 4)); + reg_val_offs * 4)); amdgpu_fence_emit_polling(ring, &seq); amdgpu_ring_commit(ring); spin_unlock_irqrestore(&kiq->ring_lock, flags); @@ -4101,8 +4096,11 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev) if (cnt > MAX_KIQ_REG_TRY) goto failed_kiq_read; - return (uint64_t)adev->wb.wb[kiq->reg_val_offs] | - (uint64_t)adev->wb.wb[kiq->reg_val_offs + 1 ] << 32ULL; + mb(); + value = (uint64_t)adev->wb.wb[reg_val_offs] | + (uint64_t)adev->wb.wb[reg_val_offs + 1 ] << 32ULL; + amdgpu_device_wb_free(adev, reg_val_offs); + return value; failed_kiq_read: pr_err("failed to read gpu clock\n"); @@ -4489,7 +4487,8 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* allocate an indirect buffer to put the commands in */ memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, total_size, &ib); + r = amdgpu_ib_get(adev, NULL, total_size, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); return r; @@ -4960,14 +4959,21 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev, static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) { - u32 data; + u32 reg, data; - data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL); + reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL); + if (amdgpu_sriov_is_pp_one_vf(adev)) + data = RREG32_NO_KIQ(reg); + else + data = RREG32(reg); data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; 
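[Annotation] gfx_v9_0_kiq_read_clock() above no longer funnels results through the single shared kiq->reg_val_offs slot; each read allocates a private write-back slot under the ring lock, so concurrent KIQ readers cannot clobber one another. A sketch of the slot lifecycle with the COPY_DATA emission elided (see the hunk for the exact packet):

static uint64_t kiq_read_sketch(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;
	uint32_t reg_val_offs = 0, seq;
	unsigned long flags;
	uint64_t value;

	spin_lock_irqsave(&kiq->ring_lock, flags);
	/* one private write-back dword pair per in-flight read */
	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
		spin_unlock_irqrestore(&kiq->ring_lock, flags);
		return 0; /* "too many kiq readers" in the real code */
	}
	amdgpu_ring_alloc(ring, 32);
	/* ... emit COPY_DATA with destination
	 *     adev->wb.gpu_addr + reg_val_offs * 4 ... */
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	/* ... poll seq until the fence signals, as the hunk does ... */

	mb(); /* order the CPU read against the GPU write-back */
	value = (uint64_t)adev->wb.wb[reg_val_offs] |
		(uint64_t)adev->wb.wb[reg_val_offs + 1] << 32ULL;
	amdgpu_device_wb_free(adev, reg_val_offs);
	return value;
}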
data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; - WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); + if (amdgpu_sriov_is_pp_one_vf(adev)) + WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data); + else + WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); } static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev, @@ -5493,10 +5499,10 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne ring->ring[offset] = (ring->ring_size>>2) - offset + cur; } -static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) +static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, + uint32_t reg_val_offs) { struct amdgpu_device *adev = ring->adev; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); amdgpu_ring_write(ring, 0 | /* src: register*/ @@ -5505,9 +5511,9 @@ static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) amdgpu_ring_write(ring, reg); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + - kiq->reg_val_offs * 4)); + reg_val_offs * 4)); amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + - kiq->reg_val_offs * 4)); + reg_val_offs * 4)); } static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, @@ -6408,15 +6414,15 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT); if (sec_count) { - DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, - vml2_mems[i], sec_count); + dev_info(adev->dev, "Instance[%d]: SubBlock %s, " + "SEC %d\n", i, vml2_mems[i], sec_count); err_data->ce_count += sec_count; } ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT); if (ded_count) { - DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, - vml2_mems[i], ded_count); + dev_info(adev->dev, "Instance[%d]: SubBlock %s, " + "DED %d\n", i, vml2_mems[i], ded_count); err_data->ue_count += ded_count; } } @@ -6428,16 +6434,16 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT, SEC_COUNT); if (sec_count) { - DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, - vml2_walker_mems[i], sec_count); + dev_info(adev->dev, "Instance[%d]: SubBlock %s, " + "SEC %d\n", i, vml2_walker_mems[i], sec_count); err_data->ce_count += sec_count; } ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT, DED_COUNT); if (ded_count) { - DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, - vml2_walker_mems[i], ded_count); + dev_info(adev->dev, "Instance[%d]: SubBlock %s, " + "DED %d\n", i, vml2_walker_mems[i], ded_count); err_data->ue_count += ded_count; } } @@ -6448,8 +6454,9 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, sec_count = (data & 0x00006000L) >> 0xd; if (sec_count) { - DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, - atc_l2_cache_2m_mems[i], sec_count); + dev_info(adev->dev, "Instance[%d]: SubBlock %s, " + "SEC %d\n", i, atc_l2_cache_2m_mems[i], + sec_count); err_data->ce_count += sec_count; } } @@ -6460,15 +6467,17 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, sec_count = (data & 0x00006000L) >> 0xd; if (sec_count) { - DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, - atc_l2_cache_4k_mems[i], sec_count); + dev_info(adev->dev, "Instance[%d]: SubBlock %s, " + "SEC %d\n", i, atc_l2_cache_4k_mems[i], + sec_count); err_data->ce_count += sec_count; } ded_count = (data & 0x00018000L) >> 0xf; if 
(ded_count) { - DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, - atc_l2_cache_4k_mems[i], ded_count); + dev_info(adev->dev, "Instance[%d]: SubBlock %s, " + "DED %d\n", i, atc_l2_cache_4k_mems[i], + ded_count); err_data->ue_count += ded_count; } } @@ -6481,7 +6490,8 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, return 0; } -static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg, +static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev, + const struct soc15_reg_entry *reg, uint32_t se_id, uint32_t inst_id, uint32_t value, uint32_t *sec_count, uint32_t *ded_count) { @@ -6498,7 +6508,8 @@ static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg, gfx_v9_0_ras_fields[i].sec_count_mask) >> gfx_v9_0_ras_fields[i].sec_count_shift; if (sec_cnt) { - DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n", + dev_info(adev->dev, "GFX SubBlock %s, " + "Instance[%d][%d], SEC %d\n", gfx_v9_0_ras_fields[i].name, se_id, inst_id, sec_cnt); @@ -6509,7 +6520,8 @@ static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg, gfx_v9_0_ras_fields[i].ded_count_mask) >> gfx_v9_0_ras_fields[i].ded_count_shift; if (ded_cnt) { - DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n", + dev_info(adev->dev, "GFX SubBlock %s, " + "Instance[%d][%d], DED %d\n", gfx_v9_0_ras_fields[i].name, se_id, inst_id, ded_cnt); @@ -6598,9 +6610,10 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i])); if (reg_value) - gfx_v9_0_ras_error_count(&gfx_v9_0_edc_counter_regs[i], - j, k, reg_value, - &sec_count, &ded_count); + gfx_v9_0_ras_error_count(adev, + &gfx_v9_0_edc_counter_regs[i], + j, k, reg_value, + &sec_count, &ded_count); } } } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c index dce945ef21a5..46351db36922 100644..100755 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c @@ -732,7 +732,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev, sec_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL, SEC_COUNT); if (sec_count) { - DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + dev_info(adev->dev, + "Instance[%d]: SubBlock %s, SEC %d\n", i, vml2_walker_mems[i], sec_count); err_data->ce_count += sec_count; } @@ -740,7 +741,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev, ded_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL, DED_COUNT); if (ded_count) { - DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + dev_info(adev->dev, + "Instance[%d]: SubBlock %s, DED %d\n", i, vml2_walker_mems[i], ded_count); err_data->ue_count += ded_count; } @@ -752,14 +754,16 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev, sec_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, SEC_COUNT); if (sec_count) { - DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + dev_info(adev->dev, + "Instance[%d]: SubBlock %s, SEC %d\n", i, utcl2_router_mems[i], sec_count); err_data->ce_count += sec_count; } ded_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, DED_COUNT); if (ded_count) { - DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + dev_info(adev->dev, + "Instance[%d]: SubBlock %s, DED %d\n", i, utcl2_router_mems[i], ded_count); err_data->ue_count += ded_count; } @@ -772,7 +776,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev, sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL, SEC_COUNT); if (sec_count) { - 
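[Annotation] The RAS counting helpers in gfx_v9_0/gfx_v9_4 now take the amdgpu_device so SEC/DED messages go through dev_info() and are attributed to a specific GPU (relevant on multi-GPU systems) rather than the device-less DRM_INFO(). A sketch of the per-field walk; the MASK/SHIFT names below stand in for the per-entry values carried by the ras_fields tables and are not real macros:

static void count_ras_errors_sketch(struct amdgpu_device *adev,
				    uint32_t value, uint32_t *sec_count,
				    uint32_t *ded_count)
{
	uint32_t sec_cnt, ded_cnt;

	sec_cnt = (value & SEC_COUNT_MASK) >> SEC_COUNT_SHIFT;
	if (sec_cnt) {
		/* dev_info() prefixes the device, unlike DRM_INFO() */
		dev_info(adev->dev, "GFX SubBlock SEC %u\n", sec_cnt);
		*sec_count += sec_cnt;
	}

	ded_cnt = (value & DED_COUNT_MASK) >> DED_COUNT_SHIFT;
	if (ded_cnt) {
		dev_info(adev->dev, "GFX SubBlock DED %u\n", ded_cnt);
		*ded_count += ded_cnt;
	}
}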
DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + dev_info(adev->dev, + "Instance[%d]: SubBlock %s, SEC %d\n", i, atc_l2_cache_2m_mems[i], sec_count); err_data->ce_count += sec_count; } @@ -780,7 +785,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev, ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL, DED_COUNT); if (ded_count) { - DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + dev_info(adev->dev, + "Instance[%d]: SubBlock %s, DED %d\n", i, atc_l2_cache_2m_mems[i], ded_count); err_data->ue_count += ded_count; } @@ -793,7 +799,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev, sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL, SEC_COUNT); if (sec_count) { - DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + dev_info(adev->dev, + "Instance[%d]: SubBlock %s, SEC %d\n", i, atc_l2_cache_4k_mems[i], sec_count); err_data->ce_count += sec_count; } @@ -801,7 +808,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev, ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL, DED_COUNT); if (ded_count) { - DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + dev_info(adev->dev, + "Instance[%d]: SubBlock %s, DED %d\n", i, atc_l2_cache_4k_mems[i], ded_count); err_data->ue_count += ded_count; } @@ -816,7 +824,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev, return 0; } -static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg, +static int gfx_v9_4_ras_error_count(struct amdgpu_device *adev, + const struct soc15_reg_entry *reg, uint32_t se_id, uint32_t inst_id, uint32_t value, uint32_t *sec_count, uint32_t *ded_count) @@ -833,7 +842,8 @@ static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg, sec_cnt = (value & gfx_v9_4_ras_fields[i].sec_count_mask) >> gfx_v9_4_ras_fields[i].sec_count_shift; if (sec_cnt) { - DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n", + dev_info(adev->dev, + "GFX SubBlock %s, Instance[%d][%d], SEC %d\n", gfx_v9_4_ras_fields[i].name, se_id, inst_id, sec_cnt); *sec_count += sec_cnt; @@ -842,7 +852,8 @@ static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg, ded_cnt = (value & gfx_v9_4_ras_fields[i].ded_count_mask) >> gfx_v9_4_ras_fields[i].ded_count_shift; if (ded_cnt) { - DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n", + dev_info(adev->dev, + "GFX SubBlock %s, Instance[%d][%d], DED %d\n", gfx_v9_4_ras_fields[i].name, se_id, inst_id, ded_cnt); *ded_count += ded_cnt; @@ -876,7 +887,7 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, reg_value = RREG32(SOC15_REG_ENTRY_OFFSET( gfx_v9_4_edc_counter_regs[i])); if (reg_value) - gfx_v9_4_ras_error_count( + gfx_v9_4_ras_error_count(adev, &gfx_v9_4_edc_counter_regs[i], j, k, reg_value, &sec_count, &ded_count); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 9775eca6fe43..94a6096a81f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -170,6 +170,9 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", status); + dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n", + REG_GET_FIELD(status, + GCVM_L2_PROTECTION_FAULT_STATUS, CID)); dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", REG_GET_FIELD(status, GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS)); @@ -369,7 +372,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, * translation. 
Avoid this by doing the invalidation from the SDMA * itself. */ - r = amdgpu_job_alloc_with_ib(adev, 16 * 4, &job); + r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_VM, &job); if (r) goto error_alloc; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index b205039350b6..c1a530dbe162 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -858,7 +858,7 @@ static int gmc_v6_0_sw_init(void *handle) r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44)); if (r) { - dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n"); + dev_warn(adev->dev, "No suitable DMA available.\n"); return r; } adev->need_swiotlb = drm_need_swiotlb(44); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 9da9596a3638..e8529e244a2b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1019,7 +1019,7 @@ static int gmc_v7_0_sw_init(void *handle) r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40)); if (r) { - pr_warn("amdgpu: No suitable DMA available\n"); + pr_warn("No suitable DMA available\n"); return r; } adev->need_swiotlb = drm_need_swiotlb(40); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 27d83204fa2b..0aa5b82808d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1144,7 +1144,7 @@ static int gmc_v8_0_sw_init(void *handle) r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40)); if (r) { - pr_warn("amdgpu: No suitable DMA available\n"); + pr_warn("No suitable DMA available\n"); return r; } adev->need_swiotlb = drm_need_swiotlb(40); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 8606f877478f..fecdbc471983 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -362,6 +362,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", status); + dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n", + REG_GET_FIELD(status, + VM_L2_PROTECTION_FAULT_STATUS, CID)); dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS)); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c index 0debfd9f428c..b10c95cad9a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c @@ -480,7 +480,8 @@ int jpeg_v1_0_sw_init(void *handle) ring = &adev->jpeg.inst->ring_dec; sprintf(ring->name, "jpeg_dec"); - r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, + 0, AMDGPU_RING_PRIO_DEFAULT); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index 6173951db7b4..e67d09cb1b03 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -106,7 +106,8 @@ static int jpeg_v2_0_sw_init(void *handle) ring->use_doorbell = true; ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; sprintf(ring->name, "jpeg_dec"); - r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, + 0, AMDGPU_RING_PRIO_DEFAULT); if (r) return r; @@ -169,14 +170,11 @@ static int jpeg_v2_0_hw_init(void *handle) static int 
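[Annotation] Like amdgpu_ib_get(), amdgpu_job_alloc_with_ib() now names the pool it draws from; the gmc_v10_0 TLB-flush hunk above uses the VM pool because the invalidation runs as part of VM updates. A minimal sketch of the call shape, with the SDMA payload and submission elided:

static int flush_tlb_job_sketch(struct amdgpu_device *adev)
{
	struct amdgpu_job *job;
	int r;

	/* 16 dwords of IB space from the VM pool */
	r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_VM,
				     &job);
	if (r)
		return r;

	/* ... emit the invalidation into job->ibs[0] and submit;
	 * a job that is never submitted must be freed explicitly ... */

	return 0;
}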
jpeg_v2_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS)) jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE); - ring->sched.ready = false; - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index c04c2078a7c1..37df3f2e587a 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -118,7 +118,8 @@ static int jpeg_v2_5_sw_init(void *handle) ring->use_doorbell = true; ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i; sprintf(ring->name, "jpeg_dec_%d", i); - r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, + 0, AMDGPU_RING_PRIO_DEFAULT); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 396c2a624de0..bd2d2b8d1f42 100644..100755 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -690,7 +690,8 @@ static const struct soc15_reg_entry mmhub_v1_0_edc_cnt_regs[] = { { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), 0, 0, 0}, }; -static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg, +static int mmhub_v1_0_get_ras_error_count(struct amdgpu_device *adev, + const struct soc15_reg_entry *reg, uint32_t value, uint32_t *sec_count, uint32_t *ded_count) { uint32_t i; @@ -704,7 +705,8 @@ static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg, mmhub_v1_0_ras_fields[i].sec_count_mask) >> mmhub_v1_0_ras_fields[i].sec_count_shift; if (sec_cnt) { - DRM_INFO("MMHUB SubBlock %s, SEC %d\n", + dev_info(adev->dev, + "MMHUB SubBlock %s, SEC %d\n", mmhub_v1_0_ras_fields[i].name, sec_cnt); *sec_count += sec_cnt; @@ -714,7 +716,8 @@ static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg, mmhub_v1_0_ras_fields[i].ded_count_mask) >> mmhub_v1_0_ras_fields[i].ded_count_shift; if (ded_cnt) { - DRM_INFO("MMHUB SubBlock %s, DED %d\n", + dev_info(adev->dev, + "MMHUB SubBlock %s, DED %d\n", mmhub_v1_0_ras_fields[i].name, ded_cnt); *ded_count += ded_cnt; @@ -739,7 +742,8 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev, reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i])); if (reg_value) - mmhub_v1_0_get_ras_error_count(&mmhub_v1_0_edc_cnt_regs[i], + mmhub_v1_0_get_ras_error_count(adev, + &mmhub_v1_0_edc_cnt_regs[i], reg_value, &sec_count, &ded_count); } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h index 37dbe0f2142f..52a697545801 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h @@ -46,7 +46,8 @@ enum idh_event { IDH_SUCCESS, IDH_FAIL, IDH_QUERY_ALIVE, - IDH_EVENT_MAX + + IDH_TEXT_MESSAGE = 255, }; extern const struct amdgpu_virt_ops xgpu_ai_virt_ops; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index 237fa5e16b7c..ce2bf1fb79ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -30,7 +30,6 @@ #include "navi10_ih.h" #include "soc15_common.h" #include "mxgpu_nv.h" -#include "mxgpu_ai.h" static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev) { @@ -53,8 +52,7 @@ static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool 
val) */ static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev) { - return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, - mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0)); + return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0); } @@ -63,8 +61,7 @@ static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev, { u32 reg; - reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, - mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0)); + reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0); if (reg != event) return -ENOENT; @@ -110,7 +107,6 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event) timeout -= 10; } while (timeout > 1); - pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r); return -ETIME; } @@ -118,7 +114,6 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event) static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev, enum idh_request req, u32 data1, u32 data2, u32 data3) { - u32 reg; int r; uint8_t trn; @@ -137,19 +132,10 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev, } } while (trn); - reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, - mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0)); - reg = REG_SET_FIELD(reg, BIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0, - MSGBUF_DATA, req); - WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0), - reg); - WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW1), - data1); - WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW2), - data2); - WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW3), - data3); - + WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req); + WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1); + WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2); + WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3); xgpu_nv_mailbox_set_valid(adev, true); /* start to poll ack */ @@ -164,23 +150,48 @@ static int xgpu_nv_send_access_requests(struct amdgpu_device *adev, enum idh_request req) { int r; + enum idh_event event = -1; xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0); - /* start to check msg if request is idh_req_gpu_init_access */ - if (req == IDH_REQ_GPU_INIT_ACCESS || - req == IDH_REQ_GPU_FINI_ACCESS || - req == IDH_REQ_GPU_RESET_ACCESS) { - r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU); + switch (req) { + case IDH_REQ_GPU_INIT_ACCESS: + case IDH_REQ_GPU_FINI_ACCESS: + case IDH_REQ_GPU_RESET_ACCESS: + event = IDH_READY_TO_ACCESS_GPU; + break; + case IDH_REQ_GPU_INIT_DATA: + event = IDH_REQ_GPU_INIT_DATA_READY; + break; + default: + break; + } + + if (event != -1) { + r = xgpu_nv_poll_msg(adev, event); if (r) { - pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n"); - return r; + if (req != IDH_REQ_GPU_INIT_DATA) { + pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r); + return r; + } + else /* host doesn't support REQ_GPU_INIT_DATA handshake */ + adev->virt.req_init_data_ver = 0; + } else { + if (req == IDH_REQ_GPU_INIT_DATA) + { + adev->virt.req_init_data_ver = + RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1); + + /* assume V1 in case host doesn't set version number */ + if (adev->virt.req_init_data_ver < 1) + adev->virt.req_init_data_ver = 1; + } } + /* Retrieve checksum from mailbox2 */ if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) { adev->virt.fw_reserve.checksum_key = - RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, - mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW2)); + RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2); } } @@ -213,6 +224,11 @@ static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev, return r; } 
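[Annotation] xgpu_nv_send_access_requests() above now maps each request to the acknowledgment event it must poll for, and the new IDH_REQ_GPU_INIT_DATA request degrades gracefully: a timeout is treated as an old host (req_init_data_ver = 0) rather than an error. A condensed sketch of the mapping; the helper name is illustrative, the driver keeps it open-coded in a switch:

static enum idh_event ack_for_request_sketch(enum idh_request req)
{
	switch (req) {
	case IDH_REQ_GPU_INIT_ACCESS:
	case IDH_REQ_GPU_FINI_ACCESS:
	case IDH_REQ_GPU_RESET_ACCESS:
		return IDH_READY_TO_ACCESS_GPU;
	case IDH_REQ_GPU_INIT_DATA:
		return IDH_REQ_GPU_INIT_DATA_READY;
	default:
		return -1; /* no ack expected */
	}
}

On a successful IDH_REQ_GPU_INIT_DATA handshake the version is read back from mmMAILBOX_MSGBUF_RCV_DW1 and clamped to a minimum of 1, since older hosts may ack without setting a version number.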
+static int xgpu_nv_request_init_data(struct amdgpu_device *adev) +{ + return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA); +} + static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -226,11 +242,14 @@ static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL)); + u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL); + + if (state == AMDGPU_IRQ_STATE_ENABLE) + tmp |= 2; + else + tmp &= ~2; - tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, ACK_INT_EN, - (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0); - WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp); + WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp); return 0; } @@ -282,11 +301,14 @@ static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL)); + u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL); + + if (state == AMDGPU_IRQ_STATE_ENABLE) + tmp |= 1; + else + tmp &= ~1; - tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, VALID_INT_EN, - (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0); - WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp); + WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp); return 0; } @@ -378,6 +400,7 @@ void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev) const struct amdgpu_virt_ops xgpu_nv_virt_ops = { .req_full_gpu = xgpu_nv_request_full_gpu_access, .rel_full_gpu = xgpu_nv_release_full_gpu_access, + .req_init_data = xgpu_nv_request_init_data, .reset_gpu = xgpu_nv_request_reset, .wait_reset = NULL, .trans_msg = xgpu_nv_mailbox_trans_msg, diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h index 99b15f6865cb..45bcf438e607 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h @@ -25,9 +25,33 @@ #define __MXGPU_NV_H__ #define NV_MAILBOX_POLL_ACK_TIMEDOUT 500 -#define NV_MAILBOX_POLL_MSG_TIMEDOUT 12000 +#define NV_MAILBOX_POLL_MSG_TIMEDOUT 6000 #define NV_MAILBOX_POLL_FLR_TIMEDOUT 500 +enum idh_request { + IDH_REQ_GPU_INIT_ACCESS = 1, + IDH_REL_GPU_INIT_ACCESS, + IDH_REQ_GPU_FINI_ACCESS, + IDH_REL_GPU_FINI_ACCESS, + IDH_REQ_GPU_RESET_ACCESS, + IDH_REQ_GPU_INIT_DATA, + + IDH_LOG_VF_ERROR = 200, +}; + +enum idh_event { + IDH_CLR_MSG_BUF = 0, + IDH_READY_TO_ACCESS_GPU, + IDH_FLR_NOTIFICATION, + IDH_FLR_NOTIFICATION_CMPL, + IDH_SUCCESS, + IDH_FAIL, + IDH_QUERY_ALIVE, + IDH_REQ_GPU_INIT_DATA_READY, + + IDH_TEXT_MESSAGE = 255, +}; + extern const struct amdgpu_virt_ops xgpu_nv_virt_ops; void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev); @@ -35,7 +59,21 @@ int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev); int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev); void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev); -#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4) -#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4 + 1) +#define mmMAILBOX_CONTROL 0xE5E + +#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (mmMAILBOX_CONTROL * 4) +#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE + 1) + +#define mmMAILBOX_MSGBUF_TRN_DW0 0xE56 +#define mmMAILBOX_MSGBUF_TRN_DW1 0xE57 +#define mmMAILBOX_MSGBUF_TRN_DW2 0xE58 +#define 
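[Annotation] The mailbox registers are now addressed through fixed dword offsets (the mmMAILBOX_* defines in mxgpu_nv.h below) instead of SOC15_REG_OFFSET() lookups into the NBIO IP, and the interrupt enables become direct bit operations rather than REG_SET_FIELD() on named fields. A sketch combining the two set_mailbox_*_irq hunks; bit assignments follow those hunks (bit 0 = valid/receive, bit 1 = ack):

static void mailbox_irq_enable_sketch(struct amdgpu_device *adev,
				      bool rcv, bool ack)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (rcv)
		tmp |= 1;   /* VALID_INT_EN */
	else
		tmp &= ~1;
	if (ack)
		tmp |= 2;   /* ACK_INT_EN */
	else
		tmp &= ~2;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
}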
mmMAILBOX_MSGBUF_TRN_DW3 0xE59 + +#define mmMAILBOX_MSGBUF_RCV_DW0 0xE5A +#define mmMAILBOX_MSGBUF_RCV_DW1 0xE5B +#define mmMAILBOX_MSGBUF_RCV_DW2 0xE5C +#define mmMAILBOX_MSGBUF_RCV_DW3 0xE5D + +#define mmMAILBOX_INT_CNTL 0xE5F #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h index f13dc6cc158f..713ee66a4d3e 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h @@ -43,7 +43,8 @@ enum idh_event { IDH_READY_TO_ACCESS_GPU, IDH_FLR_NOTIFICATION, IDH_FLR_NOTIFICATION_CMPL, - IDH_EVENT_MAX + + IDH_TEXT_MESSAGE = 255 }; extern const struct amdgpu_virt_ops xgpu_vi_virt_ops; diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index e08245a446fc..f97857ed3c7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -49,8 +49,48 @@ static void navi10_ih_enable_interrupts(struct amdgpu_device *adev) ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1); ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); + if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) { + if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) { + DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); + return; + } + } else { + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); + } + adev->irq.ih.enabled = true; + + if (adev->irq.ih1.ring_size) { + ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1, + RB_ENABLE, 1); + if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) { + if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, + ih_rb_cntl)) { + DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n"); + return; + } + } else { + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl); + } + adev->irq.ih1.enabled = true; + } + + if (adev->irq.ih2.ring_size) { + ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2, + RB_ENABLE, 1); + if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) { + if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, + ih_rb_cntl)) { + DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n"); + return; + } + } else { + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl); + } + adev->irq.ih2.enabled = true; + } } /** @@ -66,12 +106,61 @@ static void navi10_ih_disable_interrupts(struct amdgpu_device *adev) ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0); ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); + if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) { + if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) { + DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); + return; + } + } else { + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); + } + /* set rptr, wptr to 0 */ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0); WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0); adev->irq.ih.enabled = false; adev->irq.ih.rptr = 0; + + if (adev->irq.ih1.ring_size) { + ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1, + RB_ENABLE, 0); + if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) { + if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, + ih_rb_cntl)) { + DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n"); + return; + } + } else { + WREG32_SOC15(OSSSYS, 
0, mmIH_RB_CNTL_RING1, ih_rb_cntl); + } + /* set rptr, wptr to 0 */ + WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0); + adev->irq.ih1.enabled = false; + adev->irq.ih1.rptr = 0; + } + + if (adev->irq.ih2.ring_size) { + ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2, + RB_ENABLE, 0); + if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) { + if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, + ih_rb_cntl)) { + DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n"); + return; + } + } else { + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl); + } + /* set rptr, wptr to 0 */ + WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0); + adev->irq.ih2.enabled = false; + adev->irq.ih2.rptr = 0; + } + } static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl) @@ -97,6 +186,43 @@ static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl return ih_rb_cntl; } +static uint32_t navi10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih) +{ + u32 ih_doorbell_rtpr = 0; + + if (ih->use_doorbell) { + ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, + IH_DOORBELL_RPTR, OFFSET, + ih->doorbell_index); + ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, + IH_DOORBELL_RPTR, + ENABLE, 1); + } else { + ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, + IH_DOORBELL_RPTR, + ENABLE, 0); + } + return ih_doorbell_rtpr; +} + +static void navi10_ih_reroute_ih(struct amdgpu_device *adev) +{ + uint32_t tmp; + + /* Reroute to IH ring 1 for VMC */ + WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12); + tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA); + tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1); + tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1); + WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp); + + /* Reroute IH ring 1 for UMC */ + WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B); + tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA); + tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1); + WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp); +} + /** * navi10_ih_irq_init - init and enable the interrupt ring * @@ -111,7 +237,7 @@ static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl static int navi10_ih_irq_init(struct amdgpu_device *adev) { struct amdgpu_ih_ring *ih = &adev->irq.ih; - u32 ih_rb_cntl, ih_doorbell_rtpr, ih_chicken; + u32 ih_rb_cntl, ih_chicken; u32 tmp; /* disable irqs */ @@ -127,6 +253,15 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl); ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled); + if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) { + if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) { + DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); + return -ETIMEDOUT; + } + } else { + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); + } + navi10_ih_reroute_ih(adev); if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) { if (ih->use_bus_addr) { @@ -137,8 +272,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) } } - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); - /* set the writeback address whether it's enabled or not */ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr)); @@ -149,22 +282,68 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) 
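[Annotation] On SR-IOV parts older than Navi10 the VF cannot write the IH_RB_CNTL* registers directly, so the navi10_ih rework routes those writes through the PSP; the same guarded write repeats for the main ring and the two new rings. A sketch of the pattern factored out of the enable/disable/init hunks:

static int ih_write_rb_cntl_sketch(struct amdgpu_device *adev,
				   u32 ih_rb_cntl)
{
	if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
		/* VF path: the PSP performs the privileged write */
		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL,
				    ih_rb_cntl)) {
			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
			return -ETIMEDOUT;
		}
	} else {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	}
	return 0;
}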
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0); WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0); - ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR); - if (ih->use_doorbell) { - ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, - IH_DOORBELL_RPTR, OFFSET, - ih->doorbell_index); - ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, - IH_DOORBELL_RPTR, ENABLE, 1); - } else { - ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, - IH_DOORBELL_RPTR, ENABLE, 0); - } - WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr); + WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, + navi10_ih_doorbell_rptr(ih)); adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell, ih->doorbell_index); + ih = &adev->irq.ih1; + if (ih->ring_size) { + WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1, + (ih->gpu_addr >> 40) & 0xff); + + ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1); + ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + WPTR_OVERFLOW_ENABLE, 0); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + RB_FULL_DRAIN_ENABLE, 1); + if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) { + if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, + ih_rb_cntl)) { + DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n"); + return -ETIMEDOUT; + } + } else { + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl); + } + /* set rptr, wptr to 0 */ + WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0); + + WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1, + navi10_ih_doorbell_rptr(ih)); + } + + ih = &adev->irq.ih2; + if (ih->ring_size) { + WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2, + (ih->gpu_addr >> 40) & 0xff); + + ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2); + ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl); + + if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) { + if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, + ih_rb_cntl)) { + DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n"); + return -ETIMEDOUT; + } + } else { + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl); + } + /* set rptr, wptr to 0 */ + WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0); + + WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2, + navi10_ih_doorbell_rptr(ih)); + } + + tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL); tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL, CLIENT18_IS_STORM_CLIENT, 1); @@ -217,7 +396,15 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev, if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; - reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR); + if (ih == &adev->irq.ih) + reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR); + else if (ih == &adev->irq.ih1) + reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1); + else if (ih == &adev->irq.ih2) + reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2); + else + BUG(); + wptr = RREG32_NO_KIQ(reg); if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; @@ -233,7 +420,15 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev, wptr, ih->rptr, tmp); ih->rptr = tmp; - reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL); + if (ih == &adev->irq.ih) + reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL); + else if (ih == &adev->irq.ih1) + reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1); + else if (ih == &adev->irq.ih2) + 
reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2); + else + BUG(); + tmp = RREG32_NO_KIQ(reg); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32_NO_KIQ(reg, tmp); @@ -333,8 +528,52 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev, if (amdgpu_sriov_vf(adev)) navi10_ih_irq_rearm(adev, ih); - } else + } else if (ih == &adev->irq.ih) { WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr); + } else if (ih == &adev->irq.ih1) { + WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr); + } else if (ih == &adev->irq.ih2) { + WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr); + } +} + +/** + * navi10_ih_self_irq - dispatch work for ring 1 and 2 + * + * @adev: amdgpu_device pointer + * @source: irq source + * @entry: IV with WPTR update + * + * Update the WPTR from the IV and schedule work to handle the entries. + */ +static int navi10_ih_self_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + uint32_t wptr = cpu_to_le32(entry->src_data[0]); + + switch (entry->ring_id) { + case 1: + *adev->irq.ih1.wptr_cpu = wptr; + schedule_work(&adev->irq.ih1_work); + break; + case 2: + *adev->irq.ih2.wptr_cpu = wptr; + schedule_work(&adev->irq.ih2_work); + break; + default: break; + } + return 0; +} + +static const struct amdgpu_irq_src_funcs navi10_ih_self_irq_funcs = { + .process = navi10_ih_self_irq, +}; + +static void navi10_ih_set_self_irq_funcs(struct amdgpu_device *adev) +{ + adev->irq.self_irq.num_types = 0; + adev->irq.self_irq.funcs = &navi10_ih_self_irq_funcs; } static int navi10_ih_early_init(void *handle) @@ -342,6 +581,7 @@ static int navi10_ih_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; navi10_ih_set_interrupt_funcs(adev); + navi10_ih_set_self_irq_funcs(adev); return 0; } @@ -351,6 +591,12 @@ static int navi10_ih_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool use_bus_addr; + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0, + &adev->irq.self_irq); + + if (r) + return r; + /* use gpu virtual address for ih ring * until ih_checken is programmed to allow * use bus address for ih ring by psp bl */ @@ -363,6 +609,20 @@ static int navi10_ih_sw_init(void *handle) adev->irq.ih.use_doorbell = true; adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1; + r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true); + if (r) + return r; + + adev->irq.ih1.use_doorbell = true; + adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1; + + r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true); + if (r) + return r; + + adev->irq.ih2.use_doorbell = true; + adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1; + r = amdgpu_irq_init(adev); return r; @@ -373,6 +633,8 @@ static int navi10_ih_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_irq_fini(adev); + amdgpu_ih_ring_fini(adev, &adev->irq.ih2); + amdgpu_ih_ring_fini(adev, &adev->irq.ih1); amdgpu_ih_ring_fini(adev, &adev->irq.ih); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c index f3a3fe746222..cbcf04578b99 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c @@ -290,23 +290,6 @@ const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg = { .ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK, }; -static void nbio_v2_3_detect_hw_virt(struct amdgpu_device *adev) -{ - uint32_t reg; - - reg = RREG32_SOC15(NBIO, 
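[Annotation] The get_wptr/set_rptr hunks above repeat the same ring-to-register selection inline; a helper of this shape captures the mapping. Illustrative only, the driver keeps the open-coded if/else chains:

static u32 ih_wptr_reg_sketch(struct amdgpu_device *adev,
			      struct amdgpu_ih_ring *ih)
{
	if (ih == &adev->irq.ih)
		return SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
	if (ih == &adev->irq.ih1)
		return SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
	if (ih == &adev->irq.ih2)
		return SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
	BUG(); /* matches the driver's behaviour for an unknown ring */
}

Entries for rings 1 and 2 arrive on the main ring as "self" interrupts: navi10_ih_self_irq() above copies the WPTR from the IV payload into the per-ring write-back slot and defers processing to the ih1/ih2 work items.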
0, mmRCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER); - if (reg & 1) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF; - - if (reg & 0x80000000) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; - - if (!reg) { - if (is_virtual_machine()) /* passthrough mode exclus sriov mod */ - adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; - } -} - static void nbio_v2_3_init_registers(struct amdgpu_device *adev) { uint32_t def, data; @@ -338,6 +321,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = { .get_clockgating_state = nbio_v2_3_get_clockgating_state, .ih_control = nbio_v2_3_ih_control, .init_registers = nbio_v2_3_init_registers, - .detect_hw_virt = nbio_v2_3_detect_hw_virt, .remap_hdp_registers = nbio_v2_3_remap_hdp_registers, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c index 635d9e1fc0a3..7b2fb050407d 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c @@ -241,23 +241,6 @@ const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = { .ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK }; -static void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev) -{ - uint32_t reg; - - reg = RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_IOV_FUNC_IDENTIFIER); - if (reg & 1) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF; - - if (reg & 0x80000000) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; - - if (!reg) { - if (is_virtual_machine()) /* passthrough mode exclus sriov mod */ - adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; - } -} - static void nbio_v6_1_init_registers(struct amdgpu_device *adev) { uint32_t def, data; @@ -294,5 +277,4 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = { .get_clockgating_state = nbio_v6_1_get_clockgating_state, .ih_control = nbio_v6_1_ih_control, .init_registers = nbio_v6_1_init_registers, - .detect_hw_virt = nbio_v6_1_detect_hw_virt, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c index d6cbf26074bc..d34628e113fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c @@ -280,12 +280,6 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = { .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK, }; -static void nbio_v7_0_detect_hw_virt(struct amdgpu_device *adev) -{ - if (is_virtual_machine()) /* passthrough mode exclus sriov mod */ - adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; -} - static void nbio_v7_0_init_registers(struct amdgpu_device *adev) { @@ -310,6 +304,5 @@ const struct amdgpu_nbio_funcs nbio_v7_0_funcs = { .get_clockgating_state = nbio_v7_0_get_clockgating_state, .ih_control = nbio_v7_0_ih_control, .init_registers = nbio_v7_0_init_registers, - .detect_hw_virt = nbio_v7_0_detect_hw_virt, .remap_hdp_registers = nbio_v7_0_remap_hdp_registers, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 149d386590df..e629156173d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -185,7 +185,7 @@ static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev, if (use_doorbell) { ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index); - ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2); + ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 4); } else ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0); @@ -292,23 +292,6 @@ const struct nbio_hdp_flush_reg 
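[Annotation] The per-NBIO detect_hw_virt() callbacks deleted across these files all decoded the same IOV function-identifier register (only the register name varies per NBIO generation); with the callback dropped from amdgpu_nbio_funcs, the decode presumably moves to common virt code outside this diff. The bit layout, as visible in the removed bodies:

static void decode_iov_caps_sketch(struct amdgpu_device *adev, u32 reg)
{
	if (reg & 1)            /* this function is a VF */
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
	if (reg & 0x80000000)   /* SR-IOV is enabled on the PF */
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
	if (!reg && is_virtual_machine())
		/* no SR-IOV at all, yet running in a VM: passthrough */
		adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}

Relatedly, the nbio_v7_4 hunk above grows the BIF IH doorbell range from SIZE 2 to 4, consistent with the two extra IH rings brought up in navi10_ih.c.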
nbio_v7_4_hdp_flush_reg = { .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK, }; -static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev) -{ - uint32_t reg; - - reg = RREG32_SOC15(NBIO, 0, mmRCC_IOV_FUNC_IDENTIFIER); - if (reg & 1) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF; - - if (reg & 0x80000000) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; - - if (!reg) { - if (is_virtual_machine()) /* passthrough mode exclus sriov mod */ - adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; - } -} - static void nbio_v7_4_init_registers(struct amdgpu_device *adev) { @@ -340,14 +323,20 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device obj->err_data.ce_count += err_data.ce_count; if (err_data.ce_count) - DRM_INFO("%ld correctable errors detected in %s block\n", - obj->err_data.ce_count, adev->nbio.ras_if->name); + dev_info(adev->dev, "%ld correctable hardware " + "errors detected in %s block, " + "no user action is needed.\n", + obj->err_data.ce_count, + adev->nbio.ras_if->name); if (err_data.ue_count) - DRM_INFO("%ld uncorrectable errors detected in %s block\n", - obj->err_data.ue_count, adev->nbio.ras_if->name); + dev_info(adev->dev, "%ld uncorrectable hardware " + "errors detected in %s block\n", + obj->err_data.ue_count, + adev->nbio.ras_if->name); - DRM_WARN("RAS controller interrupt triggered by NBIF error\n"); + dev_info(adev->dev, "RAS controller interrupt triggered " + "by NBIF error\n"); /* ras_controller_int is dedicated for nbif ras error, * not the global interrupt for sync flood @@ -561,7 +550,6 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { .get_clockgating_state = nbio_v7_4_get_clockgating_state, .ih_control = nbio_v7_4_ih_control, .init_registers = nbio_v7_4_init_registers, - .detect_hw_virt = nbio_v7_4_detect_hw_virt, .remap_hdp_registers = nbio_v7_4_remap_hdp_registers, .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring, .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring, diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 52318b03c424..995bdec9fa7d 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -453,18 +453,19 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) { int r; - /* Set IP register base before any HW register access */ - r = nv_reg_base_init(adev); - if (r) - return r; - adev->nbio.funcs = &nbio_v2_3_funcs; adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg; - adev->nbio.funcs->detect_hw_virt(adev); - - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev)) { adev->virt.ops = &xgpu_nv_virt_ops; + /* try send GPU_INIT_DATA request to host */ + amdgpu_virt_request_init_data(adev); + } + + /* Set IP register base before any HW register access */ + r = nv_reg_base_init(adev); + if (r) + return r; switch (adev->asic_type) { case CHIP_NAVI10: diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 7539104175e8..90727cfb4447 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -50,7 +50,6 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) const char *chip_name; char fw_name[30]; int err = 0; - const struct psp_firmware_header_v1_0 *hdr; const struct ta_firmware_header_v1_0 *ta_hdr; DRM_DEBUG("\n"); @@ -66,22 +65,10 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) default: BUG(); } - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); - 
err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev); + err = psp_init_asd_microcode(psp, chip_name); if (err) goto out; - err = amdgpu_ucode_validate(adev->psp.asd_fw); - if (err) - goto out; - - hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; - adev->psp.asd_fw_version = le32_to_cpu(hdr->header.ucode_version); - adev->psp.asd_feature_version = le32_to_cpu(hdr->ucode_feature_version); - adev->psp.asd_ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); - adev->psp.asd_start_addr = (uint8_t *)hdr + - le32_to_cpu(hdr->header.ucode_array_offset_bytes); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); if (err) { @@ -126,8 +113,6 @@ out: dev_err(adev->dev, "psp v10.0: Failed to load firmware \"%s\"\n", fw_name); - release_firmware(adev->psp.asd_fw); - adev->psp.asd_fw = NULL; } return err; @@ -230,129 +215,6 @@ static int psp_v10_0_ring_destroy(struct psp_context *psp, return ret; } -static int -psp_v10_0_sram_map(struct amdgpu_device *adev, - unsigned int *sram_offset, unsigned int *sram_addr_reg_offset, - unsigned int *sram_data_reg_offset, - enum AMDGPU_UCODE_ID ucode_id) -{ - int ret = 0; - - switch(ucode_id) { -/* TODO: needs to confirm */ -#if 0 - case AMDGPU_UCODE_ID_SMC: - *sram_offset = 0; - *sram_addr_reg_offset = 0; - *sram_data_reg_offset = 0; - break; -#endif - - case AMDGPU_UCODE_ID_CP_CE: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_PFP: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_ME: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_MEC1: - *sram_offset = 0x10000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_MEC2: - *sram_offset = 0x10000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_RLC_G: - *sram_offset = 0x2000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_SDMA0: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA); - break; - -/* TODO: needs to confirm */ -#if 0 - case AMDGPU_UCODE_ID_SDMA1: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; - - case AMDGPU_UCODE_ID_UVD: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; - - case AMDGPU_UCODE_ID_VCE: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; -#endif - - case AMDGPU_UCODE_ID_MAXIMUM: - default: - ret = -EINVAL; - break; - } - - return ret; -} - -static bool psp_v10_0_compare_sram_data(struct psp_context *psp, - struct amdgpu_firmware_info *ucode, - enum AMDGPU_UCODE_ID ucode_type) -{ - int err = 0; - unsigned int fw_sram_reg_val = 0; - unsigned int fw_sram_addr_reg_offset = 0; - unsigned int 
fw_sram_data_reg_offset = 0; - unsigned int ucode_size; - uint32_t *ucode_mem = NULL; - struct amdgpu_device *adev = psp->adev; - - err = psp_v10_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset, - &fw_sram_data_reg_offset, ucode_type); - if (err) - return false; - - WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val); - - ucode_size = ucode->ucode_size; - ucode_mem = (uint32_t *)ucode->kaddr; - while (!ucode_size) { - fw_sram_reg_val = RREG32(fw_sram_data_reg_offset); - - if (*ucode_mem != fw_sram_reg_val) - return false; - - ucode_mem++; - /* 4 bytes */ - ucode_size -= 4; - } - - return true; -} - - static int psp_v10_0_mode1_reset(struct psp_context *psp) { DRM_INFO("psp mode 1 reset not supported now! \n"); @@ -379,7 +241,6 @@ static const struct psp_funcs psp_v10_0_funcs = { .ring_create = psp_v10_0_ring_create, .ring_stop = psp_v10_0_ring_stop, .ring_destroy = psp_v10_0_ring_destroy, - .compare_sram_data = psp_v10_0_compare_sram_data, .mode1_reset = psp_v10_0_mode1_reset, .ring_get_wptr = psp_v10_0_ring_get_wptr, .ring_set_wptr = psp_v10_0_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 0afd610a1263..d2d2363787ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -75,10 +75,6 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) const char *chip_name; char fw_name[30]; int err = 0; - const struct psp_firmware_header_v1_0 *sos_hdr; - const struct psp_firmware_header_v1_1 *sos_hdr_v1_1; - const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; - const struct psp_firmware_header_v1_0 *asd_hdr; const struct ta_firmware_header_v1_0 *ta_hdr; DRM_DEBUG("\n"); @@ -103,66 +99,13 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) BUG(); } - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name); - err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev); + err = psp_init_sos_microcode(psp, chip_name); if (err) - goto out; + return err; - err = amdgpu_ucode_validate(adev->psp.sos_fw); + err = psp_init_asd_microcode(psp, chip_name); if (err) - goto out; - - sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; - amdgpu_ucode_print_psp_hdr(&sos_hdr->header); - - switch (sos_hdr->header.header_version_major) { - case 1: - adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version); - adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version); - adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes); - adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes); - adev->psp.sys_start_addr = (uint8_t *)sos_hdr + - le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); - adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr + - le32_to_cpu(sos_hdr->sos_offset_bytes); - if (sos_hdr->header.header_version_minor == 1) { - sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; - adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes); - adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr + - le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes); - adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes); - adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr + - le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes); - } - if (sos_hdr->header.header_version_minor == 2) { - sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; - adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes); - 
adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr + - le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes); - } - break; - default: - dev_err(adev->dev, - "Unsupported psp sos firmware\n"); - err = -EINVAL; - goto out; - } - - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); - err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev); - if (err) - goto out1; - - err = amdgpu_ucode_validate(adev->psp.asd_fw); - if (err) - goto out1; - - asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; - adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version); - adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version); - adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes); - adev->psp.asd_start_addr = (uint8_t *)asd_hdr + - le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); + return err; switch (adev->asic_type) { case CHIP_VEGA20: @@ -229,15 +172,6 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) out2: release_firmware(adev->psp.ta_fw); adev->psp.ta_fw = NULL; -out1: - release_firmware(adev->psp.asd_fw); - adev->psp.asd_fw = NULL; -out: - dev_err(adev->dev, - "psp v11.0: Failed to load firmware \"%s\"\n", fw_name); - release_firmware(adev->psp.sos_fw); - adev->psp.sos_fw = NULL; - return err; } @@ -283,11 +217,8 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp) /* Check tOS sign of life register to confirm sys driver and sOS * are already been loaded. */ - if (psp_v11_0_is_sos_alive(psp)) { - psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); - dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version); + if (psp_v11_0_is_sos_alive(psp)) return 0; - } ret = psp_v11_0_wait_for_bootloader(psp); if (ret) @@ -319,11 +250,8 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp) /* Check sOS sign of life register to confirm sys driver and sOS * are already been loaded. 
*/ - if (psp_v11_0_is_sos_alive(psp)) { - psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); - dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version); + if (psp_v11_0_is_sos_alive(psp)) return 0; - } ret = psp_v11_0_wait_for_bootloader(psp); if (ret) @@ -446,13 +374,6 @@ static int psp_v11_0_ring_init(struct psp_context *psp, return 0; } -static bool psp_v11_0_support_vmr_ring(struct psp_context *psp) -{ - if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045) - return true; - return false; -} - static int psp_v11_0_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type) { @@ -460,7 +381,7 @@ static int psp_v11_0_ring_stop(struct psp_context *psp, struct amdgpu_device *adev = psp->adev; /* Write the ring destroy command*/ - if (psp_v11_0_support_vmr_ring(psp)) + if (amdgpu_sriov_vf(adev)) WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING); else @@ -471,7 +392,7 @@ static int psp_v11_0_ring_stop(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) */ - if (psp_v11_0_support_vmr_ring(psp)) + if (amdgpu_sriov_vf(adev)) ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), 0x80000000, 0x80000000, false); else @@ -489,7 +410,7 @@ static int psp_v11_0_ring_create(struct psp_context *psp, struct psp_ring *ring = &psp->km_ring; struct amdgpu_device *adev = psp->adev; - if (psp_v11_0_support_vmr_ring(psp)) { + if (amdgpu_sriov_vf(adev)) { ret = psp_v11_0_ring_stop(psp, ring_type); if (ret) { DRM_ERROR("psp_v11_0_ring_stop_sriov failed!\n"); @@ -567,138 +488,6 @@ static int psp_v11_0_ring_destroy(struct psp_context *psp, return ret; } -static int -psp_v11_0_sram_map(struct amdgpu_device *adev, - unsigned int *sram_offset, unsigned int *sram_addr_reg_offset, - unsigned int *sram_data_reg_offset, - enum AMDGPU_UCODE_ID ucode_id) -{ - int ret = 0; - - switch (ucode_id) { -/* TODO: needs to confirm */ -#if 0 - case AMDGPU_UCODE_ID_SMC: - *sram_offset = 0; - *sram_addr_reg_offset = 0; - *sram_data_reg_offset = 0; - break; -#endif - - case AMDGPU_UCODE_ID_CP_CE: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_PFP: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_ME: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_MEC1: - *sram_offset = 0x10000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_MEC2: - *sram_offset = 0x10000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_RLC_G: - *sram_offset = 0x2000; - if (adev->asic_type < CHIP_NAVI10) { - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA); - } else { - *sram_addr_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmRLC_GPM_UCODE_ADDR_NV10; - *sram_data_reg_offset = adev->reg_offset[GC_HWIP][0][1] + 
mmRLC_GPM_UCODE_DATA_NV10; - } - break; - - case AMDGPU_UCODE_ID_SDMA0: - *sram_offset = 0x0; - if (adev->asic_type < CHIP_NAVI10) { - *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA); - } else { - *sram_addr_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmSDMA0_UCODE_ADDR_NV10; - *sram_data_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmSDMA0_UCODE_DATA_NV10; - } - break; - -/* TODO: needs to confirm */ -#if 0 - case AMDGPU_UCODE_ID_SDMA1: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; - - case AMDGPU_UCODE_ID_UVD: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; - - case AMDGPU_UCODE_ID_VCE: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; -#endif - - case AMDGPU_UCODE_ID_MAXIMUM: - default: - ret = -EINVAL; - break; - } - - return ret; -} - -static bool psp_v11_0_compare_sram_data(struct psp_context *psp, - struct amdgpu_firmware_info *ucode, - enum AMDGPU_UCODE_ID ucode_type) -{ - int err = 0; - unsigned int fw_sram_reg_val = 0; - unsigned int fw_sram_addr_reg_offset = 0; - unsigned int fw_sram_data_reg_offset = 0; - unsigned int ucode_size; - uint32_t *ucode_mem = NULL; - struct amdgpu_device *adev = psp->adev; - - err = psp_v11_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset, - &fw_sram_data_reg_offset, ucode_type); - if (err) - return false; - - WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val); - - ucode_size = ucode->ucode_size; - ucode_mem = (uint32_t *)ucode->kaddr; - while (ucode_size) { - fw_sram_reg_val = RREG32(fw_sram_data_reg_offset); - - if (*ucode_mem != fw_sram_reg_val) - return false; - - ucode_mem++; - /* 4 bytes */ - ucode_size -= 4; - } - - return true; -} - static int psp_v11_0_mode1_reset(struct psp_context *psp) { int ret; @@ -1099,7 +888,7 @@ static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp) uint32_t data; struct amdgpu_device *adev = psp->adev; - if (psp_v11_0_support_vmr_ring(psp)) + if (amdgpu_sriov_vf(adev)) data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); else data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); @@ -1111,7 +900,7 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value) { struct amdgpu_device *adev = psp->adev; - if (psp_v11_0_support_vmr_ring(psp)) { + if (amdgpu_sriov_vf(adev)) { WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value); WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD); } else @@ -1203,13 +992,11 @@ static const struct psp_funcs psp_v11_0_funcs = { .ring_create = psp_v11_0_ring_create, .ring_stop = psp_v11_0_ring_stop, .ring_destroy = psp_v11_0_ring_destroy, - .compare_sram_data = psp_v11_0_compare_sram_data, .mode1_reset = psp_v11_0_mode1_reset, .xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info, .xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info, .xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id, .xgmi_get_node_id = psp_v11_0_xgmi_get_node_id, - .support_vmr_ring = psp_v11_0_support_vmr_ring, .ras_trigger_error = psp_v11_0_ras_trigger_error, .ras_cure_posion = psp_v11_0_ras_cure_posion, .rlc_autoload_start = psp_v11_0_rlc_autoload_start, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c index 58d8b6d732e8..6c9614f77d33 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c @@ -45,11 +45,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; const char *chip_name; - char fw_name[30]; int err 
= 0; - const struct psp_firmware_header_v1_0 *asd_hdr; - - DRM_DEBUG("\n"); switch (adev->asic_type) { case CHIP_RENOIR: @@ -59,28 +55,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp) BUG(); } - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); - err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev); - if (err) - goto out1; - - err = amdgpu_ucode_validate(adev->psp.asd_fw); - if (err) - goto out1; - - asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; - adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version); - adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version); - adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes); - adev->psp.asd_start_addr = (uint8_t *)asd_hdr + - le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); - - return 0; - -out1: - release_firmware(adev->psp.asd_fw); - adev->psp.asd_fw = NULL; - + err = psp_init_asd_microcode(psp, chip_name); return err; } @@ -95,11 +70,8 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp) * are already been loaded. */ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); - if (sol_reg) { - psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); - printk("sos fw version = 0x%x.\n", psp->sos_fw_version); + if (sol_reg) return 0; - } /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), @@ -228,13 +200,6 @@ static int psp_v12_0_ring_init(struct psp_context *psp, return 0; } -static bool psp_v12_0_support_vmr_ring(struct psp_context *psp) -{ - if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045) - return true; - return false; -} - static int psp_v12_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type) { @@ -243,7 +208,7 @@ static int psp_v12_0_ring_create(struct psp_context *psp, struct psp_ring *ring = &psp->km_ring; struct amdgpu_device *adev = psp->adev; - if (psp_v12_0_support_vmr_ring(psp)) { + if (amdgpu_sriov_vf(psp->adev)) { /* Write low address of the ring to C2PMSG_102 */ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg); @@ -295,7 +260,7 @@ static int psp_v12_0_ring_stop(struct psp_context *psp, struct amdgpu_device *adev = psp->adev; /* Write the ring destroy command*/ - if (psp_v12_0_support_vmr_ring(psp)) + if (amdgpu_sriov_vf(adev)) WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING); else @@ -306,7 +271,7 @@ static int psp_v12_0_ring_stop(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) */ - if (psp_v12_0_support_vmr_ring(psp)) + if (amdgpu_sriov_vf(adev)) ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), 0x80000000, 0x80000000, false); else @@ -334,128 +299,6 @@ static int psp_v12_0_ring_destroy(struct psp_context *psp, return ret; } -static int -psp_v12_0_sram_map(struct amdgpu_device *adev, - unsigned int *sram_offset, unsigned int *sram_addr_reg_offset, - unsigned int *sram_data_reg_offset, - enum AMDGPU_UCODE_ID ucode_id) -{ - int ret = 0; - - switch (ucode_id) { -/* TODO: needs to confirm */ -#if 0 - case AMDGPU_UCODE_ID_SMC: - *sram_offset = 0; - *sram_addr_reg_offset = 0; - *sram_data_reg_offset = 0; - break; -#endif - - case AMDGPU_UCODE_ID_CP_CE: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA); 
- break; - - case AMDGPU_UCODE_ID_CP_PFP: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_ME: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_MEC1: - *sram_offset = 0x10000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_MEC2: - *sram_offset = 0x10000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_RLC_G: - *sram_offset = 0x2000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_SDMA0: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA); - break; - -/* TODO: needs to confirm */ -#if 0 - case AMDGPU_UCODE_ID_SDMA1: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; - - case AMDGPU_UCODE_ID_UVD: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; - - case AMDGPU_UCODE_ID_VCE: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; -#endif - - case AMDGPU_UCODE_ID_MAXIMUM: - default: - ret = -EINVAL; - break; - } - - return ret; -} - -static bool psp_v12_0_compare_sram_data(struct psp_context *psp, - struct amdgpu_firmware_info *ucode, - enum AMDGPU_UCODE_ID ucode_type) -{ - int err = 0; - unsigned int fw_sram_reg_val = 0; - unsigned int fw_sram_addr_reg_offset = 0; - unsigned int fw_sram_data_reg_offset = 0; - unsigned int ucode_size; - uint32_t *ucode_mem = NULL; - struct amdgpu_device *adev = psp->adev; - - err = psp_v12_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset, - &fw_sram_data_reg_offset, ucode_type); - if (err) - return false; - - WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val); - - ucode_size = ucode->ucode_size; - ucode_mem = (uint32_t *)ucode->kaddr; - while (ucode_size) { - fw_sram_reg_val = RREG32(fw_sram_data_reg_offset); - - if (*ucode_mem != fw_sram_reg_val) - return false; - - ucode_mem++; - /* 4 bytes */ - ucode_size -= 4; - } - - return true; -} - static int psp_v12_0_mode1_reset(struct psp_context *psp) { int ret; @@ -495,7 +338,7 @@ static uint32_t psp_v12_0_ring_get_wptr(struct psp_context *psp) uint32_t data; struct amdgpu_device *adev = psp->adev; - if (psp_v12_0_support_vmr_ring(psp)) + if (amdgpu_sriov_vf(adev)) data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); else data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); @@ -507,7 +350,7 @@ static void psp_v12_0_ring_set_wptr(struct psp_context *psp, uint32_t value) { struct amdgpu_device *adev = psp->adev; - if (psp_v12_0_support_vmr_ring(psp)) { + if (amdgpu_sriov_vf(adev)) { WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value); WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD); } else @@ -522,7 +365,6 @@ static const struct psp_funcs psp_v12_0_funcs = { .ring_create = psp_v12_0_ring_create, .ring_stop = psp_v12_0_ring_stop, .ring_destroy = psp_v12_0_ring_destroy, - .compare_sram_data = psp_v12_0_compare_sram_data, .mode1_reset = 
psp_v12_0_mode1_reset, .ring_get_wptr = psp_v12_0_ring_get_wptr, .ring_set_wptr = psp_v12_0_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index 735c43c7daab..f2e725f72d2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -50,9 +50,6 @@ MODULE_FIRMWARE("amdgpu/vega12_asd.bin"); #define smnMP1_FIRMWARE_FLAGS 0x3010028 -static uint32_t sos_old_versions[] = {1517616, 1510592, 1448594, 1446554}; - -static bool psp_v3_1_support_vmr_ring(struct psp_context *psp); static int psp_v3_1_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type); @@ -60,9 +57,7 @@ static int psp_v3_1_init_microcode(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; const char *chip_name; - char fw_name[30]; int err = 0; - const struct psp_firmware_header_v1_0 *hdr; DRM_DEBUG("\n"); @@ -76,55 +71,15 @@ static int psp_v3_1_init_microcode(struct psp_context *psp) default: BUG(); } - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name); - err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev); - if (err) - goto out; - - err = amdgpu_ucode_validate(adev->psp.sos_fw); + err = psp_init_sos_microcode(psp, chip_name); if (err) - goto out; - - hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; - adev->psp.sos_fw_version = le32_to_cpu(hdr->header.ucode_version); - adev->psp.sos_feature_version = le32_to_cpu(hdr->ucode_feature_version); - adev->psp.sos_bin_size = le32_to_cpu(hdr->sos_size_bytes); - adev->psp.sys_bin_size = le32_to_cpu(hdr->header.ucode_size_bytes) - - le32_to_cpu(hdr->sos_size_bytes); - adev->psp.sys_start_addr = (uint8_t *)hdr + - le32_to_cpu(hdr->header.ucode_array_offset_bytes); - adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr + - le32_to_cpu(hdr->sos_offset_bytes); - - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); - err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev); - if (err) - goto out; + return err; - err = amdgpu_ucode_validate(adev->psp.asd_fw); + err = psp_init_asd_microcode(psp, chip_name); if (err) - goto out; - - hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; - adev->psp.asd_fw_version = le32_to_cpu(hdr->header.ucode_version); - adev->psp.asd_feature_version = le32_to_cpu(hdr->ucode_feature_version); - adev->psp.asd_ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); - adev->psp.asd_start_addr = (uint8_t *)hdr + - le32_to_cpu(hdr->header.ucode_array_offset_bytes); + return err; return 0; -out: - if (err) { - dev_err(adev->dev, - "psp v3.1: Failed to load firmware \"%s\"\n", - fw_name); - release_firmware(adev->psp.sos_fw); - adev->psp.sos_fw = NULL; - release_firmware(adev->psp.asd_fw); - adev->psp.asd_fw = NULL; - } - - return err; } static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp) @@ -168,41 +123,19 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp) return ret; } -static bool psp_v3_1_match_version(struct amdgpu_device *adev, uint32_t ver) -{ - int i; - - if (ver == adev->psp.sos_fw_version) - return true; - - /* - * Double check if the latest four legacy versions. - * If yes, it is still the right version. 
- */ - for (i = 0; i < ARRAY_SIZE(sos_old_versions); i++) { - if (sos_old_versions[i] == adev->psp.sos_fw_version) - return true; - } - - return false; -} - static int psp_v3_1_bootloader_load_sos(struct psp_context *psp) { int ret; unsigned int psp_gfxdrv_command_reg = 0; struct amdgpu_device *adev = psp->adev; - uint32_t sol_reg, ver; + uint32_t sol_reg; /* Check sOS sign of life register to confirm sys driver and sOS * are already been loaded. */ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); - if (sol_reg) { - psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); - printk("sos fw version = 0x%x.\n", psp->sos_fw_version); + if (sol_reg) return 0; - } /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), @@ -227,11 +160,6 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp) ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81), RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0, true); - - ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); - if (!psp_v3_1_match_version(adev, ver)) - DRM_WARN("SOS version doesn't match\n"); - return ret; } @@ -302,7 +230,7 @@ static int psp_v3_1_ring_create(struct psp_context *psp, psp_v3_1_reroute_ih(psp); - if (psp_v3_1_support_vmr_ring(psp)) { + if (amdgpu_sriov_vf(adev)) { ret = psp_v3_1_ring_stop(psp, ring_type); if (ret) { DRM_ERROR("psp_v3_1_ring_stop_sriov failed!\n"); @@ -360,34 +288,26 @@ static int psp_v3_1_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type) { int ret = 0; - unsigned int psp_ring_reg = 0; struct amdgpu_device *adev = psp->adev; - if (psp_v3_1_support_vmr_ring(psp)) { - /* Write the Destroy GPCOM ring command to C2PMSG_101 */ - psp_ring_reg = GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING; - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, psp_ring_reg); - - /* there might be handshake issue which needs delay */ - mdelay(20); - - /* Wait for response flag (bit 31) in C2PMSG_101 */ - ret = psp_wait_for(psp, - SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); - } else { - /* Write the ring destroy command to C2PMSG_64 */ - psp_ring_reg = 3 << 16; - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg); + /* Write the ring destroy command*/ + if (amdgpu_sriov_vf(adev)) + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, + GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING); + else + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, + GFX_CTRL_CMD_ID_DESTROY_RINGS); - /* there might be handshake issue which needs delay */ - mdelay(20); + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); - /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, - SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); - } + /* Wait for response flag (bit 31) */ + if (amdgpu_sriov_vf(adev)) + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + 0x80000000, 0x80000000, false); + else + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x80000000, false); return ret; } @@ -410,128 +330,6 @@ static int psp_v3_1_ring_destroy(struct psp_context *psp, return ret; } -static int -psp_v3_1_sram_map(struct amdgpu_device *adev, - unsigned int *sram_offset, unsigned int *sram_addr_reg_offset, - unsigned int *sram_data_reg_offset, - enum AMDGPU_UCODE_ID ucode_id) -{ - int ret = 0; - - switch(ucode_id) { -/* TODO: needs to confirm */ -#if 0 - case AMDGPU_UCODE_ID_SMC: - *sram_offset = 0; - 
*sram_addr_reg_offset = 0; - *sram_data_reg_offset = 0; - break; -#endif - - case AMDGPU_UCODE_ID_CP_CE: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_PFP: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_ME: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_MEC1: - *sram_offset = 0x10000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_MEC2: - *sram_offset = 0x10000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_RLC_G: - *sram_offset = 0x2000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_SDMA0: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA); - break; - -/* TODO: needs to confirm */ -#if 0 - case AMDGPU_UCODE_ID_SDMA1: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; - - case AMDGPU_UCODE_ID_UVD: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; - - case AMDGPU_UCODE_ID_VCE: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; -#endif - - case AMDGPU_UCODE_ID_MAXIMUM: - default: - ret = -EINVAL; - break; - } - - return ret; -} - -static bool psp_v3_1_compare_sram_data(struct psp_context *psp, - struct amdgpu_firmware_info *ucode, - enum AMDGPU_UCODE_ID ucode_type) -{ - int err = 0; - unsigned int fw_sram_reg_val = 0; - unsigned int fw_sram_addr_reg_offset = 0; - unsigned int fw_sram_data_reg_offset = 0; - unsigned int ucode_size; - uint32_t *ucode_mem = NULL; - struct amdgpu_device *adev = psp->adev; - - err = psp_v3_1_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset, - &fw_sram_data_reg_offset, ucode_type); - if (err) - return false; - - WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val); - - ucode_size = ucode->ucode_size; - ucode_mem = (uint32_t *)ucode->kaddr; - while (ucode_size) { - fw_sram_reg_val = RREG32(fw_sram_data_reg_offset); - - if (*ucode_mem != fw_sram_reg_val) - return false; - - ucode_mem++; - /* 4 bytes */ - ucode_size -= 4; - } - - return true; -} - static bool psp_v3_1_smu_reload_quirk(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -575,20 +373,12 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp) return 0; } -static bool psp_v3_1_support_vmr_ring(struct psp_context *psp) -{ - if (amdgpu_sriov_vf(psp->adev)) - return true; - - return false; -} - static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp) { uint32_t data; struct amdgpu_device *adev = psp->adev; - if (psp_v3_1_support_vmr_ring(psp)) + if (amdgpu_sriov_vf(adev)) data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); else data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); @@ -599,7 +389,7 @@ static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t 
value) { struct amdgpu_device *adev = psp->adev; - if (psp_v3_1_support_vmr_ring(psp)) { + if (amdgpu_sriov_vf(adev)) { WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value); /* send interrupt to PSP for SRIOV ring write pointer update */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, @@ -616,10 +406,8 @@ static const struct psp_funcs psp_v3_1_funcs = { .ring_create = psp_v3_1_ring_create, .ring_stop = psp_v3_1_ring_stop, .ring_destroy = psp_v3_1_ring_destroy, - .compare_sram_data = psp_v3_1_compare_sram_data, .smu_reload_quirk = psp_v3_1_smu_reload_quirk, .mode1_reset = psp_v3_1_mode1_reset, - .support_vmr_ring = psp_v3_1_support_vmr_ring, .ring_get_wptr = psp_v3_1_ring_get_wptr, .ring_set_wptr = psp_v3_1_ring_set_wptr, }; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 7d509a40076f..9a7f194c730a 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -355,8 +355,6 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev) ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); } - sdma0->sched.ready = false; - sdma1->sched.ready = false; } /** @@ -614,7 +612,8 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) goto err0; @@ -874,7 +873,8 @@ static int sdma_v2_4_sw_init(void *handle) &adev->sdma.trap_irq, (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 : - AMDGPU_SDMA_IRQ_INSTANCE1); + AMDGPU_SDMA_IRQ_INSTANCE1, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index b6109a99fc43..e6b5b4f672a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -529,8 +529,6 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev) ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); } - sdma0->sched.ready = false; - sdma1->sched.ready = false; } /** @@ -886,7 +884,8 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) goto err0; @@ -1158,7 +1157,8 @@ static int sdma_v3_0_sw_init(void *handle) &adev->sdma.trap_irq, (i == 0) ? 
AMDGPU_SDMA_IRQ_INSTANCE0 : - AMDGPU_SDMA_IRQ_INSTANCE1); + AMDGPU_SDMA_IRQ_INSTANCE1, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 5f3a5ee2a3f4..3e99f31b4bd0 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -115,17 +115,21 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = { static const struct soc15_reg_golden golden_settings_sdma_vg10[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002), - SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002) + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), }; static const struct soc15_reg_golden golden_settings_sdma_vg12[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001), - SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001) + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), }; static const struct soc15_reg_golden golden_settings_sdma_4_1[] = { @@ -174,6 +178,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] = SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), }; static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = { @@ -203,6 +208,7 @@ static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = { SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), }; static const struct soc15_reg_golden golden_settings_sdma_rv1[] = @@ -222,27 +228,35 @@ static const struct soc15_reg_golden golden_settings_sdma_arct[] = SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, 
mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), - SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002) + SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_UTCL1_TIMEOUT, 0xffffffff, 0x00010001) }; static const struct soc15_reg_golden golden_settings_sdma_4_3[] = { @@ -923,8 +937,6 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev) ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL); ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl); - - sdma[i]->sched.ready = false; } } @@ -971,8 +983,6 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev) ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_ENABLE, 0); WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl); - - sdma[i]->sched.ready = false; } } @@ -1539,7 +1549,8 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) goto err0; @@ -1840,7 +1851,7 @@ static int sdma_v4_0_sw_init(void *handle) ring->ring_obj = NULL; ring->use_doorbell = true; - DRM_INFO("use_doorbell being set to: [%s]\n", + DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i, ring->use_doorbell?"true":"false"); /* doorbell size is 2 dwords, get DWORD offset */ @@ -1848,7 +1859,8 @@ static int 
sdma_v4_0_sw_init(void *handle) sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, - AMDGPU_SDMA_IRQ_INSTANCE0 + i); + AMDGPU_SDMA_IRQ_INSTANCE0 + i, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; @@ -1866,7 +1878,8 @@ static int sdma_v4_0_sw_init(void *handle) sprintf(ring->name, "page%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, - AMDGPU_SDMA_IRQ_INSTANCE0 + i); + AMDGPU_SDMA_IRQ_INSTANCE0 + i, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index ebfd2cdf4e65..e206064ffb82 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -88,6 +88,29 @@ static const struct soc15_reg_golden golden_settings_sdma_5[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x00ffffff, 0x000c5c00) }; +static const struct soc15_reg_golden golden_settings_sdma_5_sriov[] = { + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), +}; + static const struct soc15_reg_golden golden_settings_sdma_nv10[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), @@ -141,9 +164,14 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev) (const u32)ARRAY_SIZE(golden_settings_sdma_nv14)); break; case CHIP_NAVI12: - soc15_program_register_sequence(adev, - golden_settings_sdma_5, - (const u32)ARRAY_SIZE(golden_settings_sdma_5)); + if (amdgpu_sriov_vf(adev)) + soc15_program_register_sequence(adev, + golden_settings_sdma_5_sriov, + (const u32)ARRAY_SIZE(golden_settings_sdma_5_sriov)); + else + soc15_program_register_sequence(adev, + golden_settings_sdma_5, + (const 
u32)ARRAY_SIZE(golden_settings_sdma_5)); soc15_program_register_sequence(adev, golden_settings_sdma_nv12, (const u32)ARRAY_SIZE(golden_settings_sdma_nv12)); @@ -502,9 +530,6 @@ static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev) ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); } - - sdma0->sched.ready = false; - sdma1->sched.ready = false; } /** @@ -529,7 +554,7 @@ static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev) */ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) { - u32 f32_cntl, phase_quantum = 0; + u32 f32_cntl = 0, phase_quantum = 0; int i; if (amdgpu_sdma_phase_quantum) { @@ -557,9 +582,12 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) } for (i = 0; i < adev->sdma.num_instances; i++) { - f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); - f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, - AUTO_CTXSW_ENABLE, enable ? 1 : 0); + if (!amdgpu_sriov_vf(adev)) { + f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + AUTO_CTXSW_ENABLE, enable ? 1 : 0); + } + if (enable && amdgpu_sdma_phase_quantum) { WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM), phase_quantum); @@ -568,7 +596,8 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM), phase_quantum); } - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl); + if (!amdgpu_sriov_vf(adev)) + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl); } } @@ -591,6 +620,9 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable) sdma_v5_0_rlc_stop(adev); } + if (amdgpu_sriov_vf(adev)) + return; + for (i = 0; i < adev->sdma.num_instances; i++) { f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 
0 : 1); @@ -623,7 +655,8 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) ring = &adev->sdma.instance[i].ring; wb_offset = (ring->rptr_offs * 4); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); + if (!amdgpu_sriov_vf(adev)) + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); /* Set ring buffer size in dwords */ rb_bufsz = order_base_2(ring->ring_size / 4); @@ -699,26 +732,28 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) /* set minor_ptr_update to 0 after wptr programed */ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); - /* set utc l1 enable flag always to 1 */ - temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); - - /* enable MCBP */ - temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); - - /* Set up RESP_MODE to non-copy addresses */ - temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp); - - /* program default cache read and write policy */ - temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE)); - /* clean read policy and write policy bits */ - temp &= 0xFF0FFF; - temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14)); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp); + if (!amdgpu_sriov_vf(adev)) { + /* set utc l1 enable flag always to 1 */ + temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); + + /* enable MCBP */ + temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); + + /* Set up RESP_MODE to non-copy addresses */ + temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp); + + /* program default cache read and write policy */ + temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE)); + /* clean read policy and write policy bits */ + temp &= 0xFF0FFF; + temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14)); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp); + } if (!amdgpu_sriov_vf(adev)) { /* unhalt engine */ @@ -948,7 +983,8 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err0; @@ -1224,7 +1260,7 @@ static int sdma_v5_0_sw_init(void *handle) ring->ring_obj = NULL; ring->use_doorbell = true; - DRM_INFO("use_doorbell being set to: [%s]\n", + DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i, ring->use_doorbell?"true":"false"); ring->doorbell_index = (i == 0) ? @@ -1236,7 +1272,8 @@ static int sdma_v5_0_sw_init(void *handle) &adev->sdma.trap_irq, (i == 0) ? 
AMDGPU_SDMA_IRQ_INSTANCE0 : - AMDGPU_SDMA_IRQ_INSTANCE1); + AMDGPU_SDMA_IRQ_INSTANCE1, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; } @@ -1387,14 +1424,16 @@ static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev, { u32 sdma_cntl; - u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ? - sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) : - sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL); + if (!amdgpu_sriov_vf(adev)) { + u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ? + sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) : + sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL); - sdma_cntl = RREG32(reg_offset); - sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, - state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); - WREG32(reg_offset, sdma_cntl); + sdma_cntl = RREG32(reg_offset); + sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32(reg_offset, sdma_cntl); + } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 4d415bfdb42f..153db3f763bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1249,12 +1249,6 @@ static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk) return 0; } -static void si_detect_hw_virtualization(struct amdgpu_device *adev) -{ - if (is_virtual_machine()) /* passthrough mode */ - adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; -} - static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { if (!ring || !ring->funcs->emit_wreg) { @@ -2165,8 +2159,6 @@ static const struct amdgpu_ip_block_version si_common_ip_block = int si_set_ip_blocks(struct amdgpu_device *adev) { - si_detect_hw_virtualization(adev); - switch (adev->asic_type) { case CHIP_VERDE: case CHIP_TAHITI: diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index 42d5601b6bf3..e4b8283ad11d 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -124,7 +124,6 @@ static void si_dma_stop(struct amdgpu_device *adev) if (adev->mman.buffer_funcs_ring == ring) amdgpu_ttm_set_buffer_funcs_status(adev, false); - ring->sched.ready = false; } } @@ -267,7 +266,8 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) goto err0; @@ -504,7 +504,8 @@ static int si_dma_sw_init(void *handle) &adev->sdma.trap_irq, (i == 0) ? 
AMDGPU_SDMA_IRQ_INSTANCE0 : - AMDGPU_SDMA_IRQ_INSTANCE1); + AMDGPU_SDMA_IRQ_INSTANCE1, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 0860e85a2d35..c00ba4b23c9a 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -345,26 +345,6 @@ static const struct si_dte_data dte_data_tahiti = false }; -#if 0 -static const struct si_dte_data dte_data_tahiti_le = -{ - { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 }, - { 0x7D, 0x7D, 0x4E4, 0xB00, 0 }, - 0x5, - 0xAFC8, - 0x64, - 0x32, - 1, - 0, - 0x10, - { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 }, - { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 }, - { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 }, - 85, - true -}; -#endif - static const struct si_dte_data dte_data_tahiti_pro = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index d42a8d8a0dea..58a440a15525 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -708,7 +708,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) adev->df.funcs = &df_v1_7_funcs; adev->rev_id = soc15_get_rev_id(adev); - adev->nbio.funcs->detect_hw_virt(adev); if (amdgpu_sriov_vf(adev)) adev->virt.ops = &xgpu_ai_virt_ops; @@ -1218,7 +1217,7 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_IH_CG | AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG; - adev->pg_flags = 0; + adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG; adev->external_rev_id = adev->rev_id + 0x32; break; case CHIP_RENOIR: diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 82abd8e728ab..3cafba726587 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -118,7 +118,8 @@ static int uvd_v4_2_sw_init(void *handle) ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; @@ -210,13 +211,10 @@ done: static int uvd_v4_2_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.inst->ring; if (RREG32(mmUVD_STATUS) != 0) uvd_v4_2_stop(adev); - ring->sched.ready = false; - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 0fa8aae2d78e..a566ff926e90 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -116,7 +116,8 @@ static int uvd_v5_0_sw_init(void *handle) ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; @@ -208,13 +209,10 @@ done: static int uvd_v5_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.inst->ring; if (RREG32(mmUVD_STATUS) != 0) uvd_v5_0_stop(adev); - ring->sched.ready = false; - return 0; } diff --git 
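
The soc15.c hunk above enables VCN powergating for one ASIC by OR-ing capability bits into adev->pg_flags; later code in this same patch (the vcn hw_fini paths) tests individual bits. A small sketch of that flag composition, with hypothetical bit values (the real AMD_PG_SUPPORT_* constants live in amd_shared.h):

#include <stdint.h>
#include <stdio.h>

#define AMD_PG_SUPPORT_VCN     (1u << 0) /* illustrative values */
#define AMD_PG_SUPPORT_VCN_DPG (1u << 1)

int main(void)
{
        uint32_t pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;

        /* Later code keys off individual capabilities, e.g. the
         * (pg_flags & AMD_PG_SUPPORT_VCN_DPG) test in vcn hw_fini. */
        if (pg_flags & AMD_PG_SUPPORT_VCN_DPG)
                printf("VCN dynamic powergating enabled\n");
        return 0;
}
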
a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index e0aadcaf6c8b..0a880bc101b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -216,7 +216,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; @@ -279,7 +280,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; @@ -416,7 +418,8 @@ static int uvd_v6_0_sw_init(void *handle) ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; @@ -428,7 +431,9 @@ static int uvd_v6_0_sw_init(void *handle) for (i = 0; i < adev->uvd.num_enc_rings; ++i) { ring = &adev->uvd.inst->ring_enc[i]; sprintf(ring->name, "uvd_enc%d", i); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); + r = amdgpu_ring_init(adev, ring, 512, + &adev->uvd.inst->irq, 0, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; } @@ -535,13 +540,10 @@ done: static int uvd_v6_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.inst->ring; if (RREG32(mmUVD_STATUS) != 0) uvd_v6_0_stop(adev); - ring->sched.ready = false; - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 0995378d8263..7a55457e6f9e 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -224,7 +224,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; @@ -286,7 +287,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; @@ -450,7 +452,9 @@ static int uvd_v7_0_sw_init(void *handle) if (!amdgpu_sriov_vf(adev)) { ring = &adev->uvd.inst[j].ring; sprintf(ring->name, "uvd_%d", ring->me); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0); + r = amdgpu_ring_init(adev, ring, 512, + &adev->uvd.inst[j].irq, 0, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; } @@ -469,7 +473,9 @@ static int uvd_v7_0_sw_init(void *handle) else ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1; } - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0); + r = amdgpu_ring_init(adev, ring, 512, + &adev->uvd.inst[j].irq, 0, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; } @@ -598,7 +604,6 @@ done: static int uvd_v7_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int i; if (!amdgpu_sriov_vf(adev)) uvd_v7_0_stop(adev); @@ -607,12 +612,6 @@ static int uvd_v7_0_hw_fini(void *handle) DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); } - for (i = 
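
All the amdgpu_ib_get()/amdgpu_job_alloc_with_ib() call sites in these hunks gain an explicit IB pool argument. The enum below mirrors the one this series adds to amdgpu.h; pool_name() is a hypothetical helper shown only to make the intent of each pool concrete. Ring and IB self-tests pick AMDGPU_IB_POOL_DIRECT because they run on init/reset paths, outside the scheduler.

#include <stdio.h>

/* Mirrors the enum added to amdgpu.h in this series. */
enum amdgpu_ib_pool_type {
        AMDGPU_IB_POOL_NORMAL = 0,
        AMDGPU_IB_POOL_VM,
        AMDGPU_IB_POOL_DIRECT,
        AMDGPU_IB_POOL_MAX
};

/* Hypothetical helper; the real amdgpu_ib_get() suballocates from the
 * per-pool manager matching this enum. */
static const char *pool_name(enum amdgpu_ib_pool_type pool)
{
        switch (pool) {
        case AMDGPU_IB_POOL_NORMAL: return "normal (scheduled jobs)";
        case AMDGPU_IB_POOL_VM:     return "vm (page-table updates)";
        case AMDGPU_IB_POOL_DIRECT: return "direct (ring tests, init/reset paths)";
        default:                    return "invalid";
        }
}

int main(void)
{
        /* The converted test_ib paths allocate a 256-byte IB here. */
        printf("allocating 256 bytes from pool: %s\n",
               pool_name(AMDGPU_IB_POOL_DIRECT));
        return 0;
}
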
0; i < adev->uvd.num_uvd_inst; ++i) { - if (adev->uvd.harvest_config & (1 << i)) - continue; - adev->uvd.inst[i].ring.sched.ready = false; - } - return 0; } @@ -1694,7 +1693,7 @@ static int uvd_v7_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); uvd_v7_0_set_bypass_mode(adev, enable); diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index b6837fcfdba7..0e2945baf0f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -434,7 +434,8 @@ static int vce_v2_0_sw_init(void *handle) ring = &adev->vce.ring[i]; sprintf(ring->name, "vce%d", i); r = amdgpu_ring_init(adev, ring, 512, - &adev->vce.irq, 0); + &adev->vce.irq, 0, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 217db187207c..6d9108fa22e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -442,7 +442,8 @@ static int vce_v3_0_sw_init(void *handle) for (i = 0; i < adev->vce.num_rings; i++) { ring = &adev->vce.ring[i]; sprintf(ring->name, "vce%d", i); - r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 3fd102efb7af..a0fb119240f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -476,7 +476,8 @@ static int vce_v4_0_sw_init(void *handle) else ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1; } - r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; } @@ -539,7 +540,6 @@ static int vce_v4_0_hw_init(void *handle) static int vce_v4_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int i; if (!amdgpu_sriov_vf(adev)) { /* vce_v4_0_wait_for_idle(handle); */ @@ -549,9 +549,6 @@ static int vce_v4_0_hw_fini(void *handle) DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); } - for (i = 0; i < adev->vce.num_rings; i++) - adev->vce.ring[i].sched.ready = false; - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 09b0572b838d..1ad79155ed00 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -127,7 +127,8 @@ static int vcn_v1_0_sw_init(void *handle) ring = &adev->vcn.inst->ring_dec; sprintf(ring->name, "vcn_dec"); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; @@ -145,7 +146,8 @@ static int vcn_v1_0_sw_init(void *handle) for (i = 0; i < adev->vcn.num_enc_rings; ++i) { ring = &adev->vcn.inst->ring_enc[i]; sprintf(ring->name, "vcn_enc%d", i); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; } @@ -227,14 +229,11 @@ done: static int vcn_v1_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = 
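
One cleanup in the uvd_v7_0 hunk above replaces `(state == AMD_CG_STATE_GATE) ? true : false` with the bare comparison. A tiny demonstration that the two forms are equivalent (the enum values here are stand-ins):

#include <stdbool.h>
#include <assert.h>

enum amd_clockgating_state { AMD_CG_STATE_UNGATE, AMD_CG_STATE_GATE };

int main(void)
{
        enum amd_clockgating_state state = AMD_CG_STATE_GATE;

        /* The == operator already evaluates to 0 or 1, so the ternary
         * form removed by this hunk was redundant. */
        bool old_style = (state == AMD_CG_STATE_GATE) ? true : false;
        bool new_style = (state == AMD_CG_STATE_GATE);

        assert(old_style == new_style);
        return 0;
}
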
&adev->vcn.inst->ring_dec; if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || RREG32_SOC15(VCN, 0, mmUVD_STATUS)) vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE); - ring->sched.ready = false; - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index ec8091a661df..349da7bf7c68 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -92,6 +92,7 @@ static int vcn_v2_0_sw_init(void *handle) struct amdgpu_ring *ring; int i, r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + volatile struct amdgpu_fw_shared *fw_shared; /* VCN DEC TRAP */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, @@ -133,7 +134,8 @@ static int vcn_v2_0_sw_init(void *handle) ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1; sprintf(ring->name, "vcn_dec"); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; @@ -163,7 +165,8 @@ static int vcn_v2_0_sw_init(void *handle) else ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i; sprintf(ring->name, "vcn_enc%d", i); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; } @@ -174,6 +177,8 @@ static int vcn_v2_0_sw_init(void *handle) if (r) return r; + fw_shared = adev->vcn.inst->fw_shared_cpu_addr; + fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG); return 0; } @@ -188,6 +193,9 @@ static int vcn_v2_0_sw_fini(void *handle) { int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr; + + fw_shared->present_flag_0 = 0; amdgpu_virt_free_mm_table(adev); @@ -223,6 +231,10 @@ static int vcn_v2_0_hw_init(void *handle) if (r) goto done; + //Disable vcn decode for sriov + if (amdgpu_sriov_vf(adev)) + ring->sched.ready = false; + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { ring = &adev->vcn.inst->ring_enc[i]; r = amdgpu_ring_test_helper(ring); @@ -248,21 +260,12 @@ done: static int vcn_v2_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; - int i; if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || (adev->vcn.cur_state != AMD_PG_STATE_GATE && RREG32_SOC15(VCN, 0, mmUVD_STATUS))) vcn_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE); - ring->sched.ready = false; - - for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - ring = &adev->vcn.inst->ring_enc[i]; - ring->sched.ready = false; - } - return 0; } @@ -359,6 +362,15 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev) WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); + /* non-cache window */ + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr)); + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr)); + WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0, 0); + WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0, + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared))); + WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config); } @@ -442,13 +454,16 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec /* 
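
The new non-cache window programming in vcn_v2_0_mc_resume() splits the 64-bit fw_shared GPU address across a LOW/HIGH register pair. The sketch below uses definitions equivalent to the kernel's lower_32_bits()/upper_32_bits(); the address itself is hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Equivalent to the kernel helpers in include/linux/kernel.h. */
#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffffu))
#define upper_32_bits(n) ((uint32_t)((n) >> 32))

int main(void)
{
        /* Hypothetical GPU VA of the fw_shared buffer. */
        uint64_t fw_shared_gpu_addr = 0x0000008000123000ULL;

        /* The 64-bit BAR is programmed as two 32-bit register writes,
         * exactly like the NC0_64BIT_BAR_LOW/HIGH pair above. */
        printf("BAR_LOW  = 0x%08x\n", (unsigned)lower_32_bits(fw_shared_gpu_addr));
        printf("BAR_HIGH = 0x%08x\n", (unsigned)upper_32_bits(fw_shared_gpu_addr));
        return 0;
}
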
non-cache window */ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect); + UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect); + UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect); + UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect); /* VCN global tiling registers */ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( @@ -773,6 +788,7 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev) static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) { + volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr; struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; uint32_t rb_bufsz, tmp; @@ -872,6 +888,12 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp); + /* Stall DPG before WPTR/RPTR reset */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), + UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, + ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); + fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET; + /* set the write pointer delay */ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0); @@ -894,11 +916,16 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); + fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET; + /* Unstall DPG */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), + 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); return 0; } static int vcn_v2_0_start(struct amdgpu_device *adev) { + volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr; struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; uint32_t rb_bufsz, tmp; uint32_t lmi_swap_cntl; @@ -1033,6 +1060,7 @@ static int vcn_v2_0_start(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp); + fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET; /* programm the RB_BASE for ring buffer */ WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr)); @@ -1045,20 +1073,25 @@ static int vcn_v2_0_start(struct amdgpu_device *adev) ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); + fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET; + fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET; ring = &adev->vcn.inst->ring_enc[0]; WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); + fw_shared->multi_queue.encode_generalpurpose_queue_mode &= 
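
The DPG hunks follow a fixed handshake whenever ring pointers are reset: stall DPG power-up, raise FW_QUEUE_RING_RESET in the CPU-visible shared window so the VCN firmware knows the queue is in flux, reprogram WPTR/RPTR, clear the flag, unstall. A cut-down model of that ordering (the flag's bit value and the struct layout are illustrative, not the real amdgpu_fw_shared ABI):

#include <stdint.h>
#include <stdio.h>

#define FW_QUEUE_RING_RESET (1u << 0) /* illustrative bit value */

/* Cut-down model of the CPU-visible fw_shared window. */
struct fw_shared {
        volatile uint32_t decode_queue_mode;
};

static void program_rb_pointers(void)
{
        printf("  WPTR/RPTR reprogrammed\n");
}

int main(void)
{
        struct fw_shared shared = { 0 };

        printf("stall DPG power-up\n");                   /* WREG32_P(..., STALL, ...) */
        shared.decode_queue_mode |= FW_QUEUE_RING_RESET;  /* tell firmware */
        program_rb_pointers();
        shared.decode_queue_mode &= ~FW_QUEUE_RING_RESET; /* reset done */
        printf("unstall DPG\n");                          /* WREG32_P(..., 0, ~STALL) */
        return 0;
}
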
~FW_QUEUE_RING_RESET; + fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET; ring = &adev->vcn.inst->ring_enc[1]; WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); + fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET; return 0; } @@ -1180,6 +1213,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); if (!ret_code) { + volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr; /* pause DPG */ reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data); @@ -1189,23 +1223,38 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev, UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code); + /* Stall DPG before WPTR/RPTR reset */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), + UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, + ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); /* Restore */ + fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET; ring = &adev->vcn.inst->ring_enc[0]; + ring->wptr = 0; WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET; + fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET; ring = &adev->vcn.inst->ring_enc[1]; + ring->wptr = 0; WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET; + fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET; WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF); + fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET; + /* Unstall DPG */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), + 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index c6363f5ad564..0fa1c5cec439 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -165,6 +165,8 @@ static int vcn_v2_5_sw_init(void *handle) return r; for (j = 0; j < adev->vcn.num_vcn_inst; j++) { + volatile struct amdgpu_fw_shared *fw_shared; + if (adev->vcn.harvest_config & (1 << j)) continue; adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET; @@ -191,7 +193,8 @@ static int vcn_v2_5_sw_init(void *handle) ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + (amdgpu_sriov_vf(adev) ? 
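
The vcn_v2_5 loops consistently skip fused-off (harvested) instances with a per-instance bit test before touching fw_shared or hardware state. A minimal model of that skip pattern, with a hypothetical harvest mask:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int num_vcn_inst = 2;
        /* Hypothetical: bit i set means VCN instance i was fused off. */
        uint32_t harvest_config = 1u << 1;

        /* Same skip pattern as vcn_v2_5_sw_fini()/hw_fini() above. */
        for (int i = 0; i < num_vcn_inst; i++) {
                if (harvest_config & (1u << i)) {
                        printf("instance %d harvested, skipped\n", i);
                        continue;
                }
                printf("instance %d: clear fw_shared present_flag_0\n", i);
        }
        return 0;
}
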
2*j : 8*j); sprintf(ring->name, "vcn_dec_%d", j); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, + 0, AMDGPU_RING_PRIO_DEFAULT); if (r) return r; @@ -203,10 +206,15 @@ static int vcn_v2_5_sw_init(void *handle) (amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j)); sprintf(ring->name, "vcn_enc_%d.%d", j, i); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0); + r = amdgpu_ring_init(adev, ring, 512, + &adev->vcn.inst[j].irq, 0, + AMDGPU_RING_PRIO_DEFAULT); if (r) return r; } + + fw_shared = adev->vcn.inst[j].fw_shared_cpu_addr; + fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG); } if (amdgpu_sriov_vf(adev)) { @@ -230,8 +238,16 @@ static int vcn_v2_5_sw_init(void *handle) */ static int vcn_v2_5_sw_fini(void *handle) { - int r; + int i, r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + volatile struct amdgpu_fw_shared *fw_shared; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr; + fw_shared->present_flag_0 = 0; + } if (amdgpu_sriov_vf(adev)) amdgpu_virt_free_mm_table(adev); @@ -308,25 +324,16 @@ done: static int vcn_v2_5_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring; - int i, j; + int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; - ring = &adev->vcn.inst[i].ring_dec; if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || (adev->vcn.cur_state != AMD_PG_STATE_GATE && RREG32_SOC15(VCN, i, mmUVD_STATUS))) vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); - - ring->sched.ready = false; - - for (j = 0; j < adev->vcn.num_enc_rings; ++j) { - ring = &adev->vcn.inst[i].ring_enc[j]; - ring->sched.ready = false; - } } return 0; @@ -424,6 +431,15 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev) upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0); WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); + + /* non-cache window */ + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr)); + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr)); + WREG32_SOC15(UVD, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0); + WREG32_SOC15(UVD, i, mmUVD_VCPU_NONCACHE_SIZE0, + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared))); } } @@ -507,13 +523,16 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx /* non-cache window */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect); + UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect); + UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect); + UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct 
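
The doorbell assignments in vcn_v2_5_sw_init() pack VCN doorbells with a stride of 2 per instance under SR-IOV versus 8 on bare metal. The helpers below just re-express the two formulas from the hunk so the layouts can be printed side by side; the base index is a stand-in for adev->doorbell_index.vcn.vcn_ring0_1.

#include <stdbool.h>
#include <stdio.h>

static int dec_doorbell(int base, int j, bool sriov)
{
        return (base << 1) + (sriov ? 2 * j : 8 * j);
}

static int enc_doorbell(int base, int i, int j, bool sriov)
{
        return (base << 1) + (sriov ? (1 + i + 2 * j) : (2 + i + 8 * j));
}

int main(void)
{
        int base = 4; /* hypothetical vcn_ring0_1 base */

        for (int j = 0; j < 2; j++) {
                printf("inst %d dec:  bare=%d sriov=%d\n", j,
                       dec_doorbell(base, j, false), dec_doorbell(base, j, true));
                for (int i = 0; i < 2; i++)
                        printf("inst %d enc%d: bare=%d sriov=%d\n", j, i,
                               enc_doorbell(base, i, j, false),
                               enc_doorbell(base, i, j, true));
        }
        return 0;
}
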
amdgpu_fw_shared)), 0, indirect); /* VCN global tiling registers */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( @@ -750,6 +769,7 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev) static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect) { + volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr; struct amdgpu_ring *ring; uint32_t rb_bufsz, tmp; @@ -855,6 +875,12 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_CNTL, tmp); + /* Stall DPG before WPTR/RPTR reset */ + WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), + UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, + ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); + fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET; + /* set the write pointer delay */ WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0); @@ -877,6 +903,11 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); + fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET; + /* Unstall DPG */ + WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), + 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); + return 0; } @@ -962,6 +993,7 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) vcn_v2_5_mc_resume(adev); for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr; if (adev->vcn.harvest_config & (1 << i)) continue; /* VCN global tiling registers */ @@ -1035,6 +1067,7 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp); + fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET; /* programm the RB_BASE for ring buffer */ WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr)); @@ -1047,19 +1080,25 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR); WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); + fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET; + + fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET; ring = &adev->vcn.inst[i].ring_enc[0]; WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr); WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4); + fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET; + fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET; ring = &adev->vcn.inst[i].ring_enc[1]; WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr); WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4); + fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET; } return 0; @@ -1380,6 +1419,8 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev, 
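
The stall/unstall writes above use WREG32_P(reg, val, mask), amdgpu's masked read-modify-write: bits selected by mask are preserved, so passing ~FIELD_MASK updates exactly one field and leaves the rest of the register alone. A compilable model (the register bit value is illustrative):

#include <stdint.h>
#include <stdio.h>

#define STALL_DPG_POWER_UP_MASK 0x00000020u /* illustrative bit */

static uint32_t mmio; /* stands in for the UVD_POWER_STATUS register */

/* WREG32_P(reg, val, mask): keep the bits selected by mask, OR in the
 * remaining bits of val. */
static void wreg32_p(uint32_t val, uint32_t mask)
{
        mmio = (mmio & mask) | (val & ~mask);
}

int main(void)
{
        mmio = 0x000000c1; /* pretend other fields are already set */

        /* Stall: set the field. */
        wreg32_p(STALL_DPG_POWER_UP_MASK, ~STALL_DPG_POWER_UP_MASK);
        printf("stalled:   0x%08x\n", (unsigned)mmio); /* 0x000000e1 */

        /* Unstall: clear the field, leaving everything else intact. */
        wreg32_p(0, ~STALL_DPG_POWER_UP_MASK);
        printf("unstalled: 0x%08x\n", (unsigned)mmio); /* 0x000000c1 */
        return 0;
}
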
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); if (!ret_code) { + volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr; + /* pause DPG */ reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data); @@ -1389,23 +1430,40 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev, UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code); + /* Stall DPG before WPTR/RPTR reset */ + WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), + UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, + ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); + /* Restore */ + fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET; ring = &adev->vcn.inst[inst_idx].ring_enc[0]; + ring->wptr = 0; WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr); WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4); WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET; + fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET; ring = &adev->vcn.inst[inst_idx].ring_enc[1]; + ring->wptr = 0; WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr); WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4); WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET; + fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET; WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR, RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF); + fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET; + + /* Unstall DPG */ + WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), + 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 3ce10e05d0d6..af8986a55354 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -448,27 +448,6 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev, return true; } -static void vi_detect_hw_virtualization(struct amdgpu_device *adev) -{ - uint32_t reg = 0; - - if (adev->asic_type == CHIP_TONGA || - adev->asic_type == CHIP_FIJI) { - reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); - /* bit0: 0 means pf and 1 means vf */ - if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER)) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF; - /* bit31: 0 means disable IOV and 1 means enable */ - if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE)) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; - } - - if (reg == 0) { - if (is_virtual_machine()) /* passthrough mode exclus sr-iov mode */ - adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; - } -} - static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = { {mmGRBM_STATUS}, {mmGRBM_STATUS2}, @@ -1728,9 +1707,6 @@ static const struct amdgpu_ip_block_version vi_common_ip_block = int vi_set_ip_blocks(struct 
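
The vi.c hunk below deletes the per-ASIC vi_detect_hw_virtualization(), since this series centralizes virtualization detection. For reference, the removed logic decoded two bits of BIF_IOV_FUNC_IDENTIFIER; the sketch replays that decode with invented cap values and a hypothetical register reading.

#include <stdint.h>
#include <stdio.h>

#define AMDGPU_SRIOV_CAPS_IS_VF      (1u << 0) /* hypothetical values */
#define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1u << 1)

int main(void)
{
        /* Stand-in for RREG32(mmBIF_IOV_FUNC_IDENTIFIER). */
        uint32_t reg = 0x80000001u;
        uint32_t caps = 0;

        if (reg & (1u << 0))  /* bit0: 0 = PF, 1 = VF */
                caps |= AMDGPU_SRIOV_CAPS_IS_VF;
        if (reg & (1u << 31)) /* bit31: IOV enabled */
                caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
        /* reg == 0 on bare hardware: a VM without SR-IOV is treated as
         * passthrough mode instead. */

        printf("virt caps: 0x%x\n", (unsigned)caps);
        return 0;
}
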
amdgpu_device *adev) { - /* in early init stage, vbios code won't work */ - vi_detect_hw_virtualization(adev); - if (amdgpu_sriov_vf(adev)) adev->virt.ops = &xgpu_vi_virt_ops; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index de9f68d5c312..1009a3b8dcc2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -502,7 +502,7 @@ int kfd_parse_crat_table(void *crat_image, struct list_head *device_list, num_nodes = crat_table->num_domains; image_len = crat_table->length; - pr_info("Parsing CRAT table with %d nodes\n", num_nodes); + pr_debug("Parsing CRAT table with %d nodes\n", num_nodes); for (node_id = 0; node_id < num_nodes; node_id++) { top_dev = kfd_create_topology_device(device_list); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index aa0bfa78a667..8e6409bc7c91 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -787,7 +787,6 @@ static int kfd_topology_update_sysfs(void) { int ret; - pr_info("Creating topology SYSFS entries\n"); if (!sys_props.kobj_topology) { sys_props.kobj_topology = kfd_alloc_struct(sys_props.kobj_topology); @@ -1048,7 +1047,6 @@ int kfd_topology_init(void) sys_props.generation_count++; kfd_update_system_properties(); kfd_debug_print_topology(); - pr_info("Finished initializing topology\n"); } else pr_err("Failed to update topology in sysfs ret=%d\n", ret); diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 87858bc57e64..1911a34cc060 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -21,16 +21,12 @@ config DRM_AMD_DC_HDCP bool "Enable HDCP support in DC" depends on DRM_AMD_DC help - Choose this option - if you want to support - HDCP authentication + Choose this option if you want to support HDCP authentication. config DEBUG_KERNEL_DC bool "Enable kgdb break in DC" depends on DRM_AMD_DC help - Choose this option - if you want to hit - kdgb_break in assert. + Choose this option if you want to hit kdgb_break in assert. endmenu diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f7c5cdc10a70..4c8b1bc98913 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -825,8 +825,9 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) fw_inst_const_size); } - memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data, - fw_bss_data_size); + if (fw_bss_data_size) + memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, + fw_bss_data, fw_bss_data_size); /* Copy firmware bios info into FB memory. 
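
Two DMUB loader details appear in the amdgpu_dm hunks here: the BSS copy is now skipped when the firmware reports a zero-size BSS section, and the instruction-constant pointer is computed by skipping the PSP signing header inside the ucode array. The sketch below models both; PSP_HEADER_BYTES and the layout numbers are illustrative, not the real DMUB firmware format.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PSP_HEADER_BYTES 0x100 /* illustrative; the real value is in the DMUB headers */

int main(void)
{
        /* Hypothetical firmware image layout. */
        uint8_t fw_data[0x400] = { 0 };
        uint32_t ucode_array_offset_bytes = 0x40; /* from the fw header */
        size_t fw_bss_data_size = 0;              /* can legitimately be 0 */
        uint8_t window[16];

        /* The added size check means a zero-length BSS section skips
         * the copy entirely instead of calling memcpy anyway. */
        if (fw_bss_data_size)
                memcpy(window, fw_data, fw_bss_data_size);

        /* fw_inst_const skips the PSP signing header inside the ucode. */
        const uint8_t *fw_inst_const =
                fw_data + ucode_array_offset_bytes + PSP_HEADER_BYTES;
        printf("inst_const at offset %td\n", fw_inst_const - fw_data);
        return 0;
}
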
*/ memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios, @@ -1265,6 +1266,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) adev->dm.dmub_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + le32_to_cpu(hdr->inst_const_bytes); + region_params.fw_inst_const = + adev->dm.dmub_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + + PSP_HEADER_BYTES; status = dmub_srv_calc_region_info(dmub_srv, ®ion_params, ®ion_info); @@ -3340,7 +3345,8 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev, const union dc_tiling_info *tiling_info, const uint64_t info, struct dc_plane_dcc_param *dcc, - struct dc_plane_address *address) + struct dc_plane_address *address, + bool force_disable_dcc) { struct dc *dc = adev->dm.dc; struct dc_dcc_surface_param input; @@ -3352,6 +3358,9 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev, memset(&input, 0, sizeof(input)); memset(&output, 0, sizeof(output)); + if (force_disable_dcc) + return 0; + if (!offset) return 0; @@ -3401,7 +3410,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev, union dc_tiling_info *tiling_info, struct plane_size *plane_size, struct dc_plane_dcc_param *dcc, - struct dc_plane_address *address) + struct dc_plane_address *address, + bool force_disable_dcc) { const struct drm_framebuffer *fb = &afb->base; int ret; @@ -3507,7 +3517,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev, ret = fill_plane_dcc_attributes(adev, afb, format, rotation, plane_size, tiling_info, - tiling_flags, dcc, address); + tiling_flags, dcc, address, + force_disable_dcc); if (ret) return ret; } @@ -3599,7 +3610,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, const struct drm_plane_state *plane_state, const uint64_t tiling_flags, struct dc_plane_info *plane_info, - struct dc_plane_address *address) + struct dc_plane_address *address, + bool force_disable_dcc) { const struct drm_framebuffer *fb = plane_state->fb; const struct amdgpu_framebuffer *afb = @@ -3681,7 +3693,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, plane_info->rotation, tiling_flags, &plane_info->tiling_info, &plane_info->plane_size, - &plane_info->dcc, address); + &plane_info->dcc, address, + force_disable_dcc); if (ret) return ret; @@ -3704,6 +3717,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, struct dc_plane_info plane_info; uint64_t tiling_flags; int ret; + bool force_disable_dcc = false; ret = fill_dc_scaling_info(plane_state, &scaling_info); if (ret) @@ -3718,9 +3732,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, if (ret) return ret; + force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags, &plane_info, - &dc_plane_state->address); + &dc_plane_state->address, + force_disable_dcc); if (ret) return ret; @@ -4324,14 +4340,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false); - if (stream->link->psr_feature_enabled) { + if (stream->link->psr_settings.psr_feature_enabled) { struct dc *core_dc = stream->link->ctx->dc; if (dc_is_dmcu_initialized(core_dc)) { - struct dmcu *dmcu = core_dc->res_pool->dmcu; - - stream->psr_version = dmcu->dmcu_version.psr_version; - // // should decide stream support vsc sdp colorimetry capability // before building vsc info packet @@ -4664,6 +4676,7 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector) 
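
The force_disable_dcc parameter threaded through fill_plane_dcc_attributes()/fill_dc_plane_info_and_addr() above boils down to one policy bit plus an early-out: Raven must come out of suspend with display compression (DCC) off. A compact model of just that control flow, with the types trimmed to the two fields the decision reads:

#include <stdbool.h>
#include <stdio.h>

enum asic_type { CHIP_RAVEN, CHIP_NAVI10 }; /* trimmed-down list */

struct adev { enum asic_type asic_type; bool in_suspend; };

/* Mirrors the policy added in fill_dc_plane_attributes(). */
static bool should_force_disable_dcc(const struct adev *adev)
{
        return adev->asic_type == CHIP_RAVEN && adev->in_suspend;
}

static int fill_dcc(bool force_disable_dcc)
{
        /* Same early-out the patch adds to fill_plane_dcc_attributes(). */
        if (force_disable_dcc)
                return 0; /* leave dcc->enable at 0 */
        printf("querying DCC caps...\n");
        return 0;
}

int main(void)
{
        struct adev raven = { CHIP_RAVEN, true };

        return fill_dcc(should_force_disable_dcc(&raven));
}
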
i2c_del_adapter(&aconnector->i2c->base); kfree(aconnector->i2c); } + kfree(aconnector->dm_dp_aux.aux.name); kfree(connector); } @@ -4726,6 +4739,15 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector) #if defined(CONFIG_DEBUG_FS) struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + int r; + + if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || + (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { + amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev; + r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux); + if (r) + return r; + } connector_debugfs_init(amdgpu_dm_connector); #endif @@ -5332,6 +5354,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, uint64_t tiling_flags; uint32_t domain; int r; + bool force_disable_dcc = false; dm_plane_state_old = to_dm_plane_state(plane->state); dm_plane_state_new = to_dm_plane_state(new_state); @@ -5390,11 +5413,13 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { struct dc_plane_state *plane_state = dm_plane_state_new->dc_state; + force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; fill_plane_buffer_attributes( adev, afb, plane_state->format, plane_state->rotation, tiling_flags, &plane_state->tiling_info, &plane_state->plane_size, &plane_state->dcc, - &plane_state->address); + &plane_state->address, + force_disable_dcc); } return 0; @@ -6092,7 +6117,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, if (connector_type == DRM_MODE_CONNECTOR_DisplayPort || connector_type == DRM_MODE_CONNECTOR_eDP) - amdgpu_dm_initialize_dp_connector(dm, aconnector); + amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index); out_free: if (res) { @@ -6619,6 +6644,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, if (new_pcrtc_state->color_mgmt_changed) { bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; + bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; } fill_dc_scaling_info(new_plane_state, @@ -6666,7 +6692,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, fill_dc_plane_info_and_addr( dm->adev, new_plane_state, tiling_flags, &bundle->plane_infos[planes_count], - &bundle->flip_addrs[planes_count].address); + &bundle->flip_addrs[planes_count].address, + false); + + DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n", + new_plane_state->plane->index, + bundle->plane_infos[planes_count].dcc.enable); bundle->surface_updates[planes_count].plane_info = &bundle->plane_infos[planes_count]; @@ -6807,7 +6838,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, } mutex_lock(&dm->dc_lock); if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && - acrtc_state->stream->link->psr_allow_active) + acrtc_state->stream->link->psr_settings.psr_allow_active) amdgpu_dm_psr_disable(acrtc_state->stream); dc_commit_updates_for_stream(dm->dc, @@ -6818,12 +6849,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, dc_state); if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && - acrtc_state->stream->psr_version && - !acrtc_state->stream->link->psr_feature_enabled) + acrtc_state->stream->link->psr_settings.psr_version != PSR_VERSION_UNSUPPORTED && + !acrtc_state->stream->link->psr_settings.psr_feature_enabled) 
amdgpu_dm_link_setup_psr(acrtc_state->stream); else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) && - acrtc_state->stream->link->psr_feature_enabled && - !acrtc_state->stream->link->psr_allow_active) { + acrtc_state->stream->link->psr_settings.psr_feature_enabled && + !acrtc_state->stream->link->psr_settings.psr_allow_active) { amdgpu_dm_psr_enable(acrtc_state->stream); } @@ -7137,7 +7168,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc); /* i.e. reset mode */ if (dm_old_crtc_state->stream) { - if (dm_old_crtc_state->stream->link->psr_allow_active) + if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active) amdgpu_dm_psr_disable(dm_old_crtc_state->stream); remove_stream(adev, acrtc, dm_old_crtc_state->stream); @@ -8063,6 +8094,8 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, new_dm_plane_state->dc_state->gamma_correction; bundle->surface_updates[num_plane].in_transfer_func = new_dm_plane_state->dc_state->in_transfer_func; + bundle->surface_updates[num_plane].gamut_remap_matrix = + &new_dm_plane_state->dc_state->gamut_remap_matrix; bundle->stream_update.gamut_remap = &new_dm_crtc_state->stream->gamut_remap_matrix; bundle->stream_update.output_csc_transform = @@ -8086,7 +8119,8 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, ret = fill_dc_plane_info_and_addr( dm->adev, new_plane_state, tiling_flags, plane_info, - &flip_addr->address); + &flip_addr->address, + false); if (ret) goto cleanup; @@ -8586,8 +8620,17 @@ static void amdgpu_dm_set_psr_caps(struct dc_link *link) return; if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT, dpcd_data, sizeof(dpcd_data))) { - link->psr_feature_enabled = dpcd_data[0] ? 
true:false; - DRM_INFO("PSR support:%d\n", link->psr_feature_enabled); + link->dpcd_caps.psr_caps.psr_version = dpcd_data[0]; + + if (dpcd_data[0] == 0) { + link->psr_settings.psr_version = PSR_VERSION_UNSUPPORTED; + link->psr_settings.psr_feature_enabled = false; + } else { + link->psr_settings.psr_version = PSR_VERSION_1; + link->psr_settings.psr_feature_enabled = true; + } + + DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled); } } @@ -8611,7 +8654,7 @@ static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) link = stream->link; dc = link->ctx->dc; - psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version; + psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version; if (psr_config.psr_version > 0) { psr_config.psr_exit_link_training_required = 0x1; @@ -8623,7 +8666,7 @@ static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context); } - DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled); + DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled); return ret; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index 2233d293a707..838f35668f12 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -419,9 +419,21 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, struct dc_plane_state *dc_plane_state) { const struct drm_color_lut *degamma_lut; + enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB; uint32_t degamma_size; int r; + /* Get the correct base transfer function for implicit degamma. */ + switch (dc_plane_state->format) { + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + /* DC doesn't have a transfer function for BT601 specifically. */ + tf = TRANSFER_FUNCTION_BT709; + break; + default: + break; + } + if (crtc->cm_has_degamma) { degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, °amma_size); @@ -455,8 +467,7 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, * map these to the atomic one instead. */ if (crtc->cm_is_degamma_srgb) - dc_plane_state->in_transfer_func->tf = - TRANSFER_FUNCTION_SRGB; + dc_plane_state->in_transfer_func->tf = tf; else dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; @@ -471,7 +482,12 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, * in linear space. Assume that the input is sRGB. */ dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED; - dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_SRGB; + dc_plane_state->in_transfer_func->tf = tf; + + if (tf != TRANSFER_FUNCTION_SRGB && + !mod_color_calculate_degamma_params( + dc_plane_state->in_transfer_func, NULL, false)) + return -ENOMEM; } else { /* ...Otherwise we can just bypass the DGM block. */ dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 0461fecd68db..b3b7efd973ca 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -838,6 +838,44 @@ static int vrr_range_show(struct seq_file *m, void *data) return 0; } +#ifdef CONFIG_DRM_AMD_DC_HDCP +/* + * Returns the HDCP capability of the Display (1.4 for now). 
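
amdgpu_dm_set_psr_caps() above stops collapsing the sink's PSR capability into a bare bool and instead records a version enum in the new psr_settings grouping. The function below mirrors that mapping; the enum is cut down to the two values the hunk uses.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum psr_version { PSR_VERSION_UNSUPPORTED, PSR_VERSION_1 };

struct psr_settings {
        enum psr_version psr_version;
        bool psr_feature_enabled;
};

/* Same mapping as the rewritten amdgpu_dm_set_psr_caps(): the raw
 * DP_PSR_SUPPORT DPCD byte (0 = none, nonzero = PSR1 here) becomes a
 * driver-level enum instead of a bare bool. */
static void set_psr_caps(struct psr_settings *s, uint8_t dpcd_psr_support)
{
        if (dpcd_psr_support == 0) {
                s->psr_version = PSR_VERSION_UNSUPPORTED;
                s->psr_feature_enabled = false;
        } else {
                s->psr_version = PSR_VERSION_1;
                s->psr_feature_enabled = true;
        }
}

int main(void)
{
        struct psr_settings s;

        set_psr_caps(&s, 0x01); /* hypothetical eDP panel reporting PSR1 */
        printf("PSR support:%d\n", s.psr_feature_enabled);
        return 0;
}
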
+ * + * NOTE* Not all HDMI displays report their HDCP caps even when they are capable. + * Since its rare for a display to not be HDCP 1.4 capable, we set HDMI as always capable. + * + * Example usage: cat /sys/kernel/debug/dri/0/DP-1/hdcp_sink_capability + * or cat /sys/kernel/debug/dri/0/HDMI-A-1/hdcp_sink_capability + */ +static int hdcp_sink_capability_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + bool hdcp_cap, hdcp2_cap; + + if (connector->status != connector_status_connected) + return -ENODEV; + + seq_printf(m, "%s:%d HDCP version: ", connector->name, connector->base.id); + + hdcp_cap = dc_link_is_hdcp14(aconnector->dc_link); + hdcp2_cap = dc_link_is_hdcp22(aconnector->dc_link); + + + if (hdcp_cap) + seq_printf(m, "%s ", "HDCP1.4"); + if (hdcp2_cap) + seq_printf(m, "%s ", "HDCP2.2"); + + if (!hdcp_cap && !hdcp2_cap) + seq_printf(m, "%s ", "None"); + + seq_puts(m, "\n"); + + return 0; +} +#endif /* function description * * generic SDP message access for testing @@ -964,6 +1002,9 @@ DEFINE_SHOW_ATTRIBUTE(dmub_fw_state); DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer); DEFINE_SHOW_ATTRIBUTE(output_bpc); DEFINE_SHOW_ATTRIBUTE(vrr_range); +#ifdef CONFIG_DRM_AMD_DC_HDCP +DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability); +#endif static const struct file_operations dp_link_settings_debugfs_fops = { .owner = THIS_MODULE, @@ -1019,12 +1060,23 @@ static const struct { {"test_pattern", &dp_phy_test_pattern_fops}, {"output_bpc", &output_bpc_fops}, {"vrr_range", &vrr_range_fops}, +#ifdef CONFIG_DRM_AMD_DC_HDCP + {"hdcp_sink_capability", &hdcp_sink_capability_fops}, +#endif {"sdp_message", &sdp_message_fops}, {"aux_dpcd_address", &dp_dpcd_address_debugfs_fops}, {"aux_dpcd_size", &dp_dpcd_size_debugfs_fops}, {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops} }; +#ifdef CONFIG_DRM_AMD_DC_HDCP +static const struct { + char *name; + const struct file_operations *fops; +} hdmi_debugfs_entries[] = { + {"hdcp_sink_capability", &hdcp_sink_capability_fops} +}; +#endif /* * Force YUV420 output if available from the given mode */ @@ -1093,6 +1145,15 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector) connector->debugfs_dpcd_address = 0; connector->debugfs_dpcd_size = 0; +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) { + for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_entries); i++) { + debugfs_create_file(hdmi_debugfs_entries[i].name, + 0644, dir, connector, + hdmi_debugfs_entries[i].fops); + } + } +#endif } /* @@ -1167,8 +1228,9 @@ static int current_backlight_read(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = dev->dev_private; - struct dc *dc = adev->dm.dc; - unsigned int backlight = dc_get_current_backlight_pwm(dc); + struct amdgpu_display_manager *dm = &adev->dm; + + unsigned int backlight = dc_link_get_backlight_level(dm->backlight_link); seq_printf(m, "0x%x\n", backlight); return 0; @@ -1184,8 +1246,9 @@ static int target_backlight_read(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = dev->dev_private; - struct dc *dc = adev->dm.dc; - unsigned int backlight = dc_get_target_backlight_pwm(dc); + struct amdgpu_display_manager *dm = &adev->dm; + + unsigned int backlight = 
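
The HDMI debugfs hookup in this hunk is table-driven: an array of {name, fops} pairs plus an ARRAY_SIZE loop, so adding a file means adding one initializer rather than another create call. A userspace model of the same shape (struct fops here is a stand-in for the kernel's file_operations):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct fops { const char *impl; };

static const struct fops hdcp_fops = { "hdcp_sink_capability_show" };

/* Same table-driven shape as hdmi_debugfs_entries[] above. */
static const struct {
        const char *name;
        const struct fops *fops;
} hdmi_debugfs_entries[] = {
        { "hdcp_sink_capability", &hdcp_fops },
};

int main(void)
{
        for (size_t i = 0; i < ARRAY_SIZE(hdmi_debugfs_entries); i++)
                printf("debugfs_create_file(\"%s\", 0644, dir, ...) -> %s\n",
                       hdmi_debugfs_entries[i].name,
                       hdmi_debugfs_entries[i].fops->impl);
        return 0;
}
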
dc_link_get_target_backlight_pwm(dm->backlight_link); seq_printf(m, "0x%x\n", backlight); return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index c20fb08c450b..c407f06cd1f5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -445,7 +445,7 @@ bool dm_helpers_dp_read_dpcd( struct amdgpu_dm_connector *aconnector = link->priv; if (!aconnector) { - DRM_ERROR("Failed to find connector for link!"); + DC_LOG_DC("Failed to find connector for link!\n"); return false; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 3db1ec35d2b4..ae0a7ef1d595 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -41,53 +41,10 @@ #include "amdgpu_dm_debugfs.h" #endif - #if defined(CONFIG_DRM_AMD_DC_DCN) #include "dc/dcn20/dcn20_resource.h" #endif -/* #define TRACE_DPCD */ - -#ifdef TRACE_DPCD -#define SIDE_BAND_MSG(address) (address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && address < DP_SINK_COUNT_ESI) - -static inline char *side_band_msg_type_to_str(uint32_t address) -{ - static char str[10] = {0}; - - if (address < DP_SIDEBAND_MSG_UP_REP_BASE) - strcpy(str, "DOWN_REQ"); - else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE) - strcpy(str, "UP_REP"); - else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE) - strcpy(str, "DOWN_REP"); - else - strcpy(str, "UP_REQ"); - - return str; -} - -static void log_dpcd(uint8_t type, - uint32_t address, - uint8_t *data, - uint32_t size, - bool res) -{ - DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n", - (type == DP_AUX_NATIVE_READ) || - (type == DP_AUX_I2C_READ) ? - "Read" : "Write", - address, - SIDE_BAND_MSG(address) ? - side_band_msg_type_to_str(address) : "Nop", - res ? 
"OK" : "Fail"); - - if (res) { - print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false); - } -} -#endif - static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { @@ -162,16 +119,16 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector) to_amdgpu_dm_connector(connector); int r; - amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev; - r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux); - if (r) + r = drm_dp_mst_connector_late_register(connector, + amdgpu_dm_connector->port); + if (r < 0) return r; #if defined(CONFIG_DEBUG_FS) connector_debugfs_init(amdgpu_dm_connector); #endif - return r; + return 0; } static void @@ -451,9 +408,12 @@ static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { }; void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, - struct amdgpu_dm_connector *aconnector) + struct amdgpu_dm_connector *aconnector, + int link_index) { - aconnector->dm_dp_aux.aux.name = "dmdc"; + aconnector->dm_dp_aux.aux.name = + kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d", + link_index); aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer; aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index d6813ce67bbd..d2c56579a2cc 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -32,7 +32,8 @@ struct amdgpu_dm_connector; int dm_mst_get_pbn_divider(struct dc_link *link); void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, - struct amdgpu_dm_connector *aconnector); + struct amdgpu_dm_connector *aconnector, + int link_index); #if defined(CONFIG_DRM_AMD_DC_DCN) bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile index 7ad0cad0f4ef..01b99e0d788e 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/Makefile +++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile @@ -24,8 +24,7 @@ # It provides the general basic services required by other DAL # subcomponents. 
-BASICS = conversion.o fixpt31_32.o \ - log_helpers.o vector.o dc_common.o +BASICS = conversion.o fixpt31_32.o vector.o dc_common.o AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS)) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 8ec2dfe45d40..a5c2114e4292 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -90,7 +90,7 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m dc->hwss.exit_optimized_pwr_state(dc, dc->current_state); if (edp_link) { - clk_mgr->psr_allow_active_cache = edp_link->psr_allow_active; + clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active; dc_link_set_psr_allow_active(edp_link, false, false); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c index 26db1c5d4e4d..b210f8e9d592 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c @@ -131,7 +131,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base) struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); int dprefclk_wdivider; int dprefclk_src_sel; - int dp_ref_clk_khz = 600000; + int dp_ref_clk_khz; int target_div; /* ASSERT DP Reference Clock source is from DFS*/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c index 97b7f32294fd..c320b7af7d34 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c @@ -97,9 +97,6 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di VBIOSSMC_MSG_SetDispclkFreq, requested_dispclk_khz / 1000); - /* Actual dispclk set is returned in the parameter register */ - actual_dispclk_set_mhz = REG_READ(MP1_SMN_C2PMSG_83) * 1000; - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 8489f1e56892..0f7810571be3 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -66,6 +66,8 @@ #include "dce/dce_i2c.h" +#include "dmub/inc/dmub_cmd_dal.h" + #define CTX \ dc->ctx @@ -348,7 +350,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, for (i = 0; i < MAX_PIPES; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe->stream == stream) + if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) break; } /* Stream not found */ @@ -365,6 +367,9 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, param.windowb_x_end = pipe->stream->timing.h_addressable; param.windowb_y_end = pipe->stream->timing.v_addressable; + param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0; + param.odm_mode = pipe->next_odm_pipe ? 
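
dc_stream_configure_crc() above now requires the matched pipe to be the head of its tree: no blending parent (top_pipe) and no ODM predecessor (prev_odm_pipe), since only that pipe represents the whole stream for CRC purposes. A toy topology demonstrating the selection test, with the struct trimmed to the fields the test reads:

#include <stdio.h>

#define MAX_PIPES 4

struct pipe_ctx {
        const char *stream;
        struct pipe_ctx *top_pipe;      /* non-NULL for blended planes */
        struct pipe_ctx *prev_odm_pipe; /* non-NULL for ODM segments */
};

int main(void)
{
        struct pipe_ctx pipes[MAX_PIPES] = { 0 };

        /* Hypothetical topology: pipe 1 is an ODM continuation of pipe 0. */
        pipes[0].stream = "stream0";
        pipes[1].stream = "stream0";
        pipes[1].prev_odm_pipe = &pipes[0];

        /* Same test the patch adds to dc_stream_configure_crc(): only
         * the head pipe may own CRC capture. */
        for (int i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *p = &pipes[i];

                if (p->stream && !p->top_pipe && !p->prev_odm_pipe) {
                        printf("CRC configured on pipe %d\n", i);
                        break;
                }
        }
        return 0;
}
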
1:0; + /* Default to the union of both windows */ param.selection = UNION_WINDOW_A_B; param.continuous_mode = continuous; @@ -2641,33 +2646,12 @@ void dc_set_power_state( void dc_resume(struct dc *dc) { - uint32_t i; for (i = 0; i < dc->link_count; i++) core_link_resume(dc->links[i]); } -unsigned int dc_get_current_backlight_pwm(struct dc *dc) -{ - struct abm *abm = dc->res_pool->abm; - - if (abm) - return abm->funcs->get_current_backlight(abm); - - return 0; -} - -unsigned int dc_get_target_backlight_pwm(struct dc *dc) -{ - struct abm *abm = dc->res_pool->abm; - - if (abm) - return abm->funcs->get_target_backlight(abm); - - return 0; -} - bool dc_is_dmcu_initialized(struct dc *dc) { struct dmcu *dmcu = dc->res_pool->dmcu; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 67cfff1586e9..9c4686edcf3e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -46,10 +46,11 @@ #include "dmcu.h" #include "hw/clk_mgr.h" #include "dce/dmub_psr.h" +#include "dmub/inc/dmub_cmd_dal.h" +#include "inc/hw/panel_cntl.h" #define DC_LOGGER_INIT(logger) - #define LINK_INFO(...) \ DC_LOG_HW_HOTPLUG( \ __VA_ARGS__) @@ -64,11 +65,11 @@ enum { PEAK_FACTOR_X1000 = 1006, /* - * Some receivers fail to train on first try and are good - * on subsequent tries. 2 retries should be plenty. If we - * don't have a successful training then we don't expect to - * ever get one. - */ + * Some receivers fail to train on first try and are good + * on subsequent tries. 2 retries should be plenty. If we + * don't have a successful training then we don't expect to + * ever get one. + */ LINK_TRAINING_MAX_VERIFY_RETRY = 2 }; @@ -79,7 +80,7 @@ static void dc_link_destruct(struct dc_link *link) { int i; - if (link->hpd_gpio != NULL) { + if (link->hpd_gpio) { dal_gpio_destroy_irq(&link->hpd_gpio); link->hpd_gpio = NULL; } @@ -87,7 +88,10 @@ static void dc_link_destruct(struct dc_link *link) if (link->ddc) dal_ddc_service_destroy(&link->ddc); - if(link->link_enc) + if (link->panel_cntl) + link->panel_cntl->funcs->destroy(&link->panel_cntl); + + if (link->link_enc) link->link_enc->funcs->destroy(&link->link_enc); if (link->local_sink) @@ -98,8 +102,8 @@ static void dc_link_destruct(struct dc_link *link) } struct gpio *get_hpd_gpio(struct dc_bios *dcb, - struct graphics_object_id link_id, - struct gpio_service *gpio_service) + struct graphics_object_id link_id, + struct gpio_service *gpio_service) { enum bp_result bp_result; struct graphics_object_hpd_info hpd_info; @@ -116,10 +120,9 @@ struct gpio *get_hpd_gpio(struct dc_bios *dcb, return NULL; } - return dal_gpio_service_create_irq( - gpio_service, - pin_info.offset, - pin_info.mask); + return dal_gpio_service_create_irq(gpio_service, + pin_info.offset, + pin_info.mask); } /* @@ -134,13 +137,10 @@ struct gpio *get_hpd_gpio(struct dc_bios *dcb, * @return * true on success, false otherwise */ -static bool program_hpd_filter( - const struct dc_link *link) +static bool program_hpd_filter(const struct dc_link *link) { bool result = false; - struct gpio *hpd; - int delay_on_connect_in_ms = 0; int delay_on_disconnect_in_ms = 0; @@ -159,10 +159,10 @@ static bool program_hpd_filter( case SIGNAL_TYPE_DISPLAY_PORT_MST: /* Program hpd filter to allow DP signal to settle */ /* 500: not able to detect MST <-> SST switch as HPD is low for - * only 100ms on DELL U2413 - * 0: some passive dongle still show aux mode instead of i2c - * 20-50:not enough to hide bouncing HPD with passive 
dongle. - * also see intermittent i2c read issues. + * only 100ms on DELL U2413 + * 0: some passive dongle still show aux mode instead of i2c + * 20-50: not enough to hide bouncing HPD with passive dongle. + * also see intermittent i2c read issues. */ delay_on_connect_in_ms = 80; delay_on_disconnect_in_ms = 0; @@ -175,7 +175,8 @@ static bool program_hpd_filter( } /* Obtain HPD handle */ - hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service); + hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, + link->ctx->gpio_service); if (!hpd) return result; @@ -226,8 +227,9 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type) } /* todo: may need to lock gpio access */ - hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service); - if (hpd_pin == NULL) + hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, + link->ctx->gpio_service); + if (!hpd_pin) goto hpd_gpio_failure; dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT); @@ -248,8 +250,7 @@ hpd_gpio_failure: return false; } -static enum ddc_transaction_type get_ddc_transaction_type( - enum signal_type sink_signal) +static enum ddc_transaction_type get_ddc_transaction_type(enum signal_type sink_signal) { enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE; @@ -270,7 +271,8 @@ static enum ddc_transaction_type get_ddc_transaction_type( case SIGNAL_TYPE_DISPLAY_PORT_MST: /* MST does not use I2COverAux, but there is the * SPECIAL use case for "immediate dwnstrm device - * access" (EPR#370830). */ + * access" (EPR#370830). + */ transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; break; @@ -281,9 +283,8 @@ static enum ddc_transaction_type get_ddc_transaction_type( return transaction_type; } -static enum signal_type get_basic_signal_type( - struct graphics_object_id encoder, - struct graphics_object_id downstream) +static enum signal_type get_basic_signal_type(struct graphics_object_id encoder, + struct graphics_object_id downstream) { if (downstream.type == OBJECT_TYPE_CONNECTOR) { switch (downstream.id) { @@ -369,10 +370,11 @@ bool dc_link_is_dp_sink_present(struct dc_link *link) /* Open GPIO and set it to I2C mode */ /* Note: this GpioMode_Input will be converted * to GpioConfigType_I2cAuxDualMode in GPIO component, - * which indicates we need additional delay */ + * which indicates we need additional delay + */ - if (GPIO_RESULT_OK != dal_ddc_open( - ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) { + if (dal_ddc_open(ddc, GPIO_MODE_INPUT, + GPIO_DDC_CONFIG_TYPE_MODE_I2C) != GPIO_RESULT_OK) { dal_ddc_close(ddc); return present; @@ -406,25 +408,25 @@ bool dc_link_is_dp_sink_present(struct dc_link *link) * @brief * Detect output sink type */ -static enum signal_type link_detect_sink( - struct dc_link *link, - enum dc_detect_reason reason) +static enum signal_type link_detect_sink(struct dc_link *link, + enum dc_detect_reason reason) { - enum signal_type result = get_basic_signal_type( - link->link_enc->id, link->link_id); + enum signal_type result = get_basic_signal_type(link->link_enc->id, + link->link_id); /* Internal digital encoder will detect only dongles - * that require digital signal */ + * that require digital signal + */ /* Detection mechanism is different * for different native connectors. 
* LVDS connector supports only LVDS signal; * PCIE is a bus slot, the actual connector needs to be detected first; * eDP connector supports only eDP signal; - * HDMI should check straps for audio */ + * HDMI should check straps for audio + */ /* PCIE detects the actual connector on add-on board */ - if (link->link_id.id == CONNECTOR_ID_PCIE) { /* ZAZTODO implement PCIE add-on card detection */ } @@ -432,8 +434,10 @@ static enum signal_type link_detect_sink( switch (link->link_id.id) { case CONNECTOR_ID_HDMI_TYPE_A: { /* check audio support: - * if native HDMI is not supported, switch to DVI */ + * if native HDMI is not supported, switch to DVI + */ struct audio_support *aud_support = + &link->dc->res_pool->audio_support; if (!aud_support->hdmi_audio_native) if (link->link_id.id == CONNECTOR_ID_HDMI_TYPE_A) @@ -461,16 +465,15 @@ static enum signal_type link_detect_sink( return result; } -static enum signal_type decide_signal_from_strap_and_dongle_type( - enum display_dongle_type dongle_type, - struct audio_support *audio_support) +static enum signal_type decide_signal_from_strap_and_dongle_type(enum display_dongle_type dongle_type, + struct audio_support *audio_support) { enum signal_type signal = SIGNAL_TYPE_NONE; switch (dongle_type) { case DISPLAY_DONGLE_DP_HDMI_DONGLE: if (audio_support->hdmi_audio_on_dongle) - signal = SIGNAL_TYPE_HDMI_TYPE_A; + signal = SIGNAL_TYPE_HDMI_TYPE_A; else signal = SIGNAL_TYPE_DVI_SINGLE_LINK; break; @@ -491,16 +494,14 @@ static enum signal_type decide_signal_from_strap_and_dongle_type( return signal; } -static enum signal_type dp_passive_dongle_detection( - struct ddc_service *ddc, - struct display_sink_capability *sink_cap, - struct audio_support *audio_support) +static enum signal_type dp_passive_dongle_detection(struct ddc_service *ddc, + struct display_sink_capability *sink_cap, + struct audio_support *audio_support) { - dal_ddc_service_i2c_query_dp_dual_mode_adaptor( - ddc, sink_cap); - return decide_signal_from_strap_and_dongle_type( - sink_cap->dongle_type, - audio_support); + dal_ddc_service_i2c_query_dp_dual_mode_adaptor(ddc, sink_cap); + + return decide_signal_from_strap_and_dongle_type(sink_cap->dongle_type, + audio_support); } static void link_disconnect_sink(struct dc_link *link) @@ -519,6 +520,96 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin link->local_sink = prev_sink; } +#if defined(CONFIG_DRM_AMD_DC_HDCP) +bool dc_link_is_hdcp14(struct dc_link *link) +{ + bool ret = false; + + switch (link->connector_signal) { + case SIGNAL_TYPE_DISPLAY_PORT: + case SIGNAL_TYPE_DISPLAY_PORT_MST: + ret = link->hdcp_caps.bcaps.bits.HDCP_CAPABLE; + break; + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + /* HDMI doesn't tell us its HDCP(1.4) capability, so assume it is always capable; + * we can poll for bksv but some displays have an issue with this. Since it's so rare + * for a display to not be 1.4 capable, this assumption is okay + */ + ret = true; + break; + default: + break; + } + return ret; +} + +bool dc_link_is_hdcp22(struct dc_link *link) +{ + bool ret = false; + + switch (link->connector_signal) { + case SIGNAL_TYPE_DISPLAY_PORT: + case SIGNAL_TYPE_DISPLAY_PORT_MST: + ret = (link->hdcp_caps.bcaps.bits.HDCP_CAPABLE && + link->hdcp_caps.rx_caps.fields.byte0.hdcp_capable && + (link->hdcp_caps.rx_caps.fields.version == 0x2)) ? 1 : 0; + break; + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + ret = (link->hdcp_caps.rx_caps.fields.version == 0x4) ? 1 : 0; + break; + default: + break; + } + + return ret; +} + +static void query_hdcp_capability(enum signal_type signal, struct dc_link *link) +{ + struct hdcp_protection_message msg22; + struct hdcp_protection_message msg14; + + memset(&msg22, 0, sizeof(struct hdcp_protection_message)); + memset(&msg14, 0, sizeof(struct hdcp_protection_message)); + memset(link->hdcp_caps.rx_caps.raw, 0, + sizeof(link->hdcp_caps.rx_caps.raw)); + + if ((link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && + link->ddc->transaction_type == + DDC_TRANSACTION_TYPE_I2C_OVER_AUX) || + link->connector_signal == SIGNAL_TYPE_EDP) { + msg22.data = link->hdcp_caps.rx_caps.raw; + msg22.length = sizeof(link->hdcp_caps.rx_caps.raw); + msg22.msg_id = HDCP_MESSAGE_ID_RX_CAPS; + } else { + msg22.data = &link->hdcp_caps.rx_caps.fields.version; + msg22.length = sizeof(link->hdcp_caps.rx_caps.fields.version); + msg22.msg_id = HDCP_MESSAGE_ID_HDCP2VERSION; + } + msg22.version = HDCP_VERSION_22; + msg22.link = HDCP_LINK_PRIMARY; + msg22.max_retries = 5; + dc_process_hdcp_msg(signal, link, &msg22); + + if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + enum hdcp_message_status status = HDCP_MESSAGE_UNSUPPORTED; + + msg14.data = &link->hdcp_caps.bcaps.raw; + msg14.length = sizeof(link->hdcp_caps.bcaps.raw); + msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS; + msg14.version = HDCP_VERSION_14; + msg14.link = HDCP_LINK_PRIMARY; + msg14.max_retries = 5; + + status = dc_process_hdcp_msg(signal, link, &msg14); + } + +} +#endif static void read_current_link_settings_on_detect(struct dc_link *link) { @@ -532,18 +623,18 @@ static void read_current_link_settings_on_detect(struct dc_link *link) // Read DPCD 00101h to find out the number of lanes currently set for (i = 0; i < read_dpcd_retry_cnt; i++) { - status = core_link_read_dpcd( - link, - DP_LANE_COUNT_SET, - &lane_count_set.raw, - sizeof(lane_count_set)); + status = core_link_read_dpcd(link, + DP_LANE_COUNT_SET, + &lane_count_set.raw, + sizeof(lane_count_set)); /* First DPCD read after VDD ON can fail if the particular board * does not have HPD pin wired correctly. So if DPCD read fails, * which should never happen, retry a few times. Target worst * case scenario of 80 ms.
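 * (Illustrative arithmetic, not part of the patch: the loop above makes at most * read_dpcd_retry_cnt attempts, so each failed aux transaction has a budget of * roughly 80 ms divided by that retry count; the actual per-try latency is * hardware-dependent.)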
*/ if (status == DC_OK) { - link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET; + link->cur_link_settings.lane_count = + lane_count_set.bits.LANE_COUNT_SET; break; } @@ -552,7 +643,7 @@ // Read DPCD 00100h to find if standard link rates are set core_link_read_dpcd(link, DP_LINK_BW_SET, - &link_bw_set, sizeof(link_bw_set)); + &link_bw_set, sizeof(link_bw_set)); if (link_bw_set == 0) { if (link->connector_signal == SIGNAL_TYPE_EDP) { @@ -560,12 +651,12 @@ static void read_current_link_settings_on_detect(struct dc_link *link) * Read DPCD 00115h to find the edp link rate set used */ core_link_read_dpcd(link, DP_LINK_RATE_SET, - &link_rate_set, sizeof(link_rate_set)); + &link_rate_set, sizeof(link_rate_set)); // edp_supported_link_rates_count = 0 for DP if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { link->cur_link_settings.link_rate = - link->dpcd_caps.edp_supported_link_rates[link_rate_set]; + link->dpcd_caps.edp_supported_link_rates[link_rate_set]; link->cur_link_settings.link_rate_set = link_rate_set; link->cur_link_settings.use_link_rate_set = true; } @@ -579,7 +670,7 @@ static void read_current_link_settings_on_detect(struct dc_link *link) } // Read DPCD 00003h to find the max down spread. core_link_read_dpcd(link, DP_MAX_DOWNSPREAD, - &max_down_spread.raw, sizeof(max_down_spread)); + &max_down_spread.raw, sizeof(max_down_spread)); link->cur_link_settings.link_spread = max_down_spread.bits.MAX_DOWN_SPREAD ? LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; @@ -612,6 +703,12 @@ static bool detect_dp(struct dc_link *link, dal_ddc_service_set_transaction_type(link->ddc, sink_caps->transaction_type); +#if defined(CONFIG_DRM_AMD_DC_HDCP) + /* In case of fallback to SST when topology discovery below fails, + * HDCP caps will be queried again later by the upper layer (caller + * of this function). */ + query_hdcp_capability(SIGNAL_TYPE_DISPLAY_PORT_MST, link); +#endif /* * This call will initiate MST topology discovery, which * will detect MST ports and add new DRM connector DRM @@ -683,12 +780,12 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid) if (new_edid->length == 0) return false; - return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0); + return (memcmp(old_edid->raw_edid, + new_edid->raw_edid, new_edid->length) == 0); } -static bool wait_for_alt_mode(struct dc_link *link) +static bool wait_for_entering_dp_alt_mode(struct dc_link *link) { - /** * something is terribly wrong if time out is > 200ms. (5Hz) * 500 microseconds * 400 tries gives us 200 ms @@ -703,7 +800,7 @@ DC_LOGGER_INIT(link->ctx->logger); - if (link->link_enc->funcs->is_in_alt_mode == NULL) + if (!link->link_enc->funcs->is_in_alt_mode) return true; is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc); @@ -718,21 +815,21 @@ udelay(sleep_time_in_microseconds); /* ask the link if alt mode is enabled, if so return ok */ if (link->link_enc->funcs->is_in_alt_mode(link->link_enc)) { - finish_timestamp = dm_get_timestamp(link->ctx); - time_taken_in_ns = dm_get_elapse_time_in_ns( - link->ctx, finish_timestamp, enter_timestamp); + time_taken_in_ns = + dm_get_elapse_time_in_ns(link->ctx, + finish_timestamp, + enter_timestamp); DC_LOG_WARNING("Alt mode entered finished after %llu ms\n", div_u64(time_taken_in_ns, 1000000)); return true; } - } finish_timestamp = dm_get_timestamp(link->ctx); time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp); DC_LOG_WARNING("Alt mode has timed out after %llu ms\n", - div_u64(time_taken_in_ns, 1000000)); + div_u64(time_taken_in_ns, 1000000)); return false; } @@ -768,30 +865,30 @@ static bool dc_link_detect_helper(struct dc_link *link, return false; if ((link->connector_signal == SIGNAL_TYPE_LVDS || - link->connector_signal == SIGNAL_TYPE_EDP) && - link->local_sink) { - + link->connector_signal == SIGNAL_TYPE_EDP) && + link->local_sink) { // need to re-write OUI and brightness in resume case if (link->connector_signal == SIGNAL_TYPE_EDP) { dpcd_set_source_specific_data(link); - dc_link_set_default_brightness_aux(link); //TODO: use cached + dc_link_set_default_brightness_aux(link); + //TODO: use cached } return true; } - if (false == dc_link_detect_sink(link, &new_connection_type)) { + if (!dc_link_detect_sink(link, &new_connection_type)) { BREAK_TO_DEBUGGER(); return false; } prev_sink = link->local_sink; - if (prev_sink != NULL) { + if (prev_sink) { dc_sink_retain(prev_sink); memcpy(&prev_dpcd_caps, &link->dpcd_caps, sizeof(struct dpcd_caps)); } - link_disconnect_sink(link); + link_disconnect_sink(link); if (new_connection_type != dc_connection_none) { link->type = new_connection_type; link->link_state_valid = false; @@ -838,35 +935,31 @@ static bool dc_link_detect_helper(struct dc_link *link, } case SIGNAL_TYPE_DISPLAY_PORT: { - /* wa HPD high coming too early*/ if (link->link_enc->features.flags.bits.DP_IS_USB_C == 1) { - /* if alt mode times out, return false */ - if (wait_for_alt_mode(link) == false) { + if (!wait_for_entering_dp_alt_mode(link)) return false; - } } - if (!detect_dp( - link, - &sink_caps, - &converter_disable_audio, - aud_support, reason)) { - if (prev_sink != NULL) + if (!detect_dp(link, &sink_caps, + &converter_disable_audio, + aud_support, reason)) { + if (prev_sink) dc_sink_release(prev_sink); return false; } // Check if dpcd block is the same - if (prev_sink != NULL) { - if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps))) + if (prev_sink) { + if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, + sizeof(struct dpcd_caps))) same_dpcd = false; } /* Active dongle downstream unplug*/ if (link->type == dc_connection_active_dongle && - link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) { - if (prev_sink != NULL) + link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) { + if (prev_sink) /* Downstream unplug */ dc_sink_release(prev_sink); return true; @@ -874,7 +967,7 @@ static bool dc_link_detect_helper(struct dc_link
*link, if (link->type == dc_connection_mst_branch) { LINK_INFO("link=%d, mst branch is now Connected\n", - link->link_index); + link->link_index); /* Need to setup mst link_cap struct here * otherwise dc_link_detect() will leave mst link_cap * empty which leads to allocate_mst_payload() has "0" @@ -882,15 +975,15 @@ static bool dc_link_detect_helper(struct dc_link *link, */ dp_verify_mst_link_cap(link); - if (prev_sink != NULL) + if (prev_sink) dc_sink_release(prev_sink); return false; } // For seamless boot, to skip verify link cap, we read UEFI settings and set them as verified. if (reason == DETECT_REASON_BOOT && - dc_ctx->dc->config.power_down_display_on_boot == false && - link->link_status.link_active == true) + !dc_ctx->dc->config.power_down_display_on_boot && + link->link_status.link_active) perform_dp_seamless_boot = true; if (perform_dp_seamless_boot) { @@ -903,24 +996,23 @@ static bool dc_link_detect_helper(struct dc_link *link, default: DC_ERROR("Invalid connector type! signal:%d\n", - link->connector_signal); - if (prev_sink != NULL) + link->connector_signal); + if (prev_sink) dc_sink_release(prev_sink); return false; } /* switch() */ if (link->dpcd_caps.sink_count.bits.SINK_COUNT) - link->dpcd_sink_count = link->dpcd_caps.sink_count. - bits.SINK_COUNT; + link->dpcd_sink_count = + link->dpcd_caps.sink_count.bits.SINK_COUNT; else link->dpcd_sink_count = 1; - dal_ddc_service_set_transaction_type( - link->ddc, - sink_caps.transaction_type); + dal_ddc_service_set_transaction_type(link->ddc, + sink_caps.transaction_type); - link->aux_mode = dal_ddc_service_is_in_aux_transaction_mode( - link->ddc); + link->aux_mode = + dal_ddc_service_is_in_aux_transaction_mode(link->ddc); sink_init_data.link = link; sink_init_data.sink_signal = sink_caps.signal; @@ -928,7 +1020,7 @@ static bool dc_link_detect_helper(struct dc_link *link, sink = dc_sink_create(&sink_init_data); if (!sink) { DC_ERROR("Failed to create sink!\n"); - if (prev_sink != NULL) + if (prev_sink) dc_sink_release(prev_sink); return false; } @@ -939,10 +1031,8 @@ static bool dc_link_detect_helper(struct dc_link *link, /* dc_sink_create returns a new reference */ link->local_sink = sink; - edid_status = dm_helpers_read_local_edid( - link->ctx, - link, - sink); + edid_status = dm_helpers_read_local_edid(link->ctx, + link, sink); switch (edid_status) { case EDID_BAD_CHECKSUM: @@ -950,7 +1040,6 @@ static bool dc_link_detect_helper(struct dc_link *link, break; case EDID_NO_RESPONSE: DC_LOG_ERROR("No EDID read.\n"); - /* * Abort detection for non-DP connectors if we have * no EDID @@ -961,7 +1050,7 @@ static bool dc_link_detect_helper(struct dc_link *link, */ if (dc_is_hdmi_signal(link->connector_signal) || dc_is_dvi_signal(link->connector_signal)) { - if (prev_sink != NULL) + if (prev_sink) dc_sink_release(prev_sink); return false; @@ -974,45 +1063,53 @@ static bool dc_link_detect_helper(struct dc_link *link, link->ctx->dc->debug.disable_fec = true; // Check if edid is the same - if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK))) - same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid); + if ((prev_sink) && + (edid_status == EDID_THE_SAME || edid_status == EDID_OK)) + same_edid = is_same_edid(&prev_sink->dc_edid, + &sink->dc_edid); if (sink->edid_caps.panel_patch.skip_scdc_overwrite) link->ctx->dc->debug.hdmi20_disable = true; if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && - sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { + sink_caps.transaction_type == + 
DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { /* * TODO debug why Dell 2413 doesn't like * two link trainings */ +#if defined(CONFIG_DRM_AMD_DC_HDCP) + query_hdcp_capability(sink->sink_signal, link); +#endif // verify link cap for SST non-seamless boot if (!perform_dp_seamless_boot) dp_verify_link_cap_with_retries(link, - &link->reported_link_cap, - LINK_TRAINING_MAX_VERIFY_RETRY); + &link->reported_link_cap, + LINK_TRAINING_MAX_VERIFY_RETRY); } else { // If edid is the same, then discard new sink and revert back to original sink if (same_edid) { link_disconnect_remap(prev_sink, link); sink = prev_sink; prev_sink = NULL; - } +#if defined(CONFIG_DRM_AMD_DC_HDCP) + query_hdcp_capability(sink->sink_signal, link); +#endif } /* HDMI-DVI Dongle */ if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A && - !sink->edid_caps.edid_hdmi) + !sink->edid_caps.edid_hdmi) sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK; /* Connectivity log: detection */ for (i = 0; i < sink->dc_edid.length / DC_EDID_BLOCK_SIZE; i++) { CONN_DATA_DETECT(link, - &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE], - DC_EDID_BLOCK_SIZE, - "%s: [Block %d] ", sink->edid_caps.display_name, i); + &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE], + DC_EDID_BLOCK_SIZE, + "%s: [Block %d] ", sink->edid_caps.display_name, i); } DC_LOG_DETECTION_EDID_PARSER("%s: " @@ -1047,17 +1144,18 @@ static bool dc_link_detect_helper(struct dc_link *link, sink->edid_caps.audio_modes[i].sample_rate, sink->edid_caps.audio_modes[i].sample_size); } - } else { /* From Connected-to-Disconnected. */ if (link->type == dc_connection_mst_branch) { LINK_INFO("link=%d, mst branch is now Disconnected\n", - link->link_index); + link->link_index); dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); link->mst_stream_alloc_table.stream_count = 0; - memset(link->mst_stream_alloc_table.stream_allocations, 0, sizeof(link->mst_stream_alloc_table.stream_allocations)); + memset(link->mst_stream_alloc_table.stream_allocations, + 0, + sizeof(link->mst_stream_alloc_table.stream_allocations)); } link->type = dc_connection_none; @@ -1071,16 +1169,15 @@ static bool dc_link_detect_helper(struct dc_link *link, } LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p dpcd same=%d edid same=%d\n", - link->link_index, sink, - (sink_caps.signal == SIGNAL_TYPE_NONE ? - "Disconnected":"Connected"), prev_sink, - same_dpcd, same_edid); + link->link_index, sink, + (sink_caps.signal == + SIGNAL_TYPE_NONE ? 
"Disconnected" : "Connected"), + prev_sink, same_dpcd, same_edid); - if (prev_sink != NULL) + if (prev_sink) dc_sink_release(prev_sink); return true; - } bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) @@ -1110,13 +1207,13 @@ bool dc_link_get_hpd_state(struct dc_link *dc_link) return state; } -static enum hpd_source_id get_hpd_line( - struct dc_link *link) +static enum hpd_source_id get_hpd_line(struct dc_link *link) { struct gpio *hpd; enum hpd_source_id hpd_id = HPD_SOURCEID_UNKNOWN; - hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service); + hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, + link->ctx->gpio_service); if (hpd) { switch (dal_irq_get_source(hpd)) { @@ -1191,8 +1288,7 @@ static enum channel_id get_ddc_line(struct dc_link *link) return channel; } -static enum transmitter translate_encoder_to_transmitter( - struct graphics_object_id encoder) +static enum transmitter translate_encoder_to_transmitter(struct graphics_object_id encoder) { switch (encoder.id) { case ENCODER_ID_INTERNAL_UNIPHY: @@ -1256,17 +1352,18 @@ static enum transmitter translate_encoder_to_transmitter( } } -static bool dc_link_construct( - struct dc_link *link, - const struct link_init_data *init_params) +static bool dc_link_construct(struct dc_link *link, + const struct link_init_data *init_params) { uint8_t i; struct ddc_service_init_data ddc_service_init_data = { { 0 } }; struct dc_context *dc_ctx = init_params->ctx; struct encoder_init_data enc_init_data = { 0 }; + struct panel_cntl_init_data panel_cntl_init_data = { 0 }; struct integrated_info info = {{{ 0 }}}; struct dc_bios *bios = init_params->dc->ctx->dc_bios; const struct dc_vbios_funcs *bp_funcs = bios->funcs; + DC_LOGGER_INIT(dc_ctx->logger); link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; @@ -1278,23 +1375,27 @@ static bool dc_link_construct( link->ctx = dc_ctx; link->link_index = init_params->link_index; - memset(&link->preferred_training_settings, 0, sizeof(struct dc_link_training_overrides)); - memset(&link->preferred_link_setting, 0, sizeof(struct dc_link_settings)); + memset(&link->preferred_training_settings, 0, + sizeof(struct dc_link_training_overrides)); + memset(&link->preferred_link_setting, 0, + sizeof(struct dc_link_settings)); - link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index); + link->link_id = + bios->funcs->get_connector_id(bios, init_params->connector_index); if (link->link_id.type != OBJECT_TYPE_CONNECTOR) { dm_output_to_console("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! 
type %d expected %d\n", - __func__, init_params->connector_index, - link->link_id.type, OBJECT_TYPE_CONNECTOR); + __func__, init_params->connector_index, + link->link_id.type, OBJECT_TYPE_CONNECTOR); goto create_fail; } if (link->dc->res_pool->funcs->link_init) link->dc->res_pool->funcs->link_init(link); - link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service); - if (link->hpd_gpio != NULL) { + link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id, + link->ctx->gpio_service); + if (link->hpd_gpio) { dal_gpio_open(link->hpd_gpio, GPIO_MODE_INTERRUPT); dal_gpio_unlock_pin(link->hpd_gpio); link->irq_source_hpd = dal_irq_get_source(link->hpd_gpio); @@ -1314,9 +1415,9 @@ static bool dc_link_construct( link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK; break; case CONNECTOR_ID_DISPLAY_PORT: - link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT; + link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT; - if (link->hpd_gpio != NULL) + if (link->hpd_gpio) link->irq_source_hpd_rx = dal_irq_get_rx_source(link->hpd_gpio); @@ -1324,42 +1425,60 @@ static bool dc_link_construct( case CONNECTOR_ID_EDP: link->connector_signal = SIGNAL_TYPE_EDP; - if (link->hpd_gpio != NULL) { + if (link->hpd_gpio) { link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; link->irq_source_hpd_rx = dal_irq_get_rx_source(link->hpd_gpio); } + break; case CONNECTOR_ID_LVDS: link->connector_signal = SIGNAL_TYPE_LVDS; break; default: - DC_LOG_WARNING("Unsupported Connector type:%d!\n", link->link_id.id); + DC_LOG_WARNING("Unsupported Connector type:%d!\n", + link->link_id.id); goto create_fail; } /* TODO: #DAL3 Implement id to str function.*/ LINK_INFO("Connector[%d] description:" - "signal %d\n", - init_params->connector_index, - link->connector_signal); + "signal %d\n", + init_params->connector_index, + link->connector_signal); ddc_service_init_data.ctx = link->ctx; ddc_service_init_data.id = link->link_id; ddc_service_init_data.link = link; link->ddc = dal_ddc_service_create(&ddc_service_init_data); - if (link->ddc == NULL) { + if (!link->ddc) { DC_ERROR("Failed to create ddc_service!\n"); goto ddc_create_fail; } link->ddc_hw_inst = - dal_ddc_get_line( - dal_ddc_service_get_ddc_pin(link->ddc)); + dal_ddc_get_line(dal_ddc_service_get_ddc_pin(link->ddc)); + + + if (link->dc->res_pool->funcs->panel_cntl_create && + (link->link_id.id == CONNECTOR_ID_EDP || + link->link_id.id == CONNECTOR_ID_LVDS)) { + panel_cntl_init_data.ctx = dc_ctx; + panel_cntl_init_data.inst = 0; + link->panel_cntl = + link->dc->res_pool->funcs->panel_cntl_create( + &panel_cntl_init_data); + + if (link->panel_cntl == NULL) { + DC_ERROR("Failed to create link panel_cntl!\n"); + goto panel_cntl_create_fail; + } + } enc_init_data.ctx = dc_ctx; - bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0, &enc_init_data.encoder); + bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0, + &enc_init_data.encoder); enc_init_data.connector = link->link_id; enc_init_data.channel = get_ddc_line(link); enc_init_data.hpd_source = get_hpd_line(link); @@ -1367,11 +1486,11 @@ static bool dc_link_construct( link->hpd_src = enc_init_data.hpd_source; enc_init_data.transmitter = - translate_encoder_to_transmitter(enc_init_data.encoder); - link->link_enc = link->dc->res_pool->funcs->link_enc_create( - &enc_init_data); + translate_encoder_to_transmitter(enc_init_data.encoder); + link->link_enc = + link->dc->res_pool->funcs->link_enc_create(&enc_init_data); - if (link->link_enc == NULL) { + if (!link->link_enc) { DC_ERROR("Failed to create link 
encoder!\n"); goto link_enc_create_fail; } @@ -1379,8 +1498,9 @@ static bool dc_link_construct( link->link_enc_hw_inst = link->link_enc->transmitter; for (i = 0; i < 4; i++) { - if (BP_RESULT_OK != - bp_funcs->get_device_tag(dc_ctx->dc_bios, link->link_id, i, &link->device_tag)) { + if (bp_funcs->get_device_tag(dc_ctx->dc_bios, + link->link_id, i, + &link->device_tag) != BP_RESULT_OK) { DC_ERROR("Failed to find device tag!\n"); goto device_tag_fail; } @@ -1388,13 +1508,14 @@ static bool dc_link_construct( /* Look for device tag that matches connector signal, * CRT for rgb, LCD for other supported signal types */ - if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios, link->device_tag.dev_id)) + if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios, + link->device_tag.dev_id)) continue; - if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT - && link->connector_signal != SIGNAL_TYPE_RGB) + if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT && + link->connector_signal != SIGNAL_TYPE_RGB) continue; - if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD - && link->connector_signal == SIGNAL_TYPE_RGB) + if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD && + link->connector_signal == SIGNAL_TYPE_RGB) continue; break; } @@ -1406,16 +1527,16 @@ static bool dc_link_construct( for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) { struct external_display_path *path = &info.ext_disp_conn_info.path[i]; - if (path->device_connector_id.enum_id == link->link_id.enum_id - && path->device_connector_id.id == link->link_id.id - && path->device_connector_id.type == link->link_id.type) { - if (link->device_tag.acpi_device != 0 - && path->device_acpi_enum == link->device_tag.acpi_device) { + if (path->device_connector_id.enum_id == link->link_id.enum_id && + path->device_connector_id.id == link->link_id.id && + path->device_connector_id.type == link->link_id.type) { + if (link->device_tag.acpi_device != 0 && + path->device_acpi_enum == link->device_tag.acpi_device) { link->ddi_channel_mapping = path->channel_mapping; link->chip_caps = path->caps; } else if (path->device_tag == - link->device_tag.dev_id.raw_device_tag) { + link->device_tag.dev_id.raw_device_tag) { link->ddi_channel_mapping = path->channel_mapping; link->chip_caps = path->caps; } @@ -1431,15 +1552,20 @@ static bool dc_link_construct( */ program_hpd_filter(link); + link->psr_settings.psr_version = PSR_VERSION_UNSUPPORTED; + return true; device_tag_fail: link->link_enc->funcs->destroy(&link->link_enc); link_enc_create_fail: + if (link->panel_cntl != NULL) + link->panel_cntl->funcs->destroy(&link->panel_cntl); +panel_cntl_create_fail: dal_ddc_service_destroy(&link->ddc); ddc_create_fail: create_fail: - if (link->hpd_gpio != NULL) { + if (link->hpd_gpio) { dal_gpio_destroy_irq(&link->hpd_gpio); link->hpd_gpio = NULL; } @@ -2339,9 +2465,28 @@ enum dc_status dc_link_validate_mode_timing( return DC_OK; } +/* Walk the pipes of the current state and return the ABM block of the pipe + * whose stream is driven by this link, or NULL if none is found. + */ +static struct abm *get_abm_from_stream_res(const struct dc_link *link) +{ + int i; + struct dc *dc = link->ctx->dc; + struct abm *abm = NULL; + + for (i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i]; + struct dc_stream_state *stream = pipe_ctx.stream; + + if (stream && stream->link == link) { + abm = pipe_ctx.stream_res.abm; + break; + } + } + return abm; +} + int dc_link_get_backlight_level(const struct dc_link *link) { - struct abm *abm = link->ctx->dc->res_pool->abm; + + struct abm *abm = get_abm_from_stream_res(link); if (abm == NULL || abm->funcs->get_current_backlight
== NULL) return DC_ERROR_UNEXPECTED; @@ -2349,24 +2494,33 @@ int dc_link_get_backlight_level(const struct dc_link *link) return (int) abm->funcs->get_current_backlight(abm); } +int dc_link_get_target_backlight_pwm(const struct dc_link *link) +{ + struct abm *abm = get_abm_from_stream_res(link); + + if (abm == NULL || abm->funcs->get_target_backlight == NULL) + return DC_ERROR_UNEXPECTED; + + return (int) abm->funcs->get_target_backlight(abm); +} + bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t backlight_pwm_u16_16, uint32_t frame_ramp) { struct dc *dc = link->ctx->dc; - struct abm *abm = dc->res_pool->abm; + struct abm *abm = get_abm_from_stream_res(link); struct dmcu *dmcu = dc->res_pool->dmcu; unsigned int controller_id = 0; - bool use_smooth_brightness = true; + bool fw_set_brightness = true; int i; DC_LOGGER_INIT(link->ctx->logger); - if ((dmcu == NULL) || - (abm == NULL) || - (abm->funcs->set_backlight_level_pwm == NULL)) + if (abm == NULL || (abm->funcs->set_backlight_level_pwm == NULL)) return false; - use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); + if (dmcu) + fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", backlight_pwm_u16_16, backlight_pwm_u16_16); @@ -2398,7 +2552,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link, backlight_pwm_u16_16, frame_ramp, controller_id, - use_smooth_brightness); + fw_set_brightness); } return true; @@ -2406,15 +2560,13 @@ bool dc_link_set_backlight_level(const struct dc_link *link, bool dc_link_set_abm_disable(const struct dc_link *link) { - struct dc *dc = link->ctx->dc; - struct abm *abm = dc->res_pool->abm; + struct abm *abm = get_abm_from_stream_res(link); + bool success = false; - if ((abm == NULL) || (abm->funcs->set_backlight_level_pwm == NULL)) - return false; - - abm->funcs->set_abm_immediate_disable(abm); + if (abm) + success = abm->funcs->set_abm_immediate_disable(abm); - return true; + return success; } bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool wait) @@ -2423,12 +2575,12 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool struct dmcu *dmcu = dc->res_pool->dmcu; struct dmub_psr *psr = dc->res_pool->psr; - if (psr != NULL && link->psr_feature_enabled) + if (psr != NULL && link->psr_settings.psr_feature_enabled) psr->funcs->psr_enable(psr, allow_active); - else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled) + else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_settings.psr_feature_enabled) dmcu->funcs->set_psr_enable(dmcu, allow_active, wait); - link->psr_allow_active = allow_active; + link->psr_settings.psr_allow_active = allow_active; return true; } @@ -2439,9 +2591,9 @@ bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state) struct dmcu *dmcu = dc->res_pool->dmcu; struct dmub_psr *psr = dc->res_pool->psr; - if (psr != NULL && link->psr_feature_enabled) + if (psr != NULL && link->psr_settings.psr_feature_enabled) psr->funcs->psr_get_state(psr, psr_state); - else if (dmcu != NULL && link->psr_feature_enabled) + else if (dmcu != NULL && link->psr_settings.psr_feature_enabled) dmcu->funcs->get_psr_state(dmcu, psr_state); return true; @@ -2612,14 +2764,14 @@ bool dc_link_setup_psr(struct dc_link *link, psr_context->frame_delay = 0; if (psr) - link->psr_feature_enabled = psr->funcs->psr_copy_settings(psr, link, psr_context); + link->psr_settings.psr_feature_enabled = 
psr->funcs->psr_copy_settings(psr, link, psr_context); else - link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context); + link->psr_settings.psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context); /* psr_enabled == 0 indicates setup_psr did not succeed, but this * should not happen since firmware should be running at this point */ - if (link->psr_feature_enabled == 0) + if (link->psr_settings.psr_feature_enabled == 0) ASSERT(0); return true; @@ -2966,7 +3118,7 @@ void core_link_enable_stream( enum dc_status status; DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && + if (!IS_DIAG_DC(dc->ctx->dce_environment) && dc_is_virtual_signal(pipe_ctx->stream->signal)) return; @@ -3040,6 +3192,18 @@ void core_link_enable_stream( if (pipe_ctx->stream->dpms_off) return; + /* Have to setup DSC before DIG FE and BE are connected (which happens before the + * link training). This is to make sure the bandwidth sent to DIG BE won't be + * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag + * will be automatically set at a later time when the video is enabled + * (DP_VID_STREAM_EN = 1). + */ + if (pipe_ctx->stream->timing.flags.DSC) { + if (dc_is_dp_signal(pipe_ctx->stream->signal) || + dc_is_virtual_signal(pipe_ctx->stream->signal)) + dp_set_dsc_enable(pipe_ctx, true); + } + status = enable_link(state, pipe_ctx); if (status != DC_OK) { @@ -3067,11 +3231,6 @@ void core_link_enable_stream( CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, COLOR_DEPTH_UNDEFINED); - if (pipe_ctx->stream->timing.flags.DSC) { - if (dc_is_dp_signal(pipe_ctx->stream->signal) || - dc_is_virtual_signal(pipe_ctx->stream->signal)) - dp_set_dsc_enable(pipe_ctx, true); - } dc->hwss.enable_stream(pipe_ctx); /* Set DPS PPS SDP (AKA "info frames") */ @@ -3109,7 +3268,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->sink->link; - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && + if (!IS_DIAG_DC(dc->ctx->dce_environment) && dc_is_virtual_signal(pipe_ctx->stream->signal)) return; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index aa3c45a69b5e..d5b306384d79 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1710,19 +1710,10 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down) static struct dc_link_settings get_max_link_cap(struct dc_link *link) { - /* Set Default link settings */ - struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH, - LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0}; - - /* Higher link settings based on feature supported */ - if (link->link_enc->features.flags.bits.IS_HBR2_CAPABLE) - max_link_cap.link_rate = LINK_RATE_HIGH2; - - if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE) - max_link_cap.link_rate = LINK_RATE_HIGH3; + struct dc_link_settings max_link_cap = {0}; - if (link->link_enc->funcs->get_max_link_cap) - link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap); + /* get max link encoder capability */ + link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap); /* Lower link settings based on sink's link cap */ if (link->reported_link_cap.lane_count < max_link_cap.lane_count) @@ -2426,7 +2417,7 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link) { union dpcd_psr_configuration psr_configuration; - if 
(!link->psr_feature_enabled) + if (!link->psr_settings.psr_feature_enabled) return false; dm_helpers_dp_read_dpcd( @@ -2911,6 +2902,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) + link->dc->hwss.blank_stream(pipe_ctx); + } + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) break; } @@ -2927,6 +2924,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) dc_link_reallocate_mst_payload(link); + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) + link->dc->hwss.unblank_stream(pipe_ctx, &previous_link_settings); + } + status = false; if (out_link_loss) *out_link_loss = true; @@ -4227,6 +4230,21 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) void dpcd_set_source_specific_data(struct dc_link *link) { const uint32_t post_oui_delay = 30; // 30ms + uint8_t dspc = 0; + enum dc_status ret = DC_ERROR_UNEXPECTED; + + ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc, + sizeof(dspc)); + + if (ret != DC_OK) { + DC_LOG_ERROR("Error in DP aux read transaction," + " not writing source specific data\n"); + return; + } + + /* Return if OUI unsupported */ + if (!(dspc & DP_OUI_SUPPORT)) + return; if (!link->dc->vendor_signature.is_valid) { struct dpcd_amd_signature amd_signature; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 51e0ee6e7695..6590f51caefa 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -400,7 +400,7 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) struct dc_stream_state *stream = pipe_ctx->stream; bool result = false; - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) + if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) result = true; else result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index f4bcc71b2920..12f5c6881cd0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -692,6 +692,9 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) /* Round up, assume original video size always even dimensions */ data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div; data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div; + + data->viewport_unadjusted = data->viewport; + data->viewport_c_unadjusted = data->viewport_c; } static void calculate_recout(struct pipe_ctx *pipe_ctx) @@ -1358,9 +1361,6 @@ bool dc_add_plane_to_context( dc_plane_state_retain(plane_state); while (head_pipe) { - tail_pipe = resource_get_tail_pipe(&context->res_ctx, head_pipe); - ASSERT(tail_pipe); - free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe); #if defined(CONFIG_DRM_AMD_DC_DCN) @@ -1378,6 +1378,8 @@ bool dc_add_plane_to_context( free_pipe->plane_state = plane_state; if (head_pipe != free_pipe) { + tail_pipe = 
resource_get_tail_pipe(&context->res_ctx, head_pipe); + ASSERT(tail_pipe); free_pipe->stream_res.tg = tail_pipe->stream_res.tg; free_pipe->stream_res.abm = tail_pipe->stream_res.abm; free_pipe->stream_res.opp = tail_pipe->stream_res.opp; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c index a249a0e5edd0..9e16af22e4aa 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c @@ -54,6 +54,7 @@ static bool dc_sink_construct(struct dc_sink *sink, const struct dc_sink_init_da sink->ctx = link->ctx; sink->dongle_max_pix_clk = init_params->dongle_max_pix_clk; sink->converter_disable_audio = init_params->converter_disable_audio; + sink->is_mst_legacy = init_params->sink_is_legacy; sink->dc_container_id = NULL; sink->sink_id = init_params->link->ctx->dc_sink_id_count; // increment dc_sink_id_count because we don't want two sinks with same ID diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 1935cf6601eb..5432ca1657b1 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -29,6 +29,9 @@ #include "dc_types.h" #include "grph_object_defs.h" #include "logger_types.h" +#if defined(CONFIG_DRM_AMD_DC_HDCP) +#include "hdcp_types.h" +#endif #include "gpio_types.h" #include "link_service_types.h" #include "grph_object_ctrl_defs.h" @@ -39,7 +42,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.76" +#define DC_VER "3.2.81" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -230,7 +233,7 @@ struct dc_config { bool forced_clocks; bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well bool multi_mon_pp_mclk_switch; - bool psr_on_dmub; + bool disable_dmcu; }; enum visual_confirm { @@ -238,6 +241,7 @@ enum visual_confirm { VISUAL_CONFIRM_SURFACE = 1, VISUAL_CONFIRM_HDR = 2, VISUAL_CONFIRM_MPCTREE = 4, + VISUAL_CONFIRM_PSR = 5, }; enum dcc_option { @@ -987,6 +991,7 @@ struct dpcd_caps { union dpcd_fec_capability fec_cap; struct dpcd_dsc_capabilities dsc_caps; struct dc_lttpr_caps lttpr_caps; + struct psr_caps psr_caps; }; @@ -1004,6 +1009,35 @@ union dpcd_sink_ext_caps { uint8_t raw; }; +#if defined(CONFIG_DRM_AMD_DC_HDCP) +union hdcp_rx_caps { + struct { + uint8_t version; + uint8_t reserved; + struct { + uint8_t repeater : 1; + uint8_t hdcp_capable : 1; + uint8_t reserved : 6; + } byte0; + } fields; + uint8_t raw[3]; +}; + +union hdcp_bcaps { + struct { + uint8_t HDCP_CAPABLE:1; + uint8_t REPEATER:1; + uint8_t RESERVED:6; + } bits; + uint8_t raw; +}; + +struct hdcp_caps { + union hdcp_rx_caps rx_caps; + union hdcp_bcaps bcaps; +}; +#endif + #include "dc_link.h" /******************************************************************************* @@ -1046,7 +1080,7 @@ struct dc_sink { void *priv; struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX]; bool converter_disable_audio; - + bool is_mst_legacy; struct dc_sink_dsc_caps dsc_caps; struct dc_sink_fec_caps fec_caps; @@ -1073,6 +1107,7 @@ struct dc_sink_init_data { struct dc_link *link; uint32_t dongle_max_pix_clk; bool converter_disable_audio; + bool sink_is_legacy; }; struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params); @@ -1104,9 +1139,16 @@ void dc_set_power_state( struct dc *dc, enum dc_acpi_cm_power_state power_state); void dc_resume(struct dc *dc); -unsigned int dc_get_current_backlight_pwm(struct dc *dc); -unsigned int dc_get_target_backlight_pwm(struct dc *dc); 
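The two dc-wide getters removed above are superseded by the per-link helpers this series adds in dc_link.c, dc_link_get_backlight_level() and dc_link_get_target_backlight_pwm(). A minimal sketch of a migrated caller, assuming a valid connected link; log_link_backlight() is a hypothetical function, not part of this patch:

/* Hypothetical caller, for illustration only. */
static void log_link_backlight(const struct dc_link *link)
{
	int cur = dc_link_get_backlight_level(link);
	int target = dc_link_get_target_backlight_pwm(link);
	DC_LOGGER_INIT(link->ctx->logger);

	/* Both helpers return DC_ERROR_UNEXPECTED when no ABM block is
	 * associated with the link's stream resources.
	 */
	if (cur == DC_ERROR_UNEXPECTED || target == DC_ERROR_UNEXPECTED)
		return;

	DC_LOG_BACKLIGHT("backlight pwm: current %d, target %d\n",
			cur, target);
}

Because get_abm_from_stream_res() resolves the ABM block through the link's active stream rather than the resource pool, these helpers only succeed while the link is actually driving a stream.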
+#if defined(CONFIG_DRM_AMD_DC_HDCP) +/* + * HDCP Interfaces + */ +enum hdcp_message_status dc_process_hdcp_msg( + enum signal_type signal, + struct dc_link *link, + struct hdcp_protection_message *message_info); +#endif bool dc_is_dmcu_initialized(struct dc *dc); enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping); diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index bb2730e9521e..af177c087d3b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -740,5 +740,11 @@ struct dpcd_dsc_capabilities { union dpcd_dsc_ext_capabilities dsc_ext_caps; }; +/* These parameters are from PSR capabilities reported by Sink DPCD */ +struct psr_caps { + unsigned char psr_version; + unsigned int psr_rfb_setup_time; + bool psr_exit_link_training_required; +}; #endif /* DC_DP_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 00ff5e98278c..80fb4149f36a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -29,6 +29,7 @@ #include "dc.h" #include "dc_types.h" #include "grph_object_defs.h" +#include "dmub/inc/dmub_cmd_dal.h" enum dc_link_fec_state { dc_link_fec_not_ready, @@ -66,6 +67,22 @@ struct time_stamp { struct link_trace { struct time_stamp time_stamp; }; + +/* PSR feature flags */ +struct psr_settings { + bool psr_feature_enabled; // PSR is supported by sink + bool psr_allow_active; // PSR is currently active + enum psr_version psr_version; // Internal PSR version, determined based on DPCD + + /* These parameters are calculated in Driver, + * based on display timing and Sink capabilities. + * If VBLANK region is too small and Sink takes a long time + * to set up RFB, it may take an extra frame to enter PSR state. + */ + bool psr_frame_capture_indication_req; + unsigned int psr_sdp_transmit_line_num_deadline; +}; + /* * A link contains one or more sinks and their connected status. * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported. 
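With the loose psr_feature_enabled and psr_allow_active members folded into this struct (see the dc_link hunk below), call sites test the consolidated block instead. A hedged sketch of a consumer; link_psr_can_be_activated() is hypothetical, and PSR_VERSION_UNSUPPORTED is the default that dc_link_construct() now installs:

/* Hypothetical helper, for illustration only. */
static bool link_psr_can_be_activated(const struct dc_link *link)
{
	const struct psr_settings *psr = &link->psr_settings;

	/* PSR must be supported by the sink, resolved to a known internal
	 * version, and not already active.
	 */
	return psr->psr_feature_enabled &&
		psr->psr_version != PSR_VERSION_UNSUPPORTED &&
		!psr->psr_allow_active;
}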
@@ -118,6 +135,7 @@ struct dc_link { struct dc_context *ctx; + struct panel_cntl *panel_cntl; struct link_encoder *link_enc; struct graphics_object_id link_id; union ddi_channel_mapping ddi_channel_mapping; @@ -126,11 +144,14 @@ struct dc_link { uint32_t dongle_max_pix_clk; unsigned short chip_caps; unsigned int dpcd_sink_count; +#if defined(CONFIG_DRM_AMD_DC_HDCP) + struct hdcp_caps hdcp_caps; +#endif enum edp_revision edp_revision; - bool psr_feature_enabled; - bool psr_allow_active; union dpcd_sink_ext_caps dpcd_sink_ext_caps; + struct psr_settings psr_settings; + /* MST record stream using this link */ struct link_flags { bool dp_keep_receiver_powered; @@ -197,6 +218,8 @@ bool dc_link_set_default_brightness_aux(struct dc_link *link); int dc_link_get_backlight_level(const struct dc_link *dc_link); +int dc_link_get_target_backlight_pwm(const struct dc_link *link); + bool dc_link_set_abm_disable(const struct dc_link *dc_link); bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool enable, bool wait); @@ -290,6 +313,10 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type); * DPCD access interfaces */ +#ifdef CONFIG_DRM_AMD_DC_HDCP +bool dc_link_is_hdcp14(struct dc_link *link); +bool dc_link_is_hdcp22(struct dc_link *link); +#endif void dc_link_set_drive_settings(struct dc *dc, struct link_training_settings *lt_settings, const struct dc_link *link); diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index a5c7ef47b8d3..49aad691e687 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -167,8 +167,6 @@ struct dc_stream_state { /* TODO: custom INFO packets */ /* TODO: ABM info (DMCU) */ - /* PSR info */ - unsigned char psr_version; /* TODO: CEA VIC */ /* DMCU info */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile index fbfcff700971..f704a8fd52e8 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile @@ -29,7 +29,7 @@ DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \ dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \ dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \ -dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o +dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o dmub_abm.o dce_panel_cntl.o AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE)) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index b8a3fc505c9b..4dae9efebb6f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c @@ -420,7 +420,7 @@ static bool dce_abm_set_backlight_level_pwm( unsigned int backlight_pwm_u16_16, unsigned int frame_ramp, unsigned int controller_id, - bool use_smooth_brightness) + bool fw_set_brightness) { struct dce_abm *abm_dce = TO_DCE_ABM(abm); @@ -428,7 +428,7 @@ static bool dce_abm_set_backlight_level_pwm( backlight_pwm_u16_16, backlight_pwm_u16_16); /* If DMCU is in reset state, DMCU is uninitialized */ - if (use_smooth_brightness) + if (fw_set_brightness) dmcu_set_backlight_level(abm_dce, backlight_pwm_u16_16, frame_ramp, @@ -447,6 +447,7 @@ static const struct abm_funcs dce_funcs = { .set_backlight_level_pwm = dce_abm_set_backlight_level_pwm, .get_current_backlight = dce_abm_get_current_backlight, .get_target_backlight = dce_abm_get_target_backlight, + .init_abm_config = NULL, .set_abm_immediate_disable = 
dce_abm_immediate_disable }; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index 2e992fbc0d71..d2ad0504b0de 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -1014,39 +1014,6 @@ struct pixel_rate_range_table_entry { unsigned short div_factor; }; -static const struct pixel_rate_range_table_entry video_optimized_pixel_rates[] = { - // /1.001 rates - {25170, 25180, 25200, 1000, 1001}, //25.2MHz -> 25.17 - {59340, 59350, 59400, 1000, 1001}, //59.4Mhz -> 59.340 - {74170, 74180, 74250, 1000, 1001}, //74.25Mhz -> 74.1758 - {125870, 125880, 126000, 1000, 1001}, //126Mhz -> 125.87 - {148350, 148360, 148500, 1000, 1001}, //148.5Mhz -> 148.3516 - {167830, 167840, 168000, 1000, 1001}, //168Mhz -> 167.83 - {222520, 222530, 222750, 1000, 1001}, //222.75Mhz -> 222.527 - {257140, 257150, 257400, 1000, 1001}, //257.4Mhz -> 257.1429 - {296700, 296710, 297000, 1000, 1001}, //297Mhz -> 296.7033 - {342850, 342860, 343200, 1000, 1001}, //343.2Mhz -> 342.857 - {395600, 395610, 396000, 1000, 1001}, //396Mhz -> 395.6 - {409090, 409100, 409500, 1000, 1001}, //409.5Mhz -> 409.091 - {445050, 445060, 445500, 1000, 1001}, //445.5Mhz -> 445.055 - {467530, 467540, 468000, 1000, 1001}, //468Mhz -> 467.5325 - {519230, 519240, 519750, 1000, 1001}, //519.75Mhz -> 519.231 - {525970, 525980, 526500, 1000, 1001}, //526.5Mhz -> 525.974 - {545450, 545460, 546000, 1000, 1001}, //546Mhz -> 545.455 - {593400, 593410, 594000, 1000, 1001}, //594Mhz -> 593.4066 - {623370, 623380, 624000, 1000, 1001}, //624Mhz -> 623.377 - {692300, 692310, 693000, 1000, 1001}, //693Mhz -> 692.308 - {701290, 701300, 702000, 1000, 1001}, //702Mhz -> 701.2987 - {791200, 791210, 792000, 1000, 1001}, //792Mhz -> 791.209 - {890100, 890110, 891000, 1000, 1001}, //891Mhz -> 890.1099 - {1186810, 1186820, 1188000, 1000, 1001},//1188Mhz -> 1186.8131 - - // *1.001 rates - {27020, 27030, 27000, 1001, 1000}, //27Mhz - {54050, 54060, 54000, 1001, 1000}, //54Mhz - {108100, 108110, 108000, 1001, 1000},//108Mhz -}; - static bool dcn20_program_pix_clk( struct clock_source *clock_source, struct pixel_clk_params *pix_clk_params, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index c5aa1f48593a..5479d959ec62 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -27,10 +27,6 @@ #include "dc_types.h" -#define BL_REG_LIST()\ - SR(LVTMA_PWRSEQ_CNTL), \ - SR(LVTMA_PWRSEQ_STATE) - #define HWSEQ_DCEF_REG_LIST_DCE8() \ .DCFE_CLOCK_CONTROL[0] = mmCRTC0_CRTC_DCFE_CLOCK_CONTROL, \ .DCFE_CLOCK_CONTROL[1] = mmCRTC1_CRTC_DCFE_CLOCK_CONTROL, \ @@ -94,20 +90,17 @@ SRII(BLND_CONTROL, BLND, 0),\ SRII(BLND_CONTROL, BLND, 1),\ SR(BLNDV_CONTROL),\ - HWSEQ_PIXEL_RATE_REG_LIST(CRTC),\ - BL_REG_LIST() + HWSEQ_PIXEL_RATE_REG_LIST(CRTC) #define HWSEQ_DCE8_REG_LIST() \ HWSEQ_DCEF_REG_LIST_DCE8(), \ HWSEQ_BLND_REG_LIST(), \ - HWSEQ_PIXEL_RATE_REG_LIST(CRTC),\ - BL_REG_LIST() + HWSEQ_PIXEL_RATE_REG_LIST(CRTC) #define HWSEQ_DCE10_REG_LIST() \ HWSEQ_DCEF_REG_LIST(), \ HWSEQ_BLND_REG_LIST(), \ - HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \ - BL_REG_LIST() + HWSEQ_PIXEL_RATE_REG_LIST(CRTC) #define HWSEQ_ST_REG_LIST() \ HWSEQ_DCE11_REG_LIST_BASE(), \ @@ -134,8 +127,7 @@ SR(DCHUB_FB_LOCATION),\ SR(DCHUB_AGP_BASE),\ SR(DCHUB_AGP_BOT),\ - SR(DCHUB_AGP_TOP), \ - BL_REG_LIST() + SR(DCHUB_AGP_TOP) #define HWSEQ_VG20_REG_LIST() \ 
HWSEQ_DCE120_REG_LIST(),\ @@ -144,8 +136,7 @@ #define HWSEQ_DCE112_REG_LIST() \ HWSEQ_DCE10_REG_LIST(), \ HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \ - HWSEQ_PHYPLL_REG_LIST(CRTC), \ - BL_REG_LIST() + HWSEQ_PHYPLL_REG_LIST(CRTC) #define HWSEQ_DCN_REG_LIST()\ SR(REFCLK_CNTL), \ @@ -207,8 +198,7 @@ SR(D3VGA_CONTROL), \ SR(D4VGA_CONTROL), \ SR(VGA_TEST_CONTROL), \ - SR(DC_IP_REQUEST_CNTL), \ - BL_REG_LIST() + SR(DC_IP_REQUEST_CNTL) #define HWSEQ_DCN2_REG_LIST()\ HWSEQ_DCN_REG_LIST(), \ @@ -273,8 +263,7 @@ SR(D4VGA_CONTROL), \ SR(D5VGA_CONTROL), \ SR(D6VGA_CONTROL), \ - SR(DC_IP_REQUEST_CNTL), \ - BL_REG_LIST() + SR(DC_IP_REQUEST_CNTL) #define HWSEQ_DCN21_REG_LIST()\ HWSEQ_DCN_REG_LIST(), \ @@ -324,15 +313,9 @@ SR(D4VGA_CONTROL), \ SR(D5VGA_CONTROL), \ SR(D6VGA_CONTROL), \ - SR(DC_IP_REQUEST_CNTL), \ - BL_REG_LIST() + SR(DC_IP_REQUEST_CNTL) struct dce_hwseq_registers { - - /* Backlight registers */ - uint32_t LVTMA_PWRSEQ_CNTL; - uint32_t LVTMA_PWRSEQ_STATE; - uint32_t DCFE_CLOCK_CONTROL[6]; uint32_t DCFEV_CLOCK_CONTROL; uint32_t DC_MEM_GLOBAL_PWR_REQ_CNTL; @@ -465,26 +448,18 @@ struct dce_hwseq_registers { HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh),\ HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PIXEL_RATE_PLL_SOURCE, mask_sh) -#define HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)\ - HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\ - HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\ - HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\ - HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh) - #define HWSEQ_DCE8_MASK_SH_LIST(mask_sh)\ .DCFE_CLOCK_ENABLE = CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_CLOCK_ENABLE ## mask_sh, \ HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_V_UPDATE_LOCK, mask_sh),\ HWS_SF(BLND_, V_UPDATE_LOCK, BLND_SCL_V_UPDATE_LOCK, mask_sh),\ HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, mask_sh),\ HWS_SF(BLND_, CONTROL, BLND_MODE, mask_sh),\ - HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\ - HWSEQ_LVTMA_MASK_SH_LIST(mask_sh) + HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_) #define HWSEQ_DCE10_MASK_SH_LIST(mask_sh)\ HWSEQ_DCEF_MASK_SH_LIST(mask_sh, DCFE_),\ HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND_),\ - HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\ - HWSEQ_LVTMA_MASK_SH_LIST(mask_sh) + HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_) #define HWSEQ_DCE11_MASK_SH_LIST(mask_sh)\ HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\ @@ -507,8 +482,7 @@ struct dce_hwseq_registers { HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND0_BLND_),\ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\ HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, CRTC0_),\ - HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh),\ - HWSEQ_LVTMA_MASK_SH_LIST(mask_sh) + HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh) #define HWSEQ_VG20_MASK_SH_LIST(mask_sh)\ HWSEQ_DCE12_MASK_SH_LIST(mask_sh),\ @@ -570,8 +544,7 @@ struct dce_hwseq_registers { HWS_SF(, D3VGA_CONTROL, D3VGA_MODE_ENABLE, mask_sh),\ HWS_SF(, D4VGA_CONTROL, D4VGA_MODE_ENABLE, mask_sh),\ HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\ - HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\ - HWSEQ_LVTMA_MASK_SH_LIST(mask_sh) + HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh) #define HWSEQ_DCN2_MASK_SH_LIST(mask_sh)\ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ @@ -630,8 +603,7 @@ struct dce_hwseq_registers { HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN19_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN20_PG_STATUS, DOMAIN20_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN21_PG_STATUS, DOMAIN21_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ - 
HWSEQ_LVTMA_MASK_SH_LIST(mask_sh) + HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh) #define HWSEQ_DCN21_MASK_SH_LIST(mask_sh)\ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ @@ -671,10 +643,7 @@ struct dce_hwseq_registers { HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN16_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN17_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN18_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ - HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \ - HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \ - HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh) + HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh) #define HWSEQ_REG_FIELD_LIST(type) \ type DCFE_CLOCK_ENABLE; \ @@ -706,11 +675,7 @@ struct dce_hwseq_registers { type PF_LFB_REGION;\ type PF_MAX_REGION;\ type ENABLE_L1_TLB;\ - type SYSTEM_ACCESS_MODE;\ - type LVTMA_BLON;\ - type LVTMA_DIGON;\ - type LVTMA_DIGON_OVRD;\ - type LVTMA_PWRSEQ_TARGET_STATE_R; + type SYSTEM_ACCESS_MODE; #define HWSEQ_DCN_REG_FIELD_LIST(type) \ type HUBP_VTG_SEL; \ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 8527cce81c6f..8d8c84c81b34 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -118,7 +118,8 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = { .enable_hpd = dce110_link_encoder_enable_hpd, .disable_hpd = dce110_link_encoder_disable_hpd, .is_dig_enabled = dce110_is_dig_enabled, - .destroy = dce110_link_encoder_destroy + .destroy = dce110_link_encoder_destroy, + .get_max_link_cap = dce110_link_encoder_get_max_link_cap }; static enum bp_result link_transmitter_control( @@ -1389,3 +1390,20 @@ void dce110_link_encoder_disable_hpd(struct link_encoder *enc) set_reg_field_value(value, 0, DC_HPD_CONTROL, DC_HPD_EN); } + +void dce110_link_encoder_get_max_link_cap(struct link_encoder *enc, + struct dc_link_settings *link_settings) +{ + /* Set Default link settings */ + struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH, + LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0}; + + /* Higher link settings based on feature supported */ + if (enc->features.flags.bits.IS_HBR2_CAPABLE) + max_link_cap.link_rate = LINK_RATE_HIGH2; + + if (enc->features.flags.bits.IS_HBR3_CAPABLE) + max_link_cap.link_rate = LINK_RATE_HIGH3; + + *link_settings = max_link_cap; +} diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h index 3c9368df4093..384389f0e2c3 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h @@ -271,4 +271,7 @@ void dce110_psr_program_secondary_packet(struct link_encoder *enc, bool dce110_is_dig_enabled(struct link_encoder *enc); +void dce110_link_encoder_get_max_link_cap(struct link_encoder *enc, + struct dc_link_settings *link_settings); + #endif /* __DC_LINK_ENCODER__DCE110_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c new file mode 100644 index 000000000000..d9b0ff7eb2a4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c @@ -0,0 +1,105 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "reg_helper.h" +#include "core_types.h" +#include "dc_dmub_srv.h" +#include "panel_cntl.h" +#include "dce_panel_cntl.h" + +#define TO_DCE_PANEL_CNTL(panel_cntl)\ + container_of(panel_cntl, struct dce_panel_cntl, base) + +#define CTX \ + dce_panel_cntl->base.ctx + +#define DC_LOGGER \ + dce_panel_cntl->base.ctx->logger + +#define REG(reg)\ + dce_panel_cntl->regs->reg + +#undef FN +#define FN(reg_name, field_name) \ + dce_panel_cntl->shift->field_name, dce_panel_cntl->mask->field_name + +void dce_panel_cntl_hw_init(struct panel_cntl *panel_cntl) +{ + +} + +bool dce_is_panel_backlight_on(struct panel_cntl *panel_cntl) +{ + struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl); + uint32_t value; + + REG_GET(PWRSEQ_CNTL, BLON, &value); + + return value; +} + +bool dce_is_panel_powered_on(struct panel_cntl *panel_cntl) +{ + struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl); + uint32_t pwr_seq_state, dig_on, dig_on_ovrd; + + REG_GET(PWRSEQ_STATE, PWRSEQ_TARGET_STATE_R, &pwr_seq_state); + + REG_GET_2(PWRSEQ_CNTL, DIGON, &dig_on, DIGON_OVRD, &dig_on_ovrd); + + return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1); +} + +static void dce_panel_cntl_destroy(struct panel_cntl **panel_cntl) +{ + struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(*panel_cntl); + + kfree(dce_panel_cntl); + *panel_cntl = NULL; +} + +static const struct panel_cntl_funcs dce_link_panel_cntl_funcs = { + .destroy = dce_panel_cntl_destroy, + .hw_init = dce_panel_cntl_hw_init, + .is_panel_backlight_on = dce_is_panel_backlight_on, + .is_panel_powered_on = dce_is_panel_powered_on, + +}; + +void dce_panel_cntl_construct( + struct dce_panel_cntl *dce_panel_cntl, + const struct panel_cntl_init_data *init_data, + const struct dce_panel_cntl_registers *regs, + const struct dce_panel_cntl_shift *shift, + const struct dce_panel_cntl_mask *mask) +{ + dce_panel_cntl->regs = regs; + dce_panel_cntl->shift = shift; + dce_panel_cntl->mask = mask; + + dce_panel_cntl->base.funcs = &dce_link_panel_cntl_funcs; + dce_panel_cntl->base.ctx = init_data->ctx; + dce_panel_cntl->base.inst = init_data->inst; +} diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h new file mode 100644 index 000000000000..6dc6984f9248 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h @@ -0,0 +1,117 @@ +/* + * 
Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_PANEL_CNTL__DCE_H__ +#define __DC_PANEL_CNTL__DCE_H__ + +#include "panel_cntl.h" + +/* set register offset with instance */ +#define DCE_PANEL_CNTL_SR(reg_name, block)\ + .reg_name = mm ## block ## _ ## reg_name + +#define DCE_PANEL_CNTL_REG_LIST()\ + DCE_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \ + DCE_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \ + SR(BL_PWM_CNTL), \ + SR(BL_PWM_CNTL2), \ + SR(BL_PWM_PERIOD_CNTL), \ + SR(BL_PWM_GRP1_REG_LOCK) + +#define DCN_PANEL_CNTL_SR(reg_name, block)\ + .reg_name = BASE(mm ## block ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## _ ## reg_name + +#define DCN_PANEL_CNTL_REG_LIST()\ + DCN_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \ + DCN_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \ + SR(BL_PWM_CNTL), \ + SR(BL_PWM_CNTL2), \ + SR(BL_PWM_PERIOD_CNTL), \ + SR(BL_PWM_GRP1_REG_LOCK) + +#define DCE_PANEL_CNTL_SF(block, reg_name, field_name, post_fix)\ + .field_name = block ## reg_name ## __ ## block ## field_name ## post_fix + +#define DCE_PANEL_CNTL_MASK_SH_LIST(mask_sh) \ + DCE_PANEL_CNTL_SF(LVTMA_, PWRSEQ_CNTL, BLON, mask_sh),\ + DCE_PANEL_CNTL_SF(LVTMA_, PWRSEQ_CNTL, DIGON, mask_sh),\ + DCE_PANEL_CNTL_SF(LVTMA_, PWRSEQ_CNTL, DIGON_OVRD, mask_sh),\ + DCE_PANEL_CNTL_SF(LVTMA_, PWRSEQ_STATE, PWRSEQ_TARGET_STATE_R, mask_sh), \ + DCE_PANEL_CNTL_SF(, BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, mask_sh), \ + DCE_PANEL_CNTL_SF(, BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, mask_sh), \ + DCE_PANEL_CNTL_SF(, BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, mask_sh), \ + DCE_PANEL_CNTL_SF(, BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, mask_sh), \ + DCE_PANEL_CNTL_SF(, BL_PWM_CNTL, BL_PWM_EN, mask_sh), \ + DCE_PANEL_CNTL_SF(, BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, mask_sh), \ + DCE_PANEL_CNTL_SF(, BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, mask_sh), \ + DCE_PANEL_CNTL_SF(, BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_UPDATE_PENDING, mask_sh) + +#define DCE_PANEL_CNTL_REG_FIELD_LIST(type) \ + type BLON;\ + type DIGON;\ + type DIGON_OVRD;\ + type PWRSEQ_TARGET_STATE_R; \ + type BL_PWM_EN; \ + type BL_ACTIVE_INT_FRAC_CNT; \ + type BL_PWM_FRACTIONAL_EN; \ + type BL_PWM_PERIOD; \ + type BL_PWM_PERIOD_BITCNT; \ + type BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN; \ + type BL_PWM_GRP1_REG_LOCK; \ + type BL_PWM_GRP1_REG_UPDATE_PENDING + +struct dce_panel_cntl_shift { + DCE_PANEL_CNTL_REG_FIELD_LIST(uint8_t); +}; + +struct dce_panel_cntl_mask { + 
DCE_PANEL_CNTL_REG_FIELD_LIST(uint32_t); +}; + +struct dce_panel_cntl_registers { + uint32_t PWRSEQ_CNTL; + uint32_t PWRSEQ_STATE; + uint32_t BL_PWM_CNTL; + uint32_t BL_PWM_CNTL2; + uint32_t BL_PWM_PERIOD_CNTL; + uint32_t BL_PWM_GRP1_REG_LOCK; +}; + +struct dce_panel_cntl { + struct panel_cntl base; + const struct dce_panel_cntl_registers *regs; + const struct dce_panel_cntl_shift *shift; + const struct dce_panel_cntl_mask *mask; +}; + +void dce_panel_cntl_construct( + struct dce_panel_cntl *panel_cntl, + const struct panel_cntl_init_data *init_data, + const struct dce_panel_cntl_registers *regs, + const struct dce_panel_cntl_shift *shift, + const struct dce_panel_cntl_mask *mask); + +#endif /* __DC_PANEL_CNTL__DCE_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c new file mode 100644 index 000000000000..a19f359e45d7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c @@ -0,0 +1,438 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dmub_abm.h" +#include "dce_abm.h" +#include "dc.h" +#include "dc_dmub_srv.h" +#include "../../dmub/inc/dmub_srv.h" +#include "core_types.h" +#include "dm_services.h" +#include "reg_helper.h" +#include "fixed31_32.h" + +#include "atom.h" + +#define TO_DMUB_ABM(abm)\ + container_of(abm, struct dce_abm, base) + +#define REG(reg) \ + (dce_abm->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + dce_abm->abm_shift->field_name, dce_abm->abm_mask->field_name + +#define CTX \ + dce_abm->base.ctx + +#define DISABLE_ABM_IMMEDIATELY 255 + +static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = abm->ctx; + uint32_t ramping_boundary = 0xFFFF; + + cmd.abm_set_pipe.header.type = DMUB_CMD__ABM; + cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE; + cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst; + cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary; + cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data); + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.abm_set_pipe.header); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + dc_dmub_srv_wait_idle(dc->dmub_srv); + + return true; +} + +static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_abm *dce_abm) +{ + uint64_t current_backlight; + uint32_t round_result; + uint32_t bl_period, bl_int_count; + uint32_t bl_pwm, fractional_duty_cycle_en; + uint32_t bl_period_mask, bl_pwm_mask; + + REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period); + REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count); + + REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &bl_pwm); + REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en); + + if (bl_int_count == 0) + bl_int_count = 16; + + bl_period_mask = (1 << bl_int_count) - 1; + bl_period &= bl_period_mask; + + bl_pwm_mask = bl_period_mask << (16 - bl_int_count); + + if (fractional_duty_cycle_en == 0) + bl_pwm &= bl_pwm_mask; + else + bl_pwm &= 0xFFFF; + + current_backlight = (uint64_t)bl_pwm << (1 + bl_int_count); + + if (bl_period == 0) + bl_period = 0xFFFF; + + current_backlight = div_u64(current_backlight, bl_period); + current_backlight = (current_backlight + 1) >> 1; + + current_backlight = (uint64_t)(current_backlight) * bl_period; + + round_result = (uint32_t)(current_backlight & 0xFFFFFFFF); + + round_result = (round_result >> (bl_int_count-1)) & 1; + + current_backlight >>= bl_int_count; + current_backlight += round_result; + + return (uint32_t)(current_backlight); +} + +static void dmcub_set_backlight_level( + struct dce_abm *dce_abm, + uint32_t backlight_pwm_u16_16, + uint32_t frame_ramp, + uint32_t otg_inst) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = dce_abm->base.ctx; + unsigned int backlight_8_bit = 0; + uint32_t s2; + + if (backlight_pwm_u16_16 & 0x10000) + // Check for max backlight condition + backlight_8_bit = 0xFF; + else + // Take MSB of fractional part since backlight is not max + backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF; + + dmub_abm_set_pipe(&dce_abm->base, otg_inst); + + REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_pwm_u16_16); + + if (otg_inst == 0) + frame_ramp = 0; + + cmd.abm_set_backlight.header.type = DMUB_CMD__ABM; + cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT; + cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp; + cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); + + 
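	/* Editor's note, not part of the patch: a worked example of the
	 * u16.16 fixed-point conversion done at the top of this function.
	 * A 50% backlight arrives as backlight_pwm_u16_16 == 0x8000, so
	 * backlight_8_bit == (0x8000 >> 8) & 0xFF == 0x80, about half of
	 * the 0 to 255 range. Full brightness arrives as 0x10000; its
	 * integer bit is set, so the level saturates to 0xFF instead of
	 * truncating to 0x00.
	 */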
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.abm_set_backlight.header); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + dc_dmub_srv_wait_idle(dc->dmub_srv); + + // Update requested backlight level + s2 = REG_READ(BIOS_SCRATCH_2); + + s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK; + backlight_8_bit &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >> + ATOM_S2_CURRENT_BL_LEVEL_SHIFT); + s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT); + + REG_WRITE(BIOS_SCRATCH_2, s2); +} + +static void dmub_abm_enable_fractional_pwm(struct dc_context *dc) +{ + union dmub_rb_cmd cmd; + uint32_t fractional_pwm = (dc->dc->config.disable_fractional_pwm == false) ? 1 : 0; + + cmd.abm_set_pwm_frac.header.type = DMUB_CMD__ABM; + cmd.abm_set_pwm_frac.header.sub_type = DMUB_CMD__ABM_SET_PWM_FRAC; + cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.fractional_pwm = fractional_pwm; + cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data); + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.abm_set_pwm_frac.header); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + dc_dmub_srv_wait_idle(dc->dmub_srv); +} + +static void dmub_abm_init(struct abm *abm) +{ + struct dce_abm *dce_abm = TO_DMUB_ABM(abm); + unsigned int backlight = calculate_16_bit_backlight_from_pwm(dce_abm); + + REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103); + REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101); + REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x103); + REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x101); + REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x101); + + REG_SET_3(DC_ABM1_HG_MISC_CTRL, 0, + ABM1_HG_NUM_OF_BINS_SEL, 0, + ABM1_HG_VMAX_SEL, 1, + ABM1_HG_BIN_BITWIDTH_SIZE_SEL, 0); + + REG_SET_3(DC_ABM1_IPCSC_COEFF_SEL, 0, + ABM1_IPCSC_COEFF_SEL_R, 2, + ABM1_IPCSC_COEFF_SEL_G, 4, + ABM1_IPCSC_COEFF_SEL_B, 2); + + REG_UPDATE(BL1_PWM_CURRENT_ABM_LEVEL, + BL1_PWM_CURRENT_ABM_LEVEL, backlight); + + REG_UPDATE(BL1_PWM_TARGET_ABM_LEVEL, + BL1_PWM_TARGET_ABM_LEVEL, backlight); + + REG_UPDATE(BL1_PWM_USER_LEVEL, + BL1_PWM_USER_LEVEL, backlight); + + REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, + ABM1_LS_MIN_PIXEL_VALUE_THRES, 0, + ABM1_LS_MAX_PIXEL_VALUE_THRES, 1000); + + REG_SET_3(DC_ABM1_HGLS_REG_READ_PROGRESS, 0, + ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, 1, + ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, 1, + ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1); + + dmub_abm_enable_fractional_pwm(abm->ctx); +} + +static unsigned int dmub_abm_get_current_backlight(struct abm *abm) +{ + struct dce_abm *dce_abm = TO_DMUB_ABM(abm); + unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL); + + /* return backlight in hardware format which is unsigned 17 bits, with + * 1 bit integer and 16 bit fractional + */ + return backlight; +} + +static unsigned int dmub_abm_get_target_backlight(struct abm *abm) +{ + struct dce_abm *dce_abm = TO_DMUB_ABM(abm); + unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL); + + /* return backlight in hardware format which is unsigned 17 bits, with + * 1 bit integer and 16 bit fractional + */ + return backlight; +} + +static bool dmub_abm_set_level(struct abm *abm, uint32_t level) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = abm->ctx; + + cmd.abm_set_level.header.type = DMUB_CMD__ABM; + cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL; + cmd.abm_set_level.abm_set_level_data.level = level; + cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data); + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.abm_set_level.header); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + dc_dmub_srv_wait_idle(dc->dmub_srv); + + return true; +} + +static bool 
dmub_abm_immediate_disable(struct abm *abm) +{ + struct dce_abm *dce_abm = TO_DMUB_ABM(abm); + + dmub_abm_set_pipe(abm, DISABLE_ABM_IMMEDIATELY); + + abm->stored_backlight_registers.BL_PWM_CNTL = + REG_READ(BL_PWM_CNTL); + abm->stored_backlight_registers.BL_PWM_CNTL2 = + REG_READ(BL_PWM_CNTL2); + abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL = + REG_READ(BL_PWM_PERIOD_CNTL); + + REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV, + &abm->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV); + + return true; +} + +static bool dmub_abm_init_backlight(struct abm *abm) +{ + struct dce_abm *dce_abm = TO_DMUB_ABM(abm); + uint32_t value; + + /* The PWM registers must not be 0, so restore them. + * BIOS bug workaround: the period resets to zero, so + * restore the cached values, which are always correct. + */ + REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &value); + + if (value == 0 || value == 1) { + if (abm->stored_backlight_registers.BL_PWM_CNTL != 0) { + REG_WRITE(BL_PWM_CNTL, + abm->stored_backlight_registers.BL_PWM_CNTL); + REG_WRITE(BL_PWM_CNTL2, + abm->stored_backlight_registers.BL_PWM_CNTL2); + REG_WRITE(BL_PWM_PERIOD_CNTL, + abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL); + REG_UPDATE(LVTMA_PWRSEQ_REF_DIV, + BL_PWM_REF_DIV, + abm->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV); + } else { + /* TODO: Note: This should not really happen since VBIOS + * should have initialized PWM registers on boot. + */ + REG_WRITE(BL_PWM_CNTL, 0xC000FA00); + REG_WRITE(BL_PWM_PERIOD_CNTL, 0x000C0FA0); + } + } else { + abm->stored_backlight_registers.BL_PWM_CNTL = + REG_READ(BL_PWM_CNTL); + abm->stored_backlight_registers.BL_PWM_CNTL2 = + REG_READ(BL_PWM_CNTL2); + abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL = + REG_READ(BL_PWM_PERIOD_CNTL); + + REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV, + &abm->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV); + } + + // Have driver take backlight control + // TakeBacklightControl(true) + value = REG_READ(BIOS_SCRATCH_2); + value |= ATOM_S2_VRI_BRIGHT_ENABLE; + REG_WRITE(BIOS_SCRATCH_2, value); + + // Enable the backlight output + REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1); + + // Unlock group 2 backlight registers + REG_UPDATE(BL_PWM_GRP1_REG_LOCK, + BL_PWM_GRP1_REG_LOCK, 0); + + return true; +} + +static bool dmub_abm_set_backlight_level_pwm( + struct abm *abm, + unsigned int backlight_pwm_u16_16, + unsigned int frame_ramp, + unsigned int otg_inst, + bool fw_set_brightness) +{ + struct dce_abm *dce_abm = TO_DMUB_ABM(abm); + + dmcub_set_backlight_level(dce_abm, + backlight_pwm_u16_16, + frame_ramp, + otg_inst); + + return true; +} + +static bool dmub_abm_init_config(struct abm *abm, + const char *src, + unsigned int bytes) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = abm->ctx; + + // TODO: Optimize by only reading back final 4 bytes + dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb); + + // Copy iramtable into cw7 + memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)src, bytes); + + // Fw will copy from cw7 to fw_state + cmd.abm_init_config.header.type = DMUB_CMD__ABM; + cmd.abm_init_config.header.sub_type = DMUB_CMD__ABM_INIT_CONFIG; + cmd.abm_init_config.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr; + cmd.abm_init_config.abm_init_config_data.bytes = bytes; + cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data); + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.abm_init_config.header); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + 
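	/* Editor's note, an assumption rather than something the patch
	 * states: the wait below appears to be what makes the scratch
	 * buffer safe to reuse. The firmware reads the iram table out of
	 * cw7 while servicing DMUB_CMD__ABM_INIT_CONFIG, so returning
	 * before the ring drains could let a caller overwrite
	 * scratch_mem_fb mid-copy.
	 */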
dc_dmub_srv_wait_idle(dc->dmub_srv); + + return true; +} + +static const struct abm_funcs abm_funcs = { + .abm_init = dmub_abm_init, + .set_abm_level = dmub_abm_set_level, + .init_backlight = dmub_abm_init_backlight, + .set_pipe = dmub_abm_set_pipe, + .set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm, + .get_current_backlight = dmub_abm_get_current_backlight, + .get_target_backlight = dmub_abm_get_target_backlight, + .set_abm_immediate_disable = dmub_abm_immediate_disable, + .init_abm_config = dmub_abm_init_config, +}; + +static void dmub_abm_construct( + struct dce_abm *abm_dce, + struct dc_context *ctx, + const struct dce_abm_registers *regs, + const struct dce_abm_shift *abm_shift, + const struct dce_abm_mask *abm_mask) +{ + struct abm *base = &abm_dce->base; + + base->ctx = ctx; + base->funcs = &abm_funcs; + base->stored_backlight_registers.BL_PWM_CNTL = 0; + base->stored_backlight_registers.BL_PWM_CNTL2 = 0; + base->stored_backlight_registers.BL_PWM_PERIOD_CNTL = 0; + base->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = 0; + base->dmcu_is_running = false; + + abm_dce->regs = regs; + abm_dce->abm_shift = abm_shift; + abm_dce->abm_mask = abm_mask; +} + +struct abm *dmub_abm_create( + struct dc_context *ctx, + const struct dce_abm_registers *regs, + const struct dce_abm_shift *abm_shift, + const struct dce_abm_mask *abm_mask) +{ + struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL); + + if (abm_dce == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dmub_abm_construct(abm_dce, ctx, regs, abm_shift, abm_mask); + + return &abm_dce->base; +} + +void dmub_abm_destroy(struct abm **abm) +{ + struct dce_abm *abm_dce = TO_DMUB_ABM(*abm); + + kfree(abm_dce); + *abm = NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.h index 26583f346c39..3a5d5ac7a86e 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.h @@ -1,5 +1,5 @@ /* - * Copyright 2012-16 Advanced Micro Devices, Inc. + * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -23,17 +23,18 @@ * */ -#include "core_types.h" -#include "logger.h" -#include "include/logger_interface.h" -#include "dm_helpers.h" +#ifndef __DMUB_ABM_H__ +#define __DMUB_ABM_H__ -void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count) -{ - int i; +#include "abm.h" +#include "dce_abm.h" - if (hex_data) - for (i = 0; i < hex_data_count; i++) - DC_LOG_DEBUG("%2.2X ", hex_data[i]); -} +struct abm *dmub_abm_create( + struct dc_context *ctx, + const struct dce_abm_registers *regs, + const struct dce_abm_shift *abm_shift, + const struct dce_abm_mask *abm_mask); +void dmub_abm_destroy(struct abm **abm); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index bc109d4fc6e6..3b8a49e8e665 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -33,6 +33,45 @@ #define MAX_PIPES 6 /** + * Convert dmcub psr state to dmcu psr state. 
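 * (Editor's note: the firmware reports PSR state as sparse hex codes,
 * 0x00 and 0x10 through 0x53, while legacy DMCU consumers expect the
 * dense indices 0 to 15; the chain below collapses the codes in
 * ascending order.)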
+ */ +static void convert_psr_state(uint32_t *psr_state) +{ + if (*psr_state == 0) + *psr_state = 0; + else if (*psr_state == 0x10) + *psr_state = 1; + else if (*psr_state == 0x11) + *psr_state = 2; + else if (*psr_state == 0x20) + *psr_state = 3; + else if (*psr_state == 0x21) + *psr_state = 4; + else if (*psr_state == 0x30) + *psr_state = 5; + else if (*psr_state == 0x31) + *psr_state = 6; + else if (*psr_state == 0x40) + *psr_state = 7; + else if (*psr_state == 0x41) + *psr_state = 8; + else if (*psr_state == 0x42) + *psr_state = 9; + else if (*psr_state == 0x43) + *psr_state = 10; + else if (*psr_state == 0x44) + *psr_state = 11; + else if (*psr_state == 0x50) + *psr_state = 12; + else if (*psr_state == 0x51) + *psr_state = 13; + else if (*psr_state == 0x52) + *psr_state = 14; + else if (*psr_state == 0x53) + *psr_state = 15; +} + +/** * Get PSR state from firmware. */ static void dmub_psr_get_state(struct dmub_psr *dmub, uint32_t *psr_state) @@ -43,6 +82,8 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, uint32_t *psr_state) dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30); dmub_srv_get_gpint_response(srv, psr_state); + + convert_psr_state(psr_state); } /** @@ -53,19 +94,15 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state * union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; - cmd.psr_set_version.header.type = DMUB_CMD__PSR; - cmd.psr_set_version.header.sub_type = DMUB_CMD__PSR_SET_VERSION; - - if (stream->psr_version == 0x0) // Unsupported + if (stream->link->psr_settings.psr_version == PSR_VERSION_UNSUPPORTED) return false; - else if (stream->psr_version == 0x1) - cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_1; - else if (stream->psr_version == 0x2) - cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_2; - cmd.psr_enable.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data); + cmd.psr_set_version.header.type = DMUB_CMD__PSR; + cmd.psr_set_version.header.sub_type = DMUB_CMD__PSR_SET_VERSION; + cmd.psr_set_version.psr_set_version_data.version = stream->link->psr_settings.psr_version; + cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header); + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_set_version.header); dc_dmub_srv_cmd_execute(dc->dmub_srv); dc_dmub_srv_wait_idle(dc->dmub_srv); @@ -162,7 +199,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub, cmd.psr_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_psr_copy_settings_data); // Hw insts - copy_settings_data->dpphy_inst = psr_context->phyType; + copy_settings_data->dpphy_inst = psr_context->transmitterId; copy_settings_data->aux_inst = psr_context->channel; copy_settings_data->digfe_inst = psr_context->engineId; copy_settings_data->digbe_inst = psr_context->transmitterId; @@ -187,6 +224,8 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub, copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations; copy_settings_data->frame_delay = psr_context->frame_delay; copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq; + copy_settings_data->debug.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR ? 
+ true : false; dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_copy_settings.header); dc_dmub_srv_cmd_execute(dc->dmub_srv); diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 8f78bf9abbca..a28c4ae0f259 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -46,6 +46,7 @@ #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "dce100/dce100_hw_sequencer.h" +#include "dce/dce_panel_cntl.h" #include "reg_helper.h" @@ -249,6 +250,18 @@ static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK) }; +static const struct dce_panel_cntl_registers panel_cntl_regs[] = { + { DCE_PANEL_CNTL_REG_LIST() } +}; + +static const struct dce_panel_cntl_shift panel_cntl_shift = { + DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_panel_cntl_mask panel_cntl_mask = { + DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) +}; + #define opp_regs(id)\ [id] = {\ OPP_DCE_100_REG_LIST(id),\ @@ -627,6 +640,23 @@ struct link_encoder *dce100_link_encoder_create( return &enc110->base; } +static struct panel_cntl *dce100_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dce_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dce_panel_cntl_construct(panel_cntl, + init_data, + &panel_cntl_regs[init_data->inst], + &panel_cntl_shift, + &panel_cntl_mask); + + return &panel_cntl->base; +} + struct output_pixel_processor *dce100_opp_create( struct dc_context *ctx, uint32_t inst) @@ -943,6 +973,7 @@ struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link( static const struct resource_funcs dce100_res_pool_funcs = { .destroy = dce100_destroy_resource_pool, .link_enc_create = dce100_link_encoder_create, + .panel_cntl_create = dce100_panel_cntl_create, .validate_bandwidth = dce100_validate_bandwidth, .validate_plane = dce100_validate_plane, .add_stream_to_ctx = dce100_add_stream_to_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index c279982947e1..9cd130c8894a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -53,6 +53,7 @@ #include "abm.h" #include "audio.h" #include "reg_helper.h" +#include "panel_cntl.h" /* include DCE11 register header files */ #include "dce/dce_11_0_d.h" @@ -697,31 +698,6 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx) } -/*todo: cloned in stream enc, fix*/ -bool dce110_is_panel_backlight_on(struct dc_link *link) -{ - struct dc_context *ctx = link->ctx; - struct dce_hwseq *hws = ctx->dc->hwseq; - uint32_t value; - - REG_GET(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, &value); - - return value; -} - -bool dce110_is_panel_powered_on(struct dc_link *link) -{ - struct dc_context *ctx = link->ctx; - struct dce_hwseq *hws = ctx->dc->hwseq; - uint32_t pwr_seq_state, dig_on, dig_on_ovrd; - - REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state); - - REG_GET_2(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd); - - return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1); -} - static enum bp_result link_transmitter_control( struct dc_bios *bios, struct bp_transmitter_control *cntl) @@ -810,7 +786,6 @@ void dce110_edp_power_control( bool power_up) { struct dc_context *ctx = link->ctx; - 
struct dce_hwseq *hwseq = ctx->dc->hwseq; struct bp_transmitter_control cntl = { 0 }; enum bp_result bp_result; @@ -821,7 +796,11 @@ void dce110_edp_power_control( return; } - if (power_up != hwseq->funcs.is_panel_powered_on(link)) { + if (!link->panel_cntl) + return; + + if (power_up != + link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl)) { /* Send VBIOS command to prompt eDP panel power */ if (power_up) { unsigned long long current_ts = dm_get_timestamp(ctx); @@ -892,7 +871,6 @@ void dce110_edp_backlight_control( bool enable) { struct dc_context *ctx = link->ctx; - struct dce_hwseq *hws = ctx->dc->hwseq; struct bp_transmitter_control cntl = { 0 }; if (dal_graphics_object_id_get_connector_id(link->link_enc->connector) @@ -901,7 +879,8 @@ void dce110_edp_backlight_control( return; } - if (enable && hws->funcs.is_panel_backlight_on(link)) { + if (enable && link->panel_cntl && + link->panel_cntl->funcs->is_panel_backlight_on(link->panel_cntl)) { DC_LOG_HW_RESUME_S3( "%s: panel already powered up. Do nothing.\n", __func__); @@ -1432,7 +1411,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0; - pipe_ctx->stream->link->psr_feature_enabled = false; + pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false; return DC_OK; } @@ -1838,7 +1817,7 @@ static bool should_enable_fbc(struct dc *dc, return false; /* PSR should not be enabled */ - if (pipe_ctx->stream->link->psr_feature_enabled) + if (pipe_ctx->stream->link->psr_settings.psr_feature_enabled) return false; /* Nothing to compress */ @@ -2784,8 +2763,6 @@ static const struct hwseq_private_funcs dce110_private_funcs = { .disable_stream_gating = NULL, .enable_stream_gating = NULL, .edp_backlight_control = dce110_edp_backlight_control, - .is_panel_backlight_on = dce110_is_panel_backlight_on, - .is_panel_powered_on = dce110_is_panel_powered_on, }; void dce110_hw_sequencer_construct(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h index 34be166e8ff0..26a9c14a58b1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h @@ -85,9 +85,5 @@ void dce110_edp_wait_for_hpd_ready( struct dc_link *link, bool power_up); -bool dce110_is_panel_backlight_on(struct dc_link *link); - -bool dce110_is_panel_powered_on(struct dc_link *link); - #endif /* __DC_HWSS_DCE110_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index bf14e9ab040c..9597fc79d7fa 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -53,6 +53,7 @@ #include "dce/dce_abm.h" #include "dce/dce_dmcu.h" #include "dce/dce_i2c.h" +#include "dce/dce_panel_cntl.h" #define DC_LOGGER \ dc->ctx->logger @@ -275,6 +276,18 @@ static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE110(_MASK) }; +static const struct dce_panel_cntl_registers panel_cntl_regs[] = { + { DCE_PANEL_CNTL_REG_LIST() } +}; + +static const struct dce_panel_cntl_shift panel_cntl_shift = { + DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_panel_cntl_mask panel_cntl_mask = { + DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) +}; + static const struct dce110_aux_registers_shift aux_shift = { DCE_AUX_MASK_SH_LIST(__SHIFT) }; @@ -673,6 +686,23 @@ static struct 
link_encoder *dce110_link_encoder_create( return &enc110->base; } +static struct panel_cntl *dce110_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dce_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dce_panel_cntl_construct(panel_cntl, + init_data, + &panel_cntl_regs[init_data->inst], + &panel_cntl_shift, + &panel_cntl_mask); + + return &panel_cntl->base; +} + static struct output_pixel_processor *dce110_opp_create( struct dc_context *ctx, uint32_t inst) @@ -1203,6 +1233,7 @@ struct stream_encoder *dce110_find_first_free_match_stream_enc_for_link( static const struct resource_funcs dce110_res_pool_funcs = { .destroy = dce110_destroy_resource_pool, .link_enc_create = dce110_link_encoder_create, + .panel_cntl_create = dce110_panel_cntl_create, .validate_bandwidth = dce110_validate_bandwidth, .validate_plane = dce110_validate_plane, .acquire_idle_pipe_for_layer = dce110_acquire_underlay, diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 700ad8b3e54b..4a7796de2ff5 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -51,6 +51,7 @@ #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" +#include "dce/dce_panel_cntl.h" #include "reg_helper.h" @@ -238,6 +239,18 @@ static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(5) }; +static const struct dce_panel_cntl_registers panel_cntl_regs[] = { + { DCE_PANEL_CNTL_REG_LIST() } +}; + +static const struct dce_panel_cntl_shift panel_cntl_shift = { + DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_panel_cntl_mask panel_cntl_mask = { + DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) +}; + #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ @@ -631,6 +644,23 @@ struct link_encoder *dce112_link_encoder_create( return &enc110->base; } +static struct panel_cntl *dce112_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dce_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dce_panel_cntl_construct(panel_cntl, + init_data, + &panel_cntl_regs[init_data->inst], + &panel_cntl_shift, + &panel_cntl_mask); + + return &panel_cntl->base; +} + static struct input_pixel_processor *dce112_ipp_create( struct dc_context *ctx, uint32_t inst) { @@ -1021,6 +1051,7 @@ static void dce112_destroy_resource_pool(struct resource_pool **pool) static const struct resource_funcs dce112_res_pool_funcs = { .destroy = dce112_destroy_resource_pool, .link_enc_create = dce112_link_encoder_create, + .panel_cntl_create = dce112_panel_cntl_create, .validate_bandwidth = dce112_validate_bandwidth, .validate_plane = dce100_validate_plane, .add_stream_to_ctx = dce112_add_stream_to_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 53ab88ef71f5..9a9764cbd78d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -44,6 +44,7 @@ #include "dce/dce_clock_source.h" #include "dce/dce_ipp.h" #include "dce/dce_mem_input.h" +#include "dce/dce_panel_cntl.h" #include "dce110/dce110_hw_sequencer.h" #include "dce120/dce120_hw_sequencer.h" @@ -293,6 +294,18 @@ static const struct dce_stream_encoder_mask se_mask = { 
SE_COMMON_MASK_SH_LIST_DCE120(_MASK) }; +static const struct dce_panel_cntl_registers panel_cntl_regs[] = { + { DCE_PANEL_CNTL_REG_LIST() } +}; + +static const struct dce_panel_cntl_shift panel_cntl_shift = { + DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_panel_cntl_mask panel_cntl_mask = { + DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) +}; + static const struct dce110_aux_registers_shift aux_shift = { DCE12_AUX_MASK_SH_LIST(__SHIFT) }; @@ -715,6 +728,23 @@ static struct link_encoder *dce120_link_encoder_create( return &enc110->base; } +static struct panel_cntl *dce120_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dce_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dce_panel_cntl_construct(panel_cntl, + init_data, + &panel_cntl_regs[init_data->inst], + &panel_cntl_shift, + &panel_cntl_mask); + + return &panel_cntl->base; +} + static struct input_pixel_processor *dce120_ipp_create( struct dc_context *ctx, uint32_t inst) { @@ -880,6 +910,7 @@ static void dce120_destroy_resource_pool(struct resource_pool **pool) static const struct resource_funcs dce120_res_pool_funcs = { .destroy = dce120_destroy_resource_pool, .link_enc_create = dce120_link_encoder_create, + .panel_cntl_create = dce120_panel_cntl_create, .validate_bandwidth = dce112_validate_bandwidth, .validate_plane = dce100_validate_plane, .add_stream_to_ctx = dce112_add_stream_to_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c index 893261c81854..d2ceebdbdf51 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c @@ -36,34 +36,6 @@ #include "dce/dce_8_0_d.h" #include "dce/dce_8_0_sh_mask.h" -struct dce80_hw_seq_reg_offsets { - uint32_t crtc; -}; - -static const struct dce80_hw_seq_reg_offsets reg_offsets[] = { -{ - .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL), -}, -{ - .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL), -}, -{ - .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL), -}, -{ - .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL), -}, -{ - .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL), -}, -{ - .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL), -} -}; - -#define HW_REG_CRTC(reg, id)\ - (reg + reg_offsets[id].crtc) - /******************************************************************************* * Private definitions ******************************************************************************/ diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 2ad5c28c6e66..a19be9de2df7 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -50,6 +50,7 @@ #include "dce/dce_hwseq.h" #include "dce80/dce80_hw_sequencer.h" #include "dce100/dce100_resource.h" +#include "dce/dce_panel_cntl.h" #include "reg_helper.h" @@ -266,6 +267,18 @@ static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK) }; +static const struct dce_panel_cntl_registers panel_cntl_regs[] = { + { DCE_PANEL_CNTL_REG_LIST() } +}; + +static const struct dce_panel_cntl_shift panel_cntl_shift = { + DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_panel_cntl_mask panel_cntl_mask = { + DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) +}; + #define opp_regs(id)\ [id] = {\ 
OPP_DCE_80_REG_LIST(id),\ @@ -728,6 +741,23 @@ struct link_encoder *dce80_link_encoder_create( return &enc110->base; } +static struct panel_cntl *dce80_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dce_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dce_panel_cntl_construct(panel_cntl, + init_data, + &panel_cntl_regs[init_data->inst], + &panel_cntl_shift, + &panel_cntl_mask); + + return &panel_cntl->base; +} + struct clock_source *dce80_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, @@ -909,6 +939,7 @@ static void dce80_destroy_resource_pool(struct resource_pool **pool) static const struct resource_funcs dce80_res_pool_funcs = { .destroy = dce80_destroy_resource_pool, .link_enc_create = dce80_link_encoder_create, + .panel_cntl_create = dce80_panel_cntl_create, .validate_bandwidth = dce80_validate_bandwidth, .validate_plane = dce100_validate_plane, .add_stream_to_ctx = dce100_add_stream_to_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 31b64733d693..319366ebb44f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -1139,6 +1139,8 @@ void hubp1_cursor_set_position( int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y; int x_hotspot = pos->x_hotspot; int y_hotspot = pos->y_hotspot; + int cursor_height = (int)hubp->curs_attr.height; + int cursor_width = (int)hubp->curs_attr.width; uint32_t dst_x_offset; uint32_t cur_en = pos->enable ? 1 : 0; @@ -1152,10 +1154,16 @@ void hubp1_cursor_set_position( if (hubp->curs_attr.address.quad_part == 0) return; + // Rotated cursor width/height and hotspots tweaks for offset calculation if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) { - src_x_offset = pos->y - pos->y_hotspot - param->viewport.x; - y_hotspot = pos->x_hotspot; - x_hotspot = pos->y_hotspot; + swap(cursor_height, cursor_width); + if (param->rotation == ROTATION_ANGLE_90) { + src_x_offset = pos->x - pos->y_hotspot - param->viewport.x; + src_y_offset = pos->y - pos->x_hotspot - param->viewport.y; + } + } else if (param->rotation == ROTATION_ANGLE_180) { + src_x_offset = pos->x - param->viewport.x; + src_y_offset = pos->y - param->viewport.y; } if (param->mirror) { @@ -1177,13 +1185,13 @@ void hubp1_cursor_set_position( if (src_x_offset >= (int)param->viewport.width) cur_en = 0; /* not visible beyond right edge*/ - if (src_x_offset + (int)hubp->curs_attr.width <= 0) + if (src_x_offset + cursor_width <= 0) cur_en = 0; /* not visible beyond left edge*/ if (src_y_offset >= (int)param->viewport.height) cur_en = 0; /* not visible beyond bottom edge*/ - if (src_y_offset + (int)hubp->curs_attr.height <= 0) + if (src_y_offset + cursor_height <= 0) cur_en = 0; /* not visible beyond top edge*/ if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index b0357546471b..0d53e1d6d6b7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1238,7 +1238,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) void dcn10_init_hw(struct dc *dc) { - int i; + int i, j; struct abm *abm = dc->res_pool->abm; struct dmcu *dmcu = dc->res_pool->dmcu; struct dce_hwseq *hws = dc->hwseq; @@ -1333,17 
+1333,28 @@ void dcn10_init_hw(struct dc *dc) continue; /* - * core_link_read_dpcd() will invoke dm_helpers_dp_read_dpcd(), - * which needs to read dpcd info with the help of aconnector. - * If aconnector (dc->links[i]->prev) is NULL, then dpcd status - * cannot be read. + * If any of the displays are lit up turn them off. + * The reason is that some MST hubs cannot be turned off + * completely until we tell them to do so. + * If not turned off, then displays connected to MST hub + * won't light up. */ - if (dc->links[i]->priv) { - /* if any of the displays are lit up turn them off */ - status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, - &dpcd_power_state, sizeof(dpcd_power_state)); - if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) - dp_receiver_power_ctrl(dc->links[i], false); + status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, + &dpcd_power_state, sizeof(dpcd_power_state)); + if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) { + /* blank dp stream before power off receiver*/ + if (dc->links[i]->link_enc->funcs->get_dig_frontend) { + unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc); + + for (j = 0; j < dc->res_pool->stream_enc_count; j++) { + if (fe == dc->res_pool->stream_enc[j]->id) { + dc->res_pool->stream_enc[j]->funcs->dp_blank( + dc->res_pool->stream_enc[j]); + break; + } + } + } + dp_receiver_power_ctrl(dc->links[i], false); } } } @@ -1361,6 +1372,38 @@ void dcn10_init_hw(struct dc *dc) !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter); } + /* In headless boot cases, DIG may be turned + * on which causes HW/SW discrepancies. + * To avoid this, power down hardware on boot + * if DIG is turned on and seamless boot not enabled + */ + if (dc->config.power_down_display_on_boot) { + struct dc_link *edp_link = get_edp_link(dc); + + if (edp_link && + edp_link->link_enc->funcs->is_dig_enabled && + edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && + dc->hwss.edp_backlight_control && + dc->hwss.power_down && + dc->hwss.edp_power_control) { + dc->hwss.edp_backlight_control(edp_link, false); + dc->hwss.power_down(dc); + dc->hwss.edp_power_control(edp_link, false); + } else { + for (i = 0; i < dc->link_count; i++) { + struct dc_link *link = dc->links[i]; + + if (link->link_enc->funcs->is_dig_enabled && + link->link_enc->funcs->is_dig_enabled(link->link_enc) && + dc->hwss.power_down) { + dc->hwss.power_down(dc); + break; + } + + } + } + } + for (i = 0; i < res_pool->audio_count; i++) { struct audio *audio = res_pool->audios[i]; @@ -2085,25 +2128,25 @@ void dcn10_get_surface_visual_confirm_color( switch (pipe_ctx->plane_res.scl_data.format) { case PIXEL_FORMAT_ARGB8888: - /* set boarder color to red */ + /* set border color to red */ color->color_r_cr = color_value; break; case PIXEL_FORMAT_ARGB2101010: - /* set boarder color to blue */ + /* set border color to blue */ color->color_b_cb = color_value; break; case PIXEL_FORMAT_420BPP8: - /* set boarder color to green */ + /* set border color to green */ color->color_g_y = color_value; break; case PIXEL_FORMAT_420BPP10: - /* set boarder color to yellow */ + /* set border color to yellow */ color->color_g_y = color_value; color->color_r_cr = color_value; break; case PIXEL_FORMAT_FP16: - /* set boarder color to white */ + /* set border color to white */ color->color_r_cr = color_value; color->color_b_cb = color_value; color->color_g_y = color_value; @@ -2128,25 +2171,25 @@ void dcn10_get_hdr_visual_confirm_color( switch 
(top_pipe_ctx->plane_res.scl_data.format) { case PIXEL_FORMAT_ARGB2101010: if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) { - /* HDR10, ARGB2101010 - set boarder color to red */ + /* HDR10, ARGB2101010 - set border color to red */ color->color_r_cr = color_value; } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) { - /* FreeSync 2 ARGB2101010 - set boarder color to pink */ + /* FreeSync 2 ARGB2101010 - set border color to pink */ color->color_r_cr = color_value; color->color_b_cb = color_value; } break; case PIXEL_FORMAT_FP16: if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) { - /* HDR10, FP16 - set boarder color to blue */ + /* HDR10, FP16 - set border color to blue */ color->color_b_cb = color_value; } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) { - /* FreeSync 2 HDR - set boarder color to green */ + /* FreeSync 2 HDR - set border color to green */ color->color_g_y = color_value; } break; default: - /* SDR - set boarder color to Gray */ + /* SDR - set border color to Gray */ color->color_r_cr = color_value/2; color->color_b_cb = color_value/2; color->color_g_y = color_value/2; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c index dd02d3983695..b88ef9703b2b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c @@ -87,8 +87,6 @@ static const struct hwseq_private_funcs dcn10_private_funcs = { .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap, .enable_stream_timing = dcn10_enable_stream_timing, .edp_backlight_control = dce110_edp_backlight_control, - .is_panel_backlight_on = dce110_is_panel_backlight_on, - .is_panel_powered_on = dce110_is_panel_powered_on, .disable_stream_gating = NULL, .enable_stream_gating = NULL, .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index d3617d6785a7..7fd385be3f3d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -90,7 +90,8 @@ static const struct link_encoder_funcs dcn10_lnk_enc_funcs = { .is_dig_enabled = dcn10_is_dig_enabled, .get_dig_frontend = dcn10_get_dig_frontend, .get_dig_mode = dcn10_get_dig_mode, - .destroy = dcn10_link_encoder_destroy + .destroy = dcn10_link_encoder_destroy, + .get_max_link_cap = dcn10_link_encoder_get_max_link_cap, }; static enum bp_result link_transmitter_control( @@ -1370,7 +1371,6 @@ void dcn10_link_encoder_disable_hpd(struct link_encoder *enc) DC_HPD_EN, 0); } - #define AUX_REG(reg)\ (enc10->aux_regs->reg) @@ -1425,3 +1425,19 @@ enum signal_type dcn10_get_dig_mode( return SIGNAL_TYPE_NONE; } +void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc, + struct dc_link_settings *link_settings) +{ + /* Set Default link settings */ + struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH, + LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0}; + + /* Higher link settings based on feature supported */ + if (enc->features.flags.bits.IS_HBR2_CAPABLE) + max_link_cap.link_rate = LINK_RATE_HIGH2; + + if (enc->features.flags.bits.IS_HBR3_CAPABLE) + max_link_cap.link_rate = LINK_RATE_HIGH3; + + *link_settings = max_link_cap; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h index 762109174fb8..68395bcc24fd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h @@ -575,4 +575,7 @@ void dcn10_aux_initialize(struct dcn10_link_encoder *enc10); enum signal_type dcn10_get_dig_mode( struct link_encoder *enc); + +void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc, + struct dc_link_settings *link_settings); #endif /* __DC_LINK_ENCODER__DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index 17d96ec6acd8..ec0ab42becba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -299,6 +299,7 @@ void optc1_set_vtg_params(struct timing_generator *optc, uint32_t asic_blank_end; uint32_t v_init; uint32_t v_fp2 = 0; + int32_t vertical_line_start; struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -315,8 +316,9 @@ void optc1_set_vtg_params(struct timing_generator *optc, patched_crtc_timing.v_border_top; /* if VSTARTUP is before VSYNC, FP2 is the offset, otherwise 0 */ - if (optc1->vstartup_start > asic_blank_end) - v_fp2 = optc1->vstartup_start - asic_blank_end; + vertical_line_start = asic_blank_end - optc1->vstartup_start + 1; + if (vertical_line_start < 0) + v_fp2 = -vertical_line_start; /* Interlace */ if (REG(OTG_INTERLACE_CONTROL)) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index 9a459a8fe8a0..8d1e52fb0393 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -158,6 +158,7 @@ struct dcn_optc_registers { uint32_t OTG_GSL_WINDOW_Y; uint32_t OTG_VUPDATE_KEEPOUT; uint32_t OTG_CRC_CNTL; + uint32_t OTG_CRC_CNTL2; uint32_t OTG_CRC0_DATA_RG; uint32_t OTG_CRC0_DATA_B; uint32_t OTG_CRC0_WINDOWA_X_CONTROL; @@ -475,7 +476,11 @@ struct dcn_optc_registers { type OPTC_DSC_SLICE_WIDTH;\ type OPTC_SEGMENT_WIDTH;\ type OPTC_DWB0_SOURCE_SELECT;\ - type OPTC_DWB1_SOURCE_SELECT; + type OPTC_DWB1_SOURCE_SELECT;\ + type OTG_CRC_DSC_MODE;\ + type OTG_CRC_DATA_STREAM_COMBINE_MODE;\ + type OTG_CRC_DATA_STREAM_SPLIT_MODE;\ + type OTG_CRC_DATA_FORMAT; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 07265ca7d28c..43116749af9f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -51,6 +51,7 @@ #include "dce112/dce112_resource.h" #include "dcn10_hubp.h" #include "dcn10_hubbub.h" +#include "dce/dce_panel_cntl.h" #include "soc15_hw_ip.h" #include "vega10_ip_offset.h" @@ -321,6 +322,18 @@ static const struct dcn10_link_enc_mask le_mask = { LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK) }; +static const struct dce_panel_cntl_registers panel_cntl_regs[] = { + { DCN_PANEL_CNTL_REG_LIST() } +}; + +static const struct dce_panel_cntl_shift panel_cntl_shift = { + DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_panel_cntl_mask panel_cntl_mask = { + DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) +}; + static const struct dce110_aux_registers_shift aux_shift = { DCN10_AUX_MASK_SH_LIST(__SHIFT) }; @@ -807,6 +820,23 @@ struct link_encoder *dcn10_link_encoder_create( return &enc10->base; } +static struct panel_cntl *dcn10_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dce_panel_cntl *panel_cntl = + 
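The optc1_set_vtg_params() change above replaces the direct "VSTARTUP past blank end" test with a signed vertical_line_start and derives FP2 from its negation. A standalone before/after comparison (the function names here are illustrative); the visible effect of the rewrite is that FP2 comes out one line smaller than the old expression and only engages once VSTARTUP is at least two lines past blank end:

#include <stdio.h>
#include <stdint.h>

static uint32_t v_fp2_old(uint32_t vstartup_start, uint32_t asic_blank_end)
{
	return vstartup_start > asic_blank_end ?
		vstartup_start - asic_blank_end : 0;
}

static uint32_t v_fp2_new(uint32_t vstartup_start, uint32_t asic_blank_end)
{
	/* signed line on which VSTARTUP lands relative to blank end;
	 * negative means VSTARTUP fires after blank end and FP2 must
	 * absorb the difference */
	int32_t vertical_line_start = asic_blank_end - vstartup_start + 1;

	return vertical_line_start < 0 ? -vertical_line_start : 0;
}

int main(void)
{
	/* vstartup 40, blank end 36: old FP2 = 4, new FP2 = 3 */
	printf("%u %u\n", v_fp2_old(40, 36), v_fp2_new(40, 36));
	return 0;
}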
kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dce_panel_cntl_construct(panel_cntl, + init_data, + &panel_cntl_regs[init_data->inst], + &panel_cntl_shift, + &panel_cntl_mask); + + return &panel_cntl->base; +} + struct clock_source *dcn10_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, @@ -1291,6 +1321,7 @@ static const struct dc_cap_funcs cap_funcs = { static const struct resource_funcs dcn10_res_pool_funcs = { .destroy = dcn10_destroy_resource_pool, .link_enc_create = dcn10_link_encoder_create, + .panel_cntl_create = dcn10_panel_cntl_create, .validate_bandwidth = dcn_validate_bandwidth, .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer, .validate_plane = dcn10_validate_plane, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 22f421e82733..6ad4ed7da629 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1478,8 +1478,11 @@ static void dcn20_program_pipe( if (pipe_ctx->update_flags.bits.odm) hws->funcs.update_odm(dc, context, pipe_ctx); - if (pipe_ctx->update_flags.bits.enable) + if (pipe_ctx->update_flags.bits.enable) { dcn20_enable_plane(dc, pipe_ctx, context); + if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes) + dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub); + } if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw) dcn20_update_dchubp_dpp(dc, pipe_ctx, context); @@ -2171,6 +2174,13 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) */ mpcc_id = hubp->inst; + /* If there is no full update, don't need to touch MPC tree*/ + if (!pipe_ctx->plane_state->update_flags.bits.full_update && + !pipe_ctx->update_flags.bits.mpcc) { + mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); + return; + } + /* check if this MPCC is already being used */ new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id); /* remove MPCC if being used */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c index 1e73357eda34..1642bf546ceb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -97,8 +97,6 @@ static const struct hwseq_private_funcs dcn20_private_funcs = { .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, .enable_stream_timing = dcn20_enable_stream_timing, .edp_backlight_control = dce110_edp_backlight_control, - .is_panel_backlight_on = dce110_is_panel_backlight_on, - .is_panel_powered_on = dce110_is_panel_powered_on, .disable_stream_gating = dcn20_disable_stream_gating, .enable_stream_gating = dcn20_enable_stream_gating, .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c index e4ac73035c84..8d209dae66e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c @@ -49,6 +49,12 @@ #define IND_REG(index) \ (enc10->link_regs->index) +#ifndef MAX +#define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) +#endif +#ifndef MIN +#define MIN(X, Y) ((X) < (Y) ? 
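The dcn20_update_mpcc() hunk above adds an early-out: a flip that sets neither the plane's full_update flag nor the pipe's mpcc update flag leaves the MPC tree topology alone and only rewrites the blending configuration. A sketch of that decision, with hypothetical stub types standing in for the DC mpc structures:

#include <stdbool.h>

struct mpc;
struct blend_cfg;

/* Returns true when the cheap path was taken: blending refreshed in
 * place, MPC tree untouched. Otherwise the caller runs the original
 * remove/reinsert sequence on the tree. */
static bool update_blending_only(bool full_update, bool mpcc_changed,
				 void (*update_blending)(struct mpc *mpc,
							 const struct blend_cfg *cfg,
							 int mpcc_id),
				 struct mpc *mpc,
				 const struct blend_cfg *cfg, int mpcc_id)
{
	if (!full_update && !mpcc_changed) {
		update_blending(mpc, cfg, mpcc_id);
		return true;
	}
	return false;
}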
(X) : (Y)) +#endif static struct mpll_cfg dcn2_mpll_cfg[] = { // RBR @@ -260,6 +266,38 @@ void dcn20_link_encoder_enable_dp_output( } +void dcn20_link_encoder_get_max_link_cap(struct link_encoder *enc, + struct dc_link_settings *link_settings) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + uint32_t is_in_usb_c_dp4_mode = 0; + + dcn10_link_encoder_get_max_link_cap(enc, link_settings); + + /* in usb c dp2 mode, max lane count is 2 */ + if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) { + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode); + if (!is_in_usb_c_dp4_mode) + link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count); + } + +} + +bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + uint32_t dp_alt_mode_disable = 0; + bool is_usb_c_alt_mode = false; + + if (enc->features.flags.bits.DP_IS_USB_C) { + /* if value == 1 alt mode is disabled, otherwise it is enabled */ + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); + is_usb_c_alt_mode = (dp_alt_mode_disable == 0); + } + + return is_usb_c_alt_mode; +} + #define AUX_REG(reg)\ (enc10->aux_regs->reg) @@ -338,6 +376,8 @@ static const struct link_encoder_funcs dcn20_link_enc_funcs = { .fec_is_active = enc2_fec_is_active, .get_dig_mode = dcn10_get_dig_mode, .get_dig_frontend = dcn10_get_dig_frontend, + .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode, + .get_max_link_cap = dcn20_link_encoder_get_max_link_cap, }; void dcn20_link_encoder_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h index 8cab8107fd94..284a1ee4d249 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h @@ -343,6 +343,10 @@ void dcn20_link_encoder_enable_dp_output( const struct dc_link_settings *link_settings, enum clock_source_id clock_source); +bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc); +void dcn20_link_encoder_get_max_link_cap(struct link_encoder *enc, + struct dc_link_settings *link_settings); + void dcn20_link_encoder_construct( struct dcn20_link_encoder *enc20, const struct encoder_init_data *init_data, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index d875b0c38fde..8c16967fe018 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -409,6 +409,18 @@ void optc2_program_manual_trigger(struct timing_generator *optc) OTG_TRIGA_MANUAL_TRIG, 1); } +bool optc2_configure_crc(struct timing_generator *optc, + const struct crc_params *params) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_SET_2(OTG_CRC_CNTL2, 0, + OTG_CRC_DSC_MODE, params->dsc_mode, + OTG_CRC_DATA_STREAM_COMBINE_MODE, params->odm_mode); + + return optc1_configure_crc(optc, params); +} + static struct timing_generator_funcs dcn20_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, @@ -452,7 +464,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = { .clear_optc_underflow = optc1_clear_optc_underflow, .setup_global_swap_lock = NULL, .get_crc = optc1_get_crc, - .configure_crc = optc1_configure_crc, + .configure_crc = optc2_configure_crc, .set_dsc_config = optc2_set_dsc_config, .set_dwb_source = optc2_set_dwb_source, .set_odm_bypass = 
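dcn20_link_encoder_get_max_link_cap() above first takes the dcn10 defaults, then clamps the lane count to two when the PHY is in USB-C DP alt mode without DP4 pin assignment (two of the four lanes are left carrying USB 3.x). A standalone sketch under those assumptions; phy_in_dp4_mode() is a stub for the RDPCS_PHY_DPALT_DP4 register read, and the locally defined MIN evaluates its arguments twice, which is safe here only because the operands have no side effects:

#include <stdbool.h>

#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))	/* evaluates args twice */
#endif

enum lane_count { LANE_COUNT_TWO = 2, LANE_COUNT_FOUR = 4 };

struct link_settings {
	enum lane_count lane_count;
};

/* Stub: true when the USB-C connector dedicates all four high-speed
 * pairs to DisplayPort (DP4 pin assignment). */
static bool phy_in_dp4_mode(void)
{
	return false;
}

static void clamp_for_alt_mode(struct link_settings *ls, bool in_alt_mode)
{
	if (in_alt_mode && !phy_in_dp4_mode())
		ls->lane_count = MIN(LANE_COUNT_TWO, ls->lane_count);
}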
optc2_set_odm_bypass, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h index 239cc40ae474..e0a0a8a8e2c6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h @@ -36,6 +36,7 @@ SRI(OTG_GSL_WINDOW_Y, OTG, inst),\ SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\ SRI(OTG_DSC_START_POSITION, OTG, inst),\ + SRI(OTG_CRC_CNTL2, OTG, inst),\ SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\ SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\ SRI(OPTC_WIDTH_CONTROL, ODM, inst),\ @@ -62,6 +63,10 @@ SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \ SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \ SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DSC_MODE, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\ @@ -109,4 +114,6 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc); void optc2_setup_manual_trigger(struct timing_generator *optc); void optc2_program_manual_trigger(struct timing_generator *optc); bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing); +bool optc2_configure_crc(struct timing_generator *optc, + const struct crc_params *params); #endif /* __DC_OPTC_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 5cdbba0cd873..63044ae06327 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -61,6 +61,7 @@ #include "dcn20_dccg.h" #include "dcn20_vmid.h" #include "dc_link_ddc.h" +#include "dce/dce_panel_cntl.h" #include "navi10_ip_offset.h" @@ -687,6 +688,18 @@ static const struct dcn10_link_enc_mask le_mask = { DPCS_DCN2_MASK_SH_LIST(_MASK) }; +static const struct dce_panel_cntl_registers panel_cntl_regs[] = { + { DCN_PANEL_CNTL_REG_LIST() } +}; + +static const struct dce_panel_cntl_shift panel_cntl_shift = { + DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_panel_cntl_mask panel_cntl_mask = { + DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) +}; + #define ipp_regs(id)\ [id] = {\ IPP_REG_LIST_DCN20(id),\ @@ -1289,6 +1302,23 @@ struct link_encoder *dcn20_link_encoder_create( return &enc20->enc10.base; } +static struct panel_cntl *dcn20_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dce_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dce_panel_cntl_construct(panel_cntl, + init_data, + &panel_cntl_regs[init_data->inst], + &panel_cntl_shift, + &panel_cntl_mask); + + return &panel_cntl->base; +} + struct clock_source *dcn20_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, @@ -2212,12 +2242,12 @@ int dcn20_populate_dml_pipes_from_context( || pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled; pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90 || pln->rotation == ROTATION_ANGLE_270 ? 
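The three *_panel_cntl_create() factories added for dcn10/dcn20/dcn21 in this series share one shape: zero-allocate the DCE wrapper, run dce_panel_cntl_construct() with the per-instance registers, and return the embedded base pointer. A userspace sketch of the pattern, with calloc() standing in for kzalloc() and the construct step reduced to a comment:

#include <stdlib.h>

struct panel_cntl { unsigned int inst; };
struct dce_panel_cntl { struct panel_cntl base; /* + regs/shift/mask */ };

static struct panel_cntl *panel_cntl_create_sketch(unsigned int inst)
{
	struct dce_panel_cntl *panel_cntl = calloc(1, sizeof(*panel_cntl));

	if (!panel_cntl)
		return NULL;

	/* dce_panel_cntl_construct() would wire up regs/shift/mask here */
	panel_cntl->base.inst = inst;
	return &panel_cntl->base;
}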
dm_vert : dm_horz; - pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y; - pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y; - pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport.width; - pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width; - pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height; - pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height; + pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport_unadjusted.y; + pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c_unadjusted.y; + pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport_unadjusted.width; + pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c_unadjusted.width; + pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport_unadjusted.height; + pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c_unadjusted.height; pipes[pipe_cnt].pipe.src.surface_width_y = pln->plane_size.surface_size.width; pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height; pipes[pipe_cnt].pipe.src.surface_width_c = pln->plane_size.chroma_size.width; @@ -3003,7 +3033,7 @@ void dcn20_calculate_dlg_params( pipe_idx, cstate_en, context->bw_ctx.bw.dcn.clk.p_state_change_support, - false, false, false); + false, false, true); context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml, &context->res_ctx.pipe_ctx[i].rq_regs, @@ -3189,6 +3219,7 @@ enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_stat static struct resource_funcs dcn20_res_pool_funcs = { .destroy = dcn20_destroy_resource_pool, .link_enc_create = dcn20_link_encoder_create, + .panel_cntl_create = dcn20_panel_cntl_create, .validate_bandwidth = dcn20_validate_bandwidth, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, .add_stream_to_ctx = dcn20_add_stream_to_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c index b9ff9767e08f..8410a6305a9a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -90,6 +90,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = { .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, .optimize_pwr_state = dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, + .power_down = dce110_power_down, }; static const struct hwseq_private_funcs dcn21_private_funcs = { @@ -105,8 +106,6 @@ static const struct hwseq_private_funcs dcn21_private_funcs = { .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, .enable_stream_timing = dcn20_enable_stream_timing, .edp_backlight_control = dce110_edp_backlight_control, - .is_panel_backlight_on = dce110_is_panel_backlight_on, - .is_panel_powered_on = dce110_is_panel_powered_on, .disable_stream_gating = dcn20_disable_stream_gating, .enable_stream_gating = dcn20_enable_stream_gating, .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c index e45683ac871a..aa46c35b05a2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c @@ -203,29 +203,6 @@ static bool update_cfg_data( return true; } -void dcn21_link_encoder_get_max_link_cap(struct link_encoder *enc, - struct dc_link_settings *link_settings) -{ - struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); - uint32_t value; - - REG_GET(RDPCSTX_PHY_CNTL6, 
RDPCS_PHY_DPALT_DP4, &value); - - if (!value && link_settings->lane_count > LANE_COUNT_TWO) - link_settings->lane_count = LANE_COUNT_TWO; -} - -bool dcn21_link_encoder_is_in_alt_mode(struct link_encoder *enc) -{ - struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); - uint32_t value; - - REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &value); - - // if value == 1 alt mode is disabled, otherwise it is enabled - return !value; -} - bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); @@ -348,8 +325,8 @@ static const struct link_encoder_funcs dcn21_link_enc_funcs = { .fec_set_ready = enc2_fec_set_ready, .fec_is_active = enc2_fec_is_active, .get_dig_frontend = dcn10_get_dig_frontend, - .is_in_alt_mode = dcn21_link_encoder_is_in_alt_mode, - .get_max_link_cap = dcn21_link_encoder_get_max_link_cap, + .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode, + .get_max_link_cap = dcn20_link_encoder_get_max_link_cap, }; void dcn21_link_encoder_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index b25484aa8222..78e6259b4ac9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -61,6 +61,7 @@ #include "dcn21_hubbub.h" #include "dcn10/dcn10_resource.h" #include "dce110/dce110_resource.h" +#include "dce/dce_panel_cntl.h" #include "dcn20/dcn20_dwb.h" #include "dcn20/dcn20_mmhubbub.h" @@ -85,6 +86,7 @@ #include "vm_helper.h" #include "dcn20/dcn20_vmid.h" #include "dce/dmub_psr.h" +#include "dce/dmub_abm.h" #define SOC_BOUNDING_BOX_VALID false #define DC_LOGGER_INIT(logger) @@ -991,9 +993,12 @@ static void dcn21_resource_destruct(struct dcn21_resource_pool *pool) pool->base.dp_clock_source = NULL; } - - if (pool->base.abm != NULL) - dce_abm_destroy(&pool->base.abm); + if (pool->base.abm != NULL) { + if (pool->base.abm->ctx->dc->config.disable_dmcu) + dmub_abm_destroy(&pool->base.abm); + else + dce_abm_destroy(&pool->base.abm); + } if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); @@ -1602,6 +1607,18 @@ static const struct dcn10_link_enc_registers link_enc_regs[] = { link_regs(4, E), }; +static const struct dce_panel_cntl_registers panel_cntl_regs[] = { + { DCN_PANEL_CNTL_REG_LIST() } +}; + +static const struct dce_panel_cntl_shift panel_cntl_shift = { + DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_panel_cntl_mask panel_cntl_mask = { + DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) +}; + #define aux_regs(id)\ [id] = {\ DCN2_AUX_REG_LIST(id)\ @@ -1687,6 +1704,24 @@ static struct link_encoder *dcn21_link_encoder_create( return &enc21->enc10.base; } + +static struct panel_cntl *dcn21_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dce_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dce_panel_cntl_construct(panel_cntl, + init_data, + &panel_cntl_regs[init_data->inst], + &panel_cntl_shift, + &panel_cntl_mask); + + return &panel_cntl->base; +} + #define CTX ctx #define REG(reg_name) \ @@ -1705,12 +1740,8 @@ static int dcn21_populate_dml_pipes_from_context( { uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes); int i; - struct resource_context *res_ctx = &context->res_ctx; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - if (!res_ctx->pipe_ctx[i].stream) - continue; + for (i = 0; i < pipe_cnt; i++) { pipes[i].pipe.src.hostvm 
= 1; pipes[i].pipe.src.gpuvm = 1; @@ -1735,6 +1766,7 @@ enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_stat static struct resource_funcs dcn21_res_pool_funcs = { .destroy = dcn21_destroy_resource_pool, .link_enc_create = dcn21_link_encoder_create, + .panel_cntl_create = dcn21_panel_cntl_create, .validate_bandwidth = dcn21_validate_bandwidth, .populate_dml_pipes = dcn21_populate_dml_pipes_from_context, .add_stream_to_ctx = dcn20_add_stream_to_ctx, @@ -1842,17 +1874,19 @@ static bool dcn21_resource_construct( goto create_fail; } - pool->base.dmcu = dcn21_dmcu_create(ctx, - &dmcu_regs, - &dmcu_shift, - &dmcu_mask); - if (pool->base.dmcu == NULL) { - dm_error("DC: failed to create dmcu!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; + if (!dc->config.disable_dmcu) { + pool->base.dmcu = dcn21_dmcu_create(ctx, + &dmcu_regs, + &dmcu_shift, + &dmcu_mask); + if (pool->base.dmcu == NULL) { + dm_error("DC: failed to create dmcu!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } } - if (dc->debug.disable_dmcu) { + if (dc->config.disable_dmcu) { pool->base.psr = dmub_psr_create(ctx); if (pool->base.psr == NULL) { @@ -1862,15 +1896,16 @@ static bool dcn21_resource_construct( } } - pool->base.abm = dce_abm_create(ctx, + if (dc->config.disable_dmcu) + pool->base.abm = dmub_abm_create(ctx, + &abm_regs, + &abm_shift, + &abm_mask); + else + pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); - if (pool->base.abm == NULL) { - dm_error("DC: failed to create abm!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } pool->base.pp_smu = dcn21_pp_smu_create(ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index 5bbbafacc720..922ab7169e52 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -2614,6 +2614,7 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP if (mode_lib->vba.DRAMClockChangeSupportsVActive && mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) { + mode_lib->vba.DRAMClockChangeWatermark += 25; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) { @@ -2622,7 +2623,7 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP mode_lib->vba.MinTTUVBlank[k] += 25; } } - mode_lib->vba.DRAMClockChangeWatermark += 25; + mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; } else if (mode_lib->vba.DummyPStateCheck && mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index e6617c958bb8..5bc80b6084da 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -5944,7 +5944,7 @@ static void CalculateMetaAndPTETimes( * PixelPTEReqHeightY[k]; } dpte_groups_per_row_luma_ub = dml_ceil( - dpte_row_width_luma_ub[k] / dpte_group_width_luma, + (float) dpte_row_width_luma_ub[k] / dpte_group_width_luma, 1); time_per_pte_group_nom_luma[k] = DST_Y_PER_PTE_ROW_NOM_L[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_luma_ub; @@ -5968,7 +5968,7 @@ static void CalculateMetaAndPTETimes( * PixelPTEReqHeightC[k]; } dpte_groups_per_row_chroma_ub = dml_ceil( - 
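dcn21_resource_construct() above now keys both the DMCU and the ABM backend off dc->config.disable_dmcu: with the microcontroller disabled, ABM is created on the DMUB firmware path and the DMCU object is never instantiated, and dcn21_resource_destruct() has to make the matching choice on teardown. The selection reduces to a sketch like this (backend objects are illustrative stand-ins for the dmub_abm_create()/dce_abm_create() results):

#include <stdbool.h>
#include <stddef.h>

struct abm { const char *backend; };

static struct abm dmub_abm = { "dmub" };	/* firmware-backed ABM */
static struct abm dce_abm  = { "dmcu" };	/* legacy DMCU ABM */

/* Creation and destruction must agree on this predicate, which is why
 * dcn21_resource_destruct() now checks disable_dmcu before picking a
 * destroy routine. */
static struct abm *pick_abm(bool disable_dmcu)
{
	return disable_dmcu ? &dmub_abm : &dce_abm;
}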
dpte_row_width_chroma_ub[k] + (float) dpte_row_width_chroma_ub[k] / dpte_group_width_chroma, 1); time_per_pte_group_nom_chroma[k] = DST_Y_PER_PTE_ROW_NOM_C[k] diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c index a38baa73d484..193f31b8ac4a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c @@ -1490,19 +1490,30 @@ static void dml_rq_dlg_get_dlg_params( disp_dlg_regs->refcyc_per_pte_group_vblank_l = (unsigned int) (dst_y_per_row_vblank * (double) htotal * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l); - ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13)); + if ((refclk_freq_in_mhz / ref_freq_to_pix_freq < 28) && + disp_dlg_regs->refcyc_per_pte_group_vblank_l >= (unsigned int)dml_pow(2, 13)) + disp_dlg_regs->refcyc_per_pte_group_vblank_l = (1 << 13) - 1; + else + ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13)); if (dual_plane) { disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int) (dst_y_per_row_vblank * (double) htotal * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_c); - ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c + if ((refclk_freq_in_mhz / ref_freq_to_pix_freq < 28) && + disp_dlg_regs->refcyc_per_pte_group_vblank_c >= (unsigned int)dml_pow(2, 13)) + disp_dlg_regs->refcyc_per_pte_group_vblank_c = (1 << 13) - 1; + else + ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int)dml_pow(2, 13)); } - disp_dlg_regs->refcyc_per_meta_chunk_vblank_l = + if (src->dcc) + disp_dlg_regs->refcyc_per_meta_chunk_vblank_l = (unsigned int) (dst_y_per_row_vblank * (double) htotal * ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l); + else + disp_dlg_regs->refcyc_per_meta_chunk_vblank_l = 0; ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int)dml_pow(2, 13)); disp_dlg_regs->refcyc_per_meta_chunk_vblank_c = diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c index 6f730b5bfe42..5e384a8a83dc 100644 --- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c +++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c @@ -322,3 +322,92 @@ static const struct protection_properties dp_11_protection = { .process_transaction = dp_11_process_transaction }; +static const struct protection_properties *get_protection_properties_by_signal( + struct dc_link *link, + enum signal_type st, + enum hdcp_version version) +{ + switch (version) { + case HDCP_VERSION_14: + switch (st) { + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + return &hdmi_14_protection; + case SIGNAL_TYPE_DISPLAY_PORT: + if (link && + (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER || + link->dpcd_caps.dongle_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER)) { + return &non_supported_protection; + } + return &dp_11_protection; + case SIGNAL_TYPE_DISPLAY_PORT_MST: + case SIGNAL_TYPE_EDP: + return &dp_11_protection; + default: + return &non_supported_protection; + } + break; + case HDCP_VERSION_22: + switch (st) { + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + return &hdmi_14_protection; //todo version2.2 + case SIGNAL_TYPE_DISPLAY_PORT: + case SIGNAL_TYPE_DISPLAY_PORT_MST: + case SIGNAL_TYPE_EDP: + return &dp_11_protection; //todo version2.2 + 
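The display_rq_dlg_calc_21.c change above stops asserting when refcyc_per_pte_group_vblank overflows its register field at low refclk-to-pixel-clock ratios (the < 28 guard) and saturates instead, the field being 13 bits wide. The saturation itself is just:

#include <stdint.h>

#define FIELD_BITS 13
#define FIELD_MAX ((1u << FIELD_BITS) - 1)	/* (1 << 13) - 1 = 8191 */

/* Saturate a computed cycle count to the width of the hardware field
 * rather than tripping an ASSERT on legitimately slow pixel clocks. */
static uint32_t clamp_to_field(uint32_t refcyc)
{
	return refcyc > FIELD_MAX ? FIELD_MAX : refcyc;
}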
default: + return &non_supported_protection; + } + break; + default: + return &non_supported_protection; + } +} + +enum hdcp_message_status dc_process_hdcp_msg( + enum signal_type signal, + struct dc_link *link, + struct hdcp_protection_message *message_info) +{ + enum hdcp_message_status status = HDCP_MESSAGE_FAILURE; + uint32_t i = 0; + + const struct protection_properties *protection_props; + + if (!message_info) + return HDCP_MESSAGE_UNSUPPORTED; + + if (message_info->msg_id < HDCP_MESSAGE_ID_READ_BKSV || + message_info->msg_id >= HDCP_MESSAGE_ID_MAX) + return HDCP_MESSAGE_UNSUPPORTED; + + protection_props = + get_protection_properties_by_signal( + link, + signal, + message_info->version); + + if (!protection_props->supported) + return HDCP_MESSAGE_UNSUPPORTED; + + if (protection_props->process_transaction( + link, + message_info)) { + status = HDCP_MESSAGE_SUCCESS; + } else { + for (i = 0; i < message_info->max_retries; i++) { + if (protection_props->process_transaction( + link, + message_info)) { + status = HDCP_MESSAGE_SUCCESS; + break; + } + } + } + + return status; +} + diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index d523fc9547e7..c7fd702a4a87 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -38,6 +38,7 @@ #endif #include "dwb.h" #include "mcif_wb.h" +#include "panel_cntl.h" #define MAX_CLOCK_SOURCES 7 @@ -92,6 +93,8 @@ struct clk_bw_params; struct resource_funcs { void (*destroy)(struct resource_pool **pool); void (*link_init)(struct dc_link *link); + struct panel_cntl*(*panel_cntl_create)( + const struct panel_cntl_init_data *panel_cntl_init_data); struct link_encoder *(*link_enc_create)( const struct encoder_init_data *init); bool (*validate_bandwidth)( diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h index d607b3191954..0dd12c4fc23c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h @@ -56,10 +56,13 @@ struct abm_funcs { unsigned int backlight_pwm_u16_16, unsigned int frame_ramp, unsigned int controller_id, - bool use_smooth_brightness); + bool fw_set_brightness); unsigned int (*get_current_backlight)(struct abm *abm); unsigned int (*get_target_backlight)(struct abm *abm); + bool (*init_abm_config)(struct abm *abm, + const char *src, + unsigned int bytes); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index f5dd0cc73c63..47a566d82d6e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -144,6 +144,8 @@ struct hubbub_funcs { void (*allow_self_refresh_control)(struct hubbub *hubbub, bool allow); void (*apply_DEDCN21_147_wa)(struct hubbub *hubbub); + + void (*force_wm_propagate_to_pipes)(struct hubbub *hubbub); }; struct hubbub { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h new file mode 100644 index 000000000000..5a75d035f1fa --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h @@ -0,0 +1,53 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. 
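dc_process_hdcp_msg() above makes one initial attempt and then retries up to message_info->max_retries times, so a transaction runs at most max_retries + 1 times in total. A standalone sketch of that retry shape, with try_transaction() standing in for protection_props->process_transaction():

#include <stdbool.h>
#include <stdint.h>

enum msg_status { MSG_FAILURE, MSG_SUCCESS };

static enum msg_status process_with_retries(bool (*try_transaction)(void),
					    uint8_t max_retries)
{
	uint8_t i;

	if (try_transaction())
		return MSG_SUCCESS;

	/* first attempt failed: up to max_retries further attempts */
	for (i = 0; i < max_retries; i++)
		if (try_transaction())
			return MSG_SUCCESS;

	return MSG_FAILURE;
}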
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +/* + * panel_cntl.h + * + * Created on: Oct 6, 2015 + * Author: yonsun + */ + +#ifndef DC_PANEL_CNTL_H_ +#define DC_PANEL_CNTL_H_ + +#include "dc_types.h" + +struct panel_cntl_funcs { + void (*destroy)(struct panel_cntl **panel_cntl); + void (*hw_init)(struct panel_cntl *panel_cntl); + bool (*is_panel_backlight_on)(struct panel_cntl *panel_cntl); + bool (*is_panel_powered_on)(struct panel_cntl *panel_cntl); +}; + +struct panel_cntl_init_data { + struct dc_context *ctx; + uint32_t inst; +}; + +struct panel_cntl { + const struct panel_cntl_funcs *funcs; + struct dc_context *ctx; + uint32_t inst; +}; + +#endif /* DC_PANEL_CNTL_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index e5e7d94026fc..f803191e3134 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -117,6 +117,9 @@ struct crc_params { enum crc_selection selection; + uint8_t dsc_mode; + uint8_t odm_mode; + bool continuous_mode; bool enable; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h index fecc80c47c26..2947d1b15512 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h @@ -173,6 +173,8 @@ struct scaler_data { struct scaling_taps taps; struct rect viewport; struct rect viewport_c; + struct rect viewport_unadjusted; + struct rect viewport_c_unadjusted; struct rect recout; struct scaling_ratios ratios; struct scl_inits inits; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index d4c1fb242c63..9380721f28b8 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -75,9 +75,13 @@ struct hw_sequencer_funcs { void (*wait_for_mpcc_disconnect)(struct dc *dc, struct resource_pool *res_pool, struct pipe_ctx *pipe_ctx); + void (*edp_backlight_control)( + struct dc_link *link, + bool enable); void (*program_triplebuffer)(const struct dc *dc, struct pipe_ctx *pipe_ctx, bool enableTripleBuffer); void (*update_pending_status)(struct pipe_ctx *pipe_ctx); + void (*power_down)(struct dc *dc); /* Pipe Lock Related */ void (*pipe_control_lock)(struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h 
b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h index 52a26e6be066..36e906bb6bfc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h @@ -100,8 +100,6 @@ struct hwseq_private_funcs { struct dc *dc); void (*edp_backlight_control)(struct dc_link *link, bool enable); - bool (*is_panel_backlight_on)(struct dc_link *link); - bool (*is_panel_powered_on)(struct dc_link *link); void (*setup_vupdate_interrupt)(struct dc *dc, struct pipe_ctx *pipe_ctx); bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c index 3464b2d5b89a..348e9a600a72 100644 --- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c @@ -84,6 +84,14 @@ static void virtual_link_encoder_destroy(struct link_encoder **enc) *enc = NULL; } +static void virtual_link_encoder_get_max_link_cap(struct link_encoder *enc, + struct dc_link_settings *link_settings) +{ + /* Set Default link settings */ + struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH, + LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0}; + *link_settings = max_link_cap; +} static const struct link_encoder_funcs virtual_lnk_enc_funcs = { .validate_output_with_stream = @@ -94,6 +102,7 @@ static const struct link_encoder_funcs virtual_lnk_enc_funcs = { .enable_dp_output = virtual_link_encoder_enable_dp_output, .enable_dp_mst_output = virtual_link_encoder_enable_dp_mst_output, .disable_output = virtual_link_encoder_disable_output, + .get_max_link_cap = virtual_link_encoder_get_max_link_cap, .dp_set_lane_settings = virtual_link_encoder_dp_set_lane_settings, .dp_set_phy_pattern = virtual_link_encoder_dp_set_phy_pattern, .update_mst_stream_allocation_table = diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 10b5fa9d2588..7c7a3561b6aa 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -215,6 +215,11 @@ struct dmub_rb_cmd_dpphy_init { uint8_t reserved[60]; }; +struct dmub_psr_debug_flags { + uint8_t visual_confirm : 1; + uint8_t reserved : 7; +}; + struct dmub_cmd_psr_copy_settings_data { uint16_t psr_level; uint8_t dpp_inst; @@ -228,6 +233,7 @@ struct dmub_cmd_psr_copy_settings_data { uint8_t smu_optimizations_en; uint8_t frame_delay; uint8_t frame_cap_ind; + struct dmub_psr_debug_flags debug; }; struct dmub_rb_cmd_psr_copy_settings { @@ -303,6 +309,16 @@ struct dmub_rb_cmd_abm_set_pwm_frac { struct dmub_cmd_abm_set_pwm_frac_data abm_set_pwm_frac_data; }; +struct dmub_cmd_abm_init_config_data { + union dmub_addr src; + uint16_t bytes; +}; + +struct dmub_rb_cmd_abm_init_config { + struct dmub_cmd_header header; + struct dmub_cmd_abm_init_config_data abm_init_config_data; +}; + union dmub_rb_cmd { struct dmub_rb_cmd_read_modify_write read_modify_write; struct dmub_rb_cmd_reg_field_update_sequence reg_field_update_seq; @@ -324,6 +340,7 @@ union dmub_rb_cmd { struct dmub_rb_cmd_abm_set_level abm_set_level; struct dmub_rb_cmd_abm_set_ambient_level abm_set_ambient_level; struct dmub_rb_cmd_abm_set_pwm_frac abm_set_pwm_frac; + struct dmub_rb_cmd_abm_init_config abm_init_config; }; #pragma pack(pop) diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h index 
d37535d21928..e42de9ded275 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h @@ -32,17 +32,16 @@ */ enum dmub_cmd_psr_type { - DMUB_CMD__PSR_SET_VERSION = 0, - DMUB_CMD__PSR_COPY_SETTINGS = 1, - DMUB_CMD__PSR_ENABLE = 2, - DMUB_CMD__PSR_DISABLE = 3, - DMUB_CMD__PSR_SET_LEVEL = 4, + DMUB_CMD__PSR_SET_VERSION = 0, + DMUB_CMD__PSR_COPY_SETTINGS = 1, + DMUB_CMD__PSR_ENABLE = 2, + DMUB_CMD__PSR_DISABLE = 3, + DMUB_CMD__PSR_SET_LEVEL = 4, }; enum psr_version { - PSR_VERSION_1 = 0x10, // PSR Version 1 - PSR_VERSION_2 = 0x20, // PSR Version 2, includes selective update - PSR_VERSION_2_1 = 0x21, // PSR Version 2, includes Y-coordinate support for SU + PSR_VERSION_1 = 0, + PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF, }; enum dmub_cmd_abm_type { diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h index c2671f2616c8..e40d1cdbcfaa 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h @@ -151,6 +151,7 @@ struct dmub_srv_region_params { uint32_t inst_const_size; uint32_t bss_data_size; uint32_t vbios_size; + const uint8_t *fw_inst_const; const uint8_t *fw_bss_data; }; @@ -565,6 +566,16 @@ dmub_srv_send_gpint_command(struct dmub_srv *dmub, enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub, uint32_t *response); +/** + * dmub_flush_buffer_mem() - Read back entire frame buffer region. + * This ensures that the write from x86 has been flushed and will not + * hang the DMCUB. + * @fb: frame buffer to flush + * + * Can be called after software initialization. + */ +void dmub_flush_buffer_mem(const struct dmub_fb *fb); + #if defined(__cplusplus) } #endif diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c index 63bb9e2c81de..45638d61b73d 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c @@ -186,14 +186,22 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub, dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset); - dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset, &offset); - - REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part); - REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part); - REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base); - REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0, - DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top, - DMCUB_REGION3_CW2_ENABLE, 1); + if (cw2->region.base != cw2->region.top) { + dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset, + &offset); + + REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base); + REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0, + DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top, + DMCUB_REGION3_CW2_ENABLE, 1); + } else { + REG_WRITE(DMCUB_REGION3_CW2_OFFSET, 0); + REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, 0); + REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, 0); + REG_WRITE(DMCUB_REGION3_CW2_TOP_ADDRESS, 0); + } dmub_dcn20_translate_addr(&cw3->offset, fb_base, fb_offset, &offset); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index ce32cc7933c4..0a1a851741c5 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -70,7 +70,7 @@ static inline uint32_t 
dmub_align(uint32_t val, uint32_t factor) return (val + factor - 1) / factor * factor; } -static void dmub_flush_buffer_mem(const struct dmub_fb *fb) +void dmub_flush_buffer_mem(const struct dmub_fb *fb) { const uint8_t *base = (const uint8_t *)fb->cpu_addr; uint8_t buf[64]; @@ -91,17 +91,29 @@ static void dmub_flush_buffer_mem(const struct dmub_fb *fb) } static const struct dmub_fw_meta_info * -dmub_get_fw_meta_info(const uint8_t *fw_bss_data, uint32_t fw_bss_data_size) +dmub_get_fw_meta_info(const struct dmub_srv_region_params *params) { const union dmub_fw_meta *meta; + const uint8_t *blob = NULL; + uint32_t blob_size = 0; + + if (params->fw_bss_data) { + /* Legacy metadata region. */ + blob = params->fw_bss_data; + blob_size = params->bss_data_size; + } else if (params->fw_inst_const) { + /* Combined metadata region. */ + blob = params->fw_inst_const; + blob_size = params->inst_const_size; + } - if (fw_bss_data == NULL) + if (!blob || !blob_size) return NULL; - if (fw_bss_data_size < sizeof(union dmub_fw_meta) + DMUB_FW_META_OFFSET) + if (blob_size < sizeof(union dmub_fw_meta) + DMUB_FW_META_OFFSET) return NULL; - meta = (const union dmub_fw_meta *)(fw_bss_data + fw_bss_data_size - + meta = (const union dmub_fw_meta *)(blob + blob_size - DMUB_FW_META_OFFSET - sizeof(union dmub_fw_meta)); @@ -247,8 +259,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub, mail->base = dmub_align(bios->top, 256); mail->top = mail->base + DMUB_MAILBOX_SIZE; - fw_info = dmub_get_fw_meta_info(params->fw_bss_data, - params->bss_data_size); + fw_info = dmub_get_fw_meta_info(params); if (fw_info) { fw_state_size = fw_info->fw_region_size; diff --git a/drivers/gpu/drm/amd/display/include/hdcp_types.h b/drivers/gpu/drm/amd/display/include/hdcp_types.h index f31e6befc8d6..42229b4effdc 100644 --- a/drivers/gpu/drm/amd/display/include/hdcp_types.h +++ b/drivers/gpu/drm/amd/display/include/hdcp_types.h @@ -83,6 +83,12 @@ enum hdcp_link { HDCP_LINK_SECONDARY }; +enum hdcp_message_status { + HDCP_MESSAGE_SUCCESS, + HDCP_MESSAGE_FAILURE, + HDCP_MESSAGE_UNSUPPORTED +}; + struct hdcp_protection_message { enum hdcp_version version; /* relevant only for DVI */ @@ -91,6 +97,7 @@ struct hdcp_protection_message { uint32_t length; uint8_t max_retries; uint8_t *data; + enum hdcp_message_status status; }; #endif diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h index 6e008de25629..02c23b04d34b 100644 --- a/drivers/gpu/drm/amd/display/include/logger_interface.h +++ b/drivers/gpu/drm/amd/display/include/logger_interface.h @@ -40,8 +40,6 @@ struct dc_state; * */ -void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count); - void pre_surface_trace( struct dc *dc, const struct dc_plane_state *const *plane_states, @@ -102,14 +100,12 @@ void context_clock_trace( #define CONN_DATA_DETECT(link, hex_data, hex_len, ...) \ do { \ (void)(link); \ - dc_conn_log_hex_linux(hex_data, hex_len); \ DC_LOG_EVENT_DETECTION(__VA_ARGS__); \ } while (0) #define CONN_DATA_LINK_LOSS(link, hex_data, hex_len, ...) 
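dmub_get_fw_meta_info() now takes the whole region-params struct so it can fall back from the legacy metadata blob at the end of .bss/.data to the combined blob in inst_const. A sketch of that lookup order, assuming a stand-in META_OFFSET and a reduced fw_meta struct (the real DMUB_FW_META_OFFSET value and union dmub_fw_meta layout differ):

#include <stddef.h>
#include <stdint.h>

#define META_OFFSET 0x100	/* stand-in for DMUB_FW_META_OFFSET */

struct fw_meta { uint32_t magic; uint32_t fw_region_size; };

struct region_params {
	const uint8_t *fw_bss_data;   uint32_t bss_data_size;
	const uint8_t *fw_inst_const; uint32_t inst_const_size;
};

static const struct fw_meta *get_fw_meta(const struct region_params *p)
{
	const uint8_t *blob = NULL;
	uint32_t blob_size = 0;

	if (p->fw_bss_data) {		/* legacy metadata region */
		blob = p->fw_bss_data;
		blob_size = p->bss_data_size;
	} else if (p->fw_inst_const) {	/* combined metadata region */
		blob = p->fw_inst_const;
		blob_size = p->inst_const_size;
	}

	if (!blob || blob_size < sizeof(struct fw_meta) + META_OFFSET)
		return NULL;

	/* metadata sits a fixed offset back from the end of the blob */
	return (const struct fw_meta *)
		(blob + blob_size - META_OFFSET - sizeof(struct fw_meta));
}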
\ do { \ (void)(link); \ - dc_conn_log_hex_linux(hex_data, hex_len); \ DC_LOG_EVENT_LINK_LOSS(__VA_ARGS__); \ } while (0) diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index cac09d500fda..d47253cdcc4e 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1902,7 +1902,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, - if (ramp->type == GAMMA_CUSTOM) + if (ramp && ramp->type == GAMMA_CUSTOM) apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts); ret = true; diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index c33454a9e0b4..eb7421e83b86 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -443,7 +443,7 @@ static bool vrr_settings_require_update(struct core_freesync *core_freesync, return true; } else if (in_vrr->state == VRR_STATE_ACTIVE_FIXED && in_vrr->fixed.target_refresh_in_uhz != - in_config->min_refresh_in_uhz) { + in_config->fixed_refresh_in_uhz) { return true; } else if (in_vrr->min_refresh_in_uhz != min_refresh_in_uhz) { return true; @@ -491,7 +491,7 @@ bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync, return false; } -static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr, +static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr, struct dc_info_packet *infopacket) { /* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */ @@ -523,14 +523,74 @@ static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr, vrr->state == VRR_STATE_ACTIVE_FIXED) infopacket->sb[6] |= 0x04; + // For v1 & 2 infoframes program nominal if non-fs mode, otherwise full range /* PB7 = FreeSync Minimum refresh rate (Hz) */ - infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000); + if (vrr->state == VRR_STATE_ACTIVE_VARIABLE || + vrr->state == VRR_STATE_ACTIVE_FIXED) { + infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000); + } else { + infopacket->sb[7] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000); + } /* PB8 = FreeSync Maximum refresh rate (Hz) * Note: We should never go above the field rate of the mode timing set. 
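Refresh rates in the FreeSync code above are carried in micro-Hz, while the infopacket bytes PB7/PB8 hold whole Hz, rounded to nearest by adding half a hertz (500000 µHz) before the integer divide. For example:

#include <stdio.h>

/* (uhz + 500000) / 1000000 rounds micro-Hz to the nearest whole Hz. */
static unsigned char refresh_hz_byte(unsigned int refresh_in_uhz)
{
	return (unsigned char)((refresh_in_uhz + 500000) / 1000000);
}

int main(void)
{
	printf("%u\n", refresh_hz_byte(59940000));	/* 59.94 Hz -> 60 */
	return 0;
}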
*/ infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000); + //FreeSync HDR + infopacket->sb[9] = 0; + infopacket->sb[10] = 0; +} + +static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr, + struct dc_info_packet *infopacket) +{ + /* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */ + infopacket->sb[1] = 0x1A; + + /* PB2 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 1) */ + infopacket->sb[2] = 0x00; + + /* PB3 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 2) */ + infopacket->sb[3] = 0x00; + + /* PB4 = Reserved */ + + /* PB5 = Reserved */ + + /* PB6 = [Bits 7:3 = Reserved] */ + + /* PB6 = [Bit 0 = FreeSync Supported] */ + if (vrr->state != VRR_STATE_UNSUPPORTED) + infopacket->sb[6] |= 0x01; + + /* PB6 = [Bit 1 = FreeSync Enabled] */ + if (vrr->state != VRR_STATE_DISABLED && + vrr->state != VRR_STATE_UNSUPPORTED) + infopacket->sb[6] |= 0x02; + + /* PB6 = [Bit 2 = FreeSync Active] */ + if (vrr->state == VRR_STATE_ACTIVE_VARIABLE || + vrr->state == VRR_STATE_ACTIVE_FIXED) + infopacket->sb[6] |= 0x04; + + if (vrr->state == VRR_STATE_ACTIVE_FIXED) { + /* PB7 = FreeSync Minimum refresh rate (Hz) */ + infopacket->sb[7] = (unsigned char)((vrr->fixed_refresh_in_uhz + 500000) / 1000000); + /* PB8 = FreeSync Maximum refresh rate (Hz) */ + infopacket->sb[8] = (unsigned char)((vrr->fixed_refresh_in_uhz + 500000) / 1000000); + } else if (vrr->state == VRR_STATE_ACTIVE_VARIABLE) { + /* PB7 = FreeSync Minimum refresh rate (Hz) */ + infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000); + /* PB8 = FreeSync Maximum refresh rate (Hz) */ + infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000); + } else { + // Non-fs case, program nominal range + /* PB7 = FreeSync Minimum refresh rate (Hz) */ + infopacket->sb[7] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000); + /* PB8 = FreeSync Maximum refresh rate (Hz) */ + infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000); + } //FreeSync HDR infopacket->sb[9] = 0; @@ -678,7 +738,7 @@ static void build_vrr_infopacket_v1(enum signal_type signal, unsigned int payload_size = 0; build_vrr_infopacket_header_v1(signal, infopacket, &payload_size); - build_vrr_infopacket_data(vrr, infopacket); + build_vrr_infopacket_data_v1(vrr, infopacket); build_vrr_infopacket_checksum(&payload_size, infopacket); infopacket->valid = true; @@ -692,7 +752,24 @@ static void build_vrr_infopacket_v2(enum signal_type signal, unsigned int payload_size = 0; build_vrr_infopacket_header_v2(signal, infopacket, &payload_size); - build_vrr_infopacket_data(vrr, infopacket); + build_vrr_infopacket_data_v1(vrr, infopacket); + + build_vrr_infopacket_fs2_data(app_tf, infopacket); + + build_vrr_infopacket_checksum(&payload_size, infopacket); + + infopacket->valid = true; +} + +static void build_vrr_infopacket_v3(enum signal_type signal, + const struct mod_vrr_params *vrr, + enum color_transfer_func app_tf, + struct dc_info_packet *infopacket) +{ + unsigned int payload_size = 0; + + build_vrr_infopacket_header_v2(signal, infopacket, &payload_size); + build_vrr_infopacket_data_v3(vrr, infopacket); build_vrr_infopacket_fs2_data(app_tf, infopacket); @@ -717,11 +794,14 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, return; switch (packet_type) { - case PACKET_TYPE_FS2: + case PACKET_TYPE_FS_V3: + build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket); + break; + case PACKET_TYPE_FS_V2: build_vrr_infopacket_v2(stream->signal, vrr, 
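build_vrr_infopacket_data_v3() above picks the PB7/PB8 pair by VRR state: a fixed-rate session reports the fixed rate as both bounds, an active variable session reports the true min/max, and every other state programs the nominal (max) rate twice. The selection, reduced to a standalone helper with illustrative enum names:

enum vrr_state_sketch { VRR_FIXED, VRR_VARIABLE, VRR_OTHER };

struct refresh_range { unsigned int min_hz, max_hz; };

static struct refresh_range infopacket_range_v3(enum vrr_state_sketch state,
						unsigned int fixed_hz,
						unsigned int min_hz,
						unsigned int max_hz)
{
	struct refresh_range r;

	switch (state) {
	case VRR_FIXED:		/* fixed rate as both bounds */
		r.min_hz = r.max_hz = fixed_hz;
		break;
	case VRR_VARIABLE:	/* real operating range */
		r.min_hz = min_hz;
		r.max_hz = max_hz;
		break;
	default:		/* non-FreeSync: nominal rate twice */
		r.min_hz = r.max_hz = max_hz;
		break;
	}
	return r;
}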
app_tf, infopacket); break; case PACKET_TYPE_VRR: - case PACKET_TYPE_FS1: + case PACKET_TYPE_FS_V1: default: build_vrr_infopacket_v1(stream->signal, vrr, infopacket); } @@ -793,6 +873,11 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, calc_duration_in_us_from_refresh_in_uhz( (unsigned int)max_refresh_in_uhz); + if (in_config->state == VRR_STATE_ACTIVE_FIXED) + in_out_vrr->fixed_refresh_in_uhz = in_config->fixed_refresh_in_uhz; + else + in_out_vrr->fixed_refresh_in_uhz = 0; + refresh_range = in_out_vrr->max_refresh_in_uhz - in_out_vrr->min_refresh_in_uhz; @@ -843,7 +928,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, in_out_vrr->min_refresh_in_uhz); } else if (in_out_vrr->state == VRR_STATE_ACTIVE_FIXED) { in_out_vrr->fixed.target_refresh_in_uhz = - in_out_vrr->min_refresh_in_uhz; + in_out_vrr->fixed_refresh_in_uhz; if (in_out_vrr->fixed.ramping_active && in_out_vrr->fixed.fixed_active) { /* Do not update vtotals if ramping is already active diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c index cc1d3f470b99..e9fbd94f8635 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c @@ -328,7 +328,8 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp, /* add display to connection */ hdcp->connection.link = *link; *display_container = *display; - status = mod_hdcp_add_display_to_topology(hdcp, display->index); + status = mod_hdcp_add_display_to_topology(hdcp, display_container); + if (status != MOD_HDCP_STATUS_SUCCESS) goto out; @@ -374,7 +375,7 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp, status = mod_hdcp_remove_display_from_topology(hdcp, index); if (status != MOD_HDCP_STATUS_SUCCESS) goto out; - display->state = MOD_HDCP_DISPLAY_INACTIVE; + memset(display, 0, sizeof(struct mod_hdcp_display)); /* request authentication when connection is not reset */ if (current_state(hdcp) != HDCP_UNINITIALIZED) diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h index 5cb4546be0ef..b0cefed2eb02 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -328,7 +328,7 @@ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size, /* psp functions */ enum mod_hdcp_status mod_hdcp_add_display_to_topology( - struct mod_hdcp *hdcp, uint8_t index); + struct mod_hdcp *hdcp, struct mod_hdcp_display *display); enum mod_hdcp_status mod_hdcp_remove_display_from_topology( struct mod_hdcp *hdcp, uint8_t index); enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp); @@ -357,8 +357,6 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management( struct mod_hdcp *hdcp); enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready( struct mod_hdcp *hdcp); -enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp, - enum mod_hdcp_encryption_status *encryption_status); /* ddc functions */ enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp); @@ -503,11 +501,6 @@ static inline uint8_t is_display_active(struct mod_hdcp_display *display) return display->state >= MOD_HDCP_DISPLAY_ACTIVE; } -static inline uint8_t is_display_added(struct mod_hdcp_display *display) -{ - return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; -} - static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display) { return display->state >= 
MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; @@ -515,34 +508,23 @@ static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *dis static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp) { - uint8_t added_count = 0; + uint8_t active_count = 0; uint8_t i; for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) if (is_display_active(&hdcp->displays[i])) - added_count++; - return added_count; -} - -static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp) -{ - uint8_t added_count = 0; - uint8_t i; - - for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) - if (is_display_added(&hdcp->displays[i])) - added_count++; - return added_count; + active_count++; + return active_count; } -static inline struct mod_hdcp_display *get_first_added_display( +static inline struct mod_hdcp_display *get_first_active_display( struct mod_hdcp *hdcp) { uint8_t i; struct mod_hdcp_display *display = NULL; for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) - if (is_display_added(&hdcp->displays[i])) { + if (is_display_active(&hdcp->displays[i])) { display = &hdcp->displays[i]; break; } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c index 37c8c05497d6..f244b72e74e0 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -129,7 +129,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp) static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) { /* device count must be greater than or equal to tracked hdcp displays */ - return (get_device_count(hdcp) < get_added_display_count(hdcp)) ? + return (get_device_count(hdcp) < get_active_display_count(hdcp)) ? MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE : MOD_HDCP_STATUS_SUCCESS; } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c index 491c00f48026..549c113abcf7 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -208,7 +208,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp) static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) { /* device count must be greater than or equal to tracked hdcp displays */ - return (get_device_count(hdcp) < get_added_display_count(hdcp)) ? + return (get_device_count(hdcp) < get_active_display_count(hdcp)) ? 
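With the ACTIVE_AND_ADDED state gone, the HDCP helpers above scan for plain ACTIVE displays; the resulting count feeds the device-count checks in both the HDCP1 and HDCP2 execution paths. A condensed sketch of the two scans, with a reduced display struct:

#include <stddef.h>
#include <stdint.h>

#define MAX_NUM_OF_DISPLAYS 6

enum display_state { DISPLAY_INACTIVE = 0, DISPLAY_ACTIVE = 1 };

struct display { enum display_state state; };

static uint8_t active_count(const struct display *d)
{
	uint8_t n = 0;
	size_t i;

	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
		if (d[i].state >= DISPLAY_ACTIVE)
			n++;
	return n;
}

static const struct display *first_active(const struct display *d)
{
	size_t i;

	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
		if (d[i].state >= DISPLAY_ACTIVE)
			return &d[i];
	return NULL;	/* no active display tracked */
}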
MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE : MOD_HDCP_STATUS_SUCCESS; } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c index 44956f9ba178..fb6a19d020f9 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c @@ -98,8 +98,8 @@ char *mod_hdcp_status_to_str(int32_t status) return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE"; case MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED: return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED"; - case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION: - return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION"; + case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE"; case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE: return "MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE"; case MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE: @@ -158,8 +158,8 @@ char *mod_hdcp_status_to_str(int32_t status) return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED"; case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY: return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY"; - case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION: - return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION"; + case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE"; case MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING: return "MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING"; case MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE: diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index c2929815c3ee..fb1161dd7ea8 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -51,12 +51,15 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology( struct ta_dtm_shared_memory *dtm_cmd; struct mod_hdcp_display *display = get_active_display_at_index(hdcp, index); + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; - if (!display || !is_display_added(display)) + if (!display || !is_display_active(display)) return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + mutex_lock(&psp->dtm_context.mutex); + memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2; @@ -66,34 +69,33 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology( psp_dtm_invoke(psp, dtm_cmd->cmd_id); - if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) - return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; + if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) { + status = MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; + } else { + display->state = MOD_HDCP_DISPLAY_ACTIVE; + HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index); + } - display->state = MOD_HDCP_DISPLAY_ACTIVE; - HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index); - - return MOD_HDCP_STATUS_SUCCESS; - + mutex_unlock(&psp->dtm_context.mutex); + return status; } enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp, - uint8_t index) + struct mod_hdcp_display *display) { struct psp_context *psp = hdcp->config.psp.handle; struct ta_dtm_shared_memory *dtm_cmd; - struct mod_hdcp_display *display = - get_active_display_at_index(hdcp, index); struct mod_hdcp_link *link = &hdcp->connection.link; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; if 
(!psp->dtm_context.dtm_initialized) { DRM_ERROR("Failed to add display topology, DTM TA is not initialized."); + display->state = MOD_HDCP_DISPLAY_INACTIVE; return MOD_HDCP_STATUS_FAILURE; } - if (!display || is_display_added(display)) - return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; - dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; + mutex_lock(&psp->dtm_context.mutex); memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2; @@ -113,21 +115,24 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp, psp_dtm_invoke(psp, dtm_cmd->cmd_id); - if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) - return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; - - display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; - HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index); + if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) { + display->state = MOD_HDCP_DISPLAY_INACTIVE; + status = MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; + } else { + HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index); + } - return MOD_HDCP_STATUS_SUCCESS; + mutex_unlock(&psp->dtm_context.mutex); + return status; } enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp) { struct psp_context *psp = hdcp->config.psp.handle; - struct mod_hdcp_display *display = get_first_added_display(hdcp); + struct mod_hdcp_display *display = get_first_active_display(hdcp); struct ta_hdcp_shared_memory *hdcp_cmd; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; if (!psp->hdcp_context.hdcp_initialized) { DRM_ERROR("Failed to create hdcp session. HDCP TA is not initialized."); @@ -135,6 +140,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp) } hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + + mutex_lock(&psp->hdcp_context.mutex); memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); hdcp_cmd->in_msg.hdcp1_create_session.display_handle = display->index; @@ -144,16 +151,18 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp) hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle; - if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) - return MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE; - - hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary; - memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary, - sizeof(hdcp->auth.msg.hdcp1.aksv)); - memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary, - sizeof(hdcp->auth.msg.hdcp1.an)); + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) { + status = MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE; + } else { + hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary; + memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary, + sizeof(hdcp->auth.msg.hdcp1.aksv)); + memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary, + sizeof(hdcp->auth.msg.hdcp1.an)); + } - return MOD_HDCP_STATUS_SUCCESS; + mutex_unlock(&psp->hdcp_context.mutex); + return status; } enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp) @@ -162,7 +171,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp) struct psp_context *psp = hdcp->config.psp.handle; struct ta_hdcp_shared_memory *hdcp_cmd; uint8_t i = 0; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + mutex_lock(&psp->hdcp_context.mutex); hdcp_cmd = 
(struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -171,27 +182,30 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp) psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) - return MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE; - - HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp); - for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) - if (is_display_encryption_enabled( - &hdcp->displays[i])) { - hdcp->displays[i].state = - MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; - HDCP_HDCP1_DISABLED_TRACE(hdcp, - hdcp->displays[i].index); - } + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) { + status = MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE; + } else { + HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp); + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (is_display_encryption_enabled(&hdcp->displays[i])) { + hdcp->displays[i].state = + MOD_HDCP_DISPLAY_ACTIVE; + HDCP_HDCP1_DISABLED_TRACE( + hdcp, hdcp->displays[i].index); + } + } - return MOD_HDCP_STATUS_SUCCESS; + mutex_unlock(&psp->hdcp_context.mutex); + return status; } enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp) { struct psp_context *psp = hdcp->config.psp.handle; struct ta_hdcp_shared_memory *hdcp_cmd; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + mutex_lock(&psp->hdcp_context.mutex); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -206,10 +220,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp) psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) - return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE; - - if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status == + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) { + status = MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE; + } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status == TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE) { /* needs second part of authentication */ hdcp->connection.is_repeater = 1; @@ -219,20 +232,22 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp) } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status == TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED) { hdcp->connection.is_hdcp1_revoked = 1; - return MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED; + status = MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED; } else - return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE; - + status = MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE; - return MOD_HDCP_STATUS_SUCCESS; + mutex_unlock(&psp->hdcp_context.mutex); + return status; } enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp) { struct psp_context *psp = hdcp->config.psp.handle; struct ta_hdcp_shared_memory *hdcp_cmd; - struct mod_hdcp_display *display = get_first_added_display(hdcp); + struct mod_hdcp_display *display = get_first_active_display(hdcp); + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + mutex_lock(&psp->hdcp_context.mutex); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -241,14 +256,15 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp) psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) - return 
MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION; - - if (!is_dp_mst_hdcp(hdcp)) { + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) { + status = MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE; + } else if (!is_dp_mst_hdcp(hdcp)) { display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; HDCP_HDCP1_ENABLED_TRACE(hdcp, display->index); } - return MOD_HDCP_STATUS_SUCCESS; + + mutex_unlock(&psp->hdcp_context.mutex); + return status; } enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp) @@ -257,6 +273,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp) struct ta_hdcp_shared_memory *hdcp_cmd; enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + mutex_lock(&psp->hdcp_context.mutex); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -287,6 +304,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp) status = MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE; } + mutex_unlock(&psp->hdcp_context.mutex); return status; } @@ -296,14 +314,15 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp struct psp_context *psp = hdcp->config.psp.handle; struct ta_hdcp_shared_memory *hdcp_cmd; int i = 0; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + mutex_lock(&psp->hdcp_context.mutex); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { - if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED || - hdcp->displays[i].adjust.disable) - continue; + if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE) + continue; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -313,21 +332,26 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) - return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE; + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) { + status = MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE; + break; + } hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->displays[i].index); } - return MOD_HDCP_STATUS_SUCCESS; + mutex_unlock(&psp->hdcp_context.mutex); + return status; } enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp) { struct psp_context *psp = hdcp->config.psp.handle; struct ta_hdcp_shared_memory *hdcp_cmd; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + mutex_lock(&psp->hdcp_context.mutex); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -339,12 +363,12 @@ enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp) psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) - return MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE; + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || + hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level != 1) + status = MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE; - return (hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level == 1) - ? 
MOD_HDCP_STATUS_SUCCESS - : MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE; + mutex_unlock(&psp->hdcp_context.mutex); + return status; } enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp, @@ -364,19 +388,23 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp) { struct psp_context *psp = hdcp->config.psp.handle; struct ta_hdcp_shared_memory *hdcp_cmd; - struct mod_hdcp_display *display = get_first_added_display(hdcp); + struct mod_hdcp_display *display = get_first_active_display(hdcp); + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + if (!psp->hdcp_context.hdcp_initialized) { DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized"); return MOD_HDCP_STATUS_FAILURE; } - hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; - memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); - if (!display) return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + mutex_lock(&psp->hdcp_context.mutex); + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index; if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0) @@ -393,12 +421,14 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp) psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) - return MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE; - hdcp->auth.id = hdcp_cmd->out_msg.hdcp2_create_session_v2.session_handle; + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + status = MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE; + else + hdcp->auth.id = hdcp_cmd->out_msg.hdcp2_create_session_v2.session_handle; - return MOD_HDCP_STATUS_SUCCESS; + mutex_unlock(&psp->hdcp_context.mutex); + return status; } enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp) @@ -406,7 +436,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp) struct psp_context *psp = hdcp->config.psp.handle; struct ta_hdcp_shared_memory *hdcp_cmd; uint8_t i = 0; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + mutex_lock(&psp->hdcp_context.mutex); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -415,20 +447,21 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp) psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) - return MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE; - - HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp); - for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) - if (is_display_encryption_enabled( - &hdcp->displays[i])) { - hdcp->displays[i].state = - MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; - HDCP_HDCP2_DISABLED_TRACE(hdcp, - hdcp->displays[i].index); - } + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) { + status = MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE; + } else { + HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp); + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (is_display_encryption_enabled(&hdcp->displays[i])) { + hdcp->displays[i].state = + MOD_HDCP_DISPLAY_ACTIVE; + HDCP_HDCP2_DISABLED_TRACE( + hdcp, hdcp->displays[i].index); + } + } - return MOD_HDCP_STATUS_SUCCESS; + mutex_unlock(&psp->hdcp_context.mutex); + return status; } enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp) @@ -437,7 +470,9 @@ enum 
mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp) struct ta_hdcp_shared_memory *hdcp_cmd; struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + mutex_lock(&psp->hdcp_context.mutex); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -452,12 +487,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp) psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) - return MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE; - - memcpy(&hdcp->auth.msg.hdcp2.ake_init[0], &msg_out->prepare.transmitter_message[0], - sizeof(hdcp->auth.msg.hdcp2.ake_init)); + status = MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE; + else + memcpy(&hdcp->auth.msg.hdcp2.ake_init[0], &msg_out->prepare.transmitter_message[0], + sizeof(hdcp->auth.msg.hdcp2.ake_init)); - return MOD_HDCP_STATUS_SUCCESS; + mutex_unlock(&psp->hdcp_context.mutex); + return status; } enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp) @@ -466,7 +502,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp) struct ta_hdcp_shared_memory *hdcp_cmd; struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + mutex_lock(&psp->hdcp_context.mutex); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -488,26 +526,32 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp) psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) - return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE; - - memcpy(hdcp->auth.msg.hdcp2.ake_no_stored_km, &msg_out->prepare.transmitter_message[0], - sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)); - - memcpy(hdcp->auth.msg.hdcp2.ake_stored_km, - &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)], - sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)); - - if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) { - hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0; - hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0; - return MOD_HDCP_STATUS_SUCCESS; - } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) { - hdcp->connection.is_hdcp2_revoked = 1; - return MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED; + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) { + status = MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE; + } else { + memcpy(hdcp->auth.msg.hdcp2.ake_no_stored_km, + &msg_out->prepare.transmitter_message[0], + sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)); + + memcpy(hdcp->auth.msg.hdcp2.ake_stored_km, + &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)], + sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)); + + if (msg_out->process.msg1_status == + TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) { + hdcp->connection.is_km_stored = + msg_out->process.is_km_stored ? 1 : 0; + hdcp->connection.is_repeater = + msg_out->process.is_repeater ? 
1 : 0; + status = MOD_HDCP_STATUS_SUCCESS; + } else if (msg_out->process.msg1_status == + TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) { + hdcp->connection.is_hdcp2_revoked = 1; + status = MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED; + } } - - return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE; + mutex_unlock(&psp->hdcp_context.mutex); + return status; } enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp) @@ -516,7 +560,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp) struct ta_hdcp_shared_memory *hdcp_cmd; struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + mutex_lock(&psp->hdcp_context.mutex); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -543,16 +589,15 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp) psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) - return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE; - - if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) - return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE; + status = MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE; + else if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) + status = MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE; else if (!hdcp->connection.is_km_stored && - msg_out->process.msg2_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) - return MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE; - + msg_out->process.msg2_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) + status = MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE; - return MOD_HDCP_STATUS_SUCCESS; + mutex_unlock(&psp->hdcp_context.mutex); + return status; } enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp) @@ -561,7 +606,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp) struct ta_hdcp_shared_memory *hdcp_cmd; struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + mutex_lock(&psp->hdcp_context.mutex); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -577,12 +624,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp) psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) - return MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE; - - memcpy(hdcp->auth.msg.hdcp2.lc_init, &msg_out->prepare.transmitter_message[0], - sizeof(hdcp->auth.msg.hdcp2.lc_init)); + status = MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE; + else + memcpy(hdcp->auth.msg.hdcp2.lc_init, &msg_out->prepare.transmitter_message[0], + sizeof(hdcp->auth.msg.hdcp2.lc_init)); - return MOD_HDCP_STATUS_SUCCESS; + mutex_unlock(&psp->hdcp_context.mutex); + return status; } enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp) @@ -591,7 +639,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp) struct ta_hdcp_shared_memory *hdcp_cmd; struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; 
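/*
 * Editor's note: a recurring refactor in hdcp_psp.c above replaces early
 * returns with a single exit point so that psp->hdcp_context.mutex, newly
 * taken around every access to the shared TA buffer, is always released.
 * A minimal sketch of the pattern under those assumptions; the function
 * name and the generic failure code are hypothetical, not part of the
 * patch:
 */
static enum mod_hdcp_status psp_hdcp_cmd_locked(struct mod_hdcp *hdcp,
		uint32_t cmd_id)
{
	struct psp_context *psp = hdcp->config.psp.handle;
	struct ta_hdcp_shared_memory *hdcp_cmd;
	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

	mutex_lock(&psp->hdcp_context.mutex);	/* serialize TA mailbox use */
	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
	/* ... fill hdcp_cmd->in_msg for the specific command ... */
	hdcp_cmd->cmd_id = cmd_id;
	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);

	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
		status = MOD_HDCP_STATUS_FAILURE;	/* set status, do not return */

	mutex_unlock(&psp->hdcp_context.mutex);	/* single unlock point */
	return status;
}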
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + mutex_lock(&psp->hdcp_context.mutex); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -610,13 +660,12 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp) psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) - return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE; - - if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) - return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE; + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || + msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) + status = MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE; - return MOD_HDCP_STATUS_SUCCESS; + mutex_unlock(&psp->hdcp_context.mutex); + return status; } enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp) @@ -625,7 +674,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp) struct ta_hdcp_shared_memory *hdcp_cmd; struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + mutex_lock(&psp->hdcp_context.mutex); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -642,48 +693,55 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp) hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) - return MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE; - - memcpy(hdcp->auth.msg.hdcp2.ske_eks, &msg_out->prepare.transmitter_message[0], - sizeof(hdcp->auth.msg.hdcp2.ske_eks)); - msg_out->prepare.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.ske_eks); - - if (is_dp_hdcp(hdcp)) { - memcpy(hdcp->auth.msg.hdcp2.content_stream_type_dp, - &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ske_eks)], - sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)); + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) { + status = MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE; + } else { + memcpy(hdcp->auth.msg.hdcp2.ske_eks, + &msg_out->prepare.transmitter_message[0], + sizeof(hdcp->auth.msg.hdcp2.ske_eks)); + msg_out->prepare.msg1_desc.msg_size = + sizeof(hdcp->auth.msg.hdcp2.ske_eks); + + if (is_dp_hdcp(hdcp)) { + memcpy(hdcp->auth.msg.hdcp2.content_stream_type_dp, + &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ske_eks)], + sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)); + } } + mutex_unlock(&psp->hdcp_context.mutex); - return MOD_HDCP_STATUS_SUCCESS; + return status; } enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp) { struct psp_context *psp = hdcp->config.psp.handle; struct ta_hdcp_shared_memory *hdcp_cmd; - struct mod_hdcp_display *display = get_first_added_display(hdcp); - - hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; - memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + struct mod_hdcp_display *display = get_first_active_display(hdcp); + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; if (!display) return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + 
mutex_lock(&psp->hdcp_context.mutex); + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + hdcp_cmd->in_msg.hdcp2_set_encryption.session_handle = hdcp->auth.id; hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION; psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) - return MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE; - - if (!is_dp_mst_hdcp(hdcp)) { + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) { + status = MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE; + } else if (!is_dp_mst_hdcp(hdcp)) { display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; HDCP_HDCP2_ENABLED_TRACE(hdcp, display->index); } - return MOD_HDCP_STATUS_SUCCESS; + mutex_unlock(&psp->hdcp_context.mutex); + return status; } enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp) @@ -692,6 +750,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp) struct ta_hdcp_shared_memory *hdcp_cmd; struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + mutex_lock(&psp->hdcp_context.mutex); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -712,23 +773,26 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp) psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) - return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE; - - memcpy(hdcp->auth.msg.hdcp2.repeater_auth_ack, &msg_out->prepare.transmitter_message[0], - sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)); - - if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) { - hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0; - hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0; - return MOD_HDCP_STATUS_SUCCESS; - } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) { - hdcp->connection.is_hdcp2_revoked = 1; - return MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED; + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) { + status = MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE; + } else { + memcpy(hdcp->auth.msg.hdcp2.repeater_auth_ack, + &msg_out->prepare.transmitter_message[0], + sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)); + + if (msg_out->process.msg1_status == + TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) { + hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0; + hdcp->connection.is_repeater = msg_out->process.is_repeater ? 
1 : 0; + status = MOD_HDCP_STATUS_SUCCESS; + } else if (msg_out->process.msg1_status == + TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) { + hdcp->connection.is_hdcp2_revoked = 1; + status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED; + } } - - - return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE; + mutex_unlock(&psp->hdcp_context.mutex); + return status; } enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp *hdcp) @@ -737,7 +801,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp struct ta_hdcp_shared_memory *hdcp_cmd; struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; uint8_t i; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + mutex_lock(&psp->hdcp_context.mutex); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -747,9 +813,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { - if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED || - hdcp->displays[i].adjust.disable) - continue; + if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE) + continue; + hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index; hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id; @@ -763,8 +829,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->displays[i].index); } - return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) ? MOD_HDCP_STATUS_SUCCESS - : MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION; + if (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) + status = MOD_HDCP_STATUS_SUCCESS; + else + status = MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE; + + mutex_unlock(&psp->hdcp_context.mutex); + return status; } enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *hdcp) @@ -774,7 +845,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *h struct ta_hdcp_shared_memory *hdcp_cmd; struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + mutex_lock(&psp->hdcp_context.mutex); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -789,15 +862,17 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *h hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) - return MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE; - - hdcp->auth.msg.hdcp2.stream_manage_size = msg_out->prepare.msg1_desc.msg_size; - - memcpy(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, &msg_out->prepare.transmitter_message[0], - sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage)); + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) { + status = MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE; + } else { + hdcp->auth.msg.hdcp2.stream_manage_size = msg_out->prepare.msg1_desc.msg_size; - return MOD_HDCP_STATUS_SUCCESS; + memcpy(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, + 
&msg_out->prepare.transmitter_message[0], + sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage)); + } + mutex_unlock(&psp->hdcp_context.mutex); + return status; } enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp) @@ -806,7 +881,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp) struct ta_hdcp_shared_memory *hdcp_cmd; struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + mutex_lock(&psp->hdcp_context.mutex); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -825,38 +902,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp) hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) && - (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) - ? MOD_HDCP_STATUS_SUCCESS - : MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE; -} - -enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp, - enum mod_hdcp_encryption_status *encryption_status) -{ - struct psp_context *psp = hdcp->config.psp.handle; - struct ta_hdcp_shared_memory *hdcp_cmd; - - hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; - - memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); - - hdcp_cmd->in_msg.hdcp2_get_encryption_status.session_handle = hdcp->auth.id; - hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level = 0; - hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_GET_ENCRYPTION_STATUS; - *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; - - psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - - if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) - return MOD_HDCP_STATUS_FAILURE; - - if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level == 1) { - if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.hdcp2_type == TA_HDCP2_CONTENT_TYPE__TYPE1) - *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON; - else - *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON; - } + if (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS && + msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) + status = MOD_HDCP_STATUS_SUCCESS; + else + status = MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE; - return MOD_HDCP_STATUS_SUCCESS; + mutex_unlock(&psp->hdcp_context.mutex); + return status; } + diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h index dbe7835aabcf..0ba3cf7f336a 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h @@ -83,6 +83,8 @@ struct mod_freesync_config { bool btr; unsigned int min_refresh_in_uhz; unsigned int max_refresh_in_uhz; + unsigned int fixed_refresh_in_uhz; + }; struct mod_vrr_params_btr { @@ -112,6 +114,7 @@ struct mod_vrr_params { uint32_t max_duration_in_us; uint32_t max_refresh_in_uhz; uint32_t min_duration_in_us; + uint32_t fixed_refresh_in_uhz; struct dc_crtc_timing_adjust adjust; diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h index c088602bc1a0..eed560eecbab 100644 --- 
a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -60,7 +60,7 @@ enum mod_hdcp_status { MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY, MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE, MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED, - MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION, + MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE, MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE, MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE, MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE, @@ -90,7 +90,7 @@ enum mod_hdcp_status { MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY, MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE, MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED, - MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION, + MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE, MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING, MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE, MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE, @@ -117,7 +117,6 @@ enum mod_hdcp_operation_mode { enum mod_hdcp_display_state { MOD_HDCP_DISPLAY_INACTIVE = 0, MOD_HDCP_DISPLAY_ACTIVE, - MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED, MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED }; diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h index fe2117904329..198c0e64d13a 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h @@ -40,8 +40,9 @@ enum color_transfer_func { enum vrr_packet_type { PACKET_TYPE_VRR, - PACKET_TYPE_FS1, - PACKET_TYPE_FS2, + PACKET_TYPE_FS_V1, + PACKET_TYPE_FS_V2, + PACKET_TYPE_FS_V3, PACKET_TYPE_VTEM }; diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index cff3ab15fc0c..22a5484d9f28 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -29,6 +29,7 @@ #include "mod_shared.h" #include "mod_freesync.h" #include "dc.h" +#include "dmub/inc/dmub_cmd_dal.h" enum vsc_packet_revision { vsc_packet_undefined = 0, @@ -144,7 +145,7 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, } /*VSC packet set to 2 when DP revision >= 1.2*/ - if (stream->psr_version != 0) + if (stream->link->psr_settings.psr_version != PSR_VERSION_UNSUPPORTED) vsc_packet_revision = vsc_packet_rev2; /* Update to revision 5 for extended colorimetry support */ diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index e75a4bb94488..8c37bcc27132 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -24,6 +24,9 @@ #include "power_helpers.h" #include "dc/inc/hw/dmcu.h" +#include "dc/inc/hw/abm.h" +#include "dc.h" +#include "core_types.h" #define DIV_ROUNDUP(a, b) (((a)+((b)/2))/(b)) @@ -237,7 +240,7 @@ static void fill_backlight_transform_table(struct dmcu_iram_parameters params, } static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters params, - struct iram_table_v_2_2 *table) + struct iram_table_v_2_2 *table, bool big_endian) { unsigned int i; unsigned int num_entries = NUM_BL_CURVE_SEGS; @@ -261,10 +264,12 @@ static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters par lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1); ASSERT(lut_index < params.backlight_lut_array_size); 
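/*
 * Editor's note: the hunk below makes the byte order of each IRAM table
 * entry depend on a new big_endian flag. Judging from the callers later
 * in this file, dmcu_load_iram() passes true (the DMCU firmware consumes
 * the table big-endian) while the new dmub_init_abm_config() passes false
 * (DMUB expects little-endian). A minimal sketch of the selection, with a
 * hypothetical helper name; sparse __be16/__le16 annotations are elided:
 */
static inline unsigned short iram_entry(unsigned short value, bool big_endian)
{
	return big_endian ? (unsigned short)cpu_to_be16(value)
			  : (unsigned short)cpu_to_le16(value);
}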
- table->backlight_thresholds[i] = - cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries)); - table->backlight_offsets[i] = - cpu_to_be16(params.backlight_lut_array[lut_index]); + table->backlight_thresholds[i] = (big_endian) ? + cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries)) : + cpu_to_le16(DIV_ROUNDUP((i * 65536), num_entries)); + table->backlight_offsets[i] = (big_endian) ? + cpu_to_be16(params.backlight_lut_array[lut_index]) : + cpu_to_le16(params.backlight_lut_array[lut_index]); } } @@ -584,18 +589,18 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame ram_table->crgb_slope[7] = cpu_to_be16(0x1910); fill_backlight_transform_table_v_2_2( - params, ram_table); + params, ram_table, true); } -void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params) +void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params, bool big_endian) { unsigned int i, j; unsigned int set = params.set; ram_table->flags = 0x0; - - ram_table->min_abm_backlight = - cpu_to_be16(params.min_abm_backlight); + ram_table->min_abm_backlight = (big_endian) ? + cpu_to_be16(params.min_abm_backlight) : + cpu_to_le16(params.min_abm_backlight); for (i = 0; i < NUM_AGGR_LEVEL; i++) { ram_table->hybrid_factor[i] = abm_settings[set][i].brightness_gain; @@ -619,33 +624,51 @@ void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame ram_table->iir_curve[4] = 0x65; //Gamma 2.2 - ram_table->crgb_thresh[0] = cpu_to_be16(0x127c); - ram_table->crgb_thresh[1] = cpu_to_be16(0x151b); - ram_table->crgb_thresh[2] = cpu_to_be16(0x17d5); - ram_table->crgb_thresh[3] = cpu_to_be16(0x1a56); - ram_table->crgb_thresh[4] = cpu_to_be16(0x1c83); - ram_table->crgb_thresh[5] = cpu_to_be16(0x1e72); - ram_table->crgb_thresh[6] = cpu_to_be16(0x20f0); - ram_table->crgb_thresh[7] = cpu_to_be16(0x232b); - ram_table->crgb_offset[0] = cpu_to_be16(0x2999); - ram_table->crgb_offset[1] = cpu_to_be16(0x3999); - ram_table->crgb_offset[2] = cpu_to_be16(0x4666); - ram_table->crgb_offset[3] = cpu_to_be16(0x5999); - ram_table->crgb_offset[4] = cpu_to_be16(0x6333); - ram_table->crgb_offset[5] = cpu_to_be16(0x7800); - ram_table->crgb_offset[6] = cpu_to_be16(0x8c00); - ram_table->crgb_offset[7] = cpu_to_be16(0xa000); - ram_table->crgb_slope[0] = cpu_to_be16(0x3609); - ram_table->crgb_slope[1] = cpu_to_be16(0x2dfa); - ram_table->crgb_slope[2] = cpu_to_be16(0x27ea); - ram_table->crgb_slope[3] = cpu_to_be16(0x235d); - ram_table->crgb_slope[4] = cpu_to_be16(0x2042); - ram_table->crgb_slope[5] = cpu_to_be16(0x1dc3); - ram_table->crgb_slope[6] = cpu_to_be16(0x1b1a); - ram_table->crgb_slope[7] = cpu_to_be16(0x1910); + ram_table->crgb_thresh[0] = (big_endian) ? cpu_to_be16(0x127c) : cpu_to_le16(0x127c); + ram_table->crgb_thresh[1] = (big_endian) ? cpu_to_be16(0x151b) : cpu_to_le16(0x151b); + ram_table->crgb_thresh[2] = (big_endian) ? cpu_to_be16(0x17d5) : cpu_to_le16(0x17d5); + ram_table->crgb_thresh[3] = (big_endian) ? cpu_to_be16(0x1a56) : cpu_to_le16(0x1a56); + ram_table->crgb_thresh[4] = (big_endian) ? cpu_to_be16(0x1c83) : cpu_to_le16(0x1c83); + ram_table->crgb_thresh[5] = (big_endian) ? cpu_to_be16(0x1e72) : cpu_to_le16(0x1e72); + ram_table->crgb_thresh[6] = (big_endian) ? cpu_to_be16(0x20f0) : cpu_to_le16(0x20f0); + ram_table->crgb_thresh[7] = (big_endian) ? cpu_to_be16(0x232b) : cpu_to_le16(0x232b); + ram_table->crgb_offset[0] = (big_endian) ? cpu_to_be16(0x2999) : cpu_to_le16(0x2999); + ram_table->crgb_offset[1] = (big_endian) ? 
cpu_to_be16(0x3999) : cpu_to_le16(0x3999); + ram_table->crgb_offset[2] = (big_endian) ? cpu_to_be16(0x4666) : cpu_to_le16(0x4666); + ram_table->crgb_offset[3] = (big_endian) ? cpu_to_be16(0x5999) : cpu_to_le16(0x5999); + ram_table->crgb_offset[4] = (big_endian) ? cpu_to_be16(0x6333) : cpu_to_le16(0x6333); + ram_table->crgb_offset[5] = (big_endian) ? cpu_to_be16(0x7800) : cpu_to_le16(0x7800); + ram_table->crgb_offset[6] = (big_endian) ? cpu_to_be16(0x8c00) : cpu_to_le16(0x8c00); + ram_table->crgb_offset[7] = (big_endian) ? cpu_to_be16(0xa000) : cpu_to_le16(0xa000); + ram_table->crgb_slope[0] = (big_endian) ? cpu_to_be16(0x3609) : cpu_to_le16(0x3609); + ram_table->crgb_slope[1] = (big_endian) ? cpu_to_be16(0x2dfa) : cpu_to_le16(0x2dfa); + ram_table->crgb_slope[2] = (big_endian) ? cpu_to_be16(0x27ea) : cpu_to_le16(0x27ea); + ram_table->crgb_slope[3] = (big_endian) ? cpu_to_be16(0x235d) : cpu_to_le16(0x235d); + ram_table->crgb_slope[4] = (big_endian) ? cpu_to_be16(0x2042) : cpu_to_le16(0x2042); + ram_table->crgb_slope[5] = (big_endian) ? cpu_to_be16(0x1dc3) : cpu_to_le16(0x1dc3); + ram_table->crgb_slope[6] = (big_endian) ? cpu_to_be16(0x1b1a) : cpu_to_le16(0x1b1a); + ram_table->crgb_slope[7] = (big_endian) ? cpu_to_be16(0x1910) : cpu_to_le16(0x1910); fill_backlight_transform_table_v_2_2( - params, ram_table); + params, ram_table, big_endian); +} + +bool dmub_init_abm_config(struct abm *abm, + struct dmcu_iram_parameters params) +{ + unsigned char ram_table[IRAM_SIZE]; + bool result = false; + + if (abm == NULL) + return false; + + memset(&ram_table, 0, sizeof(ram_table)); + + fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, false); + result = abm->funcs->init_abm_config( + abm, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2); + + return result; } bool dmcu_load_iram(struct dmcu *dmcu, @@ -657,17 +680,17 @@ bool dmcu_load_iram(struct dmcu *dmcu, if (dmcu == NULL) return false; - if (!dmcu->funcs->is_dmcu_initialized(dmcu)) + if (dmcu && !dmcu->funcs->is_dmcu_initialized(dmcu)) return true; memset(&ram_table, 0, sizeof(ram_table)); if (dmcu->dmcu_version.abm_version == 0x24) { - fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params); - result = dmcu->funcs->load_iram( - dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2); + fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, true); + result = dmcu->funcs->load_iram( + dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2); } else if (dmcu->dmcu_version.abm_version == 0x23) { - fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params); + fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, true); result = dmcu->funcs->load_iram( dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2); diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h index e54157026330..46fbca2e2cd1 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h @@ -26,6 +26,7 @@ #define MODULES_POWER_POWER_HELPERS_H_ #include "dc/inc/hw/dmcu.h" +#include "dc/inc/hw/abm.h" enum abm_defines { @@ -44,5 +45,7 @@ struct dmcu_iram_parameters { bool dmcu_load_iram(struct dmcu *dmcu, struct dmcu_iram_parameters params); +bool dmub_init_abm_config(struct abm *abm, + struct dmcu_iram_parameters params); #endif /* MODULES_POWER_POWER_HELPERS_H_ */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h 
b/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h index 68d0ffad28c7..92fd27c26a77 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h @@ -1162,8 +1162,10 @@ #define mmRCC_CONFIG_MEMSIZE_BASE_IDX 0 #define mmRCC_CONFIG_RESERVED 0x0de4 // duplicate #define mmRCC_CONFIG_RESERVED_BASE_IDX 0 +#ifndef mmRCC_IOV_FUNC_IDENTIFIER #define mmRCC_IOV_FUNC_IDENTIFIER 0x0de5 // duplicate #define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX 0 +#endif // addressBlock: syshub_mmreg_ind_syshubdec diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h index 435462294fbc..a7cd760ebf8f 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h @@ -4251,8 +4251,10 @@ #define mmRCC_CONFIG_MEMSIZE_BASE_IDX 2 #define mmRCC_CONFIG_RESERVED 0x00c4 #define mmRCC_CONFIG_RESERVED_BASE_IDX 2 +#ifndef mmRCC_IOV_FUNC_IDENTIFIER #define mmRCC_IOV_FUNC_IDENTIFIER 0x00c5 #define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2 +#endif // addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1 diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h index ce5830ebe095..0c5a08bc034a 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h @@ -2687,8 +2687,10 @@ #define mmRCC_CONFIG_MEMSIZE_BASE_IDX 2 #define mmRCC_CONFIG_RESERVED 0x00c4 #define mmRCC_CONFIG_RESERVED_BASE_IDX 2 +#ifndef mmRCC_IOV_FUNC_IDENTIFIER #define mmRCC_IOV_FUNC_IDENTIFIER 0x00c5 #define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2 +#endif // addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1 diff --git a/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h new file mode 100644 index 000000000000..e87c359ea1fe --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _pwr_10_0_OFFSET_HEADER +#define _pwr_10_0_OFFSET_HEADER + +#define mmPWR_MISC_CNTL_STATUS 0x0183 +#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h new file mode 100644 index 000000000000..8a000c21651c --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _pwr_10_0_SH_MASK_HEADER +#define _pwr_10_0_SH_MASK_HEADER + +//PWR_MISC_CNTL_STATUS +#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0 +#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1 +#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L +#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h new file mode 100644 index 000000000000..9bf73284ad73 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _smuio_12_0_0_OFFSET_HEADER +#define _smuio_12_0_0_OFFSET_HEADER + +#define mmSMUIO_GFX_MISC_CNTL 0x00c8 +#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0 + +#define mmPWR_MISC_CNTL_STATUS 0x0183 +#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 1 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h new file mode 100644 index 000000000000..26556fa3d054 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _smuio_12_0_0_SH_MASK_HEADER +#define _smuio_12_0_0_SH_MASK_HEADER + +//SMUIO_GFX_MISC_CNTL +#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L +#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1 +//PWR_MISC_CNTL_STATUS +#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0 +#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1 +#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L +#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L + +#endif diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 70146518174c..a6f6e6bf5992 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -1876,6 +1876,108 @@ struct atom_smc_dpm_info_v4_6 uint32_t boardreserved[10]; }; +struct atom_smc_dpm_info_v4_7 +{ + struct atom_common_table_header table_header; + // SECTION: BOARD PARAMETERS + // I2C Control + struct smudpm_i2c_controller_config_v2 I2cControllers[8]; + + // SVI2 Board Parameters + uint16_t MaxVoltageStepGfx; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value. + uint16_t MaxVoltageStepSoc; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value. 
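/*
 * Editor's note: several board parameters in this table are fixed-point.
 * "mV(Q2)" means two fractional bits (a step of 100.25 mV is stored as
 * 401), "Q4.4" scales by 16, and the Q16.16 MvddRatio further below
 * scales by 65536. A hypothetical encoding sketch, illustration only;
 * driver code would use integer math rather than floating point:
 */
static inline uint16_t to_q2(double millivolts)
{
	return (uint16_t)(millivolts * 4.0 + 0.5);	/* 2 fractional bits */
}

static inline uint8_t to_q4_4(double percent)
{
	return (uint8_t)(percent * 16.0 + 0.5);		/* 4 fractional bits */
}

static inline uint32_t to_q16_16(double ratio)
{
	return (uint32_t)(ratio * 65536.0 + 0.5);	/* 16 fractional bits */
}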
+ + uint8_t VddGfxVrMapping; // Use VR_MAPPING* bitfields + uint8_t VddSocVrMapping; // Use VR_MAPPING* bitfields + uint8_t VddMem0VrMapping; // Use VR_MAPPING* bitfields + uint8_t VddMem1VrMapping; // Use VR_MAPPING* bitfields + + uint8_t GfxUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode + uint8_t SocUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode + uint8_t ExternalSensorPresent; // External RDI connected to TMON (aka TEMP IN) + uint8_t Padding8_V; + + // Telemetry Settings + uint16_t GfxMaxCurrent; // in Amps + uint8_t GfxOffset; // in Amps + uint8_t Padding_TelemetryGfx; + uint16_t SocMaxCurrent; // in Amps + uint8_t SocOffset; // in Amps + uint8_t Padding_TelemetrySoc; + + uint16_t Mem0MaxCurrent; // in Amps + uint8_t Mem0Offset; // in Amps + uint8_t Padding_TelemetryMem0; + + uint16_t Mem1MaxCurrent; // in Amps + uint8_t Mem1Offset; // in Amps + uint8_t Padding_TelemetryMem1; + + // GPIO Settings + uint8_t AcDcGpio; // GPIO pin configured for AC/DC switching + uint8_t AcDcPolarity; // GPIO polarity for AC/DC switching + uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event + uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event + + uint8_t VR1HotGpio; // GPIO pin configured for VR1 HOT event + uint8_t VR1HotPolarity; // GPIO polarity for VR1 HOT event + uint8_t GthrGpio; // GPIO pin configured for GTHR Event + uint8_t GthrPolarity; // replace GPIO polarity for GTHR + + // LED Display Settings + uint8_t LedPin0; // GPIO number for LedPin[0] + uint8_t LedPin1; // GPIO number for LedPin[1] + uint8_t LedPin2; // GPIO number for LedPin[2] + uint8_t padding8_4; + + // GFXCLK PLL Spread Spectrum + uint8_t PllGfxclkSpreadEnabled; // on or off + uint8_t PllGfxclkSpreadPercent; // Q4.4 + uint16_t PllGfxclkSpreadFreq; // kHz + + // GFXCLK DFLL Spread Spectrum + uint8_t DfllGfxclkSpreadEnabled; // on or off + uint8_t DfllGfxclkSpreadPercent; // Q4.4 + uint16_t DfllGfxclkSpreadFreq; // kHz + + // UCLK Spread Spectrum + uint8_t UclkSpreadEnabled; // on or off + uint8_t UclkSpreadPercent; // Q4.4 + uint16_t UclkSpreadFreq; // kHz + + // SOCCLK Spread Spectrum + uint8_t SoclkSpreadEnabled; // on or off + uint8_t SocclkSpreadPercent; // Q4.4 + uint16_t SocclkSpreadFreq; // kHz + + // Total board power + uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power + uint16_t BoardPadding; + + // Mvdd Svi2 Div Ratio Setting + uint32_t MvddRatio; // This is used for MVDD Vid workaround. 
It has 16 fractional bits (Q16.16) + + // GPIO pins for I2C communications with 2nd controller for Input Telemetry Sequence + uint8_t GpioI2cScl; // Serial Clock + uint8_t GpioI2cSda; // Serial Data + uint16_t GpioPadding; + + // Additional LED Display Settings + uint8_t LedPin3; // GPIO number for LedPin[3] - PCIE GEN Speed + uint8_t LedPin4; // GPIO number for LedPin[4] - PMFW Error Status + uint16_t LedEnableMask; + + // Power Limit Scalars + uint8_t PowerLimitScalar[4]; //[PPT_THROTTLER_COUNT] + + uint8_t MvddUlvPhaseSheddingMask; + uint8_t VddciUlvPhaseSheddingMask; + uint8_t Padding8_Psi1; + uint8_t Padding8_Psi2; + + uint32_t BoardReserved[5]; +}; + /* *************************************************************************** Data Table asic_profiling_info structure diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index a69deb3a2ac0..60a6536ff656 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -32,7 +32,6 @@ struct cgs_device; * enum cgs_ind_reg - Indirect register spaces */ enum cgs_ind_reg { - CGS_IND_REG__MMIO, CGS_IND_REG__PCIE, CGS_IND_REG__SMC, CGS_IND_REG__UVD_CTX, diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 2a12614a12c2..fdff3e1c5e95 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -50,6 +50,7 @@ static int amd_powerplay_create(struct amdgpu_device *adev) hwmgr->not_vf = !amdgpu_sriov_vf(adev); hwmgr->device = amdgpu_cgs_create_device(adev); mutex_init(&hwmgr->smu_lock); + mutex_init(&hwmgr->msg_lock); hwmgr->chip_family = adev->family; hwmgr->chip_id = adev->asic_type; hwmgr->feature_mask = adev->pm.pp_feature; @@ -64,6 +65,8 @@ static void amd_powerplay_destroy(struct amdgpu_device *adev) { struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; + mutex_destroy(&hwmgr->msg_lock); + kfree(hwmgr->hardcode_pp_table); hwmgr->hardcode_pp_table = NULL; diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index e8b27fab6aa1..88b4e5642302 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -62,6 +62,7 @@ const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf) { + struct amdgpu_device *adev = smu->adev; size_t size = 0; int ret = 0, i = 0; uint32_t feature_mask[2] = { 0 }; @@ -70,6 +71,9 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf) uint32_t sort_feature[SMU_FEATURE_COUNT]; uint64_t hw_feature_count = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); @@ -110,9 +114,6 @@ static int smu_feature_update_enable_state(struct smu_context *smu, uint32_t feature_low = 0, feature_high = 0; int ret = 0; - if (!smu->pm_enabled) - return ret; - feature_low = (feature_mask >> 0 ) & 0xffffffff; feature_high = (feature_mask >> 32) & 0xffffffff; @@ -155,6 +156,10 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask) uint64_t feature_2_enabled = 0; uint64_t feature_2_disabled = 0; uint64_t feature_enables = 0; + struct amdgpu_device *adev = smu->adev; + + if (!adev->pm.dpm_enabled) + return -EINVAL; mutex_lock(&smu->mutex); @@ -191,16 +196,31 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t if 
(!if_version && !smu_version) return -EINVAL; + if (smu->smc_fw_if_version && smu->smc_fw_version) + { + if (if_version) + *if_version = smu->smc_fw_if_version; + + if (smu_version) + *smu_version = smu->smc_fw_version; + + return 0; + } + if (if_version) { ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version); if (ret) return ret; + + smu->smc_fw_if_version = *if_version; } if (smu_version) { ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version); if (ret) return ret; + + smu->smc_fw_version = *smu_version; } return ret; @@ -417,8 +437,12 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type) int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type, bool gate) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + switch (block_type) { case AMD_IP_BLOCK_TYPE_UVD: ret = smu_dpm_set_uvd_enable(smu, !gate); @@ -547,7 +571,10 @@ bool is_support_sw_smu(struct amdgpu_device *adev) if (adev->asic_type == CHIP_VEGA20) return (amdgpu_dpm == 2) ? true : false; else if (adev->asic_type >= CHIP_ARCTURUS) { - if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) + if (amdgpu_sriov_vf(adev) && + !(adev->asic_type == CHIP_ARCTURUS && + amdgpu_sriov_is_pp_one_vf(adev))) + return false; else return true; @@ -569,8 +596,12 @@ bool is_support_sw_smu_xgmi(struct amdgpu_device *adev) int smu_sys_get_pp_table(struct smu_context *smu, void **table) { struct smu_table_context *smu_table = &smu->smu_table; + struct amdgpu_device *adev = smu->adev; uint32_t powerplay_table_size; + if (!adev->pm.dpm_enabled) + return -EINVAL; + if (!smu_table->power_play_table && !smu_table->hardcode_pptable) return -EINVAL; @@ -591,11 +622,13 @@ int smu_sys_get_pp_table(struct smu_context *smu, void **table) int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size) { struct smu_table_context *smu_table = &smu->smu_table; + struct amdgpu_device *adev = smu->adev; ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf; int ret = 0; - if (!smu->pm_enabled) + if (!adev->pm.dpm_enabled) return -EINVAL; + if (header->usStructureSize != size) { pr_err("pp table size not matched !\n"); return -EIO; @@ -636,8 +669,6 @@ int smu_feature_init_dpm(struct smu_context *smu) int ret = 0; uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32]; - if (!smu->pm_enabled) - return ret; mutex_lock(&feature->mutex); bitmap_zero(feature->allowed, SMU_FEATURE_MAX); mutex_unlock(&feature->mutex); @@ -932,13 +963,6 @@ static int smu_sw_init(void *handle) return ret; } - if (adev->smu.ppt_funcs->i2c_eeprom_init) { - ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c); - - if (ret) - return ret; - } - return 0; } @@ -948,9 +972,6 @@ static int smu_sw_fini(void *handle) struct smu_context *smu = &adev->smu; int ret; - if (adev->smu.ppt_funcs->i2c_eeprom_fini) - smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c); - kfree(smu->irq_source); smu->irq_source = NULL; @@ -1366,10 +1387,11 @@ static int smu_hw_init(void *handle) if (ret) goto failed; - if (!smu->pm_enabled) - adev->pm.dpm_enabled = false; - else - adev->pm.dpm_enabled = true; /* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */ + ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c); + if (ret) + goto failed; + + adev->pm.dpm_enabled = true; pr_info("SMU is initialized successfully!\n"); @@ -1403,6 +1425,10 @@ static int smu_hw_fini(void *handle) if (!smu->pm_enabled) return 0; + adev->pm.dpm_enabled = false; + + smu_i2c_eeprom_fini(smu, 
&adev->pm.smu_i2c); + if (!amdgpu_sriov_vf(adev)){ ret = smu_stop_thermal_control(smu); if (ret) { @@ -1542,6 +1568,10 @@ static int smu_suspend(void *handle) if (!smu->pm_enabled) return 0; + adev->pm.dpm_enabled = false; + + smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c); + if(!amdgpu_sriov_vf(adev)) { ret = smu_disable_dpm(smu); if (ret) @@ -1587,11 +1617,17 @@ static int smu_resume(void *handle) if (ret) goto failed; + ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c); + if (ret) + goto failed; + if (smu->is_apu) smu_set_gfx_cgpg(&adev->smu, true); smu->disable_uclk_switch = 0; + adev->pm.dpm_enabled = true; + pr_info("SMU is resumed successfully!\n"); return 0; @@ -1603,10 +1639,14 @@ failed: int smu_display_configuration_change(struct smu_context *smu, const struct amd_pp_display_configuration *display_config) { + struct amdgpu_device *adev = smu->adev; int index = 0; int num_of_active_display = 0; - if (!smu->pm_enabled || !is_support_sw_smu(smu->adev)) + if (!adev->pm.dpm_enabled) + return -EINVAL; + + if (!is_support_sw_smu(smu->adev)) return -EINVAL; if (!display_config) @@ -1668,12 +1708,16 @@ int smu_get_current_clocks(struct smu_context *smu, struct amd_pp_clock_info *clocks) { struct amd_pp_simple_clock_info simple_clocks = {0}; + struct amdgpu_device *adev = smu->adev; struct smu_clock_info hw_clocks; int ret = 0; if (!is_support_sw_smu(smu->adev)) return -EINVAL; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); smu_get_dal_power_level(smu, &simple_clocks); @@ -1736,7 +1780,7 @@ static int smu_enable_umd_pstate(void *handle, struct smu_context *smu = (struct smu_context*)(handle); struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)) + if (!smu->is_apu && !smu_dpm_ctx->dpm_context) return -EINVAL; if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) { @@ -1778,9 +1822,6 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, long workload; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - if (!smu->pm_enabled) - return -EINVAL; - if (!skip_display_settings) { ret = smu_display_config_changed(smu); if (ret) { @@ -1831,8 +1872,12 @@ int smu_handle_task(struct smu_context *smu, enum amd_pp_task task_id, bool lock_needed) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + if (lock_needed) mutex_lock(&smu->mutex); @@ -1866,10 +1911,11 @@ int smu_switch_power_profile(struct smu_context *smu, bool en) { struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); + struct amdgpu_device *adev = smu->adev; long workload; uint32_t index; - if (!smu->pm_enabled) + if (!adev->pm.dpm_enabled) return -EINVAL; if (!(type < PP_SMC_POWER_PROFILE_CUSTOM)) @@ -1900,8 +1946,12 @@ int smu_switch_power_profile(struct smu_context *smu, enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu) { struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); + struct amdgpu_device *adev = smu->adev; enum amd_dpm_forced_level level; + if (!adev->pm.dpm_enabled) + return -EINVAL; + if (!smu->is_apu && !smu_dpm_ctx->dpm_context) return -EINVAL; @@ -1915,8 +1965,12 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu) int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) { struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 
return -EINVAL; @@ -1939,8 +1993,12 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev int smu_set_display_count(struct smu_context *smu, uint32_t count) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); ret = smu_init_display_count(smu, count); mutex_unlock(&smu->mutex); @@ -1954,8 +2012,12 @@ int smu_force_clk_levels(struct smu_context *smu, bool lock_needed) { struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { pr_debug("force clock level is for dpm manual mode only.\n"); return -EINVAL; @@ -1973,20 +2035,19 @@ int smu_force_clk_levels(struct smu_context *smu, return ret; } +/* + * On system suspend or reset, the dpm_enabled + * flag will be cleared, so that SMU services which + * are not supported will be gated. + * However, the mp1 state setting should still be granted + * even if the dpm_enabled flag is cleared. + */ int smu_set_mp1_state(struct smu_context *smu, enum pp_mp1_state mp1_state) { uint16_t msg; int ret; - /* - * The SMC is not fully ready. That may be - * expected as the IP may be masked. - * So, just return without error. - */ - if (!smu->pm_enabled) - return 0; - mutex_lock(&smu->mutex); switch (mp1_state) { @@ -2023,15 +2084,11 @@ int smu_set_mp1_state(struct smu_context *smu, int smu_set_df_cstate(struct smu_context *smu, enum pp_df_cstate state) { + struct amdgpu_device *adev = smu->adev; int ret = 0; - /* - * The SMC is not fully ready. That may be - * expected as the IP may be masked. - * So, just return without error. - */ - if (!smu->pm_enabled) - return 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate) return 0; @@ -2065,6 +2122,10 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu, struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges) { void *table = smu->smu_table.watermarks_table; + struct amdgpu_device *adev = smu->adev; + + if (!adev->pm.dpm_enabled) + return -EINVAL; if (!table) return -EINVAL; @@ -2089,8 +2150,12 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu, int smu_set_ac_dc(struct smu_context *smu) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + /* controlled by firmware */ if (smu->dc_controlled_by_gpio) return 0; @@ -2149,8 +2214,12 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block = int smu_load_microcode(struct smu_context *smu) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->load_microcode) @@ -2163,8 +2232,12 @@ int smu_load_microcode(struct smu_context *smu) int smu_check_fw_status(struct smu_context *smu) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->check_fw_status) @@ -2191,8 +2264,12 @@ int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->set_fan_speed_rpm) @@ -2208,10 +2285,15 @@ int smu_get_power_limit(struct smu_context *smu, bool def, bool lock_needed) { + struct
amdgpu_device *adev = smu->adev; int ret = 0; - if (lock_needed) + if (lock_needed) { + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); + } if (smu->ppt_funcs->get_power_limit) ret = smu->ppt_funcs->get_power_limit(smu, limit, def); @@ -2224,8 +2306,12 @@ int smu_get_power_limit(struct smu_context *smu, int smu_set_power_limit(struct smu_context *smu, uint32_t limit) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->set_power_limit) @@ -2238,8 +2324,12 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit) int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->print_clk_levels) @@ -2252,8 +2342,12 @@ int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, ch int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->get_od_percentage) @@ -2266,8 +2360,12 @@ int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type) int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->set_od_percentage) @@ -2282,8 +2380,12 @@ int smu_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long *input, uint32_t size) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->od_edit_dpm_table) @@ -2298,8 +2400,12 @@ int smu_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, void *data, uint32_t *size) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->read_sensor) @@ -2312,8 +2418,12 @@ int smu_read_sensor(struct smu_context *smu, int smu_get_power_profile_mode(struct smu_context *smu, char *buf) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->get_power_profile_mode) @@ -2329,8 +2439,12 @@ int smu_set_power_profile_mode(struct smu_context *smu, uint32_t param_size, bool lock_needed) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + if (lock_needed) mutex_lock(&smu->mutex); @@ -2346,8 +2460,12 @@ int smu_set_power_profile_mode(struct smu_context *smu, int smu_get_fan_control_mode(struct smu_context *smu) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->get_fan_control_mode) @@ -2360,8 +2478,12 @@ int smu_get_fan_control_mode(struct smu_context *smu) int smu_set_fan_control_mode(struct smu_context *smu, int value) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->set_fan_control_mode) @@ -2374,8 +2496,12 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value) int smu_get_fan_speed_percent(struct smu_context *smu, 
uint32_t *speed) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->get_fan_speed_percent) @@ -2388,8 +2514,12 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed) int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->set_fan_speed_percent) @@ -2402,8 +2532,12 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->get_fan_speed_rpm) @@ -2416,8 +2550,12 @@ int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed) int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->set_deep_sleep_dcefclk) @@ -2430,8 +2568,12 @@ int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk) int smu_set_active_display_count(struct smu_context *smu, uint32_t count) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + if (smu->ppt_funcs->set_active_display_count) ret = smu->ppt_funcs->set_active_display_count(smu, count); @@ -2442,8 +2584,12 @@ int smu_get_clock_by_type(struct smu_context *smu, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->get_clock_by_type) @@ -2457,8 +2603,12 @@ int smu_get_clock_by_type(struct smu_context *smu, int smu_get_max_high_clocks(struct smu_context *smu, struct amd_pp_simple_clock_info *clocks) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->get_max_high_clocks) @@ -2473,8 +2623,12 @@ int smu_get_clock_by_type_with_latency(struct smu_context *smu, enum smu_clk_type clk_type, struct pp_clock_levels_with_latency *clocks) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->get_clock_by_type_with_latency) @@ -2489,8 +2643,12 @@ int smu_get_clock_by_type_with_voltage(struct smu_context *smu, enum amd_pp_clock_type type, struct pp_clock_levels_with_voltage *clocks) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->get_clock_by_type_with_voltage) @@ -2505,8 +2663,12 @@ int smu_get_clock_by_type_with_voltage(struct smu_context *smu, int smu_display_clock_voltage_request(struct smu_context *smu, struct pp_display_clock_request *clock_req) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->display_clock_voltage_request) @@ -2520,8 +2682,12 @@ int smu_display_clock_voltage_request(struct smu_context *smu, int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch) { + struct amdgpu_device *adev = smu->adev; int ret = -EINVAL; + if (!adev->pm.dpm_enabled) 
+ return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->display_disable_memory_clock_switch) @@ -2534,8 +2700,12 @@ int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disabl int smu_notify_smu_enable_pwe(struct smu_context *smu) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->notify_smu_enable_pwe) @@ -2549,8 +2719,12 @@ int smu_notify_smu_enable_pwe(struct smu_context *smu) int smu_set_xgmi_pstate(struct smu_context *smu, uint32_t pstate) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->set_xgmi_pstate) @@ -2563,8 +2737,12 @@ int smu_set_xgmi_pstate(struct smu_context *smu, int smu_set_azalia_d3_pme(struct smu_context *smu) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->set_azalia_d3_pme) @@ -2575,6 +2753,14 @@ int smu_set_azalia_d3_pme(struct smu_context *smu) return ret; } +/* + * On system suspend or reset, the dpm_enabled + * flag will be cleared, so that SMU services which + * are not supported will be gated. + * + * However, the baco/mode1 resets should still be granted + * as they are still supported and necessary. + */ bool smu_baco_is_support(struct smu_context *smu) { bool ret = false; @@ -2646,8 +2832,12 @@ int smu_mode2_reset(struct smu_context *smu) int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc) @@ -2662,8 +2852,12 @@ int smu_get_uclk_dpm_states(struct smu_context *smu, unsigned int *clock_values_in_khz, unsigned int *num_states) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->get_uclk_dpm_states) @@ -2677,6 +2871,10 @@ int smu_get_uclk_dpm_states(struct smu_context *smu, enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu) { enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT; + struct amdgpu_device *adev = smu->adev; + + if (!adev->pm.dpm_enabled) + return -EINVAL; mutex_lock(&smu->mutex); @@ -2691,8 +2889,12 @@ enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu) int smu_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table) { + struct amdgpu_device *adev = smu->adev; int ret = 0; + if (!adev->pm.dpm_enabled) + return -EINVAL; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->get_dpm_clock_table) diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 1ef0923f7190..1c66b7d7139c 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -2226,12 +2226,8 @@ static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = { static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control) { struct amdgpu_device *adev = to_amdgpu_device(control); - struct smu_context *smu = &adev->smu; int res; - if (!smu->pm_enabled) - return -EOPNOTSUPP; - control->owner = THIS_MODULE; control->class = I2C_CLASS_SPD; control->dev.parent = &adev->pdev->dev; @@ -2247,12 +2243,6 @@ static int
arcturus_i2c_eeprom_control_init(struct i2c_adapter *control) static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control) { - struct amdgpu_device *adev = to_amdgpu_device(control); - struct smu_context *smu = &adev->smu; - - if (!smu->pm_enabled) - return; - i2c_del_adapter(control); } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index 77c14671866c..719597c5d27d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c @@ -984,6 +984,32 @@ static int init_thermal_controller( struct pp_hwmgr *hwmgr, const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) { + hwmgr->thermal_controller.ucType = + powerplay_table->sThermalController.ucType; + hwmgr->thermal_controller.ucI2cLine = + powerplay_table->sThermalController.ucI2cLine; + hwmgr->thermal_controller.ucI2cAddress = + powerplay_table->sThermalController.ucI2cAddress; + + hwmgr->thermal_controller.fanInfo.bNoFan = + (0 != (powerplay_table->sThermalController.ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN)); + + hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution = + powerplay_table->sThermalController.ucFanParameters & + ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK; + + hwmgr->thermal_controller.fanInfo.ulMinRPM + = powerplay_table->sThermalController.ucFanMinRPM * 100UL; + hwmgr->thermal_controller.fanInfo.ulMaxRPM + = powerplay_table->sThermalController.ucFanMaxRPM * 100UL; + + set_hw_cap(hwmgr, + ATOM_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType, + PHM_PlatformCaps_ThermalController); + + hwmgr->thermal_controller.use_hw_fan_control = 1; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 689072a312a7..1cc30f750c26 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -36,6 +36,8 @@ #include "power_state.h" #include "soc15_common.h" #include "smu10.h" +#include "asic_reg/pwr/pwr_10_0_offset.h" +#include "asic_reg/pwr/pwr_10_0_sh_mask.h" #define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5 #define SMU10_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */ @@ -43,13 +45,6 @@ #define SMU10_DISPCLK_BYPASS_THRESHOLD 10000 /* 100Mhz */ #define SMC_RAM_END 0x40000 -#define mmPWR_MISC_CNTL_STATUS 0x0183 -#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0 -#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0 -#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1 -#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L -#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L - static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic; @@ -81,7 +76,7 @@ static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr, pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!"); return -EINVAL; } - smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq); + smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq, NULL); return 0; } @@ -214,7 +209,8 @@ static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clo smu10_data->deep_sleep_dcefclk = clock; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, - smu10_data->deep_sleep_dcefclk); + smu10_data->deep_sleep_dcefclk, + NULL); } return 0; } @@ -228,7 +224,8 @@ static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t c 
smu10_data->dcf_actual_hard_min_freq = clock; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinDcefclkByFreq, - smu10_data->dcf_actual_hard_min_freq); + smu10_data->dcf_actual_hard_min_freq, + NULL); } return 0; } @@ -242,7 +239,8 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc smu10_data->f_actual_hard_min_freq = clock; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, - smu10_data->f_actual_hard_min_freq); + smu10_data->f_actual_hard_min_freq, + NULL); } return 0; } @@ -255,7 +253,8 @@ static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count smu10_data->num_active_display = count; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDisplayCount, - smu10_data->num_active_display); + smu10_data->num_active_display, + NULL); } return 0; @@ -278,7 +277,8 @@ static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr) if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetGfxCGPG, - true); + true, + NULL); else return 0; } @@ -324,7 +324,7 @@ static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr) struct amdgpu_device *adev = hwmgr->adev; if (adev->pm.pp_feature & PP_GFXOFF_MASK) { - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff, NULL); /* confirm gfx is back to "on" state */ while (!smu10_is_gfx_on(hwmgr)) @@ -344,7 +344,7 @@ static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr) struct amdgpu_device *adev = hwmgr->adev; if (adev->pm.pp_feature & PP_GFXOFF_MASK) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff, NULL); return 0; } @@ -479,12 +479,10 @@ static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr) smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk, ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]); - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency); - result = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &result); smu10_data->gfx_min_freq_limit = result / 10 * 1000; - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency); - result = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &result); smu10_data->gfx_max_freq_limit = result / 10 * 1000; return 0; @@ -588,116 +586,148 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinGfxClk, - data->gfx_max_freq_limit/100); + data->gfx_max_freq_limit/100, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, - SMU10_UMD_PSTATE_PEAK_FCLK); + SMU10_UMD_PSTATE_PEAK_FCLK, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinSocclkByFreq, - SMU10_UMD_PSTATE_PEAK_SOCCLK); + SMU10_UMD_PSTATE_PEAK_SOCCLK, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinVcn, - SMU10_UMD_PSTATE_VCE); + SMU10_UMD_PSTATE_VCE, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxGfxClk, - data->gfx_max_freq_limit/100); + data->gfx_max_freq_limit/100, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxFclkByFreq, - SMU10_UMD_PSTATE_PEAK_FCLK); + SMU10_UMD_PSTATE_PEAK_FCLK, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxSocclkByFreq, - SMU10_UMD_PSTATE_PEAK_SOCCLK); + SMU10_UMD_PSTATE_PEAK_SOCCLK, + NULL); 
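The mechanical change running through this and the following powerplay hunks is the new trailing readback argument on smum_send_msg_to_smc() and smum_send_msg_to_smc_with_parameter(): callers that used to send a message and then fetch the SMC response with a separate smum_get_argument() or register read now pass a pointer for the response, or NULL to discard it. A self-contained mock of the convention (mock types and names, not the kernel API):

#include <stdint.h>
#include <stdio.h>

/* Mock of the new calling convention: the response pointer rides along
 * with the message instead of being fetched afterwards.
 */
struct mock_hwmgr { uint32_t smc_arg; };

static int mock_send_msg_to_smc(struct mock_hwmgr *hwmgr, int msg, uint32_t *resp)
{
        hwmgr->smc_arg = 180;   /* pretend the SMC answered, e.g. a frequency */
        if (resp)               /* NULL means the caller ignores the response */
                *resp = hwmgr->smc_arg;
        return 0;
}

int main(void)
{
        struct mock_hwmgr hwmgr = { 0 };
        uint32_t now = 0;

        mock_send_msg_to_smc(&hwmgr, 1, &now); /* query: read the reply back */
        mock_send_msg_to_smc(&hwmgr, 2, NULL); /* command: fire and forget */
        printf("reply: %u\n", now);
        return 0;
}

Folding the readback into the send presumably lets the implementation hold the new hwmgr->msg_lock (added in amd_powerplay.c above) across the request/response pair, so a concurrent message cannot clobber the argument register in between.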
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxVcn, - SMU10_UMD_PSTATE_VCE); + SMU10_UMD_PSTATE_VCE, + NULL); break; case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinGfxClk, - min_sclk); + min_sclk, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxGfxClk, - min_sclk); + min_sclk, + NULL); break; case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, - min_mclk); + min_mclk, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxFclkByFreq, - min_mclk); + min_mclk, + NULL); break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinGfxClk, - SMU10_UMD_PSTATE_GFXCLK); + SMU10_UMD_PSTATE_GFXCLK, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, - SMU10_UMD_PSTATE_FCLK); + SMU10_UMD_PSTATE_FCLK, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinSocclkByFreq, - SMU10_UMD_PSTATE_SOCCLK); + SMU10_UMD_PSTATE_SOCCLK, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinVcn, - SMU10_UMD_PSTATE_VCE); + SMU10_UMD_PSTATE_VCE, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxGfxClk, - SMU10_UMD_PSTATE_GFXCLK); + SMU10_UMD_PSTATE_GFXCLK, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxFclkByFreq, - SMU10_UMD_PSTATE_FCLK); + SMU10_UMD_PSTATE_FCLK, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxSocclkByFreq, - SMU10_UMD_PSTATE_SOCCLK); + SMU10_UMD_PSTATE_SOCCLK, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxVcn, - SMU10_UMD_PSTATE_VCE); + SMU10_UMD_PSTATE_VCE, + NULL); break; case AMD_DPM_FORCED_LEVEL_AUTO: smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinGfxClk, - min_sclk); + min_sclk, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, hwmgr->display_config->num_display > 3 ? 
SMU10_UMD_PSTATE_PEAK_FCLK : - min_mclk); + min_mclk, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinSocclkByFreq, - SMU10_UMD_PSTATE_MIN_SOCCLK); + SMU10_UMD_PSTATE_MIN_SOCCLK, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinVcn, - SMU10_UMD_PSTATE_MIN_VCE); + SMU10_UMD_PSTATE_MIN_VCE, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxGfxClk, - data->gfx_max_freq_limit/100); + data->gfx_max_freq_limit/100, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxFclkByFreq, - SMU10_UMD_PSTATE_PEAK_FCLK); + SMU10_UMD_PSTATE_PEAK_FCLK, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxSocclkByFreq, - SMU10_UMD_PSTATE_PEAK_SOCCLK); + SMU10_UMD_PSTATE_PEAK_SOCCLK, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxVcn, - SMU10_UMD_PSTATE_VCE); + SMU10_UMD_PSTATE_VCE, + NULL); break; case AMD_DPM_FORCED_LEVEL_LOW: smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinGfxClk, - data->gfx_min_freq_limit/100); + data->gfx_min_freq_limit/100, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxGfxClk, - data->gfx_min_freq_limit/100); + data->gfx_min_freq_limit/100, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, - min_mclk); + min_mclk, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxFclkByFreq, - min_mclk); + min_mclk, + NULL); break; case AMD_DPM_FORCED_LEVEL_MANUAL: case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: @@ -849,13 +879,15 @@ static int smu10_force_clock_level(struct pp_hwmgr *hwmgr, PPSMC_MSG_SetHardMinGfxClk, low == 2 ? data->gfx_max_freq_limit/100 : low == 1 ? SMU10_UMD_PSTATE_GFXCLK : - data->gfx_min_freq_limit/100); + data->gfx_min_freq_limit/100, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxGfxClk, high == 0 ? data->gfx_min_freq_limit/100 : high == 1 ? SMU10_UMD_PSTATE_GFXCLK : - data->gfx_max_freq_limit/100); + data->gfx_max_freq_limit/100, + NULL); break; case PP_MCLK: @@ -864,11 +896,13 @@ static int smu10_force_clock_level(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, - mclk_table->entries[low].clk/100); + mclk_table->entries[low].clk/100, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxFclkByFreq, - mclk_table->entries[high].clk/100); + mclk_table->entries[high].clk/100, + NULL); break; case PP_PCIE: @@ -888,8 +922,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, switch (type) { case PP_SCLK: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency); - now = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now); /* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */ if (now == data->gfx_max_freq_limit/100) @@ -910,8 +943,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, i == 2 ? 
"*" : ""); break; case PP_MCLK: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency); - now = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now); for (i = 0; i < mclk_table->count; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", @@ -1122,15 +1154,13 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx, switch (idx) { case AMDGPU_PP_SENSOR_GFX_SCLK: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency); - sclk = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &sclk); /* in units of 10KHZ */ *((uint32_t *)value) = sclk * 100; *size = 4; break; case AMDGPU_PP_SENSOR_GFX_MCLK: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency); - mclk = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &mclk); /* in units of 10KHZ */ *((uint32_t *)value) = mclk * 100; *size = 4; @@ -1166,20 +1196,20 @@ static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr) { - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister); + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister, NULL); } static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr) { - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub); + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub, NULL); } static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate) { if (gate) - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma); + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma, NULL); else - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma); + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma, NULL); } static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate) @@ -1191,11 +1221,11 @@ static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate) AMD_IP_BLOCK_TYPE_VCN, AMD_PG_STATE_GATE); smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_PowerDownVcn, 0); + PPSMC_MSG_PowerDownVcn, 0, NULL); smu10_data->vcn_power_gated = true; } else { smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_PowerUpVcn, 0); + PPSMC_MSG_PowerUpVcn, 0, NULL); amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCN, AMD_PG_STATE_UNGATE); @@ -1304,7 +1334,8 @@ static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uin hwmgr->gfxoff_state_changed_by_workload = true; } result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify, - 1 << workload_type); + 1 << workload_type, + NULL); if (!result) hwmgr->power_profile_mode = input[size]; if (workload_type && hwmgr->gfxoff_state_changed_by_workload) { @@ -1319,7 +1350,8 @@ static int smu10_asic_reset(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mod { return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DeviceDriverReset, - mode); + mode, + NULL); } static const struct pp_hwmgr_func smu10_hwmgr_funcs = { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c index 683b29a99366..f2bda3bcbbde 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c @@ -29,14 +29,16 @@ static int smu7_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) { return smum_send_msg_to_smc(hwmgr, enable ? 
PPSMC_MSG_UVDDPM_Enable : - PPSMC_MSG_UVDDPM_Disable); + PPSMC_MSG_UVDDPM_Disable, + NULL); } static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) { return smum_send_msg_to_smc(hwmgr, enable ? PPSMC_MSG_VCEDPM_Enable : - PPSMC_MSG_VCEDPM_Disable); + PPSMC_MSG_VCEDPM_Disable, + NULL); } static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) @@ -57,7 +59,8 @@ int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr) { if (phm_cf_want_uvd_power_gating(hwmgr)) return smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_UVDPowerOFF); + PPSMC_MSG_UVDPowerOFF, + NULL); return 0; } @@ -67,10 +70,10 @@ static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDynamicPowerGating)) { return smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_UVDPowerON, 1); + PPSMC_MSG_UVDPowerON, 1, NULL); } else { return smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_UVDPowerON, 0); + PPSMC_MSG_UVDPowerON, 0, NULL); } } @@ -81,7 +84,8 @@ static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr) { if (phm_cf_want_vce_power_gating(hwmgr)) return smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_VCEPowerOFF); + PPSMC_MSG_VCEPowerOFF, + NULL); return 0; } @@ -89,7 +93,8 @@ static int smu7_powerup_vce(struct pp_hwmgr *hwmgr) { if (phm_cf_want_vce_power_gating(hwmgr)) return smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_VCEPowerON); + PPSMC_MSG_VCEPowerON, + NULL); return 0; } @@ -181,7 +186,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_GFX_CGCG_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr, msg, value)) + hwmgr, msg, value, NULL)) return -EINVAL; } if (PP_STATE_SUPPORT_LS & *msg_id) { @@ -191,7 +196,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_GFX_CGLS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr, msg, value)) + hwmgr, msg, value, NULL)) return -EINVAL; } break; @@ -204,7 +209,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_GFX_3DCG_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr, msg, value)) + hwmgr, msg, value, NULL)) return -EINVAL; } @@ -215,7 +220,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_GFX_3DLS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr, msg, value)) + hwmgr, msg, value, NULL)) return -EINVAL; } break; @@ -228,7 +233,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_GFX_RLC_LS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr, msg, value)) + hwmgr, msg, value, NULL)) return -EINVAL; } break; @@ -241,7 +246,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_GFX_CP_LS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr, msg, value)) + hwmgr, msg, value, NULL)) return -EINVAL; } break; @@ -255,7 +260,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, CG_GFX_OTHERS_MGCG_MASK); if (smum_send_msg_to_smc_with_parameter( - hwmgr, msg, value)) + hwmgr, msg, value, NULL)) return -EINVAL; } break; @@ -275,7 +280,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_BIF_MGCG_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr, msg, value)) + hwmgr, msg, value, NULL)) return -EINVAL; } if (PP_STATE_SUPPORT_LS & *msg_id) { @@ -285,7 +290,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_BIF_MGLS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr, msg, value)) + hwmgr, msg, value, NULL)) return -EINVAL; } break; @@ -298,7 +303,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = 
CG_SYS_MC_MGCG_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr, msg, value)) + hwmgr, msg, value, NULL)) return -EINVAL; } @@ -309,7 +314,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_MC_MGLS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr, msg, value)) + hwmgr, msg, value, NULL)) return -EINVAL; } break; @@ -322,7 +327,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_DRM_MGCG_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr, msg, value)) + hwmgr, msg, value, NULL)) return -EINVAL; } if (PP_STATE_SUPPORT_LS & *msg_id) { @@ -332,7 +337,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_DRM_MGLS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr, msg, value)) + hwmgr, msg, value, NULL)) return -EINVAL; } break; @@ -345,7 +350,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_HDP_MGCG_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr, msg, value)) + hwmgr, msg, value, NULL)) return -EINVAL; } @@ -356,7 +361,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_HDP_MGLS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr, msg, value)) + hwmgr, msg, value, NULL)) return -EINVAL; } break; @@ -369,7 +374,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_SDMA_MGCG_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr, msg, value)) + hwmgr, msg, value, NULL)) return -EINVAL; } @@ -380,7 +385,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_SDMA_MGLS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr, msg, value)) + hwmgr, msg, value, NULL)) return -EINVAL; } break; @@ -393,7 +398,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_ROM_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr, msg, value)) + hwmgr, msg, value, NULL)) return -EINVAL; } break; @@ -423,8 +428,10 @@ int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable) if (enable) return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GFX_CU_PG_ENABLE, - adev->gfx.cu_info.number); + adev->gfx.cu_info.number, + NULL); else return smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_GFX_CU_PG_DISABLE); + PPSMC_MSG_GFX_CU_PG_DISABLE, + NULL); } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 4795eb66b2b2..753cb2cf6b77 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -186,7 +186,7 @@ static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr) } if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable, NULL); return 0; } @@ -493,7 +493,7 @@ static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, static int smu7_reset_to_default(struct pp_hwmgr *hwmgr) { - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults); + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults, NULL); } /** @@ -979,7 +979,8 @@ static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)) return smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_EnableVRHotGPIOInterrupt); + PPSMC_MSG_EnableVRHotGPIOInterrupt, + NULL); return 0; } @@ -996,7 +997,7 @@ static int smu7_enable_ulv(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); if 
(data->ulv_supported) - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV); + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV, NULL); return 0; } @@ -1006,7 +1007,7 @@ static int smu7_disable_ulv(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); if (data->ulv_supported) - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV); + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV, NULL); return 0; } @@ -1015,13 +1016,14 @@ static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { - if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON)) + if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON, NULL)) PP_ASSERT_WITH_CODE(false, "Attempt to enable Master Deep Sleep switch failed!", return -EINVAL); } else { if (smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_MASTER_DeepSleep_OFF)) { + PPSMC_MSG_MASTER_DeepSleep_OFF, + NULL)) { PP_ASSERT_WITH_CODE(false, "Attempt to disable Master Deep Sleep switch failed!", return -EINVAL); @@ -1036,7 +1038,8 @@ static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { if (smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_MASTER_DeepSleep_OFF)) { + PPSMC_MSG_MASTER_DeepSleep_OFF, + NULL)) { PP_ASSERT_WITH_CODE(false, "Attempt to disable Master Deep Sleep switch failed!", return -EINVAL); @@ -1089,7 +1092,7 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) smu7_disable_sclk_vce_handshake(hwmgr); PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)), + (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable, NULL)), "Failed to enable SCLK DPM during DPM Start Function!", return -EINVAL); } @@ -1101,7 +1104,8 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE( (0 == smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_MCLKDPM_Enable)), + PPSMC_MSG_MCLKDPM_Enable, + NULL)), "Failed to enable MCLK DPM during DPM Start Function!", return -EINVAL); @@ -1172,7 +1176,8 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr) if (0 == data->pcie_dpm_key_disabled) { PP_ASSERT_WITH_CODE( (0 == smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_PCIeDPM_Enable)), + PPSMC_MSG_PCIeDPM_Enable, + NULL)), "Failed to enable pcie DPM during DPM Start Function!", return -EINVAL); } @@ -1180,7 +1185,8 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Falcon_QuickTransition)) { PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_EnableACDCGPIOInterrupt)), + PPSMC_MSG_EnableACDCGPIOInterrupt, + NULL)), "Failed to enable AC DC GPIO Interrupt!", ); } @@ -1197,7 +1203,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), "Trying to disable SCLK DPM when DPM is disabled", return 0); - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable, NULL); } /* disable MCLK dpm */ @@ -1205,7 +1211,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), "Trying to disable MCLK DPM when DPM is disabled", return 0); - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable, NULL); } return 0; @@ -1226,7 +1232,8 @@ static int smu7_stop_dpm(struct pp_hwmgr 
*hwmgr) if (!data->pcie_dpm_key_disabled) { PP_ASSERT_WITH_CODE( (smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_PCIeDPM_Disable) == 0), + PPSMC_MSG_PCIeDPM_Disable, + NULL) == 0), "Failed to disable pcie DPM during DPM Stop Function!", return -EINVAL); } @@ -1237,7 +1244,7 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr) "Trying to disable voltage DPM when DPM is disabled", return 0); - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable, NULL); return 0; } @@ -1388,7 +1395,7 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to enable VR hot GPIO interrupt!", result = tmp_result); - smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay); + smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL); tmp_result = smu7_enable_sclk_control(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), @@ -1446,14 +1453,14 @@ static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable) if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) { PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc( - hwmgr, PPSMC_MSG_EnableAvfs), + hwmgr, PPSMC_MSG_EnableAvfs, NULL), "Failed to enable AVFS!", return -EINVAL); } } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) { PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc( - hwmgr, PPSMC_MSG_DisableAvfs), + hwmgr, PPSMC_MSG_DisableAvfs, NULL), "Failed to disable AVFS!", return -EINVAL); } @@ -2609,7 +2616,8 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) if (level) smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_PCIeDPM_ForceLevel, level); + PPSMC_MSG_PCIeDPM_ForceLevel, level, + NULL); } } @@ -2623,7 +2631,8 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) if (level) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SCLKDPM_SetEnabledMask, - (1 << level)); + (1 << level), + NULL); } } @@ -2637,7 +2646,8 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) if (level) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_MCLKDPM_SetEnabledMask, - (1 << level)); + (1 << level), + NULL); } } @@ -2656,14 +2666,16 @@ static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.sclk_dpm_enable_mask); + data->dpm_level_enable_mask.sclk_dpm_enable_mask, + NULL); } if (!data->mclk_dpm_key_disabled) { if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_MCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.mclk_dpm_enable_mask); + data->dpm_level_enable_mask.mclk_dpm_enable_mask, + NULL); } return 0; @@ -2678,7 +2690,8 @@ static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr) if (!data->pcie_dpm_key_disabled) { smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_PCIeDPM_UnForceLevel); + PPSMC_MSG_PCIeDPM_UnForceLevel, + NULL); } return smu7_upload_dpm_level_enable_mask(hwmgr); @@ -2696,7 +2709,8 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr) data->dpm_level_enable_mask.sclk_dpm_enable_mask); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SCLKDPM_SetEnabledMask, - (1 << level)); + (1 << level), + NULL); } @@ -2706,7 +2720,8 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr) data->dpm_level_enable_mask.mclk_dpm_enable_mask); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_MCLKDPM_SetEnabledMask, - (1 << 
level)); + (1 << level), + NULL); } } @@ -2716,7 +2731,8 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr) data->dpm_level_enable_mask.pcie_dpm_enable_mask); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PCIeDPM_ForceLevel, - (level)); + (level), + NULL); } } @@ -3495,21 +3511,20 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query) (adev->asic_type != CHIP_BONAIRE) && (adev->asic_type != CHIP_FIJI) && (adev->asic_type != CHIP_TONGA)) { - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0); - tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0, &tmp); *query = tmp; if (tmp != 0) return 0; } - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart, NULL); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_PM_STATUS_95, 0); for (i = 0; i < 10; i++) { msleep(500); - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample, NULL); tmp = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_PM_STATUS_95); @@ -3534,14 +3549,12 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, switch (idx) { case AMDGPU_PP_SENSOR_GFX_SCLK: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency); - sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk); *((uint32_t *)value) = sclk; *size = 4; return 0; case AMDGPU_PP_SENSOR_GFX_MCLK: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency); - mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk); *((uint32_t *)value) = mclk; *size = 4; return 0; @@ -3730,7 +3743,8 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) "Trying to freeze SCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_SCLKDPM_FreezeLevel), + PPSMC_MSG_SCLKDPM_FreezeLevel, + NULL), "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", return -EINVAL); } @@ -3742,7 +3756,8 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) "Trying to freeze MCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_MCLKDPM_FreezeLevel), + PPSMC_MSG_MCLKDPM_FreezeLevel, + NULL), "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", return -EINVAL); } @@ -3884,7 +3899,8 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) "Trying to Unfreeze SCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_SCLKDPM_UnfreezeLevel), + PPSMC_MSG_SCLKDPM_UnfreezeLevel, + NULL), "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", return -EINVAL); } @@ -3896,7 +3912,8 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) "Trying to Unfreeze MCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_MCLKDPM_UnfreezeLevel), + PPSMC_MSG_MCLKDPM_UnfreezeLevel, + NULL), "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", return -EINVAL); } @@ -3949,12 +3966,14 @@ static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr) if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) { if (hwmgr->chip_id == CHIP_VEGAM) smum_send_msg_to_smc_with_parameter(hwmgr, - (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2); + 
(PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2, + NULL); else smum_send_msg_to_smc_with_parameter(hwmgr, - (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2); + (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2, + NULL); } - return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; + return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay, NULL) == 0) ? 0 : -EINVAL; } static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) @@ -4040,7 +4059,8 @@ static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; return smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); + PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm, + NULL); } static int @@ -4048,7 +4068,7 @@ smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) { PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; - return (smum_send_msg_to_smc(hwmgr, msg) == 0) ? 0 : -1; + return (smum_send_msg_to_smc(hwmgr, msg, NULL) == 0) ? 0 : -1; } static int @@ -4132,7 +4152,8 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm; return smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm); + PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm, + NULL); } static const struct amdgpu_irq_src_funcs smu7_irq_funcs = { @@ -4262,14 +4283,14 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr) if ((hwmgr->chip_id == CHIP_POLARIS10) || (hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12)) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC, NULL); } else { data->mem_latency_high = 330; data->mem_latency_low = 330; if ((hwmgr->chip_id == CHIP_POLARIS10) || (hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12)) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC, NULL); } return 0; @@ -4413,13 +4434,15 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, if (!data->sclk_dpm_key_disabled) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); + data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask, + NULL); break; case PP_MCLK: if (!data->mclk_dpm_key_disabled) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_MCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); + data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask, + NULL); break; case PP_PCIE: { @@ -4427,11 +4450,13 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, if (!data->pcie_dpm_key_disabled) { if (fls(tmp) != ffs(tmp)) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel, + NULL); else smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PCIeDPM_ForceLevel, - fls(tmp) - 1); + fls(tmp) - 1, + NULL); } break; } @@ -4457,8 +4482,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, switch (type) { case PP_SCLK: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency); - clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock); for (i = 0; i < sclk_table->count; i++) { if (clock > sclk_table->dpm_levels[i].value) @@ -4473,8 
+4497,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, (i == now) ? "*" : ""); break; case PP_MCLK: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency); - clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock); for (i = 0; i < mclk_table->count; i++) { if (clock > mclk_table->dpm_levels[i].value) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index 58f5589aaf12..5d4971576111 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -887,7 +887,10 @@ static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) didt_block |= block_en << TCP_Enable_SHIFT; if (enable) - result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Didt_Block_Function, didt_block); + result = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_Didt_Block_Function, + didt_block, + NULL); return result; } @@ -1009,7 +1012,8 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) if (hwmgr->chip_id == CHIP_POLARIS11) { result = smum_send_msg_to_smc(hwmgr, - (uint16_t)(PPSMC_MSG_EnableDpmDidt)); + (uint16_t)(PPSMC_MSG_EnableDpmDidt), + NULL); PP_ASSERT_WITH_CODE((0 == result), "Failed to enable DPM DIDT.", goto error); } @@ -1042,7 +1046,8 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr) goto error); if (hwmgr->chip_id == CHIP_POLARIS11) { result = smum_send_msg_to_smc(hwmgr, - (uint16_t)(PPSMC_MSG_DisableDpmDidt)); + (uint16_t)(PPSMC_MSG_DisableDpmDidt), + NULL); PP_ASSERT_WITH_CODE((0 == result), "Failed to disable DPM DIDT.", goto error); } @@ -1063,7 +1068,8 @@ int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr) if (PP_CAP(PHM_PlatformCaps_CAC)) { int smc_result; smc_result = smum_send_msg_to_smc(hwmgr, - (uint16_t)(PPSMC_MSG_EnableCac)); + (uint16_t)(PPSMC_MSG_EnableCac), + NULL); PP_ASSERT_WITH_CODE((0 == smc_result), "Failed to enable CAC in SMC.", result = -1); @@ -1079,7 +1085,8 @@ int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr) if (PP_CAP(PHM_PlatformCaps_CAC) && data->cac_enabled) { int smc_result = smum_send_msg_to_smc(hwmgr, - (uint16_t)(PPSMC_MSG_DisableCac)); + (uint16_t)(PPSMC_MSG_DisableCac), + NULL); PP_ASSERT_WITH_CODE((smc_result == 0), "Failed to disable CAC in SMC.", result = -1); @@ -1095,7 +1102,9 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) if (data->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) return smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_PkgPwrSetLimit, n<<8); + PPSMC_MSG_PkgPwrSetLimit, + n<<8, + NULL); return 0; } @@ -1103,7 +1112,9 @@ static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *hwmgr, uint32_t target_tdp) { return smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); + PPSMC_MSG_OverDriveSetTargetTdp, + target_tdp, + NULL); } int smu7_enable_power_containment(struct pp_hwmgr *hwmgr) @@ -1124,7 +1135,8 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr) if (PP_CAP(PHM_PlatformCaps_PowerContainment)) { if (data->enable_tdc_limit_feature) { smc_result = smum_send_msg_to_smc(hwmgr, - (uint16_t)(PPSMC_MSG_TDCLimitEnable)); + (uint16_t)(PPSMC_MSG_TDCLimitEnable), + NULL); PP_ASSERT_WITH_CODE((0 == smc_result), "Failed to enable TDCLimit in SMC.", result = -1;); if (0 == smc_result) @@ -1134,7 +1146,8 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr) if (data->enable_pkg_pwr_tracking_feature) { smc_result = 
smum_send_msg_to_smc(hwmgr, - (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable)); + (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable), + NULL); PP_ASSERT_WITH_CODE((0 == smc_result), "Failed to enable PkgPwrTracking in SMC.", result = -1;); if (0 == smc_result) { @@ -1163,7 +1176,8 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr) if (data->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit) { smc_result = smum_send_msg_to_smc(hwmgr, - (uint16_t)(PPSMC_MSG_TDCLimitDisable)); + (uint16_t)(PPSMC_MSG_TDCLimitDisable), + NULL); PP_ASSERT_WITH_CODE((smc_result == 0), "Failed to disable TDCLimit in SMC.", result = smc_result); @@ -1172,7 +1186,8 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr) if (data->power_containment_features & POWERCONTAINMENT_FEATURE_DTE) { smc_result = smum_send_msg_to_smc(hwmgr, - (uint16_t)(PPSMC_MSG_DisableDTE)); + (uint16_t)(PPSMC_MSG_DisableDTE), + NULL); PP_ASSERT_WITH_CODE((smc_result == 0), "Failed to disable DTE in SMC.", result = smc_result); @@ -1181,7 +1196,8 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr) if (data->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) { smc_result = smum_send_msg_to_smc(hwmgr, - (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable)); + (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable), + NULL); PP_ASSERT_WITH_CODE((smc_result == 0), "Failed to disable PkgPwrTracking in SMC.", result = smc_result); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c index 5bdc0df5a9f4..0b30f73649a8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c @@ -151,8 +151,8 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) int result; if (PP_CAP(PHM_PlatformCaps_ODFuzzyFanControlSupport)) { - cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY); - result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl); + result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_StartFanControl, + FAN_CONTROL_FUZZY, NULL); if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM)) hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr, @@ -164,8 +164,8 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) advanceFanControlParameters.usMaxFanPWM); } else { - cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE); - result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl); + result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_StartFanControl, + FAN_CONTROL_TABLE, NULL); } if (!result && hwmgr->thermal_controller. @@ -173,7 +173,8 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanTemperatureTarget, hwmgr->thermal_controller. 
- advanceFanControlParameters.ucTargetTemperature); + advanceFanControlParameters.ucTargetTemperature, + NULL); hwmgr->fan_ctrl_enabled = true; return result; @@ -183,7 +184,7 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) { hwmgr->fan_ctrl_enabled = false; - return smum_send_msg_to_smc(hwmgr, PPSMC_StopFanControl); + return smum_send_msg_to_smc(hwmgr, PPSMC_StopFanControl, NULL); } /** @@ -372,7 +373,7 @@ static void smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr) CG_THERMAL_INT, THERM_INT_MASK, alert); /* send message to SMU to enable internal thermal interrupts */ - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Enable); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Enable, NULL); } /** @@ -390,7 +391,7 @@ int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr) CG_THERMAL_INT, THERM_INT_MASK, alert); /* send message to SMU to disable internal thermal interrupts */ - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Disable); + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Disable, NULL); } /** diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c index 019d6a206492..a6c6a793e98e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c @@ -162,8 +162,10 @@ static uint32_t smu8_get_max_sclk_level(struct pp_hwmgr *hwmgr) struct smu8_hwmgr *data = hwmgr->backend; if (data->max_sclk_level == 0) { - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel); - data->max_sclk_level = smum_get_argument(hwmgr) + 1; + smum_send_msg_to_smc(hwmgr, + PPSMC_MSG_GetMaxSclkLevel, + &data->max_sclk_level); + data->max_sclk_level += 1; } return data->max_sclk_level; @@ -580,7 +582,8 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr) struct smu8_hwmgr *data = hwmgr->backend; struct phm_uvd_clock_voltage_dependency_table *table = hwmgr->dyn_state.uvd_clock_voltage_dependency_table; - unsigned long clock = 0, level; + unsigned long clock = 0; + uint32_t level; if (NULL == table || table->count <= 0) return -EINVAL; @@ -588,8 +591,7 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr) data->uvd_dpm.soft_min_clk = 0; data->uvd_dpm.hard_min_clk = 0; - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel); - level = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level); if (level < table->count) clock = table->entries[level].vclk; @@ -607,7 +609,8 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr) struct smu8_hwmgr *data = hwmgr->backend; struct phm_vce_clock_voltage_dependency_table *table = hwmgr->dyn_state.vce_clock_voltage_dependency_table; - unsigned long clock = 0, level; + unsigned long clock = 0; + uint32_t level; if (NULL == table || table->count <= 0) return -EINVAL; @@ -615,8 +618,7 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr) data->vce_dpm.soft_min_clk = 0; data->vce_dpm.hard_min_clk = 0; - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel); - level = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level); if (level < table->count) clock = table->entries[level].ecclk; @@ -634,7 +636,8 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr) struct smu8_hwmgr *data = hwmgr->backend; struct phm_acp_clock_voltage_dependency_table *table = hwmgr->dyn_state.acp_clock_voltage_dependency_table; - unsigned long clock = 0, level; + unsigned long clock = 0; + uint32_t level; 
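The smu8 hunks here also split "unsigned long clock = 0, level;" so that level becomes a uint32_t. The narrowing is what allows &level to be handed straight to the new response argument: judging by the call sites converted in this series, the reply comes back through a 32-bit store, and receiving it into an unsigned long on an LP64 kernel would update only half of the variable. Below is a toy, self-contained illustration of that pitfall; the smc_reply() stub and the standalone program are assumptions for demonstration (little-endian LP64 assumed), and only the direction of the type change comes from the diff.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the 32-bit response readback; it writes exactly 4 bytes.
 * This stub is illustrative -- the real readback lives in the smumgr
 * code, which is not part of this hunk. */
static void smc_reply(uint32_t *resp)
{
	*resp = 7;
}

int main(void)
{
	unsigned long level_wide = 0xffffffffffffffffUL; /* old declaration width */
	uint32_t level = 0;                              /* width used by the diff */

	/* Forcing the old unsigned long through a uint32_t out-parameter
	 * updates only the low 4 bytes on little-endian LP64 and leaves
	 * the upper half stale: */
	smc_reply((uint32_t *)&level_wide);
	printf("unsigned long: %#lx (upper bits stale)\n", level_wide);

	/* The diff's shape: receive the reply into a genuine 32-bit variable. */
	smc_reply(&level);
	printf("uint32_t     : %u\n", level);
	return 0;
}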
if (NULL == table || table->count <= 0) return -EINVAL; @@ -642,8 +645,7 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr) data->acp_dpm.soft_min_clk = 0; data->acp_dpm.hard_min_clk = 0; - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel); - level = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level); if (level < table->count) clock = table->entries[level].acpclk; @@ -665,7 +667,7 @@ static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr) #ifdef CONFIG_DRM_AMD_ACP data->acp_power_gated = false; #else - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL); data->acp_power_gated = true; #endif @@ -708,7 +710,8 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr) PPSMC_MSG_SetSclkHardMin, smu8_get_sclk_level(hwmgr, data->sclk_dpm.hard_min_clk, - PPSMC_MSG_SetSclkHardMin)); + PPSMC_MSG_SetSclkHardMin), + NULL); } clock = data->sclk_dpm.soft_min_clk; @@ -731,7 +734,8 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr) PPSMC_MSG_SetSclkSoftMin, smu8_get_sclk_level(hwmgr, data->sclk_dpm.soft_min_clk, - PPSMC_MSG_SetSclkSoftMin)); + PPSMC_MSG_SetSclkSoftMin), + NULL); } if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, @@ -742,7 +746,8 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr) PPSMC_MSG_SetSclkSoftMax, smu8_get_sclk_level(hwmgr, data->sclk_dpm.soft_max_clk, - PPSMC_MSG_SetSclkSoftMax)); + PPSMC_MSG_SetSclkSoftMax), + NULL); } return 0; @@ -760,7 +765,8 @@ static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepSclk, - clks); + clks, + NULL); } return 0; @@ -773,7 +779,8 @@ static int smu8_set_watermark_threshold(struct pp_hwmgr *hwmgr) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWatermarkFrequency, - data->sclk_dpm.soft_max_clk); + data->sclk_dpm.soft_max_clk, + NULL); return 0; } @@ -788,13 +795,15 @@ static int smu8_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableLowMemoryPstate, - (lock ? 1 : 0)); + (lock ? 1 : 0), + NULL); } else { PP_DBG_LOG("disable Low Memory PState.\n"); return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableLowMemoryPstate, - (lock ? 1 : 0)); + (lock ? 
1 : 0), + NULL); } } @@ -814,7 +823,8 @@ static int smu8_disable_nb_dpm(struct pp_hwmgr *hwmgr) ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_DisableAllSmuFeatures, - dpm_features); + dpm_features, + NULL); if (ret == 0) data->is_nb_dpm_enabled = false; } @@ -835,7 +845,8 @@ static int smu8_enable_nb_dpm(struct pp_hwmgr *hwmgr) ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_EnableAllSmuFeatures, - dpm_features); + dpm_features, + NULL); if (ret == 0) data->is_nb_dpm_enabled = true; } @@ -953,7 +964,8 @@ static int smu8_start_dpm(struct pp_hwmgr *hwmgr) return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableAllSmuFeatures, - SCLK_DPM_MASK); + SCLK_DPM_MASK, + NULL); } static int smu8_stop_dpm(struct pp_hwmgr *hwmgr) @@ -967,7 +979,8 @@ static int smu8_stop_dpm(struct pp_hwmgr *hwmgr) data->dpm_flags &= ~DPMFlags_SCLK_Enabled; ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableAllSmuFeatures, - dpm_features); + dpm_features, + NULL); } return ret; } @@ -983,13 +996,15 @@ static int smu8_program_bootup_state(struct pp_hwmgr *hwmgr) PPSMC_MSG_SetSclkSoftMin, smu8_get_sclk_level(hwmgr, data->sclk_dpm.soft_min_clk, - PPSMC_MSG_SetSclkSoftMin)); + PPSMC_MSG_SetSclkSoftMin), + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMax, smu8_get_sclk_level(hwmgr, data->sclk_dpm.soft_max_clk, - PPSMC_MSG_SetSclkSoftMax)); + PPSMC_MSG_SetSclkSoftMax), + NULL); return 0; } @@ -1127,13 +1142,15 @@ static int smu8_phm_force_dpm_highest(struct pp_hwmgr *hwmgr) PPSMC_MSG_SetSclkSoftMin, smu8_get_sclk_level(hwmgr, data->sclk_dpm.soft_max_clk, - PPSMC_MSG_SetSclkSoftMin)); + PPSMC_MSG_SetSclkSoftMin), + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMax, smu8_get_sclk_level(hwmgr, data->sclk_dpm.soft_max_clk, - PPSMC_MSG_SetSclkSoftMax)); + PPSMC_MSG_SetSclkSoftMax), + NULL); return 0; } @@ -1167,13 +1184,15 @@ static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr) PPSMC_MSG_SetSclkSoftMin, smu8_get_sclk_level(hwmgr, data->sclk_dpm.soft_min_clk, - PPSMC_MSG_SetSclkSoftMin)); + PPSMC_MSG_SetSclkSoftMin), + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMax, smu8_get_sclk_level(hwmgr, data->sclk_dpm.soft_max_clk, - PPSMC_MSG_SetSclkSoftMax)); + PPSMC_MSG_SetSclkSoftMax), + NULL); return 0; } @@ -1186,13 +1205,15 @@ static int smu8_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr) PPSMC_MSG_SetSclkSoftMax, smu8_get_sclk_level(hwmgr, data->sclk_dpm.soft_min_clk, - PPSMC_MSG_SetSclkSoftMax)); + PPSMC_MSG_SetSclkSoftMax), + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMin, smu8_get_sclk_level(hwmgr, data->sclk_dpm.soft_min_clk, - PPSMC_MSG_SetSclkSoftMin)); + PPSMC_MSG_SetSclkSoftMin), + NULL); return 0; } @@ -1227,7 +1248,7 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr) { if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF); + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF, NULL); return 0; } @@ -1237,7 +1258,8 @@ static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr) return smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_UVDPowerON, - PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0); + PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 
1 : 0, + NULL); } return 0; @@ -1259,15 +1281,20 @@ static int smu8_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr) PPSMC_MSG_SetEclkHardMin, smu8_get_eclk_level(hwmgr, data->vce_dpm.hard_min_clk, - PPSMC_MSG_SetEclkHardMin)); + PPSMC_MSG_SetEclkHardMin), + NULL); } else { smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetEclkHardMin, 0); + PPSMC_MSG_SetEclkHardMin, + 0, + NULL); /* disable ECLK DPM 0. Otherwise VCE could hang if * switching SCLK from DPM 0 to 6/7 */ smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetEclkSoftMin, 1); + PPSMC_MSG_SetEclkSoftMin, + 1, + NULL); } return 0; } @@ -1276,7 +1303,8 @@ static int smu8_dpm_powerdown_vce(struct pp_hwmgr *hwmgr) { if (PP_CAP(PHM_PlatformCaps_VCEPowerGating)) return smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_VCEPowerOFF); + PPSMC_MSG_VCEPowerOFF, + NULL); return 0; } @@ -1284,7 +1312,8 @@ static int smu8_dpm_powerup_vce(struct pp_hwmgr *hwmgr) { if (PP_CAP(PHM_PlatformCaps_VCEPowerGating)) return smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_VCEPowerON); + PPSMC_MSG_VCEPowerON, + NULL); return 0; } @@ -1435,7 +1464,8 @@ static void smu8_hw_print_display_cfg( smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDisplaySizePowerParams, - data); + data, + NULL); } return 0; @@ -1497,10 +1527,12 @@ static int smu8_force_clock_level(struct pp_hwmgr *hwmgr, case PP_SCLK: smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMin, - mask); + mask, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMax, - mask); + mask, + NULL); break; default: break; @@ -1753,9 +1785,10 @@ static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx, *((uint32_t *)value) = 0; return 0; case AMDGPU_PP_SENSOR_GPU_LOAD: - result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGraphicsActivity); + result = smum_send_msg_to_smc(hwmgr, + PPSMC_MSG_GetAverageGraphicsActivity, + &activity_percent); if (0 == result) { - activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0); activity_percent = activity_percent > 100 ? 
100 : activity_percent; } else { activity_percent = 50; @@ -1785,20 +1818,25 @@ static int smu8_notify_cac_buffer_info(struct pp_hwmgr *hwmgr, { smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DramAddrHiVirtual, - mc_addr_hi); + mc_addr_hi, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DramAddrLoVirtual, - mc_addr_low); + mc_addr_low, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DramAddrHiPhysical, - virtual_addr_hi); + virtual_addr_hi, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DramAddrLoPhysical, - virtual_addr_low); + virtual_addr_low, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DramBufferSize, - size); + size, + NULL); return 0; } @@ -1827,12 +1865,16 @@ static int smu8_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) data->dpm_flags |= DPMFlags_UVD_Enabled; dpm_features |= UVD_DPM_MASK; smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_EnableAllSmuFeatures, dpm_features); + PPSMC_MSG_EnableAllSmuFeatures, + dpm_features, + NULL); } else { dpm_features |= UVD_DPM_MASK; data->dpm_flags &= ~DPMFlags_UVD_Enabled; smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_DisableAllSmuFeatures, dpm_features); + PPSMC_MSG_DisableAllSmuFeatures, + dpm_features, + NULL); } return 0; } @@ -1854,7 +1896,8 @@ int smu8_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) PPSMC_MSG_SetUvdHardMin, smu8_get_uvd_level(hwmgr, data->uvd_dpm.hard_min_clk, - PPSMC_MSG_SetUvdHardMin)); + PPSMC_MSG_SetUvdHardMin), + NULL); smu8_enable_disable_uvd_dpm(hwmgr, true); } else { @@ -1878,12 +1921,16 @@ static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) data->dpm_flags |= DPMFlags_VCE_Enabled; dpm_features |= VCE_DPM_MASK; smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_EnableAllSmuFeatures, dpm_features); + PPSMC_MSG_EnableAllSmuFeatures, + dpm_features, + NULL); } else { dpm_features |= VCE_DPM_MASK; data->dpm_flags &= ~DPMFlags_VCE_Enabled; smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_DisableAllSmuFeatures, dpm_features); + PPSMC_MSG_DisableAllSmuFeatures, + dpm_features, + NULL); } return 0; @@ -1898,9 +1945,9 @@ static void smu8_dpm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate) return; if (bgate) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL); else - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON, NULL); } static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c index d09690fca452..91b5d96db674 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c @@ -557,7 +557,9 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr) if (req_vddc <= vddc_table->entries[i].vddc) { req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE); smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_VddC_Request, req_volt); + PPSMC_MSG_VddC_Request, + req_volt, + NULL); return; } } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c index d168af4a4d78..46bb16c29cf6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c @@ -98,7 +98,7 @@ int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) if (state == BACO_STATE_IN) { if 
(soc15_baco_program_registers(hwmgr, pre_baco_tbl, ARRAY_SIZE(pre_baco_tbl))) { - if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnterBaco)) + if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnterBaco, NULL)) return -EINVAL; if (soc15_baco_program_registers(hwmgr, enter_baco_tbl, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index f29f95be1e56..675c7cab7cfc 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -484,8 +484,9 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) if (data->registry_data.vr0hot_enabled) data->smu_features[GNLD_VR0HOT].supported = true; - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion); - hwmgr->smu_version = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, + PPSMC_MSG_GetSmuVersion, + &hwmgr->smu_version); /* ACG firmware has major version 5 */ if ((hwmgr->smu_version & 0xff000000) == 0x5000000) data->smu_features[GNLD_ACG].supported = true; @@ -503,10 +504,8 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) data->smu_features[GNLD_PCC_LIMIT].supported = true; /* Get the SN to turn into a Unique ID */ - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32); - top32 = smum_get_argument(hwmgr); - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32); - bottom32 = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); adev->unique_id = ((uint64_t)bottom32 << 32) | top32; } @@ -993,7 +992,10 @@ static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr) "Failed to set up led dpm config!", return -EINVAL); - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, 0); + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_NumOfDisplays, + 0, + NULL); return 0; } @@ -2303,16 +2305,15 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr) data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap)) data->smu_features[GNLD_DPM_PREFETCHER].enabled = true; - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL); - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc); - agc_btc_response = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response); if (1 == agc_btc_response) { if (1 == data->acg_loop_state) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop, NULL); else if (2 == data->acg_loop_state) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop, NULL); if (0 == vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_ACG].smu_feature_bitmap)) data->smu_features[GNLD_ACG].enabled = true; @@ -2429,11 +2430,9 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr) struct vega10_hwmgr *data = hwmgr->backend; AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table); - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32); - top32 = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32); - bottom32 = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); serial_number = ((uint64_t)bottom32 << 32) | top32; @@ -2610,14 +2609,16 @@ static 
int vega10_init_smc_table(struct pp_hwmgr *hwmgr) if (0 != boot_up_values.usVddc) { smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFloorSocVoltage, - (boot_up_values.usVddc * 4)); + (boot_up_values.usVddc * 4), + NULL); data->vbios_boot_state.bsoc_vddc_lock = true; } else { data->vbios_boot_state.bsoc_vddc_lock = false; } smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, - (uint32_t)(data->vbios_boot_state.dcef_clock / 100)); + (uint32_t)(data->vbios_boot_state.dcef_clock / 100), + NULL); } result = vega10_populate_avfs_parameters(hwmgr); @@ -2904,7 +2905,8 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap) if (data->vbios_boot_state.bsoc_vddc_lock) { smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetFloorSocVoltage, 0); + PPSMC_MSG_SetFloorSocVoltage, 0, + NULL); data->vbios_boot_state.bsoc_vddc_lock = false; } @@ -2947,7 +2949,8 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) vega10_enable_disable_PCC_limit_feature(hwmgr, true); smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_ConfigureTelemetry, data->config_telemetry); + PPSMC_MSG_ConfigureTelemetry, data->config_telemetry, + NULL); tmp_result = vega10_construct_voltage_tables(hwmgr); PP_ASSERT_WITH_CODE(!tmp_result, @@ -3528,7 +3531,8 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) data->dpm_table.gfx_table.dpm_state.soft_min_level) { smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinGfxclkByIndex, - data->smc_state_table.gfx_boot_level); + data->smc_state_table.gfx_boot_level, + NULL); data->dpm_table.gfx_table.dpm_state.soft_min_level = data->smc_state_table.gfx_boot_level; @@ -3543,11 +3547,13 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinSocclkByIndex, - socclk_idx); + socclk_idx, + NULL); } else { smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, - data->smc_state_table.mem_boot_level); + data->smc_state_table.mem_boot_level, + NULL); } data->dpm_table.mem_table.dpm_state.soft_min_level = data->smc_state_table.mem_boot_level; @@ -3562,7 +3568,8 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) data->dpm_table.soc_table.dpm_state.soft_min_level) { smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinSocclkByIndex, - data->smc_state_table.soc_boot_level); + data->smc_state_table.soc_boot_level, + NULL); data->dpm_table.soc_table.dpm_state.soft_min_level = data->smc_state_table.soc_boot_level; } @@ -3582,7 +3589,8 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr) data->dpm_table.gfx_table.dpm_state.soft_max_level) { smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxGfxclkByIndex, - data->smc_state_table.gfx_max_level); + data->smc_state_table.gfx_max_level, + NULL); data->dpm_table.gfx_table.dpm_state.soft_max_level = data->smc_state_table.gfx_max_level; } @@ -3593,7 +3601,8 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr) data->dpm_table.mem_table.dpm_state.soft_max_level) { smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxUclkByIndex, - data->smc_state_table.mem_max_level); + data->smc_state_table.mem_max_level, + NULL); data->dpm_table.mem_table.dpm_state.soft_max_level = data->smc_state_table.mem_max_level; } @@ -3607,7 +3616,8 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr) data->dpm_table.soc_table.dpm_state.soft_max_level) { 
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxSocclkByIndex, - data->smc_state_table.soc_max_level); + data->smc_state_table.soc_max_level, + NULL); data->dpm_table.soc_table.dpm_state.soft_max_level = data->smc_state_table.soc_max_level; } @@ -3694,7 +3704,8 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr) /* This message will also enable SmcToHost Interrupt */ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetLowGfxclkInterruptThreshold, - (uint32_t)low_sclk_interrupt_threshold); + (uint32_t)low_sclk_interrupt_threshold, + NULL); } return 0; @@ -3801,8 +3812,7 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr, if (!query) return -EINVAL; - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr); - value = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value); /* SMC returning actual watts, keep consistent with legacy asics, low 8 bit as 8 fractional bits */ *query = value << 8; @@ -3822,13 +3832,11 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, switch (idx) { case AMDGPU_PP_SENSOR_GFX_SCLK: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency); - sclk_mhz = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency, &sclk_mhz); *((uint32_t *)value) = sclk_mhz * 100; break; case AMDGPU_PP_SENSOR_GFX_MCLK: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex); - mclk_idx = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &mclk_idx); if (mclk_idx < dpm_table->mem_table.count) { *((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value; *size = 4; @@ -3837,8 +3845,8 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, } break; case AMDGPU_PP_SENSOR_GPU_LOAD: - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0); - activity_percent = smum_get_argument(hwmgr); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0, + &activity_percent); *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent; *size = 4; break; @@ -3847,14 +3855,14 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, *size = 4; break; case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot); - *((uint32_t *)value) = smum_get_argument(hwmgr) * + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot, (uint32_t *)value); + *((uint32_t *)value) = *((uint32_t *)value) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; *size = 4; break; case AMDGPU_PP_SENSOR_MEM_TEMP: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM); - *((uint32_t *)value) = smum_get_argument(hwmgr) * + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM, (uint32_t *)value); + *((uint32_t *)value) = *((uint32_t *)value) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; *size = 4; break; @@ -3893,7 +3901,8 @@ static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr, { smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetUclkFastSwitch, - has_disp ? 1 : 0); + has_disp ? 
1 : 0, + NULL); } int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr, @@ -3928,7 +3937,8 @@ int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr, clk_request = (clk_freq << 16) | clk_select; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_RequestDisplayClockByFreq, - clk_request); + clk_request, + NULL); } return result; @@ -3990,7 +4000,8 @@ static int vega10_notify_smc_display_config_after_ps_adjustment( if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) { smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, - min_clocks.dcefClockInSR / 100); + min_clocks.dcefClockInSR / 100, + NULL); } else { pr_info("Attempt to set Hard Min for DCEFCLK Failed!"); } @@ -4000,7 +4011,8 @@ static int vega10_notify_smc_display_config_after_ps_adjustment( if (min_clocks.memoryClock != 0) { idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock); - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx, + NULL); data->dpm_table.mem_table.dpm_state.soft_min_level= idx; } @@ -4541,8 +4553,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, if (data->registry_data.sclk_dpm_key_disabled) break; - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex); - now = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now); if (hwmgr->pp_one_vf && (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) @@ -4558,8 +4569,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, if (data->registry_data.mclk_dpm_key_disabled) break; - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex); - now = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now); for (i = 0; i < mclk_table->count; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", @@ -4570,8 +4580,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, if (data->registry_data.socclk_dpm_key_disabled) break; - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex); - now = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now); for (i = 0; i < soc_table->count; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", @@ -4583,8 +4592,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, break; smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK); - now = smum_get_argument(hwmgr); + PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now); for (i = 0; i < dcef_table->count; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", @@ -4593,8 +4601,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, "*" : ""); break; case PP_PCIE: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex); - now = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex, &now); for (i = 0; i < pcie_table->count; i++) size += sprintf(buf + size, "%d: %s %s\n", i, @@ -4658,7 +4665,8 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr) if (data->water_marks_bitmap & WaterMarksLoaded) { smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display); + PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display, + NULL); } return result; @@ -4924,21 +4932,26 @@ static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr, { smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSystemVirtualDramAddrHigh, - 
virtual_addr_hi); + virtual_addr_hi, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSystemVirtualDramAddrLow, - virtual_addr_low); + virtual_addr_low, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DramLogSetDramAddrHigh, - mc_addr_hi); + mc_addr_hi, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DramLogSetDramAddrLow, - mc_addr_low); + mc_addr_low, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DramLogSetDramSize, - size); + size, + NULL); return 0; } @@ -5040,12 +5053,14 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetCustomGfxDpmParameters, busy_set_point | FPS<<8 | - use_rlc_busy << 16 | min_active_level<<24); + use_rlc_busy << 16 | min_active_level<<24, + NULL); } out: smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask, - 1 << power_profile_mode); + 1 << power_profile_mode, + NULL); hwmgr->power_profile_mode = power_profile_mode; return 0; @@ -5302,7 +5317,7 @@ static int vega10_set_mp1_state(struct pp_hwmgr *hwmgr, return 0; } - PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0, "[PrepareMp1] Failed!", return ret); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c index 0a677d4bc87b..9757d47dd6b8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c @@ -651,18 +651,6 @@ static const struct vega10_didt_config_reg PSMSEEDCStallDelayConfig_Vega10[] = { 0xFFFFFFFF } /* End of list */ }; -static const struct vega10_didt_config_reg PSMSEEDCThresholdConfig_Vega10[] = -{ -/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - * Offset Mask Shift Value - * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - */ - /* SQ EDC THRESHOLD */ - { ixDIDT_SQ_EDC_THRESHOLD, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD_MASK, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000 }, - - { 0xFFFFFFFF } /* End of list */ -}; - static const struct vega10_didt_config_reg PSMSEEDCCtrlResetConfig_Vega10[] = { /* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- @@ -707,17 +695,6 @@ static const struct vega10_didt_config_reg PSMSEEDCCtrlConfig_Vega10[] = { 0xFFFFFFFF } /* End of list */ }; -static const struct vega10_didt_config_reg PSMGCEDCThresholdConfig_vega10[] = -{ -/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - * Offset Mask Shift Value - * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - */ - { mmGC_EDC_THRESHOLD, GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK, GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000000 }, - - { 0xFFFFFFFF } /* End of list */ -}; - static const struct vega10_didt_config_reg PSMGCEDCDroopCtrlConfig_vega10[] = { /* 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- @@ -925,7 +902,8 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable) /* For Vega10, SMC does not support any mask yet. */ if (enable) - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info, + NULL); } @@ -1327,7 +1305,8 @@ int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) if (data->registry_data.enable_pkg_pwr_tracking_feature) smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetPptLimit, n); + PPSMC_MSG_SetPptLimit, n, + NULL); return 0; } @@ -1393,7 +1372,8 @@ static void vega10_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr, uint32_t adjust_percent) { smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_OverDriveSetPercentage, adjust_percent); + PPSMC_MSG_OverDriveSetPercentage, adjust_percent, + NULL); } int vega10_power_control_set_level(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index ba8763daa380..7783c7fd7ccb 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -31,8 +31,7 @@ static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm) { - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm); - *current_rpm = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm, current_rpm); return 0; } @@ -520,7 +519,8 @@ int vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanTemperatureTarget, - (uint32_t)table->FanTargetTemperature); + (uint32_t)table->FanTargetTemperature, + NULL); table->FanPwmMin = hwmgr->thermal_controller. 
advanceFanControlParameters.usPWMMin * 255 / 100; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c index 9d8ca94a8f0c..bc53cce4f32d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c @@ -96,7 +96,7 @@ int vega12_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) if (state == BACO_STATE_IN) { if (soc15_baco_program_registers(hwmgr, pre_baco_tbl, ARRAY_SIZE(pre_baco_tbl))) { - if (smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0)) + if (smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0, NULL)) return -EINVAL; if (soc15_baco_program_registers(hwmgr, enter_baco_tbl, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index aca61d1ff3c2..f4d1692cccf3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -357,10 +357,8 @@ static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr) } /* Get the SN to turn into a Unique ID */ - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32); - top32 = smum_get_argument(hwmgr); - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32); - bottom32 = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); adev->unique_id = ((uint64_t)bottom32 << 32) | top32; } @@ -483,16 +481,12 @@ static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr, ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmFreqByIndex, - (clk_id << 16 | 0xFF)); + (clk_id << 16 | 0xFF), + num_of_levels); PP_ASSERT_WITH_CODE(!ret, "[GetNumOfDpmLevel] failed to get dpm levels!", return ret); - *num_of_levels = smum_get_argument(hwmgr); - PP_ASSERT_WITH_CODE(*num_of_levels > 0, - "[GetNumOfDpmLevel] number of clk levels is invalid!", - return -EINVAL); - return ret; } @@ -504,12 +498,11 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr, *Lower 16 bits specify the level */ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | index)) == 0, + PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | index), + clock) == 0, "[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!", return -EINVAL); - *clock = smum_get_argument(hwmgr); - return 0; } @@ -749,7 +742,8 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr) data->vbios_boot_state.vclock = boot_up_values.ulVClk; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, - (uint32_t)(data->vbios_boot_state.dcef_clock / 100)); + (uint32_t)(data->vbios_boot_state.dcef_clock / 100), + NULL); } memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t)); @@ -767,11 +761,10 @@ static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr) uint32_t result; PP_ASSERT_WITH_CODE( - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0, + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &result) == 0, "[Run_ACG_BTC] Attempt to run ACG BTC failed!", return -EINVAL); - result = smum_get_argument(hwmgr); PP_ASSERT_WITH_CODE(result == 1, "Failed to run ACG BTC!", return -EINVAL); @@ -792,12 +785,14 @@ static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) (allowed_features_low |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT) & 0xFFFFFFFF)); PP_ASSERT_WITH_CODE( - 
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high) == 0, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high, + NULL) == 0, "[SetAllowedFeaturesMask] Attempt to set allowed features mask (high) failed!", return -1); PP_ASSERT_WITH_CODE( - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low) == 0, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low, + NULL) == 0, "[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!", return -1); @@ -828,7 +823,7 @@ static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr) bool enabled; PP_ASSERT_WITH_CODE( - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures) == 0, + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures, NULL) == 0, "[EnableAllSMUFeatures] Failed to enable all smu features!", return -1); @@ -854,7 +849,7 @@ static int vega12_disable_all_smu_features(struct pp_hwmgr *hwmgr) bool enabled; PP_ASSERT_WITH_CODE( - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures) == 0, + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures, NULL) == 0, "[DisableAllSMUFeatures] Failed to disable all smu features!", return -1); @@ -879,7 +874,8 @@ static int vega12_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr, uint32_t adjust_percent) { return smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_OverDriveSetPercentage, adjust_percent); + PPSMC_MSG_OverDriveSetPercentage, adjust_percent, + NULL); } static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr) @@ -902,24 +898,24 @@ static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr, { /* AC Max */ PP_ASSERT_WITH_CODE( - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16), + &(clock->ACMax)) == 0, "[GetClockRanges] Failed to get max ac clock from SMC!", return -EINVAL); - clock->ACMax = smum_get_argument(hwmgr); /* AC Min */ PP_ASSERT_WITH_CODE( - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16), + &(clock->ACMin)) == 0, "[GetClockRanges] Failed to get min ac clock from SMC!", return -EINVAL); - clock->ACMin = smum_get_argument(hwmgr); /* DC Max */ PP_ASSERT_WITH_CODE( - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16), + &(clock->DCMax)) == 0, "[GetClockRanges] Failed to get max dc clock from SMC!", return -EINVAL); - clock->DCMax = smum_get_argument(hwmgr); return 0; } @@ -944,7 +940,7 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr) int tmp_result, result = 0; smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_NumOfDisplays, 0); + PPSMC_MSG_NumOfDisplays, 0, NULL); result = vega12_set_allowed_featuresmask(hwmgr); PP_ASSERT_WITH_CODE(result == 0, @@ -1043,7 +1039,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr) min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMinByFreq, - (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))), + (PPCLK_GFXCLK << 16) | (min_freq & 0xffff), + NULL)), "Failed to set soft min gfxclk !", 
return ret); } @@ -1052,14 +1049,16 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr) min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMinByFreq, - (PPCLK_UCLK << 16) | (min_freq & 0xffff))), + (PPCLK_UCLK << 16) | (min_freq & 0xffff), + NULL)), "Failed to set soft min memclk !", return ret); min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetHardMinByFreq, - (PPCLK_UCLK << 16) | (min_freq & 0xffff))), + (PPCLK_UCLK << 16) | (min_freq & 0xffff), + NULL)), "Failed to set hard min memclk !", return ret); } @@ -1069,7 +1068,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMinByFreq, - (PPCLK_VCLK << 16) | (min_freq & 0xffff))), + (PPCLK_VCLK << 16) | (min_freq & 0xffff), + NULL)), "Failed to set soft min vclk!", return ret); @@ -1077,7 +1077,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMinByFreq, - (PPCLK_DCLK << 16) | (min_freq & 0xffff))), + (PPCLK_DCLK << 16) | (min_freq & 0xffff), + NULL)), "Failed to set soft min dclk!", return ret); } @@ -1087,7 +1088,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMinByFreq, - (PPCLK_ECLK << 16) | (min_freq & 0xffff))), + (PPCLK_ECLK << 16) | (min_freq & 0xffff), + NULL)), "Failed to set soft min eclk!", return ret); } @@ -1097,7 +1099,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMinByFreq, - (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))), + (PPCLK_SOCCLK << 16) | (min_freq & 0xffff), + NULL)), "Failed to set soft min socclk!", return ret); } @@ -1107,7 +1110,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetHardMinByFreq, - (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff))), + (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff), + NULL)), "Failed to set hard min dcefclk!", return ret); } @@ -1127,7 +1131,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMaxByFreq, - (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))), + (PPCLK_GFXCLK << 16) | (max_freq & 0xffff), + NULL)), "Failed to set soft max gfxclk!", return ret); } @@ -1137,7 +1142,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMaxByFreq, - (PPCLK_UCLK << 16) | (max_freq & 0xffff))), + (PPCLK_UCLK << 16) | (max_freq & 0xffff), + NULL)), "Failed to set soft max memclk!", return ret); } @@ -1147,14 +1153,16 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMaxByFreq, - (PPCLK_VCLK << 16) | (max_freq & 0xffff))), + (PPCLK_VCLK << 16) | (max_freq & 0xffff), + NULL)), "Failed to set soft max vclk!", return ret); max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, 
PPSMC_MSG_SetSoftMaxByFreq, - (PPCLK_DCLK << 16) | (max_freq & 0xffff))), + (PPCLK_DCLK << 16) | (max_freq & 0xffff), + NULL)), "Failed to set soft max dclk!", return ret); } @@ -1164,7 +1172,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMaxByFreq, - (PPCLK_ECLK << 16) | (max_freq & 0xffff))), + (PPCLK_ECLK << 16) | (max_freq & 0xffff), + NULL)), "Failed to set soft max eclk!", return ret); } @@ -1174,7 +1183,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMaxByFreq, - (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))), + (PPCLK_SOCCLK << 16) | (max_freq & 0xffff), + NULL)), "Failed to set soft max socclk!", return ret); } @@ -1287,10 +1297,10 @@ static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx *gfx_freq = 0; PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0, + PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16), + &gfx_clk) == 0, "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!", return -EINVAL); - gfx_clk = smum_get_argument(hwmgr); *gfx_freq = gfx_clk * 100; @@ -1304,10 +1314,10 @@ static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_f *mclk_freq = 0; PP_ASSERT_WITH_CODE( - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16), + &mem_clk) == 0, "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!", return -EINVAL); - mem_clk = smum_get_argument(hwmgr); *mclk_freq = mem_clk * 100; @@ -1420,7 +1430,8 @@ static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr, if (data->smu_features[GNLD_DPM_UCLK].enabled) return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetUclkFastSwitch, - has_disp ? 1 : 0); + has_disp ? 
1 : 0, + NULL); return 0; } @@ -1459,7 +1470,8 @@ int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr, clk_request = (clk_select << 16) | clk_freq; result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, - clk_request); + clk_request, + NULL); } } @@ -1493,7 +1505,8 @@ static int vega12_notify_smc_display_config_after_ps_adjustment( PP_ASSERT_WITH_CODE( !smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, - min_clocks.dcefClockInSR /100), + min_clocks.dcefClockInSR /100, + NULL), "Attempt to set divider for DCEFCLK Failed!", return -1); } else { @@ -2124,10 +2137,10 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, case PP_SOCCLK: PP_ASSERT_WITH_CODE( smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_GetDpmClockFreq, (PPCLK_SOCCLK << 16)) == 0, + PPSMC_MSG_GetDpmClockFreq, (PPCLK_SOCCLK << 16), + &now) == 0, "Attempt to get Current SOCCLK Frequency Failed!", return -EINVAL); - now = smum_get_argument(hwmgr); PP_ASSERT_WITH_CODE( vega12_get_socclocks(hwmgr, &clocks) == 0, @@ -2142,10 +2155,10 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, case PP_DCEFCLK: PP_ASSERT_WITH_CODE( smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_GetDpmClockFreq, (PPCLK_DCEFCLK << 16)) == 0, + PPSMC_MSG_GetDpmClockFreq, (PPCLK_DCEFCLK << 16), + &now) == 0, "Attempt to get Current DCEFCLK Frequency Failed!", return -EINVAL); - now = smum_get_argument(hwmgr); PP_ASSERT_WITH_CODE( vega12_get_dcefclocks(hwmgr, &clocks) == 0, @@ -2343,7 +2356,8 @@ static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr, dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, - (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)), + (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level, + NULL)), "[SetUclkToHightestDpmLevel] Set hard min uclk failed!", return ret); } @@ -2357,7 +2371,8 @@ static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr) int ret = 0; smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_NumOfDisplays, 0); + PPSMC_MSG_NumOfDisplays, 0, + NULL); ret = vega12_set_uclk_to_highest_dpm_level(hwmgr, &data->dpm_table.mem_table); @@ -2383,7 +2398,8 @@ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr) data->smu_features[GNLD_DPM_DCEFCLK].supported && data->smu_features[GNLD_DPM_SOCCLK].supported) smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display); + PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display, + NULL); return result; } @@ -2555,21 +2571,26 @@ static int vega12_notify_cac_buffer_info(struct pp_hwmgr *hwmgr, { smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSystemVirtualDramAddrHigh, - virtual_addr_hi); + virtual_addr_hi, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSystemVirtualDramAddrLow, - virtual_addr_low); + virtual_addr_low, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DramLogSetDramAddrHigh, - mc_addr_hi); + mc_addr_hi, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DramLogSetDramAddrLow, - mc_addr_low); + mc_addr_low, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DramLogSetDramSize, - size); + size, + NULL); return 0; } @@ -2605,7 +2626,7 @@ static int vega12_enable_gfx_off(struct pp_hwmgr *hwmgr) int ret = 0; if (data->gfxoff_controlled_by_driver) - ret = 
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff, NULL); return ret; } @@ -2617,7 +2638,7 @@ static int vega12_disable_gfx_off(struct pp_hwmgr *hwmgr) int ret = 0; if (data->gfxoff_controlled_by_driver) - ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff, NULL); return ret; } @@ -2654,7 +2675,7 @@ static int vega12_set_mp1_state(struct pp_hwmgr *hwmgr, return 0; } - PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0, "[PrepareMp1] Failed!", return ret); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c index 904eb2c9155b..c85806a6f62e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c @@ -32,10 +32,10 @@ static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm) { PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_GetCurrentRpm), + PPSMC_MSG_GetCurrentRpm, + current_rpm), "Attempt to get current RPM from SMC Failed!", return -EINVAL); - *current_rpm = smum_get_argument(hwmgr); return 0; } @@ -259,7 +259,8 @@ int vega12_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanTemperatureTarget, - (uint32_t)table->FanTargetTemperature); + (uint32_t)table->FanTargetTemperature, + NULL); return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c index 9b5e72bdceca..2a28c9df15a0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c @@ -91,16 +91,16 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data); if(smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_EnterBaco, 0)) + PPSMC_MSG_EnterBaco, 0, NULL)) return -EINVAL; } else { if(smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_EnterBaco, 1)) + PPSMC_MSG_EnterBaco, 1, NULL)) return -EINVAL; } } else if (state == BACO_STATE_OUT) { - if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco)) + if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco, NULL)) return -EINVAL; if (!soc15_baco_program_registers(hwmgr, clean_baco_tbl, ARRAY_SIZE(clean_baco_tbl))) @@ -118,5 +118,5 @@ int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr) if (ret) return ret; - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_BacoWorkAroundFlushVDCI); + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_BacoWorkAroundFlushVDCI, NULL); } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 08b6ba39a6d7..9ff470f1b826 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -92,8 +92,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) */ data->registry_data.disallowed_features = 0xE0041C00; /* ECC feature should be disabled on old SMUs */ - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion); - hwmgr->smu_version = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version); if (hwmgr->smu_version < 0x282100) data->registry_data.disallowed_features |= FEATURE_ECC_MASK; @@ -400,10 +399,8 @@ static void 
vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr) } /* Get the SN to turn into a Unique ID */ - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32); - top32 = smum_get_argument(hwmgr); - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32); - bottom32 = smum_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); adev->unique_id = ((uint64_t)bottom32 << 32) | top32; } @@ -527,16 +524,12 @@ static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr, ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmFreqByIndex, - (clk_id << 16 | 0xFF)); + (clk_id << 16 | 0xFF), + num_of_levels); PP_ASSERT_WITH_CODE(!ret, "[GetNumOfDpmLevel] failed to get dpm levels!", return ret); - *num_of_levels = smum_get_argument(hwmgr); - PP_ASSERT_WITH_CODE(*num_of_levels > 0, - "[GetNumOfDpmLevel] number of clk levels is invalid!", - return -EINVAL); - return ret; } @@ -547,16 +540,12 @@ static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr, ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmFreqByIndex, - (clk_id << 16 | index)); + (clk_id << 16 | index), + clk); PP_ASSERT_WITH_CODE(!ret, "[GetDpmFreqByIndex] failed to get dpm freq by index!", return ret); - *clk = smum_get_argument(hwmgr); - PP_ASSERT_WITH_CODE(*clk, - "[GetDpmFreqByIndex] clk value is invalid!", - return -EINVAL); - return ret; } @@ -813,7 +802,8 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, - (uint32_t)(data->vbios_boot_state.dcef_clock / 100)); + (uint32_t)(data->vbios_boot_state.dcef_clock / 100), + NULL); memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t)); @@ -868,7 +858,8 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr) */ smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width; ret = smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_OverridePcieParameters, smu_pcie_arg); + PPSMC_MSG_OverridePcieParameters, smu_pcie_arg, + NULL); PP_ASSERT_WITH_CODE(!ret, "[OverridePcieParameters] Attempt to override pcie params failed!", return ret); @@ -899,13 +890,13 @@ static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) & 0xFFFFFFFF)); ret = smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high); + PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high, NULL); PP_ASSERT_WITH_CODE(!ret, "[SetAllowedFeaturesMask] Attempt to set allowed features mask(high) failed!", return ret); ret = smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low); + PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low, NULL); PP_ASSERT_WITH_CODE(!ret, "[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!", return ret); @@ -915,12 +906,12 @@ static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) static int vega20_run_btc(struct pp_hwmgr *hwmgr) { - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc); + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc, NULL); } static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr) { - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc); + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc, NULL); } static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr) @@ -933,7 +924,8 @@ static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr) int ret = 0; 
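/*
 * Illustrative aside (not part of the patch): with the response pointer,
 * a query can deposit its result directly in the caller's storage, and
 * the hunks above drop the follow-up sanity asserts (*num_of_levels > 0,
 * *clk != 0) instead of re-reading the argument register:
 *
 *     ret = smum_send_msg_to_smc_with_parameter(hwmgr,
 *                     PPSMC_MSG_GetDpmFreqByIndex,
 *                     (clk_id << 16 | 0xFF), // 0xFF: query the level count
 *                     num_of_levels);        // response lands here
 *
 * The "index 0xFF means level count" reading is inferred from how
 * vega20_get_number_of_dpm_level() uses the message, not from any
 * documentation in this patch.
 */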
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_EnableAllSmuFeatures)) == 0, + PPSMC_MSG_EnableAllSmuFeatures, + NULL)) == 0, "[EnableAllSMUFeatures] Failed to enable all smu features!", return ret); @@ -966,7 +958,8 @@ static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr) if (data->smu_features[GNLD_DPM_UCLK].enabled) return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetUclkFastSwitch, - 1); + 1, + NULL); return 0; } @@ -978,7 +971,8 @@ static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr) return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFclkGfxClkRatio, - data->registry_data.fclk_gfxclk_ratio); + data->registry_data.fclk_gfxclk_ratio, + NULL); } static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr) @@ -991,7 +985,8 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr) int ret = 0; PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_DisableAllSmuFeatures)) == 0, + PPSMC_MSG_DisableAllSmuFeatures, + NULL)) == 0, "[DisableAllSMUFeatures] Failed to disable all smu features!", return ret); @@ -1199,12 +1194,12 @@ static int vega20_od8_get_gfx_clock_base_voltage( ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAVFSVoltageByDpm, - ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq)); + ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq), + voltage); PP_ASSERT_WITH_CODE(!ret, "[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!", return ret); - *voltage = smum_get_argument(hwmgr); *voltage = *voltage / VOLTAGE_SCALE; return 0; @@ -1560,19 +1555,19 @@ static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, - (clock_select << 16))) == 0, + (clock_select << 16), + clock)) == 0, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!", return ret); - *clock = smum_get_argument(hwmgr); /* if DC limit is zero, return AC limit */ if (*clock == 0) { PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, - (clock_select << 16))) == 0, + (clock_select << 16), + clock)) == 0, "[GetMaxSustainableClock] failed to get max AC clock from SMC!", return ret); - *clock = smum_get_argument(hwmgr); } return 0; @@ -1641,7 +1636,8 @@ static int vega20_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr) int result; result = smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_SetMGpuFanBoostLimitRpm); + PPSMC_MSG_SetMGpuFanBoostLimitRpm, + NULL); PP_ASSERT_WITH_CODE(!result, "[EnableMgpuFan] Failed to enable mgpu fan boost!", return result); @@ -1669,7 +1665,7 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) int result = 0; smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_NumOfDisplays, 0); + PPSMC_MSG_NumOfDisplays, 0, NULL); result = vega20_set_allowed_featuresmask(hwmgr); PP_ASSERT_WITH_CODE(!result, @@ -1740,12 +1736,12 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) return result); result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit, - POWER_SOURCE_AC << 16); + POWER_SOURCE_AC << 16, &hwmgr->default_power_limit); PP_ASSERT_WITH_CODE(!result, "[GetPptLimit] get default PPT limit failed!", return result); hwmgr->power_limit = - hwmgr->default_power_limit = smum_get_argument(hwmgr); + hwmgr->default_power_limit; return 0; } @@ -1806,7 +1802,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_ min_freq = 
data->dpm_table.gfx_table.dpm_state.soft_min_level; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMinByFreq, - (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))), + (PPCLK_GFXCLK << 16) | (min_freq & 0xffff), + NULL)), "Failed to set soft min gfxclk !", return ret); } @@ -1816,7 +1813,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_ min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMinByFreq, - (PPCLK_UCLK << 16) | (min_freq & 0xffff))), + (PPCLK_UCLK << 16) | (min_freq & 0xffff), + NULL)), "Failed to set soft min memclk !", return ret); } @@ -1827,7 +1825,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMinByFreq, - (PPCLK_VCLK << 16) | (min_freq & 0xffff))), + (PPCLK_VCLK << 16) | (min_freq & 0xffff), + NULL)), "Failed to set soft min vclk!", return ret); @@ -1835,7 +1834,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMinByFreq, - (PPCLK_DCLK << 16) | (min_freq & 0xffff))), + (PPCLK_DCLK << 16) | (min_freq & 0xffff), + NULL)), "Failed to set soft min dclk!", return ret); } @@ -1846,7 +1846,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMinByFreq, - (PPCLK_ECLK << 16) | (min_freq & 0xffff))), + (PPCLK_ECLK << 16) | (min_freq & 0xffff), + NULL)), "Failed to set soft min eclk!", return ret); } @@ -1857,7 +1858,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMinByFreq, - (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))), + (PPCLK_SOCCLK << 16) | (min_freq & 0xffff), + NULL)), "Failed to set soft min socclk!", return ret); } @@ -1868,7 +1870,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMinByFreq, - (PPCLK_FCLK << 16) | (min_freq & 0xffff))), + (PPCLK_FCLK << 16) | (min_freq & 0xffff), + NULL)), "Failed to set soft min fclk!", return ret); } @@ -1879,7 +1882,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetHardMinByFreq, - (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff))), + (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff), + NULL)), "Failed to set hard min dcefclk!", return ret); } @@ -1900,7 +1904,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMaxByFreq, - (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))), + (PPCLK_GFXCLK << 16) | (max_freq & 0xffff), + NULL)), "Failed to set soft max gfxclk!", return ret); } @@ -1911,7 +1916,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMaxByFreq, - (PPCLK_UCLK << 16) | (max_freq & 0xffff))), + (PPCLK_UCLK << 16) | (max_freq & 0xffff), + NULL)), "Failed to set soft max memclk!", return ret); } @@ -1922,14 
+1928,16 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMaxByFreq, - (PPCLK_VCLK << 16) | (max_freq & 0xffff))), + (PPCLK_VCLK << 16) | (max_freq & 0xffff), + NULL)), "Failed to set soft max vclk!", return ret); max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMaxByFreq, - (PPCLK_DCLK << 16) | (max_freq & 0xffff))), + (PPCLK_DCLK << 16) | (max_freq & 0xffff), + NULL)), "Failed to set soft max dclk!", return ret); } @@ -1940,7 +1948,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMaxByFreq, - (PPCLK_ECLK << 16) | (max_freq & 0xffff))), + (PPCLK_ECLK << 16) | (max_freq & 0xffff), + NULL)), "Failed to set soft max eclk!", return ret); } @@ -1951,7 +1960,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMaxByFreq, - (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))), + (PPCLK_SOCCLK << 16) | (max_freq & 0xffff), + NULL)), "Failed to set soft max socclk!", return ret); } @@ -1962,7 +1972,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMaxByFreq, - (PPCLK_FCLK << 16) | (max_freq & 0xffff))), + (PPCLK_FCLK << 16) | (max_freq & 0xffff), + NULL)), "Failed to set soft max fclk!", return ret); } @@ -2006,17 +2017,17 @@ static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr, if (max) { PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16))) == 0, + PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16), + clock)) == 0, "[GetClockRanges] Failed to get max clock from SMC!", return ret); - *clock = smum_get_argument(hwmgr); } else { PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, - (clock_select << 16))) == 0, + (clock_select << 16), + clock)) == 0, "[GetClockRanges] Failed to get min clock from SMC!", return ret); - *clock = smum_get_argument(hwmgr); } return 0; @@ -2122,10 +2133,10 @@ static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr, *clk_freq = 0; PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_GetDpmClockFreq, (clk_id << 16))) == 0, + PPSMC_MSG_GetDpmClockFreq, (clk_id << 16), + clk_freq)) == 0, "[GetCurrentClkFreq] Attempt to get Current Frequency Failed!", return ret); - *clk_freq = smum_get_argument(hwmgr); *clk_freq = *clk_freq * 100; @@ -2276,7 +2287,8 @@ int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr, clk_request = (clk_select << 16) | clk_freq; result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, - clk_request); + clk_request, + NULL); } } @@ -2312,7 +2324,8 @@ static int vega20_notify_smc_display_config_after_ps_adjustment( if (data->smu_features[GNLD_DS_DCEFCLK].supported) PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, - min_clocks.dcefClockInSR / 100)) == 0, + min_clocks.dcefClockInSR / 100, + NULL)) == 0, "Attempt to set divider for DCEFCLK Failed!", return ret); } else { @@ -2324,7 +2337,8 @@ static int 
vega20_notify_smc_display_config_after_ps_adjustment( dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, - (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)), + (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level, + NULL)), "[SetHardMinFreq] Set hard min uclk failed!", return ret); } @@ -2656,7 +2670,8 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr, return -EINVAL; ret = smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level); + PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level, + NULL); PP_ASSERT_WITH_CODE(!ret, "Failed to set min link dpm level!", return ret); @@ -3140,7 +3155,7 @@ static int vega20_set_mp1_state(struct pp_hwmgr *hwmgr, return 0; } - PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0, "[PrepareMp1] Failed!", return ret); @@ -3495,7 +3510,8 @@ static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr, dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, - (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)), + (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level, + NULL)), "[SetUclkToHightestDpmLevel] Set hard min uclk failed!", return ret); } @@ -3520,7 +3536,8 @@ static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr) dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinByFreq, - (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level)), + (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level, + NULL)), "[SetFclkToHightestDpmLevel] Set soft min fclk failed!", return ret); } @@ -3534,7 +3551,7 @@ static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr) int ret = 0; smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_NumOfDisplays, 0); + PPSMC_MSG_NumOfDisplays, 0, NULL); ret = vega20_set_uclk_to_highest_dpm_level(hwmgr, &data->dpm_table.mem_table); @@ -3565,7 +3582,8 @@ static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr) data->smu_features[GNLD_DPM_SOCCLK].supported) { result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, - hwmgr->display_config->num_display); + hwmgr->display_config->num_display, + NULL); } return result; @@ -4082,7 +4100,8 @@ out: workload_type = conv_power_profile_to_pplib_workload(power_profile_mode); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask, - 1 << workload_type); + 1 << workload_type, + NULL); hwmgr->power_profile_mode = power_profile_mode; @@ -4098,21 +4117,26 @@ static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr, { smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSystemVirtualDramAddrHigh, - virtual_addr_hi); + virtual_addr_hi, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSystemVirtualDramAddrLow, - virtual_addr_low); + virtual_addr_low, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DramLogSetDramAddrHigh, - mc_addr_hi); + mc_addr_hi, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DramLogSetDramAddrLow, - mc_addr_low); + mc_addr_low, + NULL); smum_send_msg_to_smc_with_parameter(hwmgr, 
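/*
 * Illustrative aside (not part of the patch): the SMU mailbox carries one
 * 32-bit argument, so 64-bit DMA addresses such as the CAC/DRAM-log
 * buffers here travel as two messages, high word then low word. The smu10
 * hunks later in this patch spell the same idiom with the kernel's
 * upper_32_bits()/lower_32_bits() helpers:
 *
 *     smum_send_msg_to_smc_with_parameter(hwmgr,
 *                     PPSMC_MSG_SetDriverDramAddrHigh,
 *                     upper_32_bits(mc_addr), NULL);
 *     smum_send_msg_to_smc_with_parameter(hwmgr,
 *                     PPSMC_MSG_SetDriverDramAddrLow,
 *                     lower_32_bits(mc_addr), NULL);
 */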
PPSMC_MSG_DramLogSetDramSize, - size); + size, + NULL); return 0; } @@ -4153,7 +4177,8 @@ static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire) (acquire ? PPSMC_MSG_RequestI2CBus : PPSMC_MSG_ReleaseI2CBus), - 0); + 0, + NULL); PP_ASSERT_WITH_CODE(!res, "[SmuI2CAccessBus] Failed to access bus!", return res); return res; @@ -4170,7 +4195,8 @@ static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr, return -EINVAL; } - ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state); + ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state, + NULL); if (ret) pr_err("SetDfCstate failed!\n"); @@ -4184,7 +4210,8 @@ static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr, ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetXgmiMode, - pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3); + pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3, + NULL); if (ret) pr_err("SetXgmiPstate failed!\n"); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c index a0bfb65cc5d6..d7cc3d2d9e17 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c @@ -36,7 +36,8 @@ int vega20_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) if (data->smu_features[GNLD_PPT].enabled) return smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetPptLimit, n); + PPSMC_MSG_SetPptLimit, n, + NULL); return 0; } @@ -51,7 +52,8 @@ static int vega20_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr, uint32_t adjust_percent) { return smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_OverDriveSetPercentage, adjust_percent); + PPSMC_MSG_OverDriveSetPercentage, adjust_percent, + NULL); } int vega20_power_control_set_level(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c index ede54e87e287..7add2f60f49c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c @@ -106,10 +106,10 @@ static int vega20_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm) int ret = 0; PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_GetCurrentRpm)) == 0, + PPSMC_MSG_GetCurrentRpm, + current_rpm)) == 0, "Attempt to get current RPM from SMC Failed!", return ret); - *current_rpm = smum_get_argument(hwmgr); return 0; } @@ -329,7 +329,8 @@ static int vega20_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanTemperatureTarget, - (uint32_t)table->FanTargetTemperature); + (uint32_t)table->FanTargetTemperature, + NULL); return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index ae2c318dd6fa..928eed220f93 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -405,7 +405,9 @@ struct smu_context bool pm_enabled; bool is_apu; - uint32_t smc_if_version; + uint32_t smc_driver_if_version; + uint32_t smc_fw_if_version; + uint32_t smc_fw_version; bool uploading_custom_pp_table; bool dc_controlled_by_gpio; @@ -580,11 +582,6 @@ int smu_check_fw_status(struct smu_context *smu); int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled); -#define smu_i2c_eeprom_init(smu, control) \ - ((smu)->ppt_funcs->i2c_eeprom_init ? 
(smu)->ppt_funcs->i2c_eeprom_init((control)) : -EINVAL) -#define smu_i2c_eeprom_fini(smu, control) \ - ((smu)->ppt_funcs->i2c_eeprom_fini ? (smu)->ppt_funcs->i2c_eeprom_fini((control)) : -EINVAL) - int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed); int smu_get_power_limit(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 2ffb666b97e6..15ed6cbdf366 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -743,6 +743,7 @@ struct pp_hwmgr { bool pm_en; bool pp_one_vf; struct mutex smu_lock; + struct mutex msg_lock; uint32_t pp_table_version; void *device; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h index ce5b5011c122..8b82059d97e7 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h @@ -82,8 +82,8 @@ // Other #define FEATURE_OUT_OF_BAND_MONITOR_BIT 24 #define FEATURE_TEMP_DEPENDENT_VMIN_BIT 25 +#define FEATURE_PER_PART_VMIN_BIT 26 -#define FEATURE_SPARE_26_BIT 26 #define FEATURE_SPARE_27_BIT 27 #define FEATURE_SPARE_28_BIT 28 #define FEATURE_SPARE_29_BIT 29 @@ -154,6 +154,7 @@ #define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << FEATURE_OUT_OF_BAND_MONITOR_BIT ) #define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_BIT ) +#define FEATURE_PER_PART_VMIN_MASK (1 << FEATURE_PER_PART_VMIN_BIT ) //FIXME need updating @@ -628,8 +629,14 @@ typedef struct { uint16_t BasePerformanceFrequencyCap; //In Mhz uint16_t MaxPerformanceFrequencyCap; //In Mhz + // Per-Part Vmin + uint16_t VDDGFX_VminLow; // mv Q2 + uint16_t VDDGFX_TVminLow; //Celcius + uint16_t VDDGFX_VminLow_HiTemp; // mv Q2 + uint16_t VDDGFX_VminLow_LoTemp; // mv Q2 + // SECTION: Reserved - uint32_t Reserved[9]; + uint32_t Reserved[7]; // SECTION: BOARD PARAMETERS @@ -869,6 +876,10 @@ typedef struct { uint8_t Mem_DownHystLimit; uint16_t Mem_Fps; + uint32_t BusyThreshold; // Q16 + uint32_t BusyHyst; + uint32_t IdleHyst; + uint32_t MmHubPadding[8]; // SMU internal use } DpmActivityMonitorCoeffInt_t; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h index 2f85a34c0591..e9315eb5b48e 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h @@ -27,7 +27,7 @@ // *** IMPORTANT *** // SMU TEAM: Always increment the interface version if // any structure is changed in this file -#define SMU12_DRIVER_IF_VERSION 11 +#define SMU12_DRIVER_IF_VERSION 14 typedef struct { int32_t value; @@ -154,15 +154,19 @@ typedef enum { } CLOCK_IDs_e; // Throttler Status Bitmask -#define THROTTLER_STATUS_BIT_SPL 0 -#define THROTTLER_STATUS_BIT_FPPT 1 -#define THROTTLER_STATUS_BIT_SPPT 2 -#define THROTTLER_STATUS_BIT_SPPT_APU 3 -#define THROTTLER_STATUS_BIT_THM_CORE 4 -#define THROTTLER_STATUS_BIT_THM_GFX 5 -#define THROTTLER_STATUS_BIT_THM_SOC 6 -#define THROTTLER_STATUS_BIT_TDC_VDD 7 -#define THROTTLER_STATUS_BIT_TDC_SOC 8 +#define THROTTLER_STATUS_BIT_SPL 0 +#define THROTTLER_STATUS_BIT_FPPT 1 +#define THROTTLER_STATUS_BIT_SPPT 2 +#define THROTTLER_STATUS_BIT_SPPT_APU 3 +#define THROTTLER_STATUS_BIT_THM_CORE 4 +#define THROTTLER_STATUS_BIT_THM_GFX 5 +#define THROTTLER_STATUS_BIT_THM_SOC 6 +#define THROTTLER_STATUS_BIT_TDC_VDD 7 +#define THROTTLER_STATUS_BIT_TDC_SOC 8 +#define 
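/*
 * Illustrative aside (not part of the patch): the SMU12 interface bump
 * from 11 to 14 in these hunks covers real layout and unit changes: the
 * throttler status mask gains PROCHOT and EDC bits, four TDC/EDC
 * telemetry fields are added to SmuMetrics_t, and several power fields
 * (CurrentSocketPower, StapmOriginalLimit, StapmCurrentLimit, ApuPower,
 * dGpuPower) are re-documented from milliwatts to watts. A consumer that
 * kept the old scaling would misreport by three orders of magnitude;
 * hypothetically:
 *
 *     // metrics->CurrentSocketPower is now in W, not mW
 *     uint32_t socket_power_mw = metrics->CurrentSocketPower * 1000;
 *
 * That is the kind of skew the driver-if version warning in
 * smu_v12_0_check_fw_version() exists to flag.
 */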
THROTTLER_STATUS_BIT_PROCHOT_CPU 9 +#define THROTTLER_STATUS_BIT_PROCHOT_GFX 10 +#define THROTTLER_STATUS_BIT_EDC_CPU 11 +#define THROTTLER_STATUS_BIT_EDC_GFX 12 typedef struct { uint16_t ClockFrequency[CLOCK_COUNT]; //[MHz] @@ -180,7 +184,7 @@ typedef struct { uint16_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_SOC uint16_t FanPwm; //[milli] - uint16_t CurrentSocketPower; //[mW] + uint16_t CurrentSocketPower; //[W] uint16_t CoreFrequency[8]; //[MHz] uint16_t CorePower[8]; //[mW] @@ -193,10 +197,16 @@ typedef struct { uint16_t ThrottlerStatus; uint16_t spare; - uint16_t StapmOriginalLimit; //[mW] - uint16_t StapmCurrentLimit; //[mW] - uint16_t ApuPower; //[mW] - uint16_t dGpuPower; //[mW] + uint16_t StapmOriginalLimit; //[W] + uint16_t StapmCurrentLimit; //[W] + uint16_t ApuPower; //[W] + uint16_t dGpuPower; //[W] + + uint16_t VddTdcValue; //[mA] + uint16_t SocTdcValue; //[mA] + uint16_t VddEdcValue; //[mA] + uint16_t SocEdcValue; //[mA] + uint16_t reserve[2]; } SmuMetrics_t; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h index 674e426ed59b..6b3b451a8018 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h @@ -27,8 +27,8 @@ #define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU11_DRIVER_IF_VERSION_VG20 0x13 -#define SMU11_DRIVER_IF_VERSION_ARCT 0x12 -#define SMU11_DRIVER_IF_VERSION_NV10 0x35 +#define SMU11_DRIVER_IF_VERSION_ARCT 0x14 +#define SMU11_DRIVER_IF_VERSION_NV10 0x36 #define SMU11_DRIVER_IF_VERSION_NV12 0x33 #define SMU11_DRIVER_IF_VERSION_NV14 0x36 @@ -37,7 +37,6 @@ #define MP0_SRAM 0x03900000 #define MP1_Public 0x03b00000 #define MP1_SRAM 0x03c00004 -#define MP1_SMC_SIZE 0x40000 /* address block */ #define smnMP1_FIRMWARE_FLAGS 0x3010024 diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index c5288831aa15..ad100b533d04 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -81,16 +81,15 @@ enum SMU10_TABLE_ID { SMU10_CLOCKTABLE, }; -extern uint32_t smum_get_argument(struct pp_hwmgr *hwmgr); - extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table); extern int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr); -extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg); +extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t *resp); extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, - uint16_t msg, uint32_t parameter); + uint16_t msg, uint32_t parameter, + uint32_t *resp); extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 15030284b444..c94270f7c198 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -423,6 +423,7 @@ static int navi10_append_powerplay_table(struct smu_context *smu) struct smu_table_context *table_context = &smu->smu_table; PPTable_t *smc_pptable = table_context->driver_pptable; struct atom_smc_dpm_info_v4_5 *smc_dpm_table; + struct atom_smc_dpm_info_v4_7 *smc_dpm_table_v4_7; int index, ret; index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, @@ -433,77 +434,33 @@ static int navi10_append_powerplay_table(struct smu_context *smu) if (ret) return ret; - memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers, - sizeof(I2cControllerConfig_t) * 
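/*
 * Illustrative aside (not part of the patch): the navi10 rework that
 * follows replaces the long field-by-field copy of board parameters with
 * one memcpy selected by the atom table's content revision (5 for
 * nv10/nv14, 7 for nv12):
 *
 *     memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
 *            sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));
 *
 * Copying "everything after the header" in one shot relies on the atom
 * table's payload, starting at I2cControllers, mirroring the matching
 * tail of PPTable_t -- an inferred layout assumption that the new switch
 * on format/content revision exists to police.
 */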
NUM_I2C_CONTROLLERS); - - /* SVI2 Board Parameters */ - smc_pptable->MaxVoltageStepGfx = smc_dpm_table->MaxVoltageStepGfx; - smc_pptable->MaxVoltageStepSoc = smc_dpm_table->MaxVoltageStepSoc; - smc_pptable->VddGfxVrMapping = smc_dpm_table->VddGfxVrMapping; - smc_pptable->VddSocVrMapping = smc_dpm_table->VddSocVrMapping; - smc_pptable->VddMem0VrMapping = smc_dpm_table->VddMem0VrMapping; - smc_pptable->VddMem1VrMapping = smc_dpm_table->VddMem1VrMapping; - smc_pptable->GfxUlvPhaseSheddingMask = smc_dpm_table->GfxUlvPhaseSheddingMask; - smc_pptable->SocUlvPhaseSheddingMask = smc_dpm_table->SocUlvPhaseSheddingMask; - smc_pptable->ExternalSensorPresent = smc_dpm_table->ExternalSensorPresent; - smc_pptable->Padding8_V = smc_dpm_table->Padding8_V; - - /* Telemetry Settings */ - smc_pptable->GfxMaxCurrent = smc_dpm_table->GfxMaxCurrent; - smc_pptable->GfxOffset = smc_dpm_table->GfxOffset; - smc_pptable->Padding_TelemetryGfx = smc_dpm_table->Padding_TelemetryGfx; - smc_pptable->SocMaxCurrent = smc_dpm_table->SocMaxCurrent; - smc_pptable->SocOffset = smc_dpm_table->SocOffset; - smc_pptable->Padding_TelemetrySoc = smc_dpm_table->Padding_TelemetrySoc; - smc_pptable->Mem0MaxCurrent = smc_dpm_table->Mem0MaxCurrent; - smc_pptable->Mem0Offset = smc_dpm_table->Mem0Offset; - smc_pptable->Padding_TelemetryMem0 = smc_dpm_table->Padding_TelemetryMem0; - smc_pptable->Mem1MaxCurrent = smc_dpm_table->Mem1MaxCurrent; - smc_pptable->Mem1Offset = smc_dpm_table->Mem1Offset; - smc_pptable->Padding_TelemetryMem1 = smc_dpm_table->Padding_TelemetryMem1; - - /* GPIO Settings */ - smc_pptable->AcDcGpio = smc_dpm_table->AcDcGpio; - smc_pptable->AcDcPolarity = smc_dpm_table->AcDcPolarity; - smc_pptable->VR0HotGpio = smc_dpm_table->VR0HotGpio; - smc_pptable->VR0HotPolarity = smc_dpm_table->VR0HotPolarity; - smc_pptable->VR1HotGpio = smc_dpm_table->VR1HotGpio; - smc_pptable->VR1HotPolarity = smc_dpm_table->VR1HotPolarity; - smc_pptable->GthrGpio = smc_dpm_table->GthrGpio; - smc_pptable->GthrPolarity = smc_dpm_table->GthrPolarity; - - /* LED Display Settings */ - smc_pptable->LedPin0 = smc_dpm_table->LedPin0; - smc_pptable->LedPin1 = smc_dpm_table->LedPin1; - smc_pptable->LedPin2 = smc_dpm_table->LedPin2; - smc_pptable->padding8_4 = smc_dpm_table->padding8_4; - - /* GFXCLK PLL Spread Spectrum */ - smc_pptable->PllGfxclkSpreadEnabled = smc_dpm_table->PllGfxclkSpreadEnabled; - smc_pptable->PllGfxclkSpreadPercent = smc_dpm_table->PllGfxclkSpreadPercent; - smc_pptable->PllGfxclkSpreadFreq = smc_dpm_table->PllGfxclkSpreadFreq; - - /* GFXCLK DFLL Spread Spectrum */ - smc_pptable->DfllGfxclkSpreadEnabled = smc_dpm_table->DfllGfxclkSpreadEnabled; - smc_pptable->DfllGfxclkSpreadPercent = smc_dpm_table->DfllGfxclkSpreadPercent; - smc_pptable->DfllGfxclkSpreadFreq = smc_dpm_table->DfllGfxclkSpreadFreq; - - /* UCLK Spread Spectrum */ - smc_pptable->UclkSpreadEnabled = smc_dpm_table->UclkSpreadEnabled; - smc_pptable->UclkSpreadPercent = smc_dpm_table->UclkSpreadPercent; - smc_pptable->UclkSpreadFreq = smc_dpm_table->UclkSpreadFreq; - - /* SOCCLK Spread Spectrum */ - smc_pptable->SoclkSpreadEnabled = smc_dpm_table->SoclkSpreadEnabled; - smc_pptable->SocclkSpreadPercent = smc_dpm_table->SocclkSpreadPercent; - smc_pptable->SocclkSpreadFreq = smc_dpm_table->SocclkSpreadFreq; - - /* Total board power */ - smc_pptable->TotalBoardPower = smc_dpm_table->TotalBoardPower; - smc_pptable->BoardPadding = smc_dpm_table->BoardPadding; - - /* Mvdd Svi2 Div Ratio Setting */ - smc_pptable->MvddRatio = smc_dpm_table->MvddRatio; + pr_info("smc_dpm_info table 
revision(format.content): %d.%d\n", + smc_dpm_table->table_header.format_revision, + smc_dpm_table->table_header.content_revision); + + if (smc_dpm_table->table_header.format_revision != 4) { + pr_err("smc_dpm_info table format revision is not 4!\n"); + return -EINVAL; + } + + switch (smc_dpm_table->table_header.content_revision) { + case 5: /* nv10 and nv14 */ + memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers, + sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header)); + break; + case 7: /* nv12 */ + ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL, + (uint8_t **)&smc_dpm_table_v4_7); + if (ret) + return ret; + memcpy(smc_pptable->I2cControllers, smc_dpm_table_v4_7->I2cControllers, + sizeof(*smc_dpm_table_v4_7) - sizeof(smc_dpm_table_v4_7->table_header)); + break; + default: + pr_err("smc_dpm_info with unsupported content revision %d!\n", + smc_dpm_table->table_header.content_revision); + return -EINVAL; + } if (adev->pm.pp_feature & PP_GFXOFF_MASK) { /* TODO: remove it once SMU fw fix it */ diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index ff73a735b888..67476047c067 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -296,6 +296,8 @@ static int renoir_print_clk_levels(struct smu_context *smu, for (i = 0; i < count; i++) { GET_DPM_CUR_FREQ(clk_table, clk_type, i, value); + if (!value) + continue; size += sprintf(buf + size, "%d: %uMhz %s\n", i, value, cur_value == value ? "*" : ""); if (cur_value == value) @@ -847,7 +849,7 @@ static int renoir_get_power_profile_mode(struct smu_context *smu, uint32_t i, size = 0; int16_t workload_type = 0; - if (!smu->pm_enabled || !buf) + if (!buf) return -EINVAL; for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { @@ -895,12 +897,17 @@ static int renoir_read_sensor(struct smu_context *smu, static bool renoir_is_dpm_running(struct smu_context *smu) { + struct amdgpu_device *adev = smu->adev; + /* - * Util now, the pmfw hasn't exported the interface of SMU + * Until now, the pmfw hasn't exported the interface of SMU * feature mask to APU SKU so just force on all the feature * at early initial stage. */ - return true; + if (adev->in_suspend) + return false; + else + return true; } @@ -950,6 +957,6 @@ static const struct pptable_funcs renoir_ppt_funcs = { void renoir_set_ppt_funcs(struct smu_context *smu) { smu->ppt_funcs = &renoir_ppt_funcs; - smu->smc_if_version = SMU12_DRIVER_IF_VERSION; + smu->smc_driver_if_version = SMU12_DRIVER_IF_VERSION; smu->is_apu = true; } diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h index 40c35bcc5a0a..c97444841abc 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_internal.h +++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h @@ -214,4 +214,9 @@ static inline int smu_send_smc_msg(struct smu_context *smu, enum smu_message_typ #define smu_set_power_source(smu, power_src) \ ((smu)->ppt_funcs->set_power_source ? (smu)->ppt_funcs->set_power_source((smu), (power_src)) : 0) +#define smu_i2c_eeprom_init(smu, control) \ + ((smu)->ppt_funcs->i2c_eeprom_init ? (smu)->ppt_funcs->i2c_eeprom_init((control)) : 0) +#define smu_i2c_eeprom_fini(smu, control) \ + ((smu)->ppt_funcs->i2c_eeprom_fini ? 
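/*
 * Illustrative aside (not part of the patch): the smu_i2c_eeprom_init/
 * fini wrappers removed from amdgpu_smu.h earlier in this patch are
 * re-added here in smu_internal.h with a different fallback -- a missing
 * callback is now a no-op rather than an error:
 *
 *     // before (amdgpu_smu.h): no callback meant failure
 *     ((smu)->ppt_funcs->i2c_eeprom_init ? ...((control)) : -EINVAL)
 *     // after (smu_internal.h): no callback is silently fine
 *     ((smu)->ppt_funcs->i2c_eeprom_init ? ...((control)) : 0)
 *
 * Presumably this keeps ASICs whose ppt_funcs supply no EEPROM I2C hooks
 * from failing the common paths; that rationale is inferred from the
 * diff, not stated in it.
 */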
(smu)->ppt_funcs->i2c_eeprom_fini((control)) : 0) + #endif diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 655ba4fb05dc..0045b54b19ed 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -201,13 +201,15 @@ int smu_v11_0_load_microcode(struct smu_context *smu) const struct smc_firmware_header_v1_0 *hdr; uint32_t addr_start = MP1_SRAM; uint32_t i; + uint32_t smc_fw_size; uint32_t mp1_fw_flags; hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; src = (const uint32_t *)(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + smc_fw_size = hdr->header.ucode_size_bytes; - for (i = 1; i < MP1_SMC_SIZE/4 - 1; i++) { + for (i = 1; i < smc_fw_size/4 - 1; i++) { WREG32_PCIE(addr_start, src[i]); addr_start += 4; } @@ -264,23 +266,23 @@ int smu_v11_0_check_fw_version(struct smu_context *smu) switch (smu->adev->asic_type) { case CHIP_VEGA20: - smu->smc_if_version = SMU11_DRIVER_IF_VERSION_VG20; + smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VG20; break; case CHIP_ARCTURUS: - smu->smc_if_version = SMU11_DRIVER_IF_VERSION_ARCT; + smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT; break; case CHIP_NAVI10: - smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10; + smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10; break; case CHIP_NAVI12: - smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV12; + smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12; break; case CHIP_NAVI14: - smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14; + smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14; break; default: pr_err("smu unsupported asic type:%d.\n", smu->adev->asic_type); - smu->smc_if_version = SMU11_DRIVER_IF_VERSION_INV; + smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV; break; } @@ -292,10 +294,10 @@ int smu_v11_0_check_fw_version(struct smu_context *smu) * Considering above, we just leave user a warning message instead * of halt driver loading. 
*/ - if (if_version != smu->smc_if_version) { + if (if_version != smu->smc_driver_if_version) { pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, " "smu fw version = 0x%08x (%d.%d.%d)\n", - smu->smc_if_version, if_version, + smu->smc_driver_if_version, if_version, smu_version, smu_major, smu_minor, smu_debug); pr_warn("SMU driver if version not matched\n"); } @@ -479,8 +481,6 @@ int smu_v11_0_init_power(struct smu_context *smu) { struct smu_power_context *smu_power = &smu->smu_power; - if (!smu->pm_enabled) - return 0; if (smu_power->power_context || smu_power->power_context_size != 0) return -EINVAL; @@ -497,8 +497,6 @@ int smu_v11_0_fini_power(struct smu_context *smu) { struct smu_power_context *smu_power = &smu->smu_power; - if (!smu->pm_enabled) - return 0; if (!smu_power->power_context || smu_power->power_context_size == 0) return -EINVAL; @@ -783,8 +781,6 @@ int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; - if (!smu->pm_enabled) - return 0; if (!table_context) return -EINVAL; @@ -835,9 +831,6 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) { int ret = 0; - if (!smu->pm_enabled) - return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL); return ret; } @@ -932,8 +925,6 @@ int smu_v11_0_notify_display_change(struct smu_context *smu) { int ret = 0; - if (!smu->pm_enabled) - return ret; if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) && smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM) ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL); @@ -948,9 +939,6 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock, int ret = 0; int clk_id; - if (!smu->pm_enabled) - return ret; - if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) || (smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0)) return 0; @@ -1205,9 +1193,6 @@ int smu_v11_0_start_thermal_control(struct smu_context *smu) struct smu_temperature_range range; struct amdgpu_device *adev = smu->adev; - if (!smu->pm_enabled) - return ret; - memcpy(&range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range)); ret = smu_get_thermal_temperature_range(smu, &range); @@ -1321,9 +1306,6 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu, enum smu_clk_type clk_select = 0; uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; - if (!smu->pm_enabled) - return -EINVAL; - if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) || smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { switch (clk_type) { diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c index 169ebdad87b8..4023d10fb49b 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c @@ -32,13 +32,15 @@ #include "asic_reg/mp/mp_12_0_0_offset.h" #include "asic_reg/mp/mp_12_0_0_sh_mask.h" +#include "asic_reg/smuio/smuio_12_0_0_offset.h" +#include "asic_reg/smuio/smuio_12_0_0_sh_mask.h" -#define smnMP1_FIRMWARE_FLAGS 0x3010024 +// because some SMU12 based ASICs use older ip offset tables +// we should undefine this register from the smuio12 header +// to prevent confusion down the road +#undef mmPWR_MISC_CNTL_STATUS -#define mmSMUIO_GFX_MISC_CNTL 0x00c8 -#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0 -#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L -#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1 +#define smnMP1_FIRMWARE_FLAGS 
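/*
 * Illustrative aside (not part of the patch): two housekeeping themes run
 * through the smu_v11_0.c hunks above. First, the microcode upload is now
 * bounded by the size recorded in the firmware header rather than the
 * fixed (and now-deleted) MP1_SMC_SIZE window:
 *
 *     smc_fw_size = hdr->header.ucode_size_bytes;
 *     for (i = 1; i < smc_fw_size / 4 - 1; i++) { ... }
 *
 * Second, the scattered "if (!smu->pm_enabled) return 0;" guards are
 * dropped from the low-level helpers, leaving pm_enabled policy to the
 * callers. The smu_v12_0.c hunk here makes the opposite kind of cleanup:
 * it pulls SMUIO register constants from the smuio_12_0_0 headers instead
 * of hand-rolled #defines, #undef-ing mmPWR_MISC_CNTL_STATUS because, per
 * the new comment, some SMU12-based ASICs use older IP offset tables.
 */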
0x3010024 int smu_v12_0_send_msg_without_waiting(struct smu_context *smu, uint16_t msg) @@ -158,10 +160,10 @@ int smu_v12_0_check_fw_version(struct smu_context *smu) * Considering above, we just leave user a warning message instead * of halt driver loading. */ - if (if_version != smu->smc_if_version) { + if (if_version != smu->smc_driver_if_version) { pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, " "smu fw version = 0x%08x (%d.%d.%d)\n", - smu->smc_if_version, if_version, + smu->smc_driver_if_version, if_version, smu_version, smu_major, smu_minor, smu_debug); pr_warn("SMU driver if version not matched\n"); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index 868e2d5f6e62..85e5b1ed22c2 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -2780,7 +2780,7 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr, if (setting->bupdate_sclk) { if (!data->sclk_dpm_key_disabled) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL); for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) { if (levels[i].ActivityLevel != cpu_to_be16(setting->sclk_activity)) { @@ -2810,12 +2810,12 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr, } } if (!data->sclk_dpm_key_disabled) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL); } if (setting->bupdate_mclk) { if (!data->mclk_dpm_key_disabled) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL); for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) { if (mclk_levels[i].ActivityLevel != cpu_to_be16(setting->mclk_activity)) { @@ -2845,7 +2845,7 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr, } } if (!data->mclk_dpm_key_disabled) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL); } return 0; } @@ -2881,8 +2881,9 @@ static int ci_update_uvd_smc_table(struct pp_hwmgr *hwmgr) if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_UVDDPM)) break; } - ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask, - data->dpm_level_enable_mask.uvd_dpm_enable_mask); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask, + data->dpm_level_enable_mask.uvd_dpm_enable_mask, + NULL); return 0; } @@ -2912,8 +2913,9 @@ static int ci_update_vce_smc_table(struct pp_hwmgr *hwmgr) if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_VCEDPM)) break; } - ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask, - data->dpm_level_enable_mask.vce_dpm_enable_mask); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask, + data->dpm_level_enable_mask.vce_dpm_enable_mask, + NULL); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 32ebb383c456..ecb9ee46d6b3 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -137,9 +137,7 @@ static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1); - cgs_write_register(hwmgr->device, 
mmSMC_MSG_ARG_0, 0x20000); - cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); - PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test, 0x20000, NULL); /* Wait for done bit to be set */ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, @@ -203,8 +201,9 @@ static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr) struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); if (0 != smu_data->avfs_btc_param) { - if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) { + if (0 != smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param, + NULL)) { pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed"); result = -EINVAL; } @@ -1913,7 +1912,8 @@ static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr) if (mask) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LedConfig, - mask); + mask, + NULL); return 0; } @@ -2220,14 +2220,16 @@ static int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) res = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanMinPwm, hwmgr->thermal_controller. - advanceFanControlParameters.ucMinimumPWMLimit); + advanceFanControlParameters.ucMinimumPWMLimit, + NULL); if (!res && hwmgr->thermal_controller. advanceFanControlParameters.ulMinFanSCLKAcousticLimit) res = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanSclkTarget, hwmgr->thermal_controller. - advanceFanControlParameters.ulMinFanSCLKAcousticLimit); + advanceFanControlParameters.ulMinFanSCLKAcousticLimit, + NULL); if (res) phm_cap_unset(hwmgr->platform_descriptor.platformCaps, @@ -2242,7 +2244,7 @@ static int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr) if (!hwmgr->avfs_supported) return 0; - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL); return 0; } @@ -2390,7 +2392,8 @@ static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_StablePState)) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask, - (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); + (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel), + NULL); return 0; } @@ -2422,7 +2425,8 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask, - (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); + (uint32_t)1 << smu_data->smc_state_table.VceBootLevel, + NULL); return 0; } @@ -2569,7 +2573,7 @@ static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr, if (setting->bupdate_sclk) { if (!data->sclk_dpm_key_disabled) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL); for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) { if (levels[i].ActivityLevel != cpu_to_be16(setting->sclk_activity)) { @@ -2599,12 +2603,12 @@ static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr, } } if (!data->sclk_dpm_key_disabled) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL); } if (setting->bupdate_mclk) { if (!data->mclk_dpm_key_disabled) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL); for (i = 0; i < 
smu_data->smc_state_table.MemoryDpmLevelCount; i++) { if (mclk_levels[i].ActivityLevel != cpu_to_be16(setting->mclk_activity)) { @@ -2634,7 +2638,7 @@ static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr, } } if (!data->mclk_dpm_key_disabled) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL); } return 0; } @@ -2649,6 +2653,7 @@ const struct pp_smumgr_func fiji_smu_funcs = { .request_smu_load_specific_fw = NULL, .send_msg_to_smc = &smu7_send_msg_to_smc, .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter, + .get_argument = smu7_get_argument, .download_pptable_settings = NULL, .upload_pptable_settings = NULL, .update_smc_table = fiji_update_smc_table, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c index 732005c03a82..431ad2fd38df 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -2669,6 +2669,7 @@ const struct pp_smumgr_func iceland_smu_funcs = { .request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw, .send_msg_to_smc = &smu7_send_msg_to_smc, .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter, + .get_argument = smu7_get_argument, .download_pptable_settings = NULL, .upload_pptable_settings = NULL, .get_offsetof = iceland_get_offsetof, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 23c12018dbc1..c3d2e6dcf62a 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -99,7 +99,8 @@ static int polaris10_perform_btc(struct pp_hwmgr *hwmgr) struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); if (0 != smu_data->avfs_btc_param) { - if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) { + if (0 != smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param, + NULL)) { pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed"); result = -1; } @@ -2049,15 +2050,16 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr) return 0; smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting); + PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting, + NULL); - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL); /* Apply avfs cks-off voltages to avoid the overshoot * when switching to the highest sclk frequency */ if (data->apply_avfs_cks_off_voltage) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage, NULL); return 0; } @@ -2158,14 +2160,16 @@ static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) res = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanMinPwm, hwmgr->thermal_controller. - advanceFanControlParameters.ucMinimumPWMLimit); + advanceFanControlParameters.ucMinimumPWMLimit, + NULL); if (!res && hwmgr->thermal_controller. advanceFanControlParameters.ulMinFanSCLKAcousticLimit) res = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanSclkTarget, hwmgr->thermal_controller. 
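/*
 * Illustrative sketch (not part of the patch): the ".get_argument =
 * smu7_get_argument" entries added to the fiji and iceland function
 * tables above (and to polaris10 just below) are what let the common
 * smum layer service the new response pointer. smumgr.c itself is not
 * shown in this excerpt, but combined with the msg_lock mutex added to
 * struct pp_hwmgr earlier in the patch, the dispatcher plausibly reads:
 *
 *     int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg,
 *                              uint32_t *resp)
 *     {
 *             int ret;
 *
 *             mutex_lock(&hwmgr->msg_lock);  // serialize msg + arg regs
 *             ret = hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg);
 *             if (!ret && resp)
 *                     *resp = hwmgr->smumgr_funcs->get_argument(hwmgr);
 *             mutex_unlock(&hwmgr->msg_lock);
 *
 *             return ret;
 *     }
 *
 * Treat the body as a reconstruction under those assumptions, not as the
 * literal smumgr.c change.
 */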
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit); + advanceFanControlParameters.ulMinFanSCLKAcousticLimit, + NULL); if (res) phm_cap_unset(hwmgr->platform_descriptor.platformCaps, @@ -2202,7 +2206,8 @@ static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_StablePState)) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask, - (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); + (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel), + NULL); return 0; } @@ -2234,7 +2239,8 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask, - (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); + (uint32_t)1 << smu_data->smc_state_table.VceBootLevel, + NULL); return 0; } @@ -2485,7 +2491,7 @@ static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr, if (setting->bupdate_sclk) { if (!data->sclk_dpm_key_disabled) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL); for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) { if (levels[i].ActivityLevel != cpu_to_be16(setting->sclk_activity)) { @@ -2515,12 +2521,12 @@ static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr, } } if (!data->sclk_dpm_key_disabled) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL); } if (setting->bupdate_mclk) { if (!data->mclk_dpm_key_disabled) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL); for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) { if (mclk_levels[i].ActivityLevel != cpu_to_be16(setting->mclk_activity)) { @@ -2550,7 +2556,7 @@ static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr, } } if (!data->mclk_dpm_key_disabled) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL); } return 0; } @@ -2565,6 +2571,7 @@ const struct pp_smumgr_func polaris10_smu_funcs = { .request_smu_load_specific_fw = NULL, .send_msg_to_smc = smu7_send_msg_to_smc, .send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter, + .get_argument = smu7_get_argument, .download_pptable_settings = NULL, .upload_pptable_settings = NULL, .update_smc_table = polaris10_update_smc_table, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c index 2319400a3fcb..801ba9ca6094 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c @@ -126,15 +126,18 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr, "Invalid SMU Table version!", return -EINVAL;); PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, "Invalid SMU Table Length!", return -EINVAL;); - smu10_send_msg_to_smc_with_parameter(hwmgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, - upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)); - smu10_send_msg_to_smc_with_parameter(hwmgr, + upper_32_bits(priv->smu_tables.entry[table_id].mc_addr), + NULL); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, - lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)); - 
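The smu10 hunks around this point apply the same conversion to the SMU table-transfer handshake: the driver programs the table's DRAM address as two 32-bit halves, then asks the firmware to move the table. A condensed sketch with error handling elided (the helper name is hypothetical; upper_32_bits/lower_32_bits are the usual kernel macros):

    /* hypothetical condensation of smu10_copy_table_from_smc() */
    static void transfer_table_smu2dram(struct pp_hwmgr *hwmgr,
                                        uint64_t mc_addr, uint32_t table_id)
    {
            smum_send_msg_to_smc_with_parameter(hwmgr,
                            PPSMC_MSG_SetDriverDramAddrHigh,
                            upper_32_bits(mc_addr), NULL);
            smum_send_msg_to_smc_with_parameter(hwmgr,
                            PPSMC_MSG_SetDriverDramAddrLow,
                            lower_32_bits(mc_addr), NULL);
            smum_send_msg_to_smc_with_parameter(hwmgr,
                            PPSMC_MSG_TransferTableSmu2Dram, table_id, NULL);
            /* the caller flushes the HDP cache before reading the result */
    }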
smu10_send_msg_to_smc_with_parameter(hwmgr, + lower_32_bits(priv->smu_tables.entry[table_id].mc_addr), + NULL); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableSmu2Dram, - priv->smu_tables.entry[table_id].table_id); + priv->smu_tables.entry[table_id].table_id, + NULL); /* flush hdp cache */ amdgpu_asic_flush_hdp(adev, NULL); @@ -164,15 +167,18 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr, amdgpu_asic_flush_hdp(adev, NULL); - smu10_send_msg_to_smc_with_parameter(hwmgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, - upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)); - smu10_send_msg_to_smc_with_parameter(hwmgr, + upper_32_bits(priv->smu_tables.entry[table_id].mc_addr), + NULL); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, - lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)); - smu10_send_msg_to_smc_with_parameter(hwmgr, + lower_32_bits(priv->smu_tables.entry[table_id].mc_addr), + NULL); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableDram2Smu, - priv->smu_tables.entry[table_id].table_id); + priv->smu_tables.entry[table_id].table_id, + NULL); return 0; } @@ -181,9 +187,9 @@ static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr) { uint32_t smc_driver_if_version; - smu10_send_msg_to_smc(hwmgr, - PPSMC_MSG_GetDriverIfVersion); - smc_driver_if_version = smu10_read_arg_from_smc(hwmgr); + smum_send_msg_to_smc(hwmgr, + PPSMC_MSG_GetDriverIfVersion, + &smc_driver_if_version); if ((smc_driver_if_version != SMU10_DRIVER_IF_VERSION) && (smc_driver_if_version != SMU10_DRIVER_IF_VERSION + 1)) { @@ -217,8 +223,7 @@ static int smu10_start_smu(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = hwmgr->adev; - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion); - hwmgr->smu_version = smu10_read_arg_from_smc(hwmgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version); adev->pm.fw_version = hwmgr->smu_version >> 8; if (adev->rev_id < 0x8 && adev->pdev->device != 0x15d8 && diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 3f51d545e8ff..aae25243eb10 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -191,13 +191,6 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) return 0; } -int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg) -{ - cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg); - - return 0; -} - int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); @@ -207,25 +200,14 @@ int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, ui return smu7_send_msg_to_smc(hwmgr, msg); } -int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) +uint32_t smu7_get_argument(struct pp_hwmgr *hwmgr) { - cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter); - - return smu7_send_msg_to_smc_without_waiting(hwmgr, msg); + return cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); } int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr) { - cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000); - - cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); - - PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); - - if (1 != PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, 
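Where a caller actually needs the SMC's answer, the old two-step "send, then read the argument register" sequence collapses into one call, as in smu10_verify_smc_interface() and smu10_start_smu() above. Schematically:

    /* before: two calls, with a window in which another message could
     * clobber the argument register between them */
    smu10_send_msg_to_smc(hwmgr, PPSMC_MSG_GetDriverIfVersion);
    smc_driver_if_version = smu10_read_arg_from_smc(hwmgr);

    /* after: one call; the read happens under the wrapper's lock */
    smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetDriverIfVersion,
                         &smc_driver_if_version);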
SMC_RESP)) - pr_info("Failed to send Message.\n"); - - return 0; + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test, 0x20000, NULL); } enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type) @@ -353,12 +335,14 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */ if (hwmgr->not_vf) { - smu7_send_msg_to_smc_with_parameter(hwmgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, - upper_32_bits(smu_data->smu_buffer.mc_addr)); - smu7_send_msg_to_smc_with_parameter(hwmgr, + upper_32_bits(smu_data->smu_buffer.mc_addr), + NULL); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, - lower_32_bits(smu_data->smu_buffer.mc_addr)); + lower_32_bits(smu_data->smu_buffer.mc_addr), + NULL); } fw_to_load = UCODE_ID_RLC_G_MASK + UCODE_ID_SDMA0_MASK @@ -423,10 +407,16 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) } memcpy_toio(smu_data->header_buffer.kaddr, smu_data->toc, sizeof(struct SMU_DRAMData_TOC)); - smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr)); - smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr)); - - smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load); + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_DRV_DRAM_ADDR_HI, + upper_32_bits(smu_data->header_buffer.mc_addr), + NULL); + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_DRV_DRAM_ADDR_LO, + lower_32_bits(smu_data->header_buffer.mc_addr), + NULL); + + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load, NULL); r = smu7_check_fw_load_finish(hwmgr, fw_to_load); if (!r) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h index 01f0538fba6b..e7303dc8c260 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h @@ -60,11 +60,9 @@ int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr); bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr); int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg); -int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg); int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter); -int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, - uint16_t msg, uint32_t parameter); +uint32_t smu7_get_argument(struct pp_hwmgr *hwmgr); int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr); enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c index 7dca04a89217..76d4f12ceedf 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c @@ -610,18 +610,21 @@ static int smu8_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table) *table = (struct SMU8_Fusion_ClkTable *)smu8_smu->scratch_buffer[i].kaddr; - smu8_send_msg_to_smc_with_parameter(hwmgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetClkTableAddrHi, - upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr)); + upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr), + NULL); - smu8_send_msg_to_smc_with_parameter(hwmgr, + 
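In smu7_smumgr.c the *_without_waiting variants are deleted outright, presumably because a locked send/response pair must always wait for completion, and the open-coded "offset" self-test shrinks to a single wrapper call (0x20000 is the test argument carried over from the old code):

    int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr)
    {
            return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test,
                                                       0x20000, NULL);
    }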
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetClkTableAddrLo, - lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr)); + lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr), + NULL); - smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, - smu8_smu->toc_entry_clock_table); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, + smu8_smu->toc_entry_clock_table, + NULL); - smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram, NULL); return 0; } @@ -637,18 +640,21 @@ static int smu8_upload_pptable_settings(struct pp_hwmgr *hwmgr) break; } - smu8_send_msg_to_smc_with_parameter(hwmgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetClkTableAddrHi, - upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr)); + upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr), + NULL); - smu8_send_msg_to_smc_with_parameter(hwmgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetClkTableAddrLo, - lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr)); + lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr), + NULL); - smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, - smu8_smu->toc_entry_clock_table); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, + smu8_smu->toc_entry_clock_table, + NULL); - smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu, NULL); return 0; } @@ -671,25 +677,30 @@ static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr) smu8_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4); - smu8_send_msg_to_smc_with_parameter(hwmgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DriverDramAddrHi, - upper_32_bits(smu8_smu->toc_buffer.mc_addr)); + upper_32_bits(smu8_smu->toc_buffer.mc_addr), + NULL); - smu8_send_msg_to_smc_with_parameter(hwmgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DriverDramAddrLo, - lower_32_bits(smu8_smu->toc_buffer.mc_addr)); + lower_32_bits(smu8_smu->toc_buffer.mc_addr), + NULL); - smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs, NULL); - smu8_send_msg_to_smc_with_parameter(hwmgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, - smu8_smu->toc_entry_aram); - smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, - smu8_smu->toc_entry_power_profiling_index); + smu8_smu->toc_entry_aram, + NULL); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, + smu8_smu->toc_entry_power_profiling_index, + NULL); - smu8_send_msg_to_smc_with_parameter(hwmgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, - smu8_smu->toc_entry_initialize_index); + smu8_smu->toc_entry_initialize_index, + NULL); fw_to_check = UCODE_ID_RLC_G_MASK | UCODE_ID_SDMA0_MASK | @@ -860,11 +871,13 @@ static bool smu8_dpm_check_smu_features(struct pp_hwmgr *hwmgr, unsigned long check_feature) { int result; - unsigned long features; + uint32_t features; - result = smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0); + result = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_GetFeatureStatus, + 0, + &features); if (result == 0) { - features = smum_get_argument(hwmgr); if (features & check_feature) return true; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 4240aeec9000..b6fb48066841 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c 
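A nice side effect shows up in smu8_dpm_check_smu_features() just above: the feature status arrives through the response pointer, and the local variable shrinks from unsigned long to uint32_t to match the width of the SMC argument register. Sketch of the resulting logic:

    uint32_t features;
    int result;

    result = smum_send_msg_to_smc_with_parameter(hwmgr,
                            PPSMC_MSG_GetFeatureStatus, 0, &features);
    if (result == 0 && (features & check_feature))
            return true;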
@@ -103,14 +103,6 @@ int smum_process_firmware_header(struct pp_hwmgr *hwmgr) return 0; } -uint32_t smum_get_argument(struct pp_hwmgr *hwmgr) -{ - if (NULL != hwmgr->smumgr_funcs->get_argument) - return hwmgr->smumgr_funcs->get_argument(hwmgr); - - return 0; -} - uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value) { if (NULL != hwmgr->smumgr_funcs->get_mac_definition) @@ -135,22 +127,58 @@ int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr) return 0; } -int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) +int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t *resp) { - if (hwmgr == NULL || hwmgr->smumgr_funcs->send_msg_to_smc == NULL) + int ret = 0; + + if (hwmgr == NULL || + hwmgr->smumgr_funcs->send_msg_to_smc == NULL || + (resp && !hwmgr->smumgr_funcs->get_argument)) return -EINVAL; - return hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg); + mutex_lock(&hwmgr->msg_lock); + + ret = hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg); + if (ret) { + mutex_unlock(&hwmgr->msg_lock); + return ret; + } + + if (resp) + *resp = hwmgr->smumgr_funcs->get_argument(hwmgr); + + mutex_unlock(&hwmgr->msg_lock); + + return ret; } int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, - uint16_t msg, uint32_t parameter) + uint16_t msg, + uint32_t parameter, + uint32_t *resp) { + int ret = 0; + if (hwmgr == NULL || - hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL) + hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL || + (resp && !hwmgr->smumgr_funcs->get_argument)) return -EINVAL; - return hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter( + + mutex_lock(&hwmgr->msg_lock); + + ret = hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter( hwmgr, msg, parameter); + if (ret) { + mutex_unlock(&hwmgr->msg_lock); + return ret; + } + + if (resp) + *resp = hwmgr->smumgr_funcs->get_argument(hwmgr); + + mutex_unlock(&hwmgr->msg_lock); + + return ret; } int smum_init_smc_table(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index f19bac7ef7ba..398e7e3587de 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -2702,7 +2702,8 @@ static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_StablePState)) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask, - (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); + (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel), + NULL); return 0; } @@ -2733,7 +2734,8 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_StablePState)) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask, - (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); + (uint32_t)1 << smu_data->smc_state_table.VceBootLevel, + NULL); return 0; } @@ -3168,7 +3170,7 @@ static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr, if (setting->bupdate_sclk) { if (!data->sclk_dpm_key_disabled) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL); for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) { if (levels[i].ActivityLevel != cpu_to_be16(setting->sclk_activity)) { @@ -3198,12 +3200,12 @@ static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr, } } if (!data->sclk_dpm_key_disabled) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel); + 
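The smumgr.c hunk above is the heart of the rework. A new mutex, hwmgr->msg_lock (added to struct pp_hwmgr elsewhere in this series), makes the message send and the argument-register read atomic with respect to other senders; that is what allows the standalone smum_get_argument() to be deleted, since an unlocked read could race with the next message. A condensed restatement of the new wrapper, equivalent to the hunk above:

    int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t *resp)
    {
            int ret;

            if (!hwmgr || !hwmgr->smumgr_funcs->send_msg_to_smc ||
                (resp && !hwmgr->smumgr_funcs->get_argument))
                    return -EINVAL;

            mutex_lock(&hwmgr->msg_lock);
            ret = hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg);
            if (!ret && resp)
                    *resp = hwmgr->smumgr_funcs->get_argument(hwmgr);
            mutex_unlock(&hwmgr->msg_lock);

            return ret;
    }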
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL); } if (setting->bupdate_mclk) { if (!data->mclk_dpm_key_disabled) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL); for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) { if (mclk_levels[i].ActivityLevel != cpu_to_be16(setting->mclk_activity)) { @@ -3233,7 +3235,7 @@ static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr, } } if (!data->mclk_dpm_key_disabled) - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL); } return 0; } @@ -3248,6 +3250,7 @@ const struct pp_smumgr_func tonga_smu_funcs = { .request_smu_load_specific_fw = NULL, .send_msg_to_smc = &smu7_send_msg_to_smc, .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter, + .get_argument = smu7_get_argument, .download_pptable_settings = NULL, .upload_pptable_settings = NULL, .update_smc_table = tonga_update_smc_table, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c index 715564009089..1e222c5d91a4 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c @@ -47,15 +47,18 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, "Invalid SMU Table version!", return -EINVAL); PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, "Invalid SMU Table Length!", return -EINVAL); - smu9_send_msg_to_smc_with_parameter(hwmgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, - upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)); - smu9_send_msg_to_smc_with_parameter(hwmgr, + upper_32_bits(priv->smu_tables.entry[table_id].mc_addr), + NULL); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, - lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)); - smu9_send_msg_to_smc_with_parameter(hwmgr, + lower_32_bits(priv->smu_tables.entry[table_id].mc_addr), + NULL); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableSmu2Dram, - priv->smu_tables.entry[table_id].table_id); + priv->smu_tables.entry[table_id].table_id, + NULL); /* flush hdp cache */ amdgpu_asic_flush_hdp(adev, NULL); @@ -90,15 +93,18 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr, amdgpu_asic_flush_hdp(adev, NULL); - smu9_send_msg_to_smc_with_parameter(hwmgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, - upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)); - smu9_send_msg_to_smc_with_parameter(hwmgr, + upper_32_bits(priv->smu_tables.entry[table_id].mc_addr), + NULL); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, - lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)); - smu9_send_msg_to_smc_with_parameter(hwmgr, + lower_32_bits(priv->smu_tables.entry[table_id].mc_addr), + NULL); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableDram2Smu, - priv->smu_tables.entry[table_id].table_id); + priv->smu_tables.entry[table_id].table_id, + NULL); return 0; } @@ -118,17 +124,21 @@ int vega10_enable_smc_features(struct pp_hwmgr *hwmgr, return 0; return smum_send_msg_to_smc_with_parameter(hwmgr, - msg, feature_mask); + msg, feature_mask, NULL); } int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr, uint64_t *features_enabled) { + uint32_t enabled_features; + if (features_enabled == NULL) return -EINVAL; 
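As the vega10 hunks that follow show, vega10 reports its enabled SMU features in a single 32-bit response, which the driver then widens into the caller's 64-bit mask:

    uint32_t enabled_features;

    smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures,
                         &enabled_features);
    *features_enabled = enabled_features;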
- smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures); - *features_enabled = smu9_get_argument(hwmgr); + smum_send_msg_to_smc(hwmgr, + PPSMC_MSG_GetEnabledSmuFeatures, + &enabled_features); + *features_enabled = enabled_features; return 0; } @@ -150,12 +160,14 @@ static int vega10_set_tools_address(struct pp_hwmgr *hwmgr) struct vega10_smumgr *priv = hwmgr->smu_backend; if (priv->smu_tables.entry[TOOLSTABLE].mc_addr) { - smu9_send_msg_to_smc_with_parameter(hwmgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetToolsDramAddrHigh, - upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr)); - smu9_send_msg_to_smc_with_parameter(hwmgr, + upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr), + NULL); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetToolsDramAddrLow, - lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr)); + lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr), + NULL); } return 0; } @@ -167,11 +179,11 @@ static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr) uint32_t dev_id; uint32_t rev_id; - PP_ASSERT_WITH_CODE(!smu9_send_msg_to_smc(hwmgr, - PPSMC_MSG_GetDriverIfVersion), + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, + PPSMC_MSG_GetDriverIfVersion, + &smc_driver_if_version), "Attempt to get SMC IF Version Number Failed!", return -EINVAL); - smc_driver_if_version = smu9_get_argument(hwmgr); dev_id = adev->pdev->device; rev_id = adev->pdev->revision; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c index 275dbf65f1a0..f54df76537e4 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c @@ -50,18 +50,21 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr, "Invalid SMU Table version!", return -EINVAL); PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, "Invalid SMU Table Length!", return -EINVAL); - PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, - upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0, + upper_32_bits(priv->smu_tables.entry[table_id].mc_addr), + NULL) == 0, "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, - lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0, + lower_32_bits(priv->smu_tables.entry[table_id].mc_addr), + NULL) == 0, "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableSmu2Dram, - table_id) == 0, + table_id, + NULL) == 0, "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", return -EINVAL); @@ -98,19 +101,22 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr, amdgpu_asic_flush_hdp(adev, NULL); - PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, - upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0, + upper_32_bits(priv->smu_tables.entry[table_id].mc_addr), + NULL) == 0, "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;); - 
PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, - lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0, + lower_32_bits(priv->smu_tables.entry[table_id].mc_addr), + NULL) == 0, "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableDram2Smu, - table_id) == 0, + table_id, + NULL) == 0, "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!", return -EINVAL); @@ -126,21 +132,21 @@ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr, smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT); if (enable) { - PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0, + PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low, NULL) == 0, "[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0, + PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high, NULL) == 0, "[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!", return -EINVAL); } else { - PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0, + PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low, NULL) == 0, "[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0, + PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high, NULL) == 0, "[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!", return -EINVAL); } @@ -156,17 +162,17 @@ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr, if (features_enabled == NULL) return -EINVAL; - PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr, - PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0, + PP_ASSERT_WITH_CODE(smum_send_msg_to_smc(hwmgr, + PPSMC_MSG_GetEnabledSmuFeaturesLow, + &smc_features_low) == 0, "[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!", return -EINVAL); - smc_features_low = smu9_get_argument(hwmgr); - PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr, - PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0, + PP_ASSERT_WITH_CODE(smum_send_msg_to_smc(hwmgr, + PPSMC_MSG_GetEnabledSmuFeaturesHigh, + &smc_features_high) == 0, "[GetEnabledSMCFeatures] Attempt to get SMU features High failed!", return -EINVAL); - smc_features_high = smu9_get_argument(hwmgr); *features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) | (((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK)); @@ -192,12 +198,14 @@ static int vega12_set_tools_address(struct pp_hwmgr *hwmgr) (struct vega12_smumgr *)(hwmgr->smu_backend); if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) { - if (!smu9_send_msg_to_smc_with_parameter(hwmgr, + if (!smum_send_msg_to_smc_with_parameter(hwmgr, 
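vega12 (and vega20 further down) instead split the 64-bit feature mask across two 32-bit queries and reassemble it with the SMU_FEATURES_* shift/mask constants; with the PP_ASSERT_WITH_CODE error checking elided, the reassembly is:

    uint32_t lo, hi;

    smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeaturesLow, &lo);
    smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeaturesHigh, &hi);
    *features_enabled =
            ((((uint64_t)lo << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
             (((uint64_t)hi << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));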
PPSMC_MSG_SetToolsDramAddrHigh, - upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr))) - smu9_send_msg_to_smc_with_parameter(hwmgr, + upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr), + NULL)) + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetToolsDramAddrLow, - lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)); + lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr), + NULL); } return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c index 16aa171971d3..2fb97554134f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c @@ -175,18 +175,20 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, "Invalid SMU Table Length!", return -EINVAL); - PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, - upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0, + upper_32_bits(priv->smu_tables.entry[table_id].mc_addr), + NULL)) == 0, "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return ret); - PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, - lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0, + lower_32_bits(priv->smu_tables.entry[table_id].mc_addr), + NULL)) == 0, "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!", return ret); - PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_TransferTableSmu2Dram, table_id)) == 0, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_TransferTableSmu2Dram, table_id, NULL)) == 0, "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", return ret); @@ -224,18 +226,20 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr, amdgpu_asic_flush_hdp(adev, NULL); - PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, - upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0, + upper_32_bits(priv->smu_tables.entry[table_id].mc_addr), + NULL)) == 0, "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!", return ret); - PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, - lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0, + lower_32_bits(priv->smu_tables.entry[table_id].mc_addr), + NULL)) == 0, "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!", return ret); - PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_TransferTableDram2Smu, table_id)) == 0, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_TransferTableDram2Smu, table_id, NULL)) == 0, "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!", return ret); @@ -255,18 +259,22 @@ int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr, amdgpu_asic_flush_hdp(adev, NULL); - PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 
PPSMC_MSG_SetDriverDramAddrHigh, - upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0, + upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr), + NULL)) == 0, "[SetActivityMonitor] Attempt to Set Dram Addr High Failed!", return ret); - PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, - lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0, + lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr), + NULL)) == 0, "[SetActivityMonitor] Attempt to Set Dram Addr Low Failed!", return ret); - PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_TransferTableDram2Smu, TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_TransferTableDram2Smu, + TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16), + NULL)) == 0, "[SetActivityMonitor] Attempt to Transfer Table To SMU Failed!", return ret); @@ -281,19 +289,21 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr, struct amdgpu_device *adev = hwmgr->adev; int ret = 0; - PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, - upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0, + upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr), + NULL)) == 0, "[GetActivityMonitor] Attempt to Set Dram Addr High Failed!", return ret); - PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, - lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0, + lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr), + NULL)) == 0, "[GetActivityMonitor] Attempt to Set Dram Addr Low Failed!", return ret); - PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableSmu2Dram, - TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0, + TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16), NULL)) == 0, "[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!", return ret); @@ -316,21 +326,21 @@ int vega20_enable_smc_features(struct pp_hwmgr *hwmgr, smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT); if (enable) { - PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low)) == 0, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low, NULL)) == 0, "[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!", return ret); - PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high)) == 0, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high, NULL)) == 0, "[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!", return ret); } else { - PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, - 
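For the activity-monitor tables, vega20 packs two values into the one transfer argument: the low 16 bits select the table, the high 16 bits the workload whose coefficients are being moved, as in:

    smum_send_msg_to_smc_with_parameter(hwmgr,
                    PPSMC_MSG_TransferTableSmu2Dram,
                    TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16),
                    NULL);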
PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low)) == 0, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low, NULL)) == 0, "[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!", return ret); - PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high)) == 0, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high, NULL)) == 0, "[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!", return ret); } @@ -347,16 +357,16 @@ int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr, if (features_enabled == NULL) return -EINVAL; - PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr, - PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, + PPSMC_MSG_GetEnabledSmuFeaturesLow, + &smc_features_low)) == 0, "[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!", return ret); - smc_features_low = vega20_get_argument(hwmgr); - PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr, - PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, + PPSMC_MSG_GetEnabledSmuFeaturesHigh, + &smc_features_high)) == 0, "[GetEnabledSMCFeatures] Attempt to get SMU features High failed!", return ret); - smc_features_high = vega20_get_argument(hwmgr); *features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) | (((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK)); @@ -371,13 +381,15 @@ static int vega20_set_tools_address(struct pp_hwmgr *hwmgr) int ret = 0; if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) { - ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetToolsDramAddrHigh, - upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)); + upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr), + NULL); if (!ret) - ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetToolsDramAddrLow, - lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)); + lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr), + NULL); } return ret; @@ -389,14 +401,16 @@ int vega20_set_pptable_driver_address(struct pp_hwmgr *hwmgr) (struct vega20_smumgr *)(hwmgr->smu_backend); int ret = 0; - PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, - upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0, + upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr), + NULL)) == 0, "[SetPPtabeDriverAddress] Attempt to Set Dram Addr High Failed!", return ret); - PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, - lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0, + lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr), + NULL)) == 0, "[SetPPtabeDriverAddress] Attempt to Set Dram Addr Low Failed!", return ret); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c index b0e0d67cd54b..3da71a088b92 100644 --- 
a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c @@ -356,7 +356,8 @@ static int vegam_update_uvd_smc_table(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_StablePState)) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask, - (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); + (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel), + NULL); return 0; } @@ -388,7 +389,8 @@ static int vegam_update_vce_smc_table(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask, - (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); + (uint32_t)1 << smu_data->smc_state_table.VceBootLevel, + NULL); return 0; } @@ -1906,7 +1908,8 @@ static int vegam_enable_reconfig_cus(struct pp_hwmgr *hwmgr) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableModeSwitchRLCNotification, - adev->gfx.cu_info.number); + adev->gfx.cu_info.number, + NULL); return 0; } @@ -2060,7 +2063,7 @@ static int vegam_init_smc_table(struct pp_hwmgr *hwmgr) table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_AutomaticDCTransition) && - !smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UseNewGPIOScheme)) + !smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UseNewGPIOScheme, NULL)) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme); } else { @@ -2250,10 +2253,12 @@ int vegam_thermal_avfs_enable(struct pp_hwmgr *hwmgr) if (!hwmgr->avfs_supported) return 0; - ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL); if (!ret) { if (data->apply_avfs_cks_off_voltage) - ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage); + ret = smum_send_msg_to_smc(hwmgr, + PPSMC_MSG_ApplyAvfsCksOffVoltage, + NULL); } return ret; @@ -2279,6 +2284,7 @@ const struct pp_smumgr_func vegam_smu_funcs = { .request_smu_load_specific_fw = NULL, .send_msg_to_smc = smu7_send_msg_to_smc, .send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter, + .get_argument = smu7_get_argument, .process_firmware_header = vegam_process_firmware_header, .is_dpm_running = vegam_is_dpm_running, .get_mac_definition = vegam_get_mac_definition, diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index 3f1044326dcb..61923530b2e4 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -1796,7 +1796,7 @@ static int vega20_get_power_profile_mode(struct smu_context *smu, char *buf) "PD_Data_error_rate_coeff"}; int result = 0; - if (!smu->pm_enabled || !buf) + if (!buf) return -EINVAL; size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n", @@ -1887,8 +1887,6 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u smu->power_profile_mode = input[size]; - if (!smu->pm_enabled) - return ret; if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { pr_err("Invalid power profile mode %d\n", smu->power_profile_mode); return -EINVAL; diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index c693b2ca0329..11c97edde54d 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -3,42 +3,13 @@ # Makefile for the drm device driver. 
This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.

-ccflags-y := -Idrivers/gpu/drm/amd/include
-
 hostprogs := mkregtable

-clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
+targets := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h

-quiet_cmd_mkregtable = MKREGTABLE $@
+quiet_cmd_mkregtable = MKREG $@
 cmd_mkregtable = $(obj)/mkregtable $< > $@

-$(obj)/rn50_reg_safe.h: $(src)/reg_srcs/rn50 $(obj)/mkregtable
-	$(call if_changed,mkregtable)
-
-$(obj)/r100_reg_safe.h: $(src)/reg_srcs/r100 $(obj)/mkregtable
-	$(call if_changed,mkregtable)
-
-$(obj)/r200_reg_safe.h: $(src)/reg_srcs/r200 $(obj)/mkregtable
-	$(call if_changed,mkregtable)
-
-$(obj)/rv515_reg_safe.h: $(src)/reg_srcs/rv515 $(obj)/mkregtable
-	$(call if_changed,mkregtable)
-
-$(obj)/r300_reg_safe.h: $(src)/reg_srcs/r300 $(obj)/mkregtable
-	$(call if_changed,mkregtable)
-
-$(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
-	$(call if_changed,mkregtable)
-
-$(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
-	$(call if_changed,mkregtable)
-
-$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
-	$(call if_changed,mkregtable)
-
-$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable
-	$(call if_changed,mkregtable)
-
-$(obj)/cayman_reg_safe.h: $(src)/reg_srcs/cayman $(obj)/mkregtable
+$(obj)/%_reg_safe.h: $(src)/reg_srcs/% $(obj)/mkregtable FORCE
 	$(call if_changed,mkregtable)

 $(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index a9257bed3484..134aa2b01f90 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -65,13 +65,6 @@ static const struct ci_pt_defaults defaults_bonaire_xt =
 	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
 };

-static const struct ci_pt_defaults defaults_bonaire_pro =
-{
-	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
-	{ 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
-	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
-};
-
 static const struct ci_pt_defaults defaults_saturn_xt =
 {
 	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
@@ -79,13 +72,6 @@ static const struct ci_pt_defaults defaults_saturn_xt =
 	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
 };

-static const struct ci_pt_defaults defaults_saturn_pro =
-{
-	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
-	{ 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
-	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
-};
-
 static const struct ci_pt_config_reg didt_config_ci[] =
 {
 	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 2cb85dbe728f..a167e1c36d24 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -252,24 +252,6 @@ static const struct si_dte_data dte_data_tahiti =
 	false
 };

-static const struct si_dte_data dte_data_tahiti_le =
-{
-	{ 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
-	{ 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
-	0x5,
-	0xAFC8,
-	0x64,
-	0x32,
-	1,
-	0,
-	0x10,
-	{ 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
-	{ 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
-	{ 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
-	85,
-	true
-};
-
 static const struct si_dte_data dte_data_tahiti_pro =
 {
 	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
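Back in the radeon Makefile, the ten hand-written mkregtable rules collapse into a single pattern rule. Listing the generated headers in `targets` rather than `clean-files` is what, under the usual Kbuild semantics, lets the build system both clean them and, together with FORCE and if_changed, regenerate them whenever the command line changes. The old explicit rule for r100, for example, is now covered by the pattern as if it read:

    $(obj)/r100_reg_safe.h: $(src)/reg_srcs/r100 $(obj)/mkregtable FORCE
    	$(call if_changed,mkregtable)

The ci_dpm.c and si_dpm.c hunks simply drop static parameter tables (defaults_bonaire_pro, defaults_saturn_pro, dte_data_tahiti_le) that appear to have no remaining users.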